From 57eaca7d111416bfcc659b0c879544cb7b660fc6 Mon Sep 17 00:00:00 2001 From: Divyen Patel Date: Thu, 11 Feb 2021 13:25:57 -0800 Subject: [PATCH 01/36] using single instance of tagmanager for getting shared datastores in the topology --- pkg/common/cns-lib/vsphere/virtualmachine.go | 28 +++----------------- pkg/csi/service/node.go | 13 ++++++++- pkg/csi/service/vanilla/controller.go | 23 ++++++++++++++-- pkg/csi/service/vanilla/controller_test.go | 3 ++- pkg/csi/service/vanilla/nodes.go | 6 +++-- 5 files changed, 42 insertions(+), 31 deletions(-) diff --git a/pkg/common/cns-lib/vsphere/virtualmachine.go b/pkg/common/cns-lib/vsphere/virtualmachine.go index 44432353d7..3865279b4d 100644 --- a/pkg/common/cns-lib/vsphere/virtualmachine.go +++ b/pkg/common/cns-lib/vsphere/virtualmachine.go @@ -244,20 +244,9 @@ func (vm *VirtualMachine) GetAncestors(ctx context.Context) ([]mo.ManagedEntity, } // GetZoneRegion returns zone and region of the node vm -func (vm *VirtualMachine) GetZoneRegion(ctx context.Context, zoneCategoryName string, regionCategoryName string) (zone string, region string, err error) { +func (vm *VirtualMachine) GetZoneRegion(ctx context.Context, zoneCategoryName string, regionCategoryName string, tagManager *tags.Manager) (zone string, region string, err error) { log := logger.GetLogger(ctx) log.Debugf("GetZoneRegion: called with zoneCategoryName: %s, regionCategoryName: %s", zoneCategoryName, regionCategoryName) - tagManager, err := vm.GetTagManager(ctx) - if err != nil || tagManager == nil { - log.Errorf("failed to get tagManager. Error: %v", err) - return "", "", err - } - defer func() { - err = tagManager.Logout(ctx) - if err != nil { - log.Errorf("failed to logout tagManager. err: %v", err) - } - }() var objects []mo.ManagedEntity objects, err = vm.GetAncestors(ctx) if err != nil { @@ -305,21 +294,10 @@ func (vm *VirtualMachine) GetZoneRegion(ctx context.Context, zoneCategoryName st // IsInZoneRegion checks if virtual machine belongs to specified zone and region // This function returns true if virtual machine belongs to specified zone/region, else returns false. -func (vm *VirtualMachine) IsInZoneRegion(ctx context.Context, zoneCategoryName string, regionCategoryName string, zoneValue string, regionValue string) (bool, error) { +func (vm *VirtualMachine) IsInZoneRegion(ctx context.Context, zoneCategoryName string, regionCategoryName string, zoneValue string, regionValue string, tagManager *tags.Manager) (bool, error) { log := logger.GetLogger(ctx) log.Infof("IsInZoneRegion: called with zoneCategoryName: %s, regionCategoryName: %s, zoneValue: %s, regionValue: %s", zoneCategoryName, regionCategoryName, zoneValue, regionValue) - tagManager, err := vm.GetTagManager(ctx) - if err != nil || tagManager == nil { - log.Errorf("failed to get tagManager. Error: %v", err) - return false, err - } - defer func() { - err = tagManager.Logout(ctx) - if err != nil { - log.Errorf("failed to logout tagManager. 
err: %v", err) - } - }() - vmZone, vmRegion, err := vm.GetZoneRegion(ctx, zoneCategoryName, regionCategoryName) + vmZone, vmRegion, err := vm.GetZoneRegion(ctx, zoneCategoryName, regionCategoryName, tagManager) if err != nil { log.Errorf("failed to get accessibleTopology for vm: %v, err: %v", vm.Reference(), err) return false, err diff --git a/pkg/csi/service/node.go b/pkg/csi/service/node.go index b2b0aa16af..5fce102aab 100644 --- a/pkg/csi/service/node.go +++ b/pkg/csi/service/node.go @@ -701,7 +701,18 @@ func (s *service) NodeGetInfo( return nil, status.Errorf(codes.Internal, err.Error()) } } - zone, region, err := nodeVM.GetZoneRegion(ctx, cfg.Labels.Zone, cfg.Labels.Region) + tagManager, err := cnsvsphere.GetTagManager(ctx, vcenter) + if err != nil { + log.Errorf("failed to create tagManager. Err: %v", err) + return nil, status.Errorf(codes.Internal, err.Error()) + } + defer func() { + err := tagManager.Logout(ctx) + if err != nil { + log.Errorf("failed to logout tagManager. err: %v", err) + } + }() + zone, region, err := nodeVM.GetZoneRegion(ctx, cfg.Labels.Zone, cfg.Labels.Region, tagManager) if err != nil { log.Errorf("failed to get accessibleTopology for vm: %v, err: %v", nodeVM.Reference(), err) return nil, status.Errorf(codes.Internal, err.Error()) diff --git a/pkg/csi/service/vanilla/controller.go b/pkg/csi/service/vanilla/controller.go index d6249ff063..6ef11e6a1d 100644 --- a/pkg/csi/service/vanilla/controller.go +++ b/pkg/csi/service/vanilla/controller.go @@ -34,6 +34,7 @@ import ( "github.com/vmware/govmomi/cns" cnstypes "github.com/vmware/govmomi/cns/types" "github.com/vmware/govmomi/units" + "github.com/vmware/govmomi/vapi/tags" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -51,7 +52,7 @@ import ( type NodeManagerInterface interface { Initialize(ctx context.Context) error GetSharedDatastoresInK8SCluster(ctx context.Context) ([]*cnsvsphere.DatastoreInfo, error) - GetSharedDatastoresInTopology(ctx context.Context, topologyRequirement *csi.TopologyRequirement, zoneKey string, regionKey string) ([]*cnsvsphere.DatastoreInfo, map[string][]map[string]string, error) + GetSharedDatastoresInTopology(ctx context.Context, topologyRequirement *csi.TopologyRequirement, tagManager *tags.Manager, zoneKey string, regionKey string) ([]*cnsvsphere.DatastoreInfo, map[string][]map[string]string, error) GetNodeByName(ctx context.Context, nodeName string) (*cnsvsphere.VirtualMachine, error) GetAllNodes(ctx context.Context) ([]*cnsvsphere.VirtualMachine, error) } @@ -379,7 +380,25 @@ func (c *controller) createBlockVolume(ctx context.Context, req *csi.CreateVolum log.Errorf(errMsg) return nil, status.Error(codes.NotFound, errMsg) } - sharedDatastores, datastoreTopologyMap, err = c.nodeMgr.GetSharedDatastoresInTopology(ctx, topologyRequirement, c.manager.CnsConfig.Labels.Zone, c.manager.CnsConfig.Labels.Region) + vcenter, err := c.manager.VcenterManager.GetVirtualCenter(ctx, c.manager.VcenterConfig.Host) + if err != nil { + errMsg := fmt.Sprintf("Failed to get vCenter. Err: %v", err) + log.Errorf(errMsg) + return nil, status.Error(codes.NotFound, errMsg) + } + tagManager, err := cnsvsphere.GetTagManager(ctx, vcenter) + if err != nil { + errMsg := fmt.Sprintf("Failed to get tagManager. Err: %v", err) + log.Errorf(errMsg) + return nil, status.Error(codes.NotFound, errMsg) + } + defer func() { + err := tagManager.Logout(ctx) + if err != nil { + log.Errorf("failed to logout tagManager. 
err: %v", err) + } + }() + sharedDatastores, datastoreTopologyMap, err = c.nodeMgr.GetSharedDatastoresInTopology(ctx, topologyRequirement, tagManager, c.manager.CnsConfig.Labels.Zone, c.manager.CnsConfig.Labels.Region) if err != nil || len(sharedDatastores) == 0 { msg := fmt.Sprintf("failed to get shared datastores in topology: %+v. Error: %+v", topologyRequirement, err) log.Error(msg) diff --git a/pkg/csi/service/vanilla/controller_test.go b/pkg/csi/service/vanilla/controller_test.go index 507034cf06..c28de19e4e 100644 --- a/pkg/csi/service/vanilla/controller_test.go +++ b/pkg/csi/service/vanilla/controller_test.go @@ -36,6 +36,7 @@ import ( pbmsim "github.com/vmware/govmomi/pbm/simulator" "github.com/vmware/govmomi/property" "github.com/vmware/govmomi/simulator" + "github.com/vmware/govmomi/vapi/tags" "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" @@ -225,7 +226,7 @@ func (f *FakeNodeManager) GetAllNodes(ctx context.Context) ([]*cnsvsphere.Virtua return nil, nil } -func (f *FakeNodeManager) GetSharedDatastoresInTopology(ctx context.Context, topologyRequirement *csi.TopologyRequirement, zoneKey string, regionKey string) ([]*cnsvsphere.DatastoreInfo, map[string][]map[string]string, error) { +func (f *FakeNodeManager) GetSharedDatastoresInTopology(ctx context.Context, topologyRequirement *csi.TopologyRequirement, tagManager *tags.Manager, zoneKey string, regionKey string) ([]*cnsvsphere.DatastoreInfo, map[string][]map[string]string, error) { return nil, nil, nil } diff --git a/pkg/csi/service/vanilla/nodes.go b/pkg/csi/service/vanilla/nodes.go index 3f88800cf3..a1b9bc94dd 100644 --- a/pkg/csi/service/vanilla/nodes.go +++ b/pkg/csi/service/vanilla/nodes.go @@ -20,6 +20,8 @@ import ( "context" "fmt" + "github.com/vmware/govmomi/vapi/tags" + cnsnode "sigs.k8s.io/vsphere-csi-driver/pkg/common/cns-lib/node" cnsvsphere "sigs.k8s.io/vsphere-csi-driver/pkg/common/cns-lib/vsphere" "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/common" @@ -133,7 +135,7 @@ func (nodes *Nodes) GetAllNodes(ctx context.Context) ([]*cnsvsphere.VirtualMachi // ds:///vmfs/volumes/vsan:524fae1aaca129a5-1ee55a87f26ae626/: // [map[failure-domain.beta.kubernetes.io/region:k8s-region-us failure-domain.beta.kubernetes.io/zone:k8s-zone-us-west] // map[failure-domain.beta.kubernetes.io/region:k8s-region-us failure-domain.beta.kubernetes.io/zone:k8s-zone-us-east]]]] -func (nodes *Nodes) GetSharedDatastoresInTopology(ctx context.Context, topologyRequirement *csi.TopologyRequirement, zoneCategoryName string, regionCategoryName string) ([]*cnsvsphere.DatastoreInfo, map[string][]map[string]string, error) { +func (nodes *Nodes) GetSharedDatastoresInTopology(ctx context.Context, topologyRequirement *csi.TopologyRequirement, tagManager *tags.Manager, zoneCategoryName string, regionCategoryName string) ([]*cnsvsphere.DatastoreInfo, map[string][]map[string]string, error) { log := logger.GetLogger(ctx) log.Debugf("GetSharedDatastoresInTopology: called with topologyRequirement: %+v, zoneCategoryName: %s, regionCategoryName: %s", topologyRequirement, zoneCategoryName, regionCategoryName) allNodes, err := nodes.cnsNodeManager.GetAllNodes(ctx) @@ -152,7 +154,7 @@ func (nodes *Nodes) GetSharedDatastoresInTopology(ctx context.Context, topologyR log.Debugf("getNodesInZoneRegion: called with zoneValue: %s, regionValue: %s", zoneValue, regionValue) var nodeVMsInZoneAndRegion []*cnsvsphere.VirtualMachine for _, nodeVM := range allNodes { - isNodeInZoneRegion, err := 
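
Taken together, the PATCH 01 hunks move ownership of the `tags.Manager` out of the per-VM helpers and up to the caller: `NodeGetInfo` and `createBlockVolume` now create one session via `cnsvsphere.GetTagManager`, defer a single `Logout`, and thread the manager through every `GetZoneRegion`/`IsInZoneRegion` call. A minimal sketch of that calling convention (the helper name and loop are hypothetical; the identifiers come from the hunks above):

```go
// logNodeTopologies is a hypothetical helper illustrating the shared
// tags.Manager pattern: log in once, reuse the session for every node VM,
// and log out exactly once, instead of one login/logout per lookup.
func logNodeTopologies(ctx context.Context, vcenter *cnsvsphere.VirtualCenter,
	nodeVMs []*cnsvsphere.VirtualMachine, zoneCategoryName, regionCategoryName string) error {
	log := logger.GetLogger(ctx)
	tagManager, err := cnsvsphere.GetTagManager(ctx, vcenter)
	if err != nil {
		return err
	}
	defer func() {
		if err := tagManager.Logout(ctx); err != nil {
			log.Errorf("failed to logout tagManager. err: %v", err)
		}
	}()
	for _, vm := range nodeVMs {
		zone, region, err := vm.GetZoneRegion(ctx, zoneCategoryName, regionCategoryName, tagManager)
		if err != nil {
			return err
		}
		log.Infof("node %v is in zone %q, region %q", vm.Reference(), zone, region)
	}
	return nil
}
```

With dozens of node VMs in a topology query, this cuts the vAPI session churn from one login/logout per VM to one per request.
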
nodeVM.IsInZoneRegion(ctx, zoneCategoryName, regionCategoryName, zoneValue, regionValue) + isNodeInZoneRegion, err := nodeVM.IsInZoneRegion(ctx, zoneCategoryName, regionCategoryName, zoneValue, regionValue, tagManager) if err != nil { log.Errorf("Error checking if node VM: %v belongs to zone [%s] and region [%s]. err: %+v", nodeVM, zoneValue, regionValue, err) return nil, err From 3ef18fcd7c919670bdb3797336ab3ae4a8476b3f Mon Sep 17 00:00:00 2001 From: shahra Date: Mon, 22 Feb 2021 08:11:03 -0800 Subject: [PATCH 02/36] Revert "Assign full permissions for file volumes created in WCP" This reverts commit 9e08b95cf2fd9407b52f89b20a31626856723fad. --- pkg/common/config/config.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/pkg/common/config/config.go b/pkg/common/config/config.go index bb5a5227bc..d6763c644d 100644 --- a/pkg/common/config/config.go +++ b/pkg/common/config/config.go @@ -293,15 +293,16 @@ func validateConfig(ctx context.Context, cfg *Config) error { vcConfig.InsecureFlag = cfg.Global.InsecureFlag } } - + clusterFlavor, err := GetClusterFlavor(ctx) + if err != nil { + return err + } if cfg.NetPermissions == nil { // If no net permissions are given, assume default log.Info("No Net Permissions given in Config. Using default permissions.") - // TODO: - // For now, adding full permissions for READ/WRITE for all file volumes on all flavors - // Later when ACLs are implemented for file volumes in WCP, give full permissions - // only for Vanilla and no permissions for WCP. - cfg.NetPermissions = map[string]*NetPermissionConfig{"#": GetDefaultNetPermission()} + if clusterFlavor == cnstypes.CnsClusterFlavorVanilla { + cfg.NetPermissions = map[string]*NetPermissionConfig{"#": GetDefaultNetPermission()} + } } else { for key, netPerm := range cfg.NetPermissions { if netPerm.Permissions == "" { From c7ae5cde38a62756334489e7488ac15ae5bd98ca Mon Sep 17 00:00:00 2001 From: shahra Date: Tue, 23 Feb 2021 11:11:05 -0800 Subject: [PATCH 03/36] Use gocsi env variable to remove lock contention for same volume id --- .../vanilla/deploy/vsphere-csi-controller-deployment.yaml | 2 ++ .../vanilla/deploy/vsphere-csi-controller-deployment.yaml | 2 ++ .../vanilla/deploy/vsphere-csi-controller-deployment.yaml | 2 ++ .../vanilla/deploy/vsphere-csi-controller-deployment.yaml | 2 ++ 4 files changed, 8 insertions(+) diff --git a/manifests/dev/vsphere-67u3/vanilla/deploy/vsphere-csi-controller-deployment.yaml b/manifests/dev/vsphere-67u3/vanilla/deploy/vsphere-csi-controller-deployment.yaml index 4352757298..a214645f20 100644 --- a/manifests/dev/vsphere-67u3/vanilla/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/dev/vsphere-67u3/vanilla/deploy/vsphere-csi-controller-deployment.yaml @@ -71,6 +71,8 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: X_CSI_SERIAL_VOL_ACCESS_TIMEOUT + value: 3m volumeMounts: - mountPath: /etc/cloud name: vsphere-config-volume diff --git a/manifests/dev/vsphere-7.0/vanilla/deploy/vsphere-csi-controller-deployment.yaml b/manifests/dev/vsphere-7.0/vanilla/deploy/vsphere-csi-controller-deployment.yaml index fc7a387741..acd88e46a8 100644 --- a/manifests/dev/vsphere-7.0/vanilla/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/dev/vsphere-7.0/vanilla/deploy/vsphere-csi-controller-deployment.yaml @@ -84,6 +84,8 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: X_CSI_SERIAL_VOL_ACCESS_TIMEOUT + value: 3m volumeMounts: - mountPath: /etc/cloud name: vsphere-config-volume diff --git 
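
PATCH 03 sets `X_CSI_SERIAL_VOL_ACCESS_TIMEOUT` to `3m` (a Go duration string) in each controller manifest. gocsi's serialized-volume-access middleware reads this variable; with a timeout set, an RPC that cannot acquire the per-volume lock fails fast and the container orchestrator retries, rather than queueing indefinitely behind a stuck operation on the same volume ID. The sketch below is not gocsi's implementation, only an illustration of timed lock acquisition using a capacity-1 channel as the lock:

```go
// tryLockVolume is illustrative only: take the per-volume lock (a buffered
// channel of capacity 1) or give up once the configured timeout elapses,
// returning Aborted so the caller can back off and retry the RPC.
// Assumes context, time, and grpc's codes/status packages are imported.
func tryLockVolume(ctx context.Context, lock chan struct{}, timeout time.Duration) error {
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	select {
	case lock <- struct{}{}:
		// Acquired; the caller releases with <-lock when the operation ends.
		return nil
	case <-timer.C:
		return status.Error(codes.Aborted, "timed out waiting for pending operation on volume")
	case <-ctx.Done():
		return ctx.Err()
	}
}
```
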
a/manifests/dev/vsphere-7.0u1/vanilla/deploy/vsphere-csi-controller-deployment.yaml b/manifests/dev/vsphere-7.0u1/vanilla/deploy/vsphere-csi-controller-deployment.yaml index d196136879..b7a6fa3cbc 100644 --- a/manifests/dev/vsphere-7.0u1/vanilla/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/dev/vsphere-7.0u1/vanilla/deploy/vsphere-csi-controller-deployment.yaml @@ -86,6 +86,8 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: X_CSI_SERIAL_VOL_ACCESS_TIMEOUT + value: 3m volumeMounts: - mountPath: /etc/cloud name: vsphere-config-volume diff --git a/manifests/dev/vsphere-7.0u2/vanilla/deploy/vsphere-csi-controller-deployment.yaml b/manifests/dev/vsphere-7.0u2/vanilla/deploy/vsphere-csi-controller-deployment.yaml index 535466d100..79ba065d64 100644 --- a/manifests/dev/vsphere-7.0u2/vanilla/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/dev/vsphere-7.0u2/vanilla/deploy/vsphere-csi-controller-deployment.yaml @@ -89,6 +89,8 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: X_CSI_SERIAL_VOL_ACCESS_TIMEOUT + value: 3m volumeMounts: - mountPath: /etc/cloud name: vsphere-config-volume From 27be0754be319ed395123ccba56884dc2ecc88e3 Mon Sep 17 00:00:00 2001 From: shahra Date: Mon, 8 Feb 2021 12:24:41 -0800 Subject: [PATCH 04/36] Set cluster distribution in full sync for volumes created before upgrade --- go.mod | 2 +- go.sum | 4 ++-- pkg/syncer/fullsync.go | 52 ++++++++++++++++++++++++++++++------------ 3 files changed, 41 insertions(+), 17 deletions(-) diff --git a/go.mod b/go.mod index 78ed0b9d6b..2a9d9d5200 100644 --- a/go.mod +++ b/go.mod @@ -30,7 +30,7 @@ require ( github.com/stretchr/testify v1.6.1 // indirect github.com/thecodeteam/gofsutil v0.1.2 // indirect github.com/vmware-tanzu/vm-operator-api v0.1.3 - github.com/vmware/govmomi v0.24.1-0.20210127152625-854ba4efe87e + github.com/vmware/govmomi v0.24.1-0.20210211225628-8e9d4eb7d357 go.uber.org/zap v1.15.0 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 // indirect diff --git a/go.sum b/go.sum index 4b4823bef5..38c13c3e6c 100644 --- a/go.sum +++ b/go.sum @@ -757,8 +757,8 @@ github.com/vishvananda/netns v0.0.0-20171111001504-be1fbeda1936/go.mod h1:ZjcWmF github.com/vmware-tanzu/vm-operator-api v0.1.3 h1:4vxewu0jAN3fSoCBI6FhjmRGJ7ci0R2WNu/I6hacTYs= github.com/vmware-tanzu/vm-operator-api v0.1.3/go.mod h1:mubK0QMyaA2TbeAmGsu2GVfiqDFppNUAUqoMPoKFgzM= github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= -github.com/vmware/govmomi v0.24.1-0.20210127152625-854ba4efe87e h1:QPfnwPHD91grdm5OBiWkrRftSNrhpKODGsRC3/jM18U= -github.com/vmware/govmomi v0.24.1-0.20210127152625-854ba4efe87e/go.mod h1:Y+Wq4lst78L85Ge/F8+ORXIWiKYqaro1vhAulACy9Lc= +github.com/vmware/govmomi v0.24.1-0.20210211225628-8e9d4eb7d357 h1:8n/rCTYyci4UqVOReJg/TeUxoPVStntNQF3Y7dlxxEA= +github.com/vmware/govmomi v0.24.1-0.20210211225628-8e9d4eb7d357/go.mod h1:Y+Wq4lst78L85Ge/F8+ORXIWiKYqaro1vhAulACy9Lc= github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= diff --git a/pkg/syncer/fullsync.go b/pkg/syncer/fullsync.go index bc0691b255..29f128a5b2 100644 --- a/pkg/syncer/fullsync.go +++ b/pkg/syncer/fullsync.go @@ -21,6 +21,7 @@ import ( 
"sync" "github.com/davecgh/go-spew/spew" + "github.com/vmware/govmomi/cns" cnstypes "github.com/vmware/govmomi/cns/types" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" @@ -29,6 +30,7 @@ import ( cnsvsphere "sigs.k8s.io/vsphere-csi-driver/pkg/common/cns-lib/vsphere" "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/common" "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/logger" + "sigs.k8s.io/vsphere-csi-driver/pkg/syncer/types" ) // csiFullSync reconciles volume metadata on a vanilla k8s cluster @@ -98,16 +100,23 @@ func csiFullSync(ctx context.Context, metadataSyncer *metadataSyncInformer) { log.Errorf("FullSync: failed to queryAllVolume with err %+v", err) return } - // get map of "volume to EntityMetadata" in CNS and "volume to EntityMetadata" in kubernetes - volumeToCnsEntityMetadataMap, volumeToK8sEntityMetadataMap, err := fullSyncGetEntityMetadata(ctx, k8sPVs, queryAllResult.Volumes, pvToPVCMap, pvcToPodMap, metadataSyncer, migrationFeatureStateForFullSync) + + volumeToCnsEntityMetadataMap, volumeToK8sEntityMetadataMap, volumeClusterDistributionMap, err := fullSyncConstructVolumeMaps(ctx, k8sPVs, queryAllResult.Volumes, pvToPVCMap, pvcToPodMap, metadataSyncer, migrationFeatureStateForFullSync) if err != nil { log.Errorf("FullSync: fullSyncGetEntityMetadata failed with err %+v", err) return } log.Debugf("FullSync: pvToCnsEntityMetadataMap %+v \n pvToK8sEntityMetadataMap: %+v \n", spew.Sdump(volumeToCnsEntityMetadataMap), spew.Sdump(volumeToK8sEntityMetadataMap)) + log.Debugf("FullSync: volumes where clusterDistribution is set: %+v", volumeClusterDistributionMap) + + vcenter, err := types.GetVirtualCenterInstance(ctx, metadataSyncer.configInfo, false) + if err != nil { + log.Errorf("FullSync: failed to get vcenter with error %+v", err) + return + } // Get specs for create and update volume calls containerCluster := cnsvsphere.GetContainerCluster(metadataSyncer.configInfo.Cfg.Global.ClusterID, metadataSyncer.configInfo.Cfg.VirtualCenter[metadataSyncer.host].User, metadataSyncer.clusterFlavor, metadataSyncer.configInfo.Cfg.Global.ClusterDistribution) - createSpecArray, updateSpecArray := fullSyncGetVolumeSpecs(ctx, k8sPVs, volumeToCnsEntityMetadataMap, volumeToK8sEntityMetadataMap, containerCluster, metadataSyncer, migrationFeatureStateForFullSync) + createSpecArray, updateSpecArray := fullSyncGetVolumeSpecs(ctx, vcenter.Client.Version, k8sPVs, volumeToCnsEntityMetadataMap, volumeToK8sEntityMetadataMap, volumeClusterDistributionMap, containerCluster, metadataSyncer, migrationFeatureStateForFullSync) volToBeDeleted, err := getVolumesToBeDeleted(ctx, queryAllResult.Volumes, k8sPVMap, metadataSyncer, migrationFeatureStateForFullSync) if err != nil { log.Errorf("FullSync: failed to get list of volumes to be deleted with err %+v", err) @@ -307,12 +316,15 @@ func buildCnsMetadataList(ctx context.Context, pv *v1.PersistentVolume, pvToPVCM return metadataList } -// fullSyncGetEntityMetadata builds and return map of volume to EntityMetadata in CNS (volumeToCnsEntityMetadataMap) -// and map of volume to EntityMetadata in kubernetes (volumeToK8sEntityMetadataMap) -func fullSyncGetEntityMetadata(ctx context.Context, pvList []*v1.PersistentVolume, cnsVolumeList []cnstypes.CnsVolume, pvToPVCMap pvcMap, pvcToPodMap podMap, metadataSyncer *metadataSyncInformer, migrationFeatureStateForFullSync bool) (map[string][]cnstypes.BaseCnsEntityMetadata, map[string][]cnstypes.BaseCnsEntityMetadata, error) { +// fullSyncConstructVolumeMaps builds and returns map of volume to EntityMetadata in CNS 
(volumeToCnsEntityMetadataMap), +// map of volume to EntityMetadata in kubernetes (volumeToK8sEntityMetadataMap) and set of volumes where ClusterDistribution +// is populated (volumeClusterDistributionMap) +func fullSyncConstructVolumeMaps(ctx context.Context, pvList []*v1.PersistentVolume, cnsVolumeList []cnstypes.CnsVolume, pvToPVCMap pvcMap, pvcToPodMap podMap, metadataSyncer *metadataSyncInformer, migrationFeatureStateForFullSync bool) (map[string][]cnstypes.BaseCnsEntityMetadata, map[string][]cnstypes.BaseCnsEntityMetadata, map[string]bool, error) { log := logger.GetLogger(ctx) volumeToCnsEntityMetadataMap := make(map[string][]cnstypes.BaseCnsEntityMetadata) volumeToK8sEntityMetadataMap := make(map[string][]cnstypes.BaseCnsEntityMetadata) + volumeClusterDistributionMap := make(map[string]bool) + cnsVolumeMap := make(map[string]bool) for _, vol := range cnsVolumeList { @@ -330,7 +342,7 @@ func fullSyncGetEntityMetadata(ctx context.Context, pvList []*v1.PersistentVolum volumeHandle, err = volumeMigrationService.GetVolumeID(ctx, migrationVolumeSpec) if err != nil { log.Errorf("FullSync: Failed to get VolumeID from volumeMigrationService for migration VolumeSpec: %v with error %+v", migrationVolumeSpec, err) - return nil, nil, err + return nil, nil, nil, err } } else { // Do nothing for other cases @@ -351,12 +363,12 @@ func fullSyncGetEntityMetadata(ctx context.Context, pvList []*v1.PersistentVolum // volumes in the queryFilter.VolumeIds should be one which is present in both k8s and in CNS. if len(queryVolumeIds) == 0 { log.Warn("could not find any volume which is present in both k8s and in CNS") - return volumeToCnsEntityMetadataMap, volumeToK8sEntityMetadataMap, nil + return volumeToCnsEntityMetadataMap, volumeToK8sEntityMetadataMap, volumeClusterDistributionMap, nil } allQueryResults, err := fullSyncGetQueryResults(ctx, queryVolumeIds, metadataSyncer.configInfo.Cfg.Global.ClusterID, metadataSyncer.volumeManager) if err != nil { log.Errorf("FullSync: fullSyncGetQueryResults failed to query volume metadata from vc. Err: %v", err) - return nil, nil, err + return nil, nil, nil, err } for _, queryResult := range allQueryResults { @@ -369,14 +381,19 @@ func fullSyncGetEntityMetadata(ctx context.Context, pvList []*v1.PersistentVolum } } volumeToCnsEntityMetadataMap[volume.VolumeId.Id] = cnsMetadata + if len(volume.Metadata.ContainerClusterArray) == 1 && metadataSyncer.configInfo.Cfg.Global.ClusterID == volume.Metadata.ContainerClusterArray[0].ClusterId && metadataSyncer.configInfo.Cfg.Global.ClusterDistribution == volume.Metadata.ContainerClusterArray[0].ClusterDistribution { + log.Debugf("Volume %s has cluster distribution set to %s", volume.Name, volume.Metadata.ContainerClusterArray[0].ClusterDistribution) + volumeClusterDistributionMap[volume.VolumeId.Id] = true + } + } } - return volumeToCnsEntityMetadataMap, volumeToK8sEntityMetadataMap, nil + return volumeToCnsEntityMetadataMap, volumeToK8sEntityMetadataMap, volumeClusterDistributionMap, nil } // fullSyncGetVolumeSpecs return list of CnsVolumeCreateSpec for volumes which needs to be created in CNS and a list of // CnsVolumeMetadataUpdateSpec for volumes which needs to be updated in CNS. 
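
For readability, the condition fullSyncConstructVolumeMaps applies above when populating `volumeClusterDistributionMap` can be restated as a small predicate; this is an illustrative rewrite, not additional shipped code (types from govmomi's `cnstypes`):

```go
// hasClusterDistribution restates the check above: CNS must report exactly
// one container cluster, and both its cluster ID and its ClusterDistribution
// must match what this cluster is configured with.
func hasClusterDistribution(vol cnstypes.CnsVolume, clusterID, clusterDistribution string) bool {
	ccs := vol.Metadata.ContainerClusterArray
	return len(ccs) == 1 &&
		ccs[0].ClusterId == clusterID &&
		ccs[0].ClusterDistribution == clusterDistribution
}
```
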
-func fullSyncGetVolumeSpecs(ctx context.Context, pvList []*v1.PersistentVolume, volumeToCnsEntityMetadataMap map[string][]cnstypes.BaseCnsEntityMetadata, volumeToK8sEntityMetadataMap map[string][]cnstypes.BaseCnsEntityMetadata, containerCluster cnstypes.CnsContainerCluster, metadataSyncer *metadataSyncInformer, migrationFeatureStateForFullSync bool) ([]cnstypes.CnsVolumeCreateSpec, []cnstypes.CnsVolumeMetadataUpdateSpec) { +func fullSyncGetVolumeSpecs(ctx context.Context, vCenterVersion string, pvList []*v1.PersistentVolume, volumeToCnsEntityMetadataMap map[string][]cnstypes.BaseCnsEntityMetadata, volumeToK8sEntityMetadataMap map[string][]cnstypes.BaseCnsEntityMetadata, volumeClusterDistributionMap map[string]bool, containerCluster cnstypes.CnsContainerCluster, metadataSyncer *metadataSyncInformer, migrationFeatureStateForFullSync bool) ([]cnstypes.CnsVolumeCreateSpec, []cnstypes.CnsVolumeMetadataUpdateSpec) { log := logger.GetLogger(ctx) var createSpecArray []cnstypes.CnsVolumeCreateSpec var updateSpecArray []cnstypes.CnsVolumeMetadataUpdateSpec @@ -397,7 +414,7 @@ func fullSyncGetVolumeSpecs(ctx context.Context, pvList []*v1.PersistentVolume, } volumeToCnsEntityMetadata, presentInCNS := volumeToCnsEntityMetadataMap[volumeHandle] volumeToK8sEntityMetadata, presentInK8S := volumeToK8sEntityMetadataMap[volumeHandle] - + _, volumeClusterDistributionSet := volumeClusterDistributionMap[volumeHandle] if !presentInK8S { log.Infof("FullSync: Skipping volume: %s with VolumeId %q. Volume is not present in the k8s", pv.Name, volumeHandle) continue @@ -413,7 +430,7 @@ func fullSyncGetVolumeSpecs(ctx context.Context, pvList []*v1.PersistentVolume, } } else { // volume exist in K8S and CNS, Check if update is required. - if isUpdateRequired(ctx, volumeToK8sEntityMetadata, volumeToCnsEntityMetadata) { + if isUpdateRequired(ctx, vCenterVersion, volumeToK8sEntityMetadata, volumeToCnsEntityMetadata, volumeClusterDistributionSet) { log.Infof("FullSync: update is required for volume: %q", volumeHandle) operationType = "updateVolume" } else { @@ -574,10 +591,17 @@ func buildPVCMapPodMap(ctx context.Context, pvList []*v1.PersistentVolume, metad // isUpdateRequired compares the input metadata list from K8S and metadata list from CNS and // returns true if update operation is required else returns false. 
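
The reworked `isUpdateRequired` in the next hunk also gates on the vCenter release: the cluster-distribution backfill only applies where CNS understands the field, i.e. releases newer than the three older vSAN streams. A hypothetical extraction of that gate, using the govmomi constants referenced in the hunk:

```go
// supportsClusterDistribution mirrors the version gate below: the three
// pre-7.0u2 release strings do not support ClusterDistribution, so full
// sync must not force a volume update for them on that basis alone.
func supportsClusterDistribution(vCenterVersion string) bool {
	switch vCenterVersion {
	case cns.ReleaseVSAN67u3, cns.ReleaseVSAN70, cns.ReleaseVSAN70u1:
		return false
	default:
		return true
	}
}
```
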
-func isUpdateRequired(ctx context.Context, k8sMetadataList []cnstypes.BaseCnsEntityMetadata, cnsMetadataList []cnstypes.BaseCnsEntityMetadata) bool { +func isUpdateRequired(ctx context.Context, vCenterVersion string, k8sMetadataList []cnstypes.BaseCnsEntityMetadata, cnsMetadataList []cnstypes.BaseCnsEntityMetadata, volumeClusterDistributionSet bool) bool { log := logger.GetLogger(ctx) log.Debugf("FullSync: isUpdateRequired called with k8sMetadataList: %+v \n", spew.Sdump(k8sMetadataList)) log.Debugf("FullSync: isUpdateRequired called with cnsMetadataList: %+v \n", spew.Sdump(cnsMetadataList)) + if vCenterVersion != cns.ReleaseVSAN67u3 && vCenterVersion != cns.ReleaseVSAN70 && vCenterVersion != cns.ReleaseVSAN70u1 { + // Update is required if cluster distribution is not set on volume on vSphere 7.0u2 and above + if !volumeClusterDistributionSet { + return true + } + } + if len(k8sMetadataList) == len(cnsMetadataList) { // Same number of entries for volume in K8s and CNS // Need to check if entries match From 824ade724c8078782807bde169cde4e4fe5883e4 Mon Sep 17 00:00:00 2001 From: Chethan Venkatesh Date: Wed, 24 Feb 2021 15:19:50 -0800 Subject: [PATCH 05/36] Set allow root to true in ACLs and perform hard mount for file volumes in TKG --- pkg/csi/service/node.go | 3 +++ .../cnsfileaccessconfig/cnsfileaccessconfig_controller.go | 1 + 2 files changed, 4 insertions(+) diff --git a/pkg/csi/service/node.go b/pkg/csi/service/node.go index 5fce102aab..2368a0dd2a 100644 --- a/pkg/csi/service/node.go +++ b/pkg/csi/service/node.go @@ -1042,6 +1042,9 @@ func publishFileVol( if params.ro { mntFlags = append(mntFlags, "ro") } + if cnstypes.CnsClusterFlavor(os.Getenv(csitypes.EnvClusterFlavor)) == cnstypes.CnsClusterFlavorGuest { + mntFlags = append(mntFlags, "hard") + } // Retrieve the file share access point from publish context mntSrc, ok := req.GetPublishContext()[common.Nfsv4AccessPoint] if !ok { diff --git a/pkg/syncer/cnsoperator/controller/cnsfileaccessconfig/cnsfileaccessconfig_controller.go b/pkg/syncer/cnsoperator/controller/cnsfileaccessconfig/cnsfileaccessconfig_controller.go index 86338b4481..26b5511474 100644 --- a/pkg/syncer/cnsoperator/controller/cnsfileaccessconfig/cnsfileaccessconfig_controller.go +++ b/pkg/syncer/cnsoperator/controller/cnsfileaccessconfig/cnsfileaccessconfig_controller.go @@ -434,6 +434,7 @@ func (r *ReconcileCnsFileAccessConfig) configureVolumeACLs(ctx context.Context, vSanFileShareNetPermissions = append(vSanFileShareNetPermissions, vsanfstypes.VsanFileShareNetPermission{ Ips: tkgVMIP, Permissions: vsanFileShareAccessType, + AllowRoot: true, }) cnsNFSAccessControlSpecList := make([]cnstypes.CnsNFSAccessControlSpec, 0) From a91f7e8c5f6d83988349fa4e8a4157c804912b44 Mon Sep 17 00:00:00 2001 From: Sandeep Pissay Srinivasa Rao Date: Tue, 23 Feb 2021 13:20:03 -0800 Subject: [PATCH 06/36] Running a http server in syncer container to expose prometheus metrics. 
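
Before the PATCH 06 diffstat: the two PATCH 05 hunks above harden file volumes for TKG guest clusters — the NFSv4 share is hard-mounted, and the vSAN file-share net permission is created with `AllowRoot: true`. A hypothetical helper restating the mount-flag half (identifiers from the node.go hunk; surrounding plumbing assumed):

```go
// fileVolumeMountFlags restates the PATCH 05 node.go logic: in a guest
// cluster the NFS client uses a hard mount, so I/O blocks and retries
// across transient file-share outages instead of returning errors.
func fileVolumeMountFlags(base []string, readOnly bool, flavor cnstypes.CnsClusterFlavor) []string {
	flags := append([]string{}, base...)
	if readOnly {
		flags = append(flags, "ro")
	}
	if flavor == cnstypes.CnsClusterFlavorGuest {
		flags = append(flags, "hard")
	}
	return flags
}
```
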
--- cmd/syncer/main.go | 19 +++++++++++++++ .../vsphere-csi-controller-deployment.yaml | 24 +++++++++++++++++++ .../vsphere-csi-controller-deployment.yaml | 24 +++++++++++++++++++ .../vsphere-csi-controller-deployment.yaml | 23 ++++++++++++++++++ .../guestcluster/1.17/pvcsi.yaml | 23 ++++++++++++++++++ .../guestcluster/1.18/pvcsi.yaml | 23 ++++++++++++++++++ .../guestcluster/1.19/pvcsi.yaml | 23 ++++++++++++++++++ .../1.17/vsphere-csi-controller.yaml | 23 ++++++++++++++++++ .../1.18/vsphere-csi-controller.yaml | 23 ++++++++++++++++++ .../1.19/vsphere-csi-controller.yaml | 23 ++++++++++++++++++ .../vsphere-csi-controller-deployment.yaml | 23 ++++++++++++++++++ pkg/common/cns-lib/volume/manager.go | 2 +- .../service => }/common/prometheus/metrics.go | 12 +++++++--- pkg/csi/service/vanilla/controller.go | 2 +- pkg/csi/service/wcp/controller.go | 2 +- pkg/csi/service/wcpguest/controller.go | 2 +- 16 files changed, 264 insertions(+), 7 deletions(-) rename pkg/{csi/service => }/common/prometheus/metrics.go (94%) diff --git a/cmd/syncer/main.go b/cmd/syncer/main.go index 0a3f4f5e0d..618be90e11 100644 --- a/cmd/syncer/main.go +++ b/cmd/syncer/main.go @@ -20,8 +20,12 @@ import ( "context" "flag" "fmt" + "net/http" "os" + "github.com/prometheus/client_golang/prometheus/promhttp" + "sigs.k8s.io/vsphere-csi-driver/pkg/common/prometheus" + "github.com/kubernetes-csi/csi-lib-utils/leaderelection" cnstypes "github.com/vmware/govmomi/cns/types" @@ -103,6 +107,21 @@ func main() { } }() } + + // Go module to keep the metrics http server running all the time. + go func() { + prometheus.SyncerInfo.WithLabelValues(syncer.Version).Set(1) + for { + log.Info("Starting the http server to expose Prometheus metrics..") + http.Handle("/metrics", promhttp.Handler()) + err = http.ListenAndServe(":2113", nil) + if err != nil { + log.Warnf("Http server that exposes the Prometheus exited with err: %+v", err) + } + log.Info("Restarting http server to expose Prometheus metrics..") + } + }() + // Initialize syncer components that are dependant on the outcome of leader election, if enabled. 
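
The go routine added above serves Prometheus metrics on port 2113 and restarts the listener if it ever exits. A hypothetical smoke test for the endpoint (the function and check are illustrative; the `vsphere_syncer_info` gauge name comes from the metrics.go hunk later in this patch):

```go
// verifySyncerMetrics scrapes the syncer's metrics endpoint and confirms
// the syncer info gauge is exported. Assumes net/http, io/ioutil, strings,
// and fmt are imported, and that it runs where port 2113 is reachable.
func verifySyncerMetrics() error {
	resp, err := http.Get("http://localhost:2113/metrics")
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	if !strings.Contains(string(body), "vsphere_syncer_info") {
		return fmt.Errorf("vsphere_syncer_info not found in metrics output")
	}
	return nil
}
```
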
run = initSyncerComponents(ctx, clusterFlavor, configInfo, &syncer.COInitParams) diff --git a/manifests/dev/vsphere-67u3/vanilla/deploy/vsphere-csi-controller-deployment.yaml b/manifests/dev/vsphere-67u3/vanilla/deploy/vsphere-csi-controller-deployment.yaml index a214645f20..58bfbacf21 100644 --- a/manifests/dev/vsphere-67u3/vanilla/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/dev/vsphere-67u3/vanilla/deploy/vsphere-csi-controller-deployment.yaml @@ -108,6 +108,10 @@ spec: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" imagePullPolicy: "Always" + ports: + - containerPort: 2113 + name: prometheus + protocol: TCP env: - name: FULL_SYNC_INTERVAL_MINUTES value: "30" @@ -163,3 +167,23 @@ metadata: spec: attachRequired: true podInfoOnMount: false +--- +apiVersion: v1 +kind: Service +metadata: + name: vsphere-csi-controller + namespace: kube-system + labels: + app: vsphere-csi-controller +spec: + ports: + - name: ctlr + port: 2112 + targetPort: 2112 + protocol: TCP + - name: syncer + port: 2113 + targetPort: 2113 + protocol: TCP + selector: + app: vsphere-csi-controller \ No newline at end of file diff --git a/manifests/dev/vsphere-7.0/vanilla/deploy/vsphere-csi-controller-deployment.yaml b/manifests/dev/vsphere-7.0/vanilla/deploy/vsphere-csi-controller-deployment.yaml index acd88e46a8..5f8a019e60 100644 --- a/manifests/dev/vsphere-7.0/vanilla/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/dev/vsphere-7.0/vanilla/deploy/vsphere-csi-controller-deployment.yaml @@ -121,6 +121,10 @@ spec: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" imagePullPolicy: "Always" + ports: + - containerPort: 2113 + name: prometheus + protocol: TCP env: - name: FULL_SYNC_INTERVAL_MINUTES value: "30" @@ -176,3 +180,23 @@ metadata: spec: attachRequired: true podInfoOnMount: false +--- +apiVersion: v1 +kind: Service +metadata: + name: vsphere-csi-controller + namespace: kube-system + labels: + app: vsphere-csi-controller +spec: + ports: + - name: ctlr + port: 2112 + targetPort: 2112 + protocol: TCP + - name: syncer + port: 2113 + targetPort: 2113 + protocol: TCP + selector: + app: vsphere-csi-controller diff --git a/manifests/dev/vsphere-7.0u1/vanilla/deploy/vsphere-csi-controller-deployment.yaml b/manifests/dev/vsphere-7.0u1/vanilla/deploy/vsphere-csi-controller-deployment.yaml index b7a6fa3cbc..cc4a65d94a 100644 --- a/manifests/dev/vsphere-7.0u1/vanilla/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/dev/vsphere-7.0u1/vanilla/deploy/vsphere-csi-controller-deployment.yaml @@ -121,6 +121,10 @@ spec: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" imagePullPolicy: "Always" + ports: + - containerPort: 2113 + name: prometheus + protocol: TCP env: - name: FULL_SYNC_INTERVAL_MINUTES value: "30" @@ -181,3 +185,22 @@ spec: attachRequired: true podInfoOnMount: false --- +apiVersion: v1 +kind: Service +metadata: + name: vsphere-csi-controller + namespace: kube-system + labels: + app: vsphere-csi-controller +spec: + ports: + - name: ctlr + port: 2112 + targetPort: 2112 + protocol: TCP + - name: syncer + port: 2113 + targetPort: 2113 + protocol: TCP + selector: + app: vsphere-csi-controller diff --git a/manifests/dev/vsphere-7.0u2/guestcluster/1.17/pvcsi.yaml b/manifests/dev/vsphere-7.0u2/guestcluster/1.17/pvcsi.yaml index 887be3fce6..b63192b1fb 100644 --- a/manifests/dev/vsphere-7.0u2/guestcluster/1.17/pvcsi.yaml +++ 
b/manifests/dev/vsphere-7.0u2/guestcluster/1.17/pvcsi.yaml @@ -187,6 +187,10 @@ spec: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" imagePullPolicy: "IfNotPresent" + ports: + - containerPort: 2113 + name: prometheus + protocol: TCP env: - name: FULL_SYNC_INTERVAL_MINUTES value: "30" @@ -409,3 +413,22 @@ metadata: name: internal-feature-states.csi.vsphere.vmware.com namespace: {{ .PVCSINamespace }} --- +apiVersion: v1 +kind: Service +metadata: + name: vsphere-csi-controller + namespace: {{ .PVCSINamespace }} + labels: + app: vsphere-csi-controller +spec: + ports: + - name: ctlr + port: 2112 + targetPort: 2112 + protocol: TCP + - name: syncer + port: 2113 + targetPort: 2113 + protocol: TCP + selector: + app: vsphere-csi-controller \ No newline at end of file diff --git a/manifests/dev/vsphere-7.0u2/guestcluster/1.18/pvcsi.yaml b/manifests/dev/vsphere-7.0u2/guestcluster/1.18/pvcsi.yaml index 119b870821..7ba276bf7b 100644 --- a/manifests/dev/vsphere-7.0u2/guestcluster/1.18/pvcsi.yaml +++ b/manifests/dev/vsphere-7.0u2/guestcluster/1.18/pvcsi.yaml @@ -187,6 +187,10 @@ spec: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" imagePullPolicy: "IfNotPresent" + ports: + - containerPort: 2113 + name: prometheus + protocol: TCP env: - name: FULL_SYNC_INTERVAL_MINUTES value: "30" @@ -409,3 +413,22 @@ metadata: name: internal-feature-states.csi.vsphere.vmware.com namespace: {{ .PVCSINamespace }} --- +apiVersion: v1 +kind: Service +metadata: + name: vsphere-csi-controller + namespace: {{ .PVCSINamespace }} + labels: + app: vsphere-csi-controller +spec: + ports: + - name: ctlr + port: 2112 + targetPort: 2112 + protocol: TCP + - name: syncer + port: 2113 + targetPort: 2113 + protocol: TCP + selector: + app: vsphere-csi-controller \ No newline at end of file diff --git a/manifests/dev/vsphere-7.0u2/guestcluster/1.19/pvcsi.yaml b/manifests/dev/vsphere-7.0u2/guestcluster/1.19/pvcsi.yaml index 804dccf891..91ec2570c8 100644 --- a/manifests/dev/vsphere-7.0u2/guestcluster/1.19/pvcsi.yaml +++ b/manifests/dev/vsphere-7.0u2/guestcluster/1.19/pvcsi.yaml @@ -187,6 +187,10 @@ spec: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" imagePullPolicy: "IfNotPresent" + ports: + - containerPort: 2113 + name: prometheus + protocol: TCP env: - name: FULL_SYNC_INTERVAL_MINUTES value: "30" @@ -409,3 +413,22 @@ metadata: name: internal-feature-states.csi.vsphere.vmware.com namespace: {{ .PVCSINamespace }} --- +apiVersion: v1 +kind: Service +metadata: + name: vsphere-csi-controller + namespace: {{ .PVCSINamespace }} + labels: + app: vsphere-csi-controller +spec: + ports: + - name: ctlr + port: 2112 + targetPort: 2112 + protocol: TCP + - name: syncer + port: 2113 + targetPort: 2113 + protocol: TCP + selector: + app: vsphere-csi-controller diff --git a/manifests/dev/vsphere-7.0u2/supervisorcluster/1.17/vsphere-csi-controller.yaml b/manifests/dev/vsphere-7.0u2/supervisorcluster/1.17/vsphere-csi-controller.yaml index ba757ca8e8..c64fb7b471 100644 --- a/manifests/dev/vsphere-7.0u2/supervisorcluster/1.17/vsphere-csi-controller.yaml +++ b/manifests/dev/vsphere-7.0u2/supervisorcluster/1.17/vsphere-csi-controller.yaml @@ -269,6 +269,10 @@ spec: - name: LOGGER_LEVEL value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION imagePullPolicy: "IfNotPresent" + ports: + - containerPort: 2113 + name: prometheus + protocol: TCP volumeMounts: - mountPath: /etc/vmware/wcp name: vsphere-config-volume @@ 
-299,3 +303,22 @@ metadata: name: csi-feature-states namespace: vmware-system-csi --- +apiVersion: v1 +kind: Service +metadata: + name: vsphere-csi-controller + namespace: vmware-system-csi + labels: + app: vsphere-csi-controller +spec: + ports: + - name: ctlr + port: 2112 + targetPort: 2112 + protocol: TCP + - name: syncer + port: 2113 + targetPort: 2113 + protocol: TCP + selector: + app: vsphere-csi-controller diff --git a/manifests/dev/vsphere-7.0u2/supervisorcluster/1.18/vsphere-csi-controller.yaml b/manifests/dev/vsphere-7.0u2/supervisorcluster/1.18/vsphere-csi-controller.yaml index 6fc6a3085a..61cbf54fa8 100644 --- a/manifests/dev/vsphere-7.0u2/supervisorcluster/1.18/vsphere-csi-controller.yaml +++ b/manifests/dev/vsphere-7.0u2/supervisorcluster/1.18/vsphere-csi-controller.yaml @@ -269,6 +269,10 @@ spec: - name: LOGGER_LEVEL value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION imagePullPolicy: "IfNotPresent" + ports: + - containerPort: 2113 + name: prometheus + protocol: TCP volumeMounts: - mountPath: /etc/vmware/wcp name: vsphere-config-volume @@ -299,3 +303,22 @@ metadata: name: csi-feature-states namespace: vmware-system-csi --- +apiVersion: v1 +kind: Service +metadata: + name: vsphere-csi-controller + namespace: vmware-system-csi + labels: + app: vsphere-csi-controller +spec: + ports: + - name: ctlr + port: 2112 + targetPort: 2112 + protocol: TCP + - name: syncer + port: 2113 + targetPort: 2113 + protocol: TCP + selector: + app: vsphere-csi-controller diff --git a/manifests/dev/vsphere-7.0u2/supervisorcluster/1.19/vsphere-csi-controller.yaml b/manifests/dev/vsphere-7.0u2/supervisorcluster/1.19/vsphere-csi-controller.yaml index c152092515..6603bc2122 100644 --- a/manifests/dev/vsphere-7.0u2/supervisorcluster/1.19/vsphere-csi-controller.yaml +++ b/manifests/dev/vsphere-7.0u2/supervisorcluster/1.19/vsphere-csi-controller.yaml @@ -278,6 +278,10 @@ spec: - name: INCLUSTER_CLIENT_BURST value: "50" imagePullPolicy: "IfNotPresent" + ports: + - containerPort: 2113 + name: prometheus + protocol: TCP volumeMounts: - mountPath: /etc/vmware/wcp name: vsphere-config-volume @@ -309,3 +313,22 @@ metadata: name: csi-feature-states namespace: vmware-system-csi --- +apiVersion: v1 +kind: Service +metadata: + name: vsphere-csi-controller + namespace: vmware-system-csi + labels: + app: vsphere-csi-controller +spec: + ports: + - name: ctlr + port: 2112 + targetPort: 2112 + protocol: TCP + - name: syncer + port: 2113 + targetPort: 2113 + protocol: TCP + selector: + app: vsphere-csi-controller diff --git a/manifests/dev/vsphere-7.0u2/vanilla/deploy/vsphere-csi-controller-deployment.yaml b/manifests/dev/vsphere-7.0u2/vanilla/deploy/vsphere-csi-controller-deployment.yaml index 79ba065d64..05d7326181 100644 --- a/manifests/dev/vsphere-7.0u2/vanilla/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/dev/vsphere-7.0u2/vanilla/deploy/vsphere-csi-controller-deployment.yaml @@ -124,6 +124,10 @@ spec: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" imagePullPolicy: "Always" + ports: + - containerPort: 2113 + name: prometheus + protocol: TCP env: - name: FULL_SYNC_INTERVAL_MINUTES value: "30" @@ -187,3 +191,22 @@ spec: attachRequired: true podInfoOnMount: false --- +apiVersion: v1 +kind: Service +metadata: + name: vsphere-csi-controller + namespace: kube-system + labels: + app: vsphere-csi-controller +spec: + ports: + - name: ctlr + port: 2112 + targetPort: 2112 + protocol: TCP + - name: syncer + port: 2113 + targetPort: 2113 + protocol: TCP + 
selector: + app: vsphere-csi-controller \ No newline at end of file diff --git a/pkg/common/cns-lib/volume/manager.go b/pkg/common/cns-lib/volume/manager.go index de9d68ee3c..e9f3605065 100644 --- a/pkg/common/cns-lib/volume/manager.go +++ b/pkg/common/cns-lib/volume/manager.go @@ -23,7 +23,7 @@ import ( "sync" "time" - "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/common/prometheus" + "sigs.k8s.io/vsphere-csi-driver/pkg/common/prometheus" "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/logger" "github.com/davecgh/go-spew/spew" diff --git a/pkg/csi/service/common/prometheus/metrics.go b/pkg/common/prometheus/metrics.go similarity index 94% rename from pkg/csi/service/common/prometheus/metrics.go rename to pkg/common/prometheus/metrics.go index 2e9c039470..82e9c35dd7 100644 --- a/pkg/csi/service/common/prometheus/metrics.go +++ b/pkg/common/prometheus/metrics.go @@ -74,14 +74,20 @@ const ( var ( // CsiInfo is a gauge metric to observe the CSI version. CsiInfo = promauto.NewGaugeVec(prometheus.GaugeOpts{ - Name: "csi_info", + Name: "vsphere_csi_info", Help: "CSI Info", }, []string{"version"}) + // SyncerInfo is a gauge metric to observe the CSI version. + SyncerInfo = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "vsphere_syncer_info", + Help: "Syncer Info", + }, []string{"version"}) + // CsiControlOpsHistVec is a histogram vector metric to observe various control // operations in CSI. CsiControlOpsHistVec = promauto.NewHistogramVec(prometheus.HistogramOpts{ - Name: "csi_volume_ops_histogram", + Name: "vsphere_csi_volume_ops_histogram", Help: "Histogram vector for CSI volume operations.", // Creating more buckets for operations that takes few seconds and less buckets // for those that are taking a long time. A CSI operation taking a long time is @@ -97,7 +103,7 @@ var ( // operations on CNS. Note that this captures the time taken by CNS into a bucket // as seen by the client(CSI in this case). CnsControlOpsHistVec = promauto.NewHistogramVec(prometheus.HistogramOpts{ - Name: "cns_volume_ops_histogram", + Name: "vsphere_cns_volume_ops_histogram", Help: "Histogram vector for CNS operations.", // Creating more buckets for operations that takes few seconds and less buckets // for those that are taking a long time. 
A CNS operation taking a long time is diff --git a/pkg/csi/service/vanilla/controller.go b/pkg/csi/service/vanilla/controller.go index 6ef11e6a1d..fc24d3abdd 100644 --- a/pkg/csi/service/vanilla/controller.go +++ b/pkg/csi/service/vanilla/controller.go @@ -27,7 +27,7 @@ import ( "time" "github.com/prometheus/client_golang/prometheus/promhttp" - "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/common/prometheus" + "sigs.k8s.io/vsphere-csi-driver/pkg/common/prometheus" "github.com/container-storage-interface/spec/lib/go/csi" "github.com/fsnotify/fsnotify" diff --git a/pkg/csi/service/wcp/controller.go b/pkg/csi/service/wcp/controller.go index ec67c13828..9dd20907da 100644 --- a/pkg/csi/service/wcp/controller.go +++ b/pkg/csi/service/wcp/controller.go @@ -24,7 +24,7 @@ import ( "time" "github.com/prometheus/client_golang/prometheus/promhttp" - "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/common/prometheus" + "sigs.k8s.io/vsphere-csi-driver/pkg/common/prometheus" "github.com/container-storage-interface/spec/lib/go/csi" "github.com/fsnotify/fsnotify" diff --git a/pkg/csi/service/wcpguest/controller.go b/pkg/csi/service/wcpguest/controller.go index 4d4b743917..f0bf149ac4 100644 --- a/pkg/csi/service/wcpguest/controller.go +++ b/pkg/csi/service/wcpguest/controller.go @@ -24,7 +24,7 @@ import ( "strings" "time" - "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/common/prometheus" + "sigs.k8s.io/vsphere-csi-driver/pkg/common/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" From b97723f12b1efe69b2d5ac9ed591e5e6cbef5ef1 Mon Sep 17 00:00:00 2001 From: shahra Date: Fri, 26 Feb 2021 13:19:12 -0800 Subject: [PATCH 07/36] Update manifests and documentation for v2.1.1 release --- docs/book/SUMMARY.md | 1 + docs/book/releases/v2.1.0.md | 3 + docs/book/releases/v2.1.1.md | 75 ++++++++ .../vsphere-csi-controller-deployment.yaml | 136 ++++++++++++++ .../vsphere-csi-controller-deployment.yaml | 149 +++++++++++++++ .../vanilla/deploy/vsphere-csi-node-ds.yaml | 125 +++++++++++++ .../vanilla/deploy/validatingwebhook.yaml | 118 ++++++++++++ .../vsphere-csi-controller-deployment.yaml | 169 ++++++++++++++++++ .../vanilla/deploy/vsphere-csi-node-ds.yaml | 138 ++++++++++++++ 9 files changed, 914 insertions(+) create mode 100644 docs/book/releases/v2.1.1.md create mode 100644 manifests/v2.1.1/vsphere-67u3/vanilla/deploy/vsphere-csi-controller-deployment.yaml create mode 100644 manifests/v2.1.1/vsphere-7.0/vanilla/deploy/vsphere-csi-controller-deployment.yaml create mode 100644 manifests/v2.1.1/vsphere-7.0/vanilla/deploy/vsphere-csi-node-ds.yaml create mode 100644 manifests/v2.1.1/vsphere-7.0u1/vanilla/deploy/validatingwebhook.yaml create mode 100644 manifests/v2.1.1/vsphere-7.0u1/vanilla/deploy/vsphere-csi-controller-deployment.yaml create mode 100644 manifests/v2.1.1/vsphere-7.0u1/vanilla/deploy/vsphere-csi-node-ds.yaml diff --git a/docs/book/SUMMARY.md b/docs/book/SUMMARY.md index c91ac633c3..93d53cc0ad 100644 --- a/docs/book/SUMMARY.md +++ b/docs/book/SUMMARY.md @@ -4,6 +4,7 @@ * [Overview](overview.md) * Releases * Native Kubernetes on vSphere + * [v2.1.1](releases/v2.1.1.md) * [v2.1.0](releases/v2.1.0.md) * [v2.0.1](releases/v2.0.1.md) * [v2.0.0](releases/v2.0.0.md) diff --git a/docs/book/releases/v2.1.0.md b/docs/book/releases/v2.1.0.md index 945de69c3c..e10578d5bd 100644 --- a/docs/book/releases/v2.1.0.md +++ b/docs/book/releases/v2.1.0.md @@ -48,6 +48,9 @@ - Impact: In-tree vSphere volumes will not get migrated successfully - Workaround: - Upgrade CSI driver with this fix. +7. 
When a Pod is rescheduled to a new node, there may be some lock contention which causes a delay in the volume getting detached from the old node and attached to the new node.
+ - Impact: Rescheduled Pods remain in `Pending` state for a varying amount of time.
+ - Workaround: Upgrade CSI driver to `v2.1.1`.

### Kubernetes issues
diff --git a/docs/book/releases/v2.1.1.md b/docs/book/releases/v2.1.1.md
new file mode 100644
index 0000000000..d51ae2f846
--- /dev/null
+++ b/docs/book/releases/v2.1.1.md
@@ -0,0 +1,75 @@
+
+# vSphere CSI Driver - v2.1.1 release
+
+## New Feature
+
+- There is no new feature released in v2.1.1. v2.1.1 is the patch release to fix an issue observed in the [v2.1.0](v2.1.0.md) release.
+
+## Notable Changes
+
+- Fixed lock contention in gocsi by introducing a timeout environment variable [#665](https://github.com/kubernetes-sigs/vsphere-csi-driver/pull/665)
+
+## Deployment files
+
+- https://github.com/kubernetes-sigs/vsphere-csi-driver/tree/master/manifests/v2.1.1
+
+## Minimum Kubernetes Version
+
+- v1.17.0
+- For the vSphere CSI Migration feature, the minimum Kubernetes version requirement is v1.19.0
+
+## Supported sidecar containers versions
+
+- csi-provisioner - v2.0.0
+- csi-attacher - v3.0.0
+- csi-resizer - v1.0.0
+- livenessprobe - v2.1.0
+- csi-node-driver-registrar - v2.0.1
+
+## Known Issues
+
+### vSphere CSI Driver issues
+
+1. When a static persistent volume with a file-share is re-created with the same PV name, the volume is not registered as a container volume with vSphere.
+ - Impact: attach/delete cannot be performed on such a Persistent Volume.
+ - Workaround: wait for 1 hour before re-creating the static persistent volume with the file-share using the same name.
+2. The metadata syncer container deletes the volume physically from the datastore when Persistent Volumes with `Bound` status and reclaim policy `Delete` are deleted by the user while `StorageObjectInUseProtection` is disabled on the Kubernetes cluster.
+ - Impact: The Persistent Volume Claim goes into the lost status. The volume cannot be recovered.
+ - Workaround: Do not disable `StorageObjectInUseProtection`, and do not attempt to delete a Persistent Volume directly without deleting the PVC.
+3. A migrated in-tree vSphere volume deleted by the in-tree vSphere plugin remains on the CNS UI.
+ - Impact: Migrated in-tree vSphere volumes deleted by the in-tree vSphere plugin remain on the CNS UI.
+ - Workaround: The admin needs to manually reconcile discrepancies in the Managed Virtual Disk Catalog by following this KB article - https://kb.vmware.com/s/article/2147750
+4. Volume expansion might fail when it is called simultaneously with pod creation.
+ - Impact: Users can resize the PVC and create a pod using that PVC simultaneously. In this case, pod creation might be completed first, using the PVC with the original size. Volume expansion will fail because online resize is not supported in vSphere 7.0 Update1.
+ - Workaround: Wait for the PVC to reach the FilesystemResizePending condition before attaching a pod to it.
+5. Deleting a PV before deleting its PVC leaves an orphan volume on the datastore.
+ - Impact: Orphan volumes remain on the datastore, and the admin needs to delete those volumes manually using the `govc` command.
+ - The upstream issue is tracked at: https://github.com/kubernetes-csi/external-provisioner/issues/546
+ - Workaround:
+ - No workaround. Users should not attempt to delete a PV which is bound to a PVC. Users should only delete a PV if they know that the underlying volume in the storage system is gone.
+ - If users have accidentally left orphan volumes on the datastore by not following the guideline, and if they have captured the volume handles or First Class Disk IDs of the deleted PVs, the storage admin can help delete those volumes using the `govc disk.rm ` command.
+6. When the in-tree vSphere plugin is configured to use the default datastore in this format default-datastore: `/datastore/`, migration of the volume will fail as mentioned here: https://github.com/kubernetes-sigs/vsphere-csi-driver/issues/628. If the default datastore is configured in this format `` then we do not see this issue.
+ - Impact: In-tree vSphere volumes will not get migrated successfully.
+ - Workaround:
+ - Upgrade the CSI driver with this fix.
+
+### Kubernetes issues
+
+1. Filesystem resize is skipped if the original PVC is deleted when the FilesystemResizePending condition is still on the PVC, but the PV and its associated volume on the storage system are not deleted due to the Retain policy.
+ - Issue: https://github.com/kubernetes/kubernetes/issues/88683
+ - Impact: A user may create a new PVC to statically bind to the undeleted PV. In this case, the volume on the storage system is resized but the filesystem is not resized accordingly. The user may try to write to a volume whose filesystem is out of capacity.
+ - Workaround: The user can log into the container to manually resize the filesystem.
+2. A volume associated with a StatefulSet cannot be resized.
+ - Issue: https://github.com/kubernetes/enhancements/pull/660
+ - Impact: A user cannot resize a volume in a StatefulSet.
+ - Workaround: If the StatefulSet is not managed by an operator, there is a slightly risky workaround which users can apply at their own discretion depending upon their use case. Please refer to https://serverfault.com/questions/955293/how-to-increase-disk-size-in-a-stateful-set for more details.
+3. Recover from volume expansion failure.
+ - Impact: If volume expansion fails because the storage system does not support it, there is no way to recover.
+ - Issue: https://github.com/kubernetes/enhancements/pull/1516
+ - Workaround: None
+
+## vSphere issues
+
+1. A CNS file volume has a limitation of 8K for metadata.
+ - Impact: It is quite possible that we will not be able to push all the metadata to the CNS file share, as we need to support a maximum of 64 clients per file volume.
+ - Workaround: None. This is a vSphere limitation.
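
Driver issue 4 and Kubernetes issue 1 above both revolve around the PVC's resize condition. A hypothetical client-go style helper for the suggested workaround — wait for the condition before attaching a pod — using the constant from `k8s.io/api/core/v1` (the condition's string form is `FileSystemResizePending`):

```go
// isFileSystemResizePending reports whether the volume has been expanded on
// the storage side while the filesystem resize is still pending; this is
// the state to wait for before attaching a pod to the resized PVC.
func isFileSystemResizePending(pvc *v1.PersistentVolumeClaim) bool {
	for _, cond := range pvc.Status.Conditions {
		if cond.Type == v1.PersistentVolumeClaimFileSystemResizePending &&
			cond.Status == v1.ConditionTrue {
			return true
		}
	}
	return false
}
```
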
diff --git a/manifests/v2.1.1/vsphere-67u3/vanilla/deploy/vsphere-csi-controller-deployment.yaml b/manifests/v2.1.1/vsphere-67u3/vanilla/deploy/vsphere-csi-controller-deployment.yaml new file mode 100644 index 0000000000..08c974ddd2 --- /dev/null +++ b/manifests/v2.1.1/vsphere-67u3/vanilla/deploy/vsphere-csi-controller-deployment.yaml @@ -0,0 +1,136 @@ +kind: Deployment +apiVersion: apps/v1 +metadata: + name: vsphere-csi-controller + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + app: vsphere-csi-controller + template: + metadata: + labels: + app: vsphere-csi-controller + role: vsphere-csi + spec: + serviceAccountName: vsphere-csi-controller + nodeSelector: + node-role.kubernetes.io/master: "" + tolerations: + - operator: "Exists" + effect: NoSchedule + - operator: "Exists" + effect: NoExecute + dnsPolicy: "Default" + containers: + - name: csi-attacher + image: quay.io/k8scsi/csi-attacher:v3.0.0 + args: + - "--v=4" + - "--timeout=300s" + - "--csi-address=$(ADDRESS)" + - "--leader-election" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + - name: vsphere-csi-controller + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.1.1 + imagePullPolicy: "Always" + env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: X_CSI_MODE + value: "controller" + - name: VSPHERE_CSI_CONFIG + value: "/etc/cloud/csi-vsphere.conf" + - name: LOGGER_LEVEL + value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION + - name: X_CSI_SERIAL_VOL_ACCESS_TIMEOUT + value: 3m + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + ports: + - name: healthz + containerPort: 9808 + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 5 + failureThreshold: 3 + - name: liveness-probe + image: quay.io/k8scsi/livenessprobe:v2.1.0 + args: + - "--csi-address=$(ADDRESS)" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - name: vsphere-syncer + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.1.1 + args: + - "--leader-election" + imagePullPolicy: "Always" + env: + - name: FULL_SYNC_INTERVAL_MINUTES + value: "30" + - name: VSPHERE_CSI_CONFIG + value: "/etc/cloud/csi-vsphere.conf" + - name: LOGGER_LEVEL + value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - name: csi-provisioner + image: quay.io/k8scsi/csi-provisioner:v2.0.0 + args: + - "--v=4" + - "--timeout=300s" + - "--csi-address=$(ADDRESS)" + - "--leader-election" + - "--default-fstype=ext4" + # needed only for topology aware setup + #- "--feature-gates=Topology=true" + #- "--strict-topology" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + volumes: + - name: vsphere-config-volume + secret: + secretName: vsphere-config-secret + - name: socket-dir + emptyDir: {} +--- +apiVersion: v1 +data: + "csi-migration": "false" +kind: ConfigMap +metadata: + name: internal-feature-states.csi.vsphere.vmware.com + namespace: kube-system +--- +apiVersion: storage.k8s.io/v1beta1 +kind: CSIDriver +metadata: + name: csi.vsphere.vmware.com +spec: + attachRequired: true + podInfoOnMount: false diff --git 
a/manifests/v2.1.1/vsphere-7.0/vanilla/deploy/vsphere-csi-controller-deployment.yaml b/manifests/v2.1.1/vsphere-7.0/vanilla/deploy/vsphere-csi-controller-deployment.yaml new file mode 100644 index 0000000000..fc990b68cd --- /dev/null +++ b/manifests/v2.1.1/vsphere-7.0/vanilla/deploy/vsphere-csi-controller-deployment.yaml @@ -0,0 +1,149 @@ +kind: Deployment +apiVersion: apps/v1 +metadata: + name: vsphere-csi-controller + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + app: vsphere-csi-controller + template: + metadata: + labels: + app: vsphere-csi-controller + role: vsphere-csi + spec: + serviceAccountName: vsphere-csi-controller + nodeSelector: + node-role.kubernetes.io/master: "" + tolerations: + - operator: "Exists" + effect: NoSchedule + - operator: "Exists" + effect: NoExecute + dnsPolicy: "Default" + containers: + - name: csi-attacher + image: quay.io/k8scsi/csi-attacher:v3.0.0 + args: + - "--v=4" + - "--timeout=300s" + - "--csi-address=$(ADDRESS)" + - "--leader-election" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + - name: csi-resizer + image: quay.io/k8scsi/csi-resizer:v1.0.0 + args: + - "--v=4" + - "--timeout=300s" + - "--csi-address=$(ADDRESS)" + - "--leader-election" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + - name: vsphere-csi-controller + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.1.1 + imagePullPolicy: "Always" + env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: X_CSI_MODE + value: "controller" + - name: VSPHERE_CSI_CONFIG + value: "/etc/cloud/csi-vsphere.conf" + - name: LOGGER_LEVEL + value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION + - name: X_CSI_SERIAL_VOL_ACCESS_TIMEOUT + value: 3m + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + ports: + - name: healthz + containerPort: 9808 + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 5 + failureThreshold: 3 + - name: liveness-probe + image: quay.io/k8scsi/livenessprobe:v2.1.0 + args: + - "--csi-address=$(ADDRESS)" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - name: vsphere-syncer + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.1.1 + args: + - "--leader-election" + imagePullPolicy: "Always" + env: + - name: FULL_SYNC_INTERVAL_MINUTES + value: "30" + - name: VSPHERE_CSI_CONFIG + value: "/etc/cloud/csi-vsphere.conf" + - name: LOGGER_LEVEL + value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - name: csi-provisioner + image: quay.io/k8scsi/csi-provisioner:v2.0.0 + args: + - "--v=4" + - "--timeout=300s" + - "--csi-address=$(ADDRESS)" + - "--leader-election" + - "--default-fstype=ext4" + # needed only for topology aware setup + #- "--feature-gates=Topology=true" + #- "--strict-topology" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + volumes: + - name: vsphere-config-volume + secret: + secretName: vsphere-config-secret + - name: socket-dir + emptyDir: {} +--- +apiVersion: v1 +data: + "csi-migration": "false" +kind: ConfigMap +metadata: + name: 
internal-feature-states.csi.vsphere.vmware.com + namespace: kube-system +--- +apiVersion: storage.k8s.io/v1beta1 +kind: CSIDriver +metadata: + name: csi.vsphere.vmware.com +spec: + attachRequired: true + podInfoOnMount: false diff --git a/manifests/v2.1.1/vsphere-7.0/vanilla/deploy/vsphere-csi-node-ds.yaml b/manifests/v2.1.1/vsphere-7.0/vanilla/deploy/vsphere-csi-node-ds.yaml new file mode 100644 index 0000000000..7fd7636867 --- /dev/null +++ b/manifests/v2.1.1/vsphere-7.0/vanilla/deploy/vsphere-csi-node-ds.yaml @@ -0,0 +1,125 @@ +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: vsphere-csi-node + namespace: kube-system +spec: + selector: + matchLabels: + app: vsphere-csi-node + updateStrategy: + type: "RollingUpdate" + template: + metadata: + labels: + app: vsphere-csi-node + role: vsphere-csi + spec: + dnsPolicy: "Default" + containers: + - name: node-driver-registrar + image: quay.io/k8scsi/csi-node-driver-registrar:v2.0.1 + lifecycle: + preStop: + exec: + command: ["/bin/sh", "-c", "rm -rf /registration/csi.vsphere.vmware.com-reg.sock /csi/csi.sock"] + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock + securityContext: + privileged: true + volumeMounts: + - name: plugin-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + - name: vsphere-csi-node + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.1.1 + imagePullPolicy: "Always" + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: X_CSI_MODE + value: "node" + - name: X_CSI_SPEC_REQ_VALIDATION + value: "false" + # needed only for topology aware setups + #- name: VSPHERE_CSI_CONFIG + # value: "/etc/cloud/csi-vsphere.conf" # here csi-vsphere.conf is the name of the file used for creating secret using "--from-file" flag + - name: X_CSI_DEBUG + value: "true" + - name: LOGGER_LEVEL + value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION + securityContext: + privileged: true + capabilities: + add: ["SYS_ADMIN"] + allowPrivilegeEscalation: true + volumeMounts: + # needed only for topology aware setups + #- name: vsphere-config-volume + # mountPath: /etc/cloud + # readOnly: true + - name: plugin-dir + mountPath: /csi + - name: pods-mount-dir + mountPath: /var/lib/kubelet + # needed so that any mounts setup inside this container are + # propagated back to the host machine. 
+ mountPropagation: "Bidirectional" + - name: device-dir + mountPath: /dev + ports: + - name: healthz + containerPort: 9808 + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 5 + failureThreshold: 3 + - name: liveness-probe + image: quay.io/k8scsi/livenessprobe:v2.1.0 + args: + - --csi-address=/csi/csi.sock + volumeMounts: + - name: plugin-dir + mountPath: /csi + volumes: + # needed only for topology aware setups + #- name: vsphere-config-volume + # secret: + # secretName: vsphere-config-secret + - name: registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry + type: Directory + - name: plugin-dir + hostPath: + path: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/ + type: DirectoryOrCreate + - name: pods-mount-dir + hostPath: + path: /var/lib/kubelet + type: Directory + - name: device-dir + hostPath: + path: /dev + tolerations: + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists diff --git a/manifests/v2.1.1/vsphere-7.0u1/vanilla/deploy/validatingwebhook.yaml b/manifests/v2.1.1/vsphere-7.0u1/vanilla/deploy/validatingwebhook.yaml new file mode 100644 index 0000000000..43f72dfa58 --- /dev/null +++ b/manifests/v2.1.1/vsphere-7.0u1/vanilla/deploy/validatingwebhook.yaml @@ -0,0 +1,118 @@ +# Requires k8s 1.19+ +--- +apiVersion: v1 +kind: Service +metadata: + name: vsphere-webhook-svc + namespace: kube-system + labels: + app: vsphere-csi-webhook +spec: + ports: + - port: 443 + targetPort: 8443 + selector: + app: vsphere-csi-webhook +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: validation.csi.vsphere.vmware.com +webhooks: + - name: validation.csi.vsphere.vmware.com + clientConfig: + service: + name: vsphere-webhook-svc + namespace: kube-system + path: "/validate" + caBundle: ${CA_BUNDLE} + rules: + - apiGroups: ["storage.k8s.io"] + apiVersions: ["v1", "v1beta1"] + operations: ["CREATE", "UPDATE"] + resources: ["storageclasses"] + sideEffects: None + admissionReviewVersions: ["v1"] + failurePolicy: Fail +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: vsphere-csi-webhook + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-webhook-role + namespace: kube-system +rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-webhook-role-binding +subjects: + - kind: ServiceAccount + name: vsphere-csi-webhook + namespace: kube-system +roleRef: + kind: ClusterRole + name: vsphere-csi-webhook-role + apiGroup: rbac.authorization.k8s.io +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: vsphere-csi-webhook + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + app: vsphere-csi-webhook + template: + metadata: + labels: + app: vsphere-csi-webhook + role: vsphere-csi-webhook + spec: + serviceAccountName: vsphere-csi-webhook + nodeSelector: + node-role.kubernetes.io/master: "" + tolerations: + - operator: "Exists" + effect: NoSchedule + - operator: "Exists" + effect: NoExecute + dnsPolicy: "Default" + containers: + - name: vsphere-webhook + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.1.1 + args: + - "--operation-mode=WEBHOOK_SERVER" + - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" + - "--fss-namespace=$(CSI_NAMESPACE)" + imagePullPolicy: "Always" 
+ env: + - name: WEBHOOK_CONFIG_PATH + value: "/etc/webhook/webhook.config" + - name: LOGGER_LEVEL + value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION + - name: CSI_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - mountPath: /etc/webhook + name: webhook-certs + readOnly: true + volumes: + - name: socket-dir + emptyDir: {} + - name: webhook-certs + secret: + secretName: vsphere-webhook-certs \ No newline at end of file diff --git a/manifests/v2.1.1/vsphere-7.0u1/vanilla/deploy/vsphere-csi-controller-deployment.yaml b/manifests/v2.1.1/vsphere-7.0u1/vanilla/deploy/vsphere-csi-controller-deployment.yaml new file mode 100644 index 0000000000..a4ce5afb6e --- /dev/null +++ b/manifests/v2.1.1/vsphere-7.0u1/vanilla/deploy/vsphere-csi-controller-deployment.yaml @@ -0,0 +1,169 @@ +kind: Deployment +apiVersion: apps/v1 +metadata: + name: vsphere-csi-controller + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + app: vsphere-csi-controller + template: + metadata: + labels: + app: vsphere-csi-controller + role: vsphere-csi + spec: + serviceAccountName: vsphere-csi-controller + nodeSelector: + node-role.kubernetes.io/master: "" + tolerations: + - operator: "Exists" + effect: NoSchedule + - operator: "Exists" + effect: NoExecute + dnsPolicy: "Default" + containers: + - name: csi-attacher + image: quay.io/k8scsi/csi-attacher:v3.0.0 + args: + - "--v=4" + - "--timeout=300s" + - "--csi-address=$(ADDRESS)" + - "--leader-election" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + - name: csi-resizer + image: quay.io/k8scsi/csi-resizer:v1.0.0 + args: + - "--v=4" + - "--timeout=300s" + - "--csi-address=$(ADDRESS)" + - "--leader-election" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + - name: vsphere-csi-controller + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.1.1 + args: + - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" + - "--fss-namespace=$(CSI_NAMESPACE)" + imagePullPolicy: "Always" + env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: X_CSI_MODE + value: "controller" + - name: VSPHERE_CSI_CONFIG + value: "/etc/cloud/csi-vsphere.conf" + - name: LOGGER_LEVEL + value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION + - name: INCLUSTER_CLIENT_QPS + value: "100" + - name: INCLUSTER_CLIENT_BURST + value: "100" + - name: CSI_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: X_CSI_SERIAL_VOL_ACCESS_TIMEOUT + value: 3m + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - mountPath: /csi + name: socket-dir + ports: + - name: healthz + containerPort: 9808 + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 5 + failureThreshold: 3 + - name: liveness-probe + image: quay.io/k8scsi/livenessprobe:v2.1.0 + args: + - "--v=4" + - "--csi-address=/csi/csi.sock" + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: vsphere-syncer + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.1.1 + args: + - "--leader-election" + - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" + - "--fss-namespace=$(CSI_NAMESPACE)" + imagePullPolicy: "Always" + env: + - name: FULL_SYNC_INTERVAL_MINUTES + value: "30" + - name: VSPHERE_CSI_CONFIG + value: "/etc/cloud/csi-vsphere.conf" + - name: LOGGER_LEVEL + value: "PRODUCTION" # Options: DEVELOPMENT, 
PRODUCTION + - name: INCLUSTER_CLIENT_QPS + value: "100" + - name: INCLUSTER_CLIENT_BURST + value: "100" + - name: CSI_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - name: csi-provisioner + image: quay.io/k8scsi/csi-provisioner:v2.0.0 + args: + - "--v=4" + - "--timeout=300s" + - "--csi-address=$(ADDRESS)" + - "--leader-election" + - "--default-fstype=ext4" + # needed only for topology aware setup + #- "--feature-gates=Topology=true" + #- "--strict-topology" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + volumes: + - name: vsphere-config-volume + secret: + secretName: vsphere-config-secret + - name: socket-dir + emptyDir: {} +--- +apiVersion: v1 +data: + "csi-migration": "false" # csi-migration feature is only available for vSphere 7.0U1 +kind: ConfigMap +metadata: + name: internal-feature-states.csi.vsphere.vmware.com + namespace: kube-system +--- +apiVersion: storage.k8s.io/v1 # For k8s 1.17 or lower use storage.k8s.io/v1beta1 +kind: CSIDriver +metadata: + name: csi.vsphere.vmware.com +spec: + attachRequired: true + podInfoOnMount: false +--- diff --git a/manifests/v2.1.1/vsphere-7.0u1/vanilla/deploy/vsphere-csi-node-ds.yaml b/manifests/v2.1.1/vsphere-7.0u1/vanilla/deploy/vsphere-csi-node-ds.yaml new file mode 100644 index 0000000000..50ba8e6cc6 --- /dev/null +++ b/manifests/v2.1.1/vsphere-7.0u1/vanilla/deploy/vsphere-csi-node-ds.yaml @@ -0,0 +1,138 @@ +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: vsphere-csi-node + namespace: kube-system +spec: + selector: + matchLabels: + app: vsphere-csi-node + updateStrategy: + type: "RollingUpdate" + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + app: vsphere-csi-node + role: vsphere-csi + spec: + dnsPolicy: "Default" + containers: + - name: node-driver-registrar + image: quay.io/k8scsi/csi-node-driver-registrar:v2.0.1 + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + - "--health-port=9809" + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock + securityContext: + privileged: true + volumeMounts: + - name: plugin-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + ports: + - containerPort: 9809 + name: healthz + livenessProbe: + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 5 + timeoutSeconds: 5 + - name: vsphere-csi-node + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.1.1 + args: + - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" + - "--fss-namespace=$(CSI_NAMESPACE)" + imagePullPolicy: "Always" + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: X_CSI_MODE + value: "node" + - name: X_CSI_SPEC_REQ_VALIDATION + value: "false" + # needed only for topology aware setups + #- name: VSPHERE_CSI_CONFIG + # value: "/etc/cloud/csi-vsphere.conf" # here csi-vsphere.conf is the name of the file used for creating secret using "--from-file" flag + - name: X_CSI_DEBUG + value: "true" + - name: LOGGER_LEVEL + value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION + - name: CSI_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + privileged: true + capabilities: + add: ["SYS_ADMIN"] + allowPrivilegeEscalation: true + 
volumeMounts: + # needed only for topology aware setups + #- name: vsphere-config-volume + # mountPath: /etc/cloud + # readOnly: true + - name: plugin-dir + mountPath: /csi + - name: pods-mount-dir + mountPath: /var/lib/kubelet + # needed so that any mounts setup inside this container are + # propagated back to the host machine. + mountPropagation: "Bidirectional" + - name: device-dir + mountPath: /dev + ports: + - containerPort: 9808 + name: healthz + livenessProbe: + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 5 + - name: liveness-probe + image: quay.io/k8scsi/livenessprobe:v2.1.0 + args: + - "--v=4" + - "--csi-address=/csi/csi.sock" + volumeMounts: + - name: plugin-dir + mountPath: /csi + volumes: + # needed only for topology aware setups + #- name: vsphere-config-volume + # secret: + # secretName: vsphere-config-secret + - name: registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry + type: Directory + - name: plugin-dir + hostPath: + path: /var/lib/kubelet/plugins/csi.vsphere.vmware.com + type: DirectoryOrCreate + - name: pods-mount-dir + hostPath: + path: /var/lib/kubelet + type: Directory + - name: device-dir + hostPath: + path: /dev + tolerations: + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists From 5a755bf4617da0f4f53793bb93bb85c3be9389a8 Mon Sep 17 00:00:00 2001 From: shahra Date: Fri, 26 Feb 2021 16:52:57 -0800 Subject: [PATCH 08/36] Copying YAMLs for v2.1.1 from release branch --- .../vanilla/deploy/vsphere-csi-node-ds.yaml | 125 +++++++++++++++ .../rbac/vsphere-csi-controller-rbac.yaml | 45 ++++++ .../rbac/vsphere-csi-controller-rbac.yaml | 48 ++++++ .../deploy/create-validation-webhook.sh | 56 +++++++ .../deploy/generate-signed-webhook-certs.sh | 148 ++++++++++++++++++ .../rbac/vsphere-csi-controller-rbac.yaml | 54 +++++++ 6 files changed, 476 insertions(+) create mode 100644 manifests/v2.1.1/vsphere-67u3/vanilla/deploy/vsphere-csi-node-ds.yaml create mode 100644 manifests/v2.1.1/vsphere-67u3/vanilla/rbac/vsphere-csi-controller-rbac.yaml create mode 100644 manifests/v2.1.1/vsphere-7.0/vanilla/rbac/vsphere-csi-controller-rbac.yaml create mode 100755 manifests/v2.1.1/vsphere-7.0u1/vanilla/deploy/create-validation-webhook.sh create mode 100755 manifests/v2.1.1/vsphere-7.0u1/vanilla/deploy/generate-signed-webhook-certs.sh create mode 100644 manifests/v2.1.1/vsphere-7.0u1/vanilla/rbac/vsphere-csi-controller-rbac.yaml diff --git a/manifests/v2.1.1/vsphere-67u3/vanilla/deploy/vsphere-csi-node-ds.yaml b/manifests/v2.1.1/vsphere-67u3/vanilla/deploy/vsphere-csi-node-ds.yaml new file mode 100644 index 0000000000..7fd7636867 --- /dev/null +++ b/manifests/v2.1.1/vsphere-67u3/vanilla/deploy/vsphere-csi-node-ds.yaml @@ -0,0 +1,125 @@ +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: vsphere-csi-node + namespace: kube-system +spec: + selector: + matchLabels: + app: vsphere-csi-node + updateStrategy: + type: "RollingUpdate" + template: + metadata: + labels: + app: vsphere-csi-node + role: vsphere-csi + spec: + dnsPolicy: "Default" + containers: + - name: node-driver-registrar + image: quay.io/k8scsi/csi-node-driver-registrar:v2.0.1 + lifecycle: + preStop: + exec: + command: ["/bin/sh", "-c", "rm -rf /registration/csi.vsphere.vmware.com-reg.sock /csi/csi.sock"] + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: 
/var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock + securityContext: + privileged: true + volumeMounts: + - name: plugin-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + - name: vsphere-csi-node + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.1.1 + imagePullPolicy: "Always" + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: X_CSI_MODE + value: "node" + - name: X_CSI_SPEC_REQ_VALIDATION + value: "false" + # needed only for topology aware setups + #- name: VSPHERE_CSI_CONFIG + # value: "/etc/cloud/csi-vsphere.conf" # here csi-vsphere.conf is the name of the file used for creating secret using "--from-file" flag + - name: X_CSI_DEBUG + value: "true" + - name: LOGGER_LEVEL + value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION + securityContext: + privileged: true + capabilities: + add: ["SYS_ADMIN"] + allowPrivilegeEscalation: true + volumeMounts: + # needed only for topology aware setups + #- name: vsphere-config-volume + # mountPath: /etc/cloud + # readOnly: true + - name: plugin-dir + mountPath: /csi + - name: pods-mount-dir + mountPath: /var/lib/kubelet + # needed so that any mounts setup inside this container are + # propagated back to the host machine. + mountPropagation: "Bidirectional" + - name: device-dir + mountPath: /dev + ports: + - name: healthz + containerPort: 9808 + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 5 + failureThreshold: 3 + - name: liveness-probe + image: quay.io/k8scsi/livenessprobe:v2.1.0 + args: + - --csi-address=/csi/csi.sock + volumeMounts: + - name: plugin-dir + mountPath: /csi + volumes: + # needed only for topology aware setups + #- name: vsphere-config-volume + # secret: + # secretName: vsphere-config-secret + - name: registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry + type: Directory + - name: plugin-dir + hostPath: + path: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/ + type: DirectoryOrCreate + - name: pods-mount-dir + hostPath: + path: /var/lib/kubelet + type: Directory + - name: device-dir + hostPath: + path: /dev + tolerations: + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists diff --git a/manifests/v2.1.1/vsphere-67u3/vanilla/rbac/vsphere-csi-controller-rbac.yaml b/manifests/v2.1.1/vsphere-67u3/vanilla/rbac/vsphere-csi-controller-rbac.yaml new file mode 100644 index 0000000000..2b520c4063 --- /dev/null +++ b/manifests/v2.1.1/vsphere-67u3/vanilla/rbac/vsphere-csi-controller-rbac.yaml @@ -0,0 +1,45 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: vsphere-csi-controller + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-controller-role +rules: + - apiGroups: [""] + resources: ["nodes", "persistentvolumeclaims", "pods", "configmaps"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "update", "delete", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses", "csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: 
["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["patch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-controller-binding +subjects: + - kind: ServiceAccount + name: vsphere-csi-controller + namespace: kube-system +roleRef: + kind: ClusterRole + name: vsphere-csi-controller-role + apiGroup: rbac.authorization.k8s.io diff --git a/manifests/v2.1.1/vsphere-7.0/vanilla/rbac/vsphere-csi-controller-rbac.yaml b/manifests/v2.1.1/vsphere-7.0/vanilla/rbac/vsphere-csi-controller-rbac.yaml new file mode 100644 index 0000000000..584b8b365c --- /dev/null +++ b/manifests/v2.1.1/vsphere-7.0/vanilla/rbac/vsphere-csi-controller-rbac.yaml @@ -0,0 +1,48 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: vsphere-csi-controller + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-controller-role +rules: + - apiGroups: [""] + resources: ["nodes", "persistentvolumeclaims", "pods", "configmaps"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "update", "delete", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses", "csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["patch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-controller-binding +subjects: + - kind: ServiceAccount + name: vsphere-csi-controller + namespace: kube-system +roleRef: + kind: ClusterRole + name: vsphere-csi-controller-role + apiGroup: rbac.authorization.k8s.io diff --git a/manifests/v2.1.1/vsphere-7.0u1/vanilla/deploy/create-validation-webhook.sh b/manifests/v2.1.1/vsphere-7.0u1/vanilla/deploy/create-validation-webhook.sh new file mode 100755 index 0000000000..7e6081143b --- /dev/null +++ b/manifests/v2.1.1/vsphere-7.0u1/vanilla/deploy/create-validation-webhook.sh @@ -0,0 +1,56 @@ +#!/bin/bash +# Copyright 2020 The Kubernetes Authors. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+set -e + +if [ "$1" = "-h" ] || [ "$1" = "--help" ]; then + cat </dev/null || true +kubectl delete validatingwebhookconfiguration.admissionregistration.k8s.io validation.csi.vsphere.vmware.com --namespace "${namespace}" 2>/dev/null || true +kubectl delete serviceaccount vsphere-csi-webhook --namespace "${namespace}" 2>/dev/null || true +kubectl delete clusterrole.rbac.authorization.k8s.io vsphere-csi-webhook-role 2>/dev/null || true +kubectl delete clusterrolebinding.rbac.authorization.k8s.io vsphere-csi-webhook-role-binding --namespace "${namespace}" 2>/dev/null || true +kubectl delete deployment vsphere-csi-webhook --namespace "${namespace}" || true + +# patch validatingwebhook.yaml with CA_BUNDLE and create service and validatingwebhookconfiguration +sed "s/caBundle: .*$/caBundle: ${CA_BUNDLE}/g" validatingwebhook.yaml | kubectl apply -f - diff --git a/manifests/v2.1.1/vsphere-7.0u1/vanilla/deploy/generate-signed-webhook-certs.sh b/manifests/v2.1.1/vsphere-7.0u1/vanilla/deploy/generate-signed-webhook-certs.sh new file mode 100755 index 0000000000..026cccf938 --- /dev/null +++ b/manifests/v2.1.1/vsphere-7.0u1/vanilla/deploy/generate-signed-webhook-certs.sh @@ -0,0 +1,148 @@ +#!/bin/bash +# Copyright 2020 The Kubernetes Authors. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# File originally from https://github.com/istio/istio/blob/release-0.7/install/kubernetes/webhook-create-signed-cert.sh +set -e + +if [ "$1" = "-h" ] || [ "$1" = "--help" ]; then + cat <> "${tmpdir}"/csr.conf +[req] +req_extensions = v3_req +distinguished_name = req_distinguished_name +[req_distinguished_name] +[ v3_req ] +basicConstraints = CA:FALSE +keyUsage = nonRepudiation, digitalSignature, keyEncipherment +extendedKeyUsage = serverAuth +subjectAltName = @alt_names +[alt_names] +DNS.1 = ${service} +DNS.2 = ${service}.${namespace} +DNS.3 = ${service}.${namespace}.svc +EOF + +openssl genrsa -out "${tmpdir}"/server-key.pem 2048 +openssl req -new -key "${tmpdir}"/server-key.pem -subj "/O=C=US/ST=CA/L=Palo Alto/O=VMware/OU=CNS" -out "${tmpdir}"/server.csr -config "${tmpdir}"/csr.conf + + +# clean-up any previously created CSR for our service. Ignore errors if not present. 
+kubectl delete csr ${csrName} 2>/dev/null || true + +# create server cert/key CSR and send to k8s API +cat <&2 + exit 1 +fi +echo "${serverCert}" | openssl base64 -d -A -out "${tmpdir}"/server-cert.pem + +cat <"${tmpdir}"/webhook.config +[WebHookConfig] +port = "8443" +cert-file = "/etc/webhook/cert.pem" +key-file = "/etc/webhook/key.pem" +eof + + +# create the secret with CA cert and server cert/key +kubectl create secret generic "${secret}" \ + --from-file=key.pem="${tmpdir}"/server-key.pem \ + --from-file=cert.pem="${tmpdir}"/server-cert.pem \ + --from-file=webhook.config="${tmpdir}"/webhook.config \ + --dry-run=client -o yaml | + kubectl -n "${namespace}" apply -f - \ No newline at end of file diff --git a/manifests/v2.1.1/vsphere-7.0u1/vanilla/rbac/vsphere-csi-controller-rbac.yaml b/manifests/v2.1.1/vsphere-7.0u1/vanilla/rbac/vsphere-csi-controller-rbac.yaml new file mode 100644 index 0000000000..c78a814e47 --- /dev/null +++ b/manifests/v2.1.1/vsphere-7.0u1/vanilla/rbac/vsphere-csi-controller-rbac.yaml @@ -0,0 +1,54 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: vsphere-csi-controller + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-controller-role +rules: + - apiGroups: [""] + resources: ["nodes", "persistentvolumeclaims", "pods", "configmaps"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["patch"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "update", "delete", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses", "csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "patch"] + - apiGroups: ["cns.vmware.com"] + resources: ["cnsvspherevolumemigrations"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "create"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["patch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-controller-binding +subjects: + - kind: ServiceAccount + name: vsphere-csi-controller + namespace: kube-system +roleRef: + kind: ClusterRole + name: vsphere-csi-controller-role + apiGroup: rbac.authorization.k8s.io From 9c3767f1420d4bfa610a08e5473670c60292d78f Mon Sep 17 00:00:00 2001 From: Divyen Patel Date: Mon, 22 Feb 2021 11:56:21 -0800 Subject: [PATCH 09/36] updating documentation --- README.md | 1 + docs/book/compatiblity_matrix.md | 24 ++-- docs/book/driver-deployment/prerequisites.md | 133 ++++++++----------- docs/book/limits.md | 7 +- docs/book/releases/v1.0.2.md | 5 +- docs/book/releases/v1.0.3.md | 5 +- docs/book/releases/v2.0.0.md | 9 +- docs/book/releases/v2.0.1.md | 9 +- docs/book/releases/v2.1.0.md | 8 +- docs/book/releases/v2.1.1.md | 10 +- docs/book/supported_features_matrix.md | 4 +- 11 files changed, 106 insertions(+), 109 deletions(-) diff --git a/README.md b/README.md index 0a9116eaaf..0eda70e0d2 100644 --- a/README.md +++ b/README.md @@ -13,6 +13,7 @@ Documentation for vSphere CSI Driver is available 
here:

## vSphere CSI Driver Releases

+* [v2.1.1](docs/book/releases/v2.1.1.md)
* [v2.1.0](docs/book/releases/v2.1.0.md)
* [v2.0.1](docs/book/releases/v2.0.1.md)
* [v2.0.0](docs/book/releases/v2.0.0.md)
diff --git a/docs/book/compatiblity_matrix.md b/docs/book/compatiblity_matrix.md
index 3eeb99db72..45639776e3 100644
--- a/docs/book/compatiblity_matrix.md
+++ b/docs/book/compatiblity_matrix.md
@@ -1,17 +1,21 @@
# Compatibility Matrix for vSphere CSI Driver

-- vSphere CSI driver is released with specific version of CSI sidecar containers which has the minimum kubernetes version requirement.
+- vSphere CSI driver is released with specific versions of CSI sidecar containers, which impose minimum and maximum Kubernetes version requirements.

-| vSphere CSI Driver | Minimum Kubernetes Version |
-|--------------------|----------------------------|
-| v1.0.2 | v1.14.0 |
-| v1.0.3 | v1.14.0 |
-| v2.0.0 | v1.16.0 |
-| v2.0.1 | v1.17.0 |
-| v2.1.0 | v1.17.0 |
+Note:
-- vSphere CSI driver is compatible with vSphere 67u3, vSphere 7.0 and 7.0u1.
- - vSphere CSI Driver is backward and forward compatible to vSphere releases
+- VMware supports deprecated releases of the driver until their End of Life date.
+
+| vSphere CSI Driver | Minimum Kubernetes Release | Maximum Kubernetes Release | Deprecated | End of Life |
+|--------------------|----------------------------|---------------------------------------|------------|--------------|
+| [v2.1.1](./releases/v2.1.1.md) | 1.17 | 1.19 | No | - |
+| [v2.1.0](./releases/v2.1.0.md) | 1.17 | 1.19 | No | - |
+| [v2.0.1](./releases/v2.0.1.md) | 1.17 | 1.19 | Yes | January 2022 |
+| [v2.0.0](./releases/v2.0.0.md) | 1.16 | 1.18 | Yes | January 2022 |
+| [v1.0.3](./releases/v1.0.3.md) | 1.14 | 1.16 | Yes | June 2021 |
+| [v1.0.2](./releases/v1.0.2.md) | 1.14 | 1.16 | Yes | January 2021 |
+
+- vSphere CSI driver is compatible with vSphere 67u3, vSphere 7.0 and 7.0u1. If you have a newer vCenter version but older ESXi hosts, new features added in the newer vCenter will not work until all the ESXi hosts are upgraded to the newer version.
- For bug fixes and performance improvements, user can deploy the latest vSphere CSI driver without upgrading vSphere.
- Features added in the newer vSphere releases does not work on the older vSphere CSI driver. Refer to [feature matrix](supported_features_matrix.md) to learn about what features added in each release of vSphere and CSI driver.
diff --git a/docs/book/driver-deployment/prerequisites.md b/docs/book/driver-deployment/prerequisites.md
index 339eb06f15..1c56df8ab2 100644
--- a/docs/book/driver-deployment/prerequisites.md
+++ b/docs/book/driver-deployment/prerequisites.md
@@ -135,100 +135,73 @@ Follow the steps described under “Install the vSphere Cloud Provider Interface

Installation steps for vSphere CPI are briefly described here

-1. Create a cloud-config configmap of vSphere configuration
- - Here is an example configuration file with dummy values:
-
- ```bash
- tee /etc/kubernetes/vsphere.conf >/dev/null < node.cloudprovider.kubernetes.io/uninitialized=true:NoSchedule
+```
+
+When the kubelet is started with an “external” cloud provider, this taint is set on a node to mark it as unusable. After a controller from the cloud-controller-manager initializes this node, the kubelet removes this taint.
+
+Step-2: Create a cloud-config configmap of vSphere configuration. Note: This is used for CPI.
There is separate secret required for vSphere CSI driver. Here is an example configuration file with dummy values: - When the kubelet is started with an “external” cloud provider, this taint is set on a node to mark it as unusable. After a controller from the cloud-controller-manager initializes this node, the kubelet removes this taint. +```bash +tee /etc/kubernetes/vsphere.conf >/dev/null </dev/null < - ProviderID: vsphere:// - ProviderID: vsphere:// - ProviderID: vsphere:// - ProviderID: vsphere:// - ``` +```bash +kubectl describe nodes | grep "ProviderID" +ProviderID: vsphere:// +ProviderID: vsphere:// +ProviderID: vsphere:// +ProviderID: vsphere:// +ProviderID: vsphere:// +``` - vSphere CSI driver needs the `ProviderID` field to be set for all nodes. - \ No newline at end of file +vSphere CSI driver needs the `ProviderID` field to be set for all nodes. diff --git a/docs/book/limits.md b/docs/book/limits.md index 5426cf2014..b1650533a9 100644 --- a/docs/book/limits.md +++ b/docs/book/limits.md @@ -4,7 +4,10 @@ |-----------------------------------------------------|-----------------------------------|--------------------------------------|----------------------------|-----------------------| | Scale | 10000 volumes for vsan, nfs, vmfs | 32 File shares (5 clients per share) | 7000 volumes | 7000 volumes | | | 840 volumes for vVOL | | | | -| Number of PVCs per VMwith 4 controllers | Max 59 | N/A | Max 29 | Max 59 | +| Number of Block PVs per VM with 4 controllers | Max 59 (with 4 Paravirtual SCSI controllers on VM with 1 slot used for primary disk of Node VM)| N/A | Max 29 | Max 59 | | Multiple instances of CSI pods in Multi-master mode | replica = 1 | replica = 1 | replica = 1 | replica = 1 | -Note: Only a single vCenter is supported by vSphere CSI Driver. To use vSphere CSI driver, make sure node VMs do not spread across multiple vCenter servers. +Note: + +- Only a single vCenter is supported by vSphere CSI Driver. To use vSphere CSI driver, make sure node VMs do not spread across multiple vCenter servers. +- In Vanilla Kubernetes Cluster, with each non Paravirtual SCSI controllers on the Node VM, max limit for block volume per node is reduced by 15. diff --git a/docs/book/releases/v1.0.2.md b/docs/book/releases/v1.0.2.md index bbc3212c5a..55d610f234 100644 --- a/docs/book/releases/v1.0.2.md +++ b/docs/book/releases/v1.0.2.md @@ -20,9 +20,10 @@ - https://github.com/kubernetes-sigs/vsphere-csi-driver/tree/master/manifests/v1.0.2 -## Minimum Kubernetes Version +## Kubernetes Release -- v1.14.0 +- Minimum: 1.14 +- Maximum: 1.16 ## Supported sidecar containers versions diff --git a/docs/book/releases/v1.0.3.md b/docs/book/releases/v1.0.3.md index d8b900812d..7ffd9d17b8 100644 --- a/docs/book/releases/v1.0.3.md +++ b/docs/book/releases/v1.0.3.md @@ -16,9 +16,10 @@ - https://github.com/kubernetes-sigs/vsphere-csi-driver/tree/master/manifests/v1.0.3 -## Minimum Kubernetes Version +## Kubernetes Release -- v1.14.0 +- Minimum: 1.14 +- Maximum: 1.16 ## Supported sidecar containers versions diff --git a/docs/book/releases/v2.0.0.md b/docs/book/releases/v2.0.0.md index 064f52504d..fac8240c6c 100644 --- a/docs/book/releases/v2.0.0.md +++ b/docs/book/releases/v2.0.0.md @@ -17,9 +17,10 @@ - https://github.com/kubernetes-sigs/vsphere-csi-driver/tree/master/manifests/v2.0.0 -## Minimum Kubernetes Version +## Kubernetes Release -- v1.16.0 +- Minimum: 1.16 +- Maximum: 1.18 ## Supported sidecar containers versions @@ -69,6 +70,10 @@ - Workaround: - No workaround. 
User should not attempt to delete PV which is bound to PVC. User should only delete a PV if they know that the underlying volume in the storage system is gone.
 - If user has accidentally left orphan volumes on the datastore by not following the guideline, and if user has captured the volume handles or First Class Disk IDs of deleted PVs, storage admin can help delete those volumes using `govc disk.rm ` command.
+8. When multiple PVCs and Pods with the same name are present on the cluster and, for any reason, a volume gets de-registered or lost from the vCenter CNS database, the Syncer does not re-register the volume.
+ - Impact: The volume will not re-appear in the CNS UI, and if the volume needs to be detached and attached to a newer node, that will not happen.
+ - Workaround:
+ - This issue is fixed in the [v2.1.0](./v2.1.0.md) release. Please consider upgrading the driver to v2.1.0.

### Kubernetes issues

diff --git a/docs/book/releases/v2.0.1.md b/docs/book/releases/v2.0.1.md
index be204676a5..2864d960f9 100644
--- a/docs/book/releases/v2.0.1.md
+++ b/docs/book/releases/v2.0.1.md
@@ -14,9 +14,10 @@

- https://github.com/kubernetes-sigs/vsphere-csi-driver/tree/master/manifests/v2.0.1

-## Minimum Kubernetes Version
+## Kubernetes Release

-- v1.17.0
+- Minimum: 1.17
+- Maximum: 1.19

## Supported sidecar containers versions

@@ -48,6 +49,10 @@
 - Workaround:
 - No workaround. User should not attempt to delete PV which is bound to PVC. User should only delete a PV if they know that the underlying volume in the storage system is gone.
 - If user has accidentally left orphan volumes on the datastore by not following the guideline, and if user has captured the volume handles or First Class Disk IDs of deleted PVs, storage admin can help delete those volumes using `govc disk.rm ` command.
+6. When multiple PVCs and Pods with the same name are present on the cluster and, for any reason, a volume gets de-registered or lost from the vCenter CNS database, the Syncer does not re-register the volume.
+ - Impact: The volume will not re-appear in the CNS UI, and if the volume needs to be detached and attached to a newer node, that will not happen.
+ - Workaround:
+ - This issue is fixed in the [v2.1.0](./v2.1.0.md) release.
Please consider upgrading driver to v2.1.0 ### Kubernetes issues diff --git a/docs/book/releases/v2.1.0.md b/docs/book/releases/v2.1.0.md index e10578d5bd..83e066d0f0 100644 --- a/docs/book/releases/v2.1.0.md +++ b/docs/book/releases/v2.1.0.md @@ -9,10 +9,12 @@ - https://github.com/kubernetes-sigs/vsphere-csi-driver/tree/master/manifests/v2.1.0 -## Minimum Kubernetes Version +## Kubernetes Release -- v1.17.0 -- For vSphere CSI Migration feature the minimum Kubernetes version requirement is v1.19.0 +- Minimum: 1.17 +- Maximum: 1.19 + +Note: For vSphere CSI Migration feature the minimum Kubernetes version requirement is v1.19.0 ## Supported sidecar containers versions diff --git a/docs/book/releases/v2.1.1.md b/docs/book/releases/v2.1.1.md index d51ae2f846..edf55d9b9d 100644 --- a/docs/book/releases/v2.1.1.md +++ b/docs/book/releases/v2.1.1.md @@ -7,16 +7,18 @@ ## Notable Changes -- Fixed lock contention in gocsi by introducing timeout environment variable [#655](https://github.com/kubernetes-sigs/vsphere-csi-driver/pull/665) +- Fixed lock contention in gocsi by introducing timeout environment variable [#665](https://github.com/kubernetes-sigs/vsphere-csi-driver/pull/665) ## Deployment files - https://github.com/kubernetes-sigs/vsphere-csi-driver/tree/master/manifests/v2.1.1 -## Minimum Kubernetes Version +## Kubernetes Release -- v1.17.0 -- For vSphere CSI Migration feature the minimum Kubernetes version requirement is v1.19.0 +- Minimum: 1.17 +- Maximum: 1.19 + +Note: For vSphere CSI Migration feature the minimum Kubernetes version requirement is v1.19.0 ## Supported sidecar containers versions diff --git a/docs/book/supported_features_matrix.md b/docs/book/supported_features_matrix.md index 26fcd3cad4..effc7fe9e2 100644 --- a/docs/book/supported_features_matrix.md +++ b/docs/book/supported_features_matrix.md @@ -2,14 +2,14 @@ # vSphere CSI Driver - Supported Features Matrix -| | **Native K8s on vSphere 6.7U3 with CSI v1.0.2, v1.0.3, v2.0.0, v2.0.1, v2.1.0** | **Native K8s on vSphere 7.0 with CSI v2.0.0, v2.0.1, v2.1.0** | **Native K8s on vSphere 7.0u1 with CSI v2.1.0** | **vSphere with Tanzu on vSphere 7.0 – Supervisor Cluster** | **vSphere with Tanzu on vSphere 7.0u1 – Supervisor Cluster** | **vSphere with Tanzu on vSphere 7.0 – TKG Service ‘Guest’ Cluster** | **vSphere with Tanzu on vSphere 7.0u1 – TKG Service ‘Guest’ Cluster** | +| | **Native K8s on vSphere 6.7U3 with CSI v1.0.2, v1.0.3, v2.0.0, v2.0.1, v2.1.0, v2.1.1** | **Native K8s on vSphere 7.0 with CSI v2.0.0, v2.0.1, v2.1.0, v2.1.1** | **Native K8s on vSphere 7.0u1 with CSI v2.1.0, v2.1.1** | **vSphere with Tanzu on vSphere 7.0 – Supervisor Cluster** | **vSphere with Tanzu on vSphere 7.0u1 – Supervisor Cluster** | **vSphere with Tanzu on vSphere 7.0 – TKG Service ‘Guest’ Cluster** | **vSphere with Tanzu on vSphere 7.0u1 – TKG Service ‘Guest’ Cluster** | |----------------------------------------------------------|:-------------------------------------------------------------------------------:|:-------------------------------------------------------------:|:-----------------------------------------------:|:----------------------------------------------------------:|:------------------------------------------------------------:|:-------------------------------------------------------------------:|:---------------------------------------------------------------------:| | CNS UI Support | Yes | Yes | Yes | Yes | Yes | Yes | Yes | | Enhanced Object Health in UI | Yes (vSAN only) | Yes (vSAN only) | Yes (vSAN only) | Yes (vSAN only) | 
Yes (vSAN only) | Yes (vSAN only) | Yes (vSAN only) | | Dynamic Block PV support (`Read-Write-Once` Access Mode) | Yes | Yes | Yes | Yes | Yes | Yes | Yes | | Dynamic File PV support (`Read-Write-Many` Access Mode) | No | Yes (vSAN only) | Yes (vSAN only) | No | No | No | No | | Encryption support via VMcrypt | No | Yes (Block Volume) | Yes (Block Volume) | No | No | No | No | -| Dynamic Virtual Volume (vVOL) PV support | No | Yes | Yes | No | No | No | No | +| Dynamic Virtual Volume (vVOL) PV support | Yes | Yes | Yes | Yes | Yes | Yes | Yes | | Offline Volume Expansion support (beta) | No | Yes (Block Volume) | Yes (Block Volume) | No | No | No | Yes (Block Volume) | | Topology/Availability Zone support (beta) | Yes (Block Volume) | Yes (Block Volume) | Yes (Block Volume) | No | No | No | No | | Static PV Provisioning | Yes | Yes | Yes | No | Yes | Yes | Yes | From 46e57b69b979c8c0391e4feb2e71e641b8c695f0 Mon Sep 17 00:00:00 2001 From: Sandeep Pissay Srinivasa Rao Date: Fri, 26 Feb 2021 15:54:00 -0800 Subject: [PATCH 10/36] Removed QueryAll in DeleteVolume. --- .../vsphere-csi-controller-deployment.yaml | 7 +++-- .../vsphere-csi-controller-deployment.yaml | 7 +++-- .../vsphere-csi-controller-deployment.yaml | 7 +++-- .../vsphere-csi-controller-deployment.yaml | 7 +++-- pkg/csi/service/vanilla/controller.go | 26 +------------------ pkg/csi/service/wcp/controller.go | 26 +------------------ 6 files changed, 14 insertions(+), 66 deletions(-) diff --git a/manifests/dev/vsphere-67u3/vanilla/deploy/vsphere-csi-controller-deployment.yaml b/manifests/dev/vsphere-67u3/vanilla/deploy/vsphere-csi-controller-deployment.yaml index 58bfbacf21..6440dce879 100644 --- a/manifests/dev/vsphere-67u3/vanilla/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/dev/vsphere-67u3/vanilla/deploy/vsphere-csi-controller-deployment.yaml @@ -54,10 +54,6 @@ spec: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" imagePullPolicy: "Always" - ports: - - containerPort: 2112 - name: prometheus - protocol: TCP env: - name: CSI_ENDPOINT value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock @@ -83,6 +79,9 @@ spec: - name: healthz containerPort: 9808 protocol: TCP + - name: prometheus + containerPort: 2112 + protocol: TCP livenessProbe: httpGet: path: /healthz diff --git a/manifests/dev/vsphere-7.0/vanilla/deploy/vsphere-csi-controller-deployment.yaml b/manifests/dev/vsphere-7.0/vanilla/deploy/vsphere-csi-controller-deployment.yaml index 5f8a019e60..2422890299 100644 --- a/manifests/dev/vsphere-7.0/vanilla/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/dev/vsphere-7.0/vanilla/deploy/vsphere-csi-controller-deployment.yaml @@ -67,10 +67,6 @@ spec: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" imagePullPolicy: "Always" - ports: - - containerPort: 2112 - name: prometheus - protocol: TCP env: - name: CSI_ENDPOINT value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock @@ -96,6 +92,9 @@ spec: - name: healthz containerPort: 9808 protocol: TCP + - name: prometheus + containerPort: 2112 + protocol: TCP livenessProbe: httpGet: path: /healthz diff --git a/manifests/dev/vsphere-7.0u1/vanilla/deploy/vsphere-csi-controller-deployment.yaml b/manifests/dev/vsphere-7.0u1/vanilla/deploy/vsphere-csi-controller-deployment.yaml index cc4a65d94a..9d75c2ed53 100644 --- a/manifests/dev/vsphere-7.0u1/vanilla/deploy/vsphere-csi-controller-deployment.yaml +++ 
b/manifests/dev/vsphere-7.0u1/vanilla/deploy/vsphere-csi-controller-deployment.yaml @@ -65,10 +65,6 @@ spec: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" imagePullPolicy: "Always" - ports: - - containerPort: 2112 - name: prometheus - protocol: TCP env: - name: CSI_ENDPOINT value: unix:///csi/csi.sock @@ -98,6 +94,9 @@ spec: - name: healthz containerPort: 9808 protocol: TCP + - name: prometheus + containerPort: 2112 + protocol: TCP livenessProbe: httpGet: path: /healthz diff --git a/manifests/dev/vsphere-7.0u2/vanilla/deploy/vsphere-csi-controller-deployment.yaml b/manifests/dev/vsphere-7.0u2/vanilla/deploy/vsphere-csi-controller-deployment.yaml index 05d7326181..b55383435a 100644 --- a/manifests/dev/vsphere-7.0u2/vanilla/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/dev/vsphere-7.0u2/vanilla/deploy/vsphere-csi-controller-deployment.yaml @@ -68,10 +68,6 @@ spec: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" imagePullPolicy: "Always" - ports: - - containerPort: 2112 - name: prometheus - protocol: TCP env: - name: CSI_ENDPOINT value: unix:///csi/csi.sock @@ -101,6 +97,9 @@ spec: - name: healthz containerPort: 9808 protocol: TCP + - name: prometheus + containerPort: 2112 + protocol: TCP livenessProbe: httpGet: path: /healthz diff --git a/pkg/csi/service/vanilla/controller.go b/pkg/csi/service/vanilla/controller.go index fc24d3abdd..c587f016d6 100644 --- a/pkg/csi/service/vanilla/controller.go +++ b/pkg/csi/service/vanilla/controller.go @@ -668,32 +668,8 @@ func (c *controller) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequ log.Error(msg) return nil, status.Errorf(codes.Internal, msg) } - } else { - // Query CNS to get the volume type. - queryFilter := cnstypes.CnsQueryFilter{ - VolumeIds: []cnstypes.CnsVolumeId{{Id: req.VolumeId}}, - } - queryResult, err := c.manager.VolumeManager.QueryAllVolume(ctx, queryFilter, cnstypes.CnsQuerySelection{ - Names: []string{ - string(cnstypes.QuerySelectionNameTypeVolumeType), - }, - }) - if err != nil { - msg := fmt.Sprintf("QueryVolume failed for volumeID: %q. %+v", req.VolumeId, err.Error()) - log.Error(msg) - return nil, status.Error(codes.Internal, msg) - } - if len(queryResult.Volumes) == 0 { - msg := fmt.Sprintf("volumeID %s not found in QueryVolume", req.VolumeId) - log.Error(msg) - return nil, status.Error(codes.Internal, msg) - } - if queryResult.Volumes[0].VolumeType == common.BlockVolumeType { - volumeType = prometheus.PrometheusBlockVolumeType - } else { - volumeType = prometheus.PrometheusFileVolumeType - } } + // TODO: Add code to determine the volume type and set volumeType for Prometheus metric accordingly. err = common.DeleteVolumeUtil(ctx, c.manager.VolumeManager, req.VolumeId, true) if err != nil { msg := fmt.Sprintf("failed to delete volume: %q. Error: %+v", req.VolumeId, err) diff --git a/pkg/csi/service/wcp/controller.go b/pkg/csi/service/wcp/controller.go index 9dd20907da..43be7eb434 100644 --- a/pkg/csi/service/wcp/controller.go +++ b/pkg/csi/service/wcp/controller.go @@ -483,31 +483,7 @@ func (c *controller) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequ log.Error(msg) return nil, err } - // Query CNS to get the volume type. 
- queryFilter := cnstypes.CnsQueryFilter{ - VolumeIds: []cnstypes.CnsVolumeId{{Id: req.VolumeId}}, - } - queryResult, err := c.manager.VolumeManager.QueryAllVolume(ctx, queryFilter, cnstypes.CnsQuerySelection{ - Names: []string{ - string(cnstypes.QuerySelectionNameTypeVolumeType), - }, - }) - if err != nil { - msg := fmt.Sprintf("QueryVolume failed for volumeID: %q. %+v", req.VolumeId, err.Error()) - log.Error(msg) - return nil, status.Error(codes.Internal, msg) - } - if len(queryResult.Volumes) == 0 { - msg := fmt.Sprintf("volumeID %s not found in QueryVolume", req.VolumeId) - log.Error(msg) - return nil, status.Error(codes.Internal, msg) - } - if queryResult.Volumes[0].VolumeType == common.BlockVolumeType { - volumeType = prometheus.PrometheusBlockVolumeType - } else { - volumeType = prometheus.PrometheusFileVolumeType - } - + // TODO: Add code to determine the volume type and set volumeType for Prometheus metric accordingly. err = common.DeleteVolumeUtil(ctx, c.manager.VolumeManager, req.VolumeId, true) if err != nil { msg := fmt.Sprintf("failed to delete volume: %q. Error: %+v", req.VolumeId, err) From c5f73640ef6ed3752d06aeef9c677e9c333d18b7 Mon Sep 17 00:00:00 2001 From: Shalini Bhaskara Date: Thu, 25 Feb 2021 18:49:03 -0800 Subject: [PATCH 11/36] Online resize documentation for vSphere 7.0U2/CSI 2.2 --- docs/book/README.md | 2 +- docs/book/features/volume_expansion.md | 122 +++++++++++++++++++++---- 2 files changed, 107 insertions(+), 17 deletions(-) diff --git a/docs/book/README.md b/docs/book/README.md index 9800192e1a..a82de3501b 100644 --- a/docs/book/README.md +++ b/docs/book/README.md @@ -25,4 +25,4 @@ CNS supports the following Kubernetes distributions: - [Vanilla Kubernetes](https://github.com/kubernetes/kubernetes) - [vSphere with Kubernetes](https://blogs.vmware.com/vsphere/2019/08/introducing-project-pacific.html) aka Supervisor Cluster. For more information, see [vSphere with Kubernetes Configuration and Management](https://docs.vmware.com/en/VMware-vSphere/7.0/vmware-vsphere-with-kubernetes/GUID-152BE7D2-E227-4DAA-B527-557B564D9718.html). -- [Tanzu Kubernetes Grid Service](https://blogs.vmware.com/vsphere/2020/03/vsphere-7-tanzu-kubernetes-clusters.html). For more information, see [Provisioning and Managing Tanzu Kubernetes Clusters Using the Tanzu Kubernetes Grid Service](https://docs.vmware.com/en/VMware-vSphere/7.0/vmware-vsphere-with-kubernetes/GUID-7E00E7C2-D1A1-4F7D-9110-620F30C02547.html). +- [Tanzu Kubernetes Grid Service](https://blogs.vmware.com/vsphere/2020/03/vsphere-7-tanzu-kubernetes-clusters.html). For more information, see [Provisioning and Managing Tanzu Kubernetes Clusters Using the Tanzu Kubernetes Grid Service](https://docs.vmware.com/en/VMware-vSphere/7.0/vmware-vsphere-with-tanzu/GUID-2597788E-2FA4-420E-B9BA-9423F8F7FD9F.html). diff --git a/docs/book/features/volume_expansion.md b/docs/book/features/volume_expansion.md index af3215b9aa..6c84e4f11a 100644 --- a/docs/book/features/volume_expansion.md +++ b/docs/book/features/volume_expansion.md @@ -1,6 +1,15 @@ -# vSphere CSI Driver - Offline Volume Expansion +# vSphere CSI Driver - Volume Expansion -CSI Volume Expansion was introduced as an alpha feature in Kubernetes 1.14 and it was promoted to beta in Kubernetes 1.16. The vSphere CSI driver currently extends this support for dynamically/statically created offline block volumes only i.e allows a block volume to be extended when it is not attached to a node. 
Check the [supported features](../supported_features_matrix.md) section to verify if your environment conforms to all the required versions and the [known issues](../known_issues.md) section to see if this feature caters to your requirement. Note that offline volume expansion is available from vSphere CSI v2.0 onwards in Vanilla Kubernetes and v2.1 onwards in Tanzu Kubernetes Grid Service (TKGS). Volume expansion is currently not supported in the Supervisor cluster.
+CSI Volume Expansion was introduced as an alpha feature in Kubernetes 1.14 and it was promoted to beta in Kubernetes 1.16. The vSphere CSI driver supports volume expansion for dynamically/statically created **block** volumes only. Kubernetes supports two modes of volume expansion - offline and online. When the PVC is being used by a Pod, i.e. it is mounted on a node, the resulting volume expansion operation is termed an online expansion. In all other cases, it is an offline expansion. Depending upon the Kubernetes flavor and the mode of volume expansion required for your use case, refer to the table below to know the minimum version of the vSphere CSI driver to be used.
+
+| vSphere CSI flavor (minimum versions required) | Vanilla | Supervisor cluster | Tanzu Kubernetes Grid Service (TKGS) |
+|-----------------------------------------------------|-----------------------------------|--------------------------------------|----------------------------|
+| Offline volume expansion support | vSphere CSI driver v2.0; vCenter 7.0; ESXi 7.0 | vCenter 7.0U2; ESXi 7.0U2 | vCenter 7.0U1; ESXi 7.0U1 |
+| Online volume expansion support | vSphere CSI driver v2.2; vCenter 7.0U2; ESXi 7.0U2 | vCenter 7.0U2; ESXi 7.0U2 | vCenter 7.0U2; ESXi 7.0U2 |
+
+**NOTE**: vSphere CSI driver v2.2 is not yet released.
+
+For more information, check the [supported features](../supported_features_matrix.md) section to verify if your environment conforms to all the required versions and the [known issues](../known_issues.md) section to see if this feature caters to your requirement.

## Feature Gate

@@ -12,9 +21,9 @@ An external-resizer sidecar container implements the logic of watching the Kubernetes

## Requirements

-If you are using TKGS and your environment adheres to the required kubernetes and vSphere CSI driver versions mentioned above skip this section and directly proceed to the `Expand PVC Example` section below to use this feature.
+If you are either on the Supervisor cluster or on TKGS, verify that your environment adheres to the required Kubernetes and vSphere CSI driver versions mentioned above, then skip this section and proceed directly to the `Expand PVC` section below to use this feature.

-However, in order to try out this feature using the vanilla kubernetes driver, modify the StorageClass definition as mentioned below in your environment.
+However, in order to try this feature out on the vanilla Kubernetes driver, you need to modify the StorageClass definition in your environment as mentioned below.

### StorageClass

@@ -31,24 +40,105 @@ Proceed to create/edit a PVC by using this storage class.

-## Expand PVC Example
+## Expand PVC
+
+Prior to increasing the size of a PVC, make sure that the PVC is in the `Bound` state. If you are using a statically provisioned PVC, ensure that the PVC and PV specs have the `storageClassName` parameter pointing to a StorageClass that has `allowVolumeExpansion` set to `true`.
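+
+For reference, a complete StorageClass satisfying the requirement above might look like the following sketch. The class name matches the `example-block-sc` used in the examples below; the `storagepolicyname` value is a placeholder and should point to a storage policy that actually exists in your vCenter.
+
+```bash
+cat <<EOF | kubectl apply -f -
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+  name: example-block-sc
+provisioner: csi.vsphere.vmware.com
+allowVolumeExpansion: true   # required for both offline and online expansion
+parameters:
+  storagepolicyname: "vSAN Default Storage Policy"   # placeholder policy name
+EOF
+```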
+
+### Online mode
+
+Consider a scenario where you deployed a PVC with a StorageClass in which `allowVolumeExpansion` is set to `true` and then created a Pod to use this PVC.
+
+```bash
+$ kubectl get pvc,pv,pod
+NAME                                      STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS       AGE
+persistentvolumeclaim/example-block-pvc   Bound    pvc-84c89bf9-8455-4633-a8c8-cd623e155dbd   1Gi        RWO            example-block-sc   8m5s
+
+NAME                                                        CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                       STORAGECLASS       REASON   AGE
+persistentvolume/pvc-84c89bf9-8455-4633-a8c8-cd623e155dbd   1Gi        RWO            Delete           Bound    default/example-block-pvc   example-block-sc            7m59s
+
+NAME                    READY   STATUS    RESTARTS   AGE
+pod/example-block-pod   1/1     Running   0          7m1s
+```
+
+Patch the PVC to increase its requested storage size (in this case, to `2Gi`):
+
+```bash
+$ kubectl patch pvc example-block-pvc -p '{"spec": {"resources": {"requests": {"storage": "2Gi"}}}}'
+persistentvolumeclaim/example-block-pvc patched
+```
+
+This will trigger an expansion of the volume associated with the PVC in vSphere Cloud Native Storage.
 
-Prior to increasing the size of a PVC make sure that the PVC is bound and is not attached to a Pod as only offline volume expansion is supported.
+The PVC and the PV will reflect the increase in size after the volume underneath has expanded. The `describe` output of the PVC will look similar to the following:
+
+```bash
+$ kubectl describe pvc example-block-pvc
+Name:          example-block-pvc
+Namespace:     default
+StorageClass:  example-block-sc
+Status:        Bound
+Volume:        pvc-84c89bf9-8455-4633-a8c8-cd623e155dbd
+Labels:        <none>
+Annotations:   pv.kubernetes.io/bind-completed: yes
+               pv.kubernetes.io/bound-by-controller: yes
+               volume.beta.kubernetes.io/storage-provisioner: csi.vsphere.vmware.com
+Finalizers:    [kubernetes.io/pvc-protection]
+Capacity:      2Gi
+Access Modes:  RWO
+VolumeMode:    Filesystem
+Mounted By:    example-block-pod
+Events:
+  Type     Reason                      Age   From                                                                                                Message
+  ----     ------                      ----  ----                                                                                                -------
+  Normal   ExternalProvisioning        19m   persistentvolume-controller                                                                         waiting for a volume to be created, either by external provisioner "csi.vsphere.vmware.com" or manually created by system administrator
+  Normal   Provisioning                19m   csi.vsphere.vmware.com_vsphere-csi-controller-5d8c5c7d6-9r9kv_7adc4efc-10a6-4615-b90b-790032cc4569  External provisioner is provisioning volume for claim "default/example-block-pvc"
+  Normal   ProvisioningSucceeded       19m   csi.vsphere.vmware.com_vsphere-csi-controller-5d8c5c7d6-9r9kv_7adc4efc-10a6-4615-b90b-790032cc4569  Successfully provisioned volume pvc-84c89bf9-8455-4633-a8c8-cd623e155dbd
+  Warning  ExternalExpanding           75s   volume_expand                                                                                       Ignoring the PVC: didn't find a plugin capable of expanding the volume; waiting for an external controller to process this PVC.
+  Normal   Resizing                    75s   external-resizer csi.vsphere.vmware.com                                                             External resizer is resizing volume pvc-84c89bf9-8455-4633-a8c8-cd623e155dbd
+  Normal   FileSystemResizeRequired    69s   external-resizer csi.vsphere.vmware.com                                                             Require file system resize of volume on node
+  Normal   FileSystemResizeSuccessful  6s    kubelet, k8s-node-072                                                                               MountVolume.NodeExpandVolume succeeded for volume "pvc-84c89bf9-8455-4633-a8c8-cd623e155dbd"
+```
+
+The PVC will progress through the events `Resizing`, `FileSystemResizeRequired`, and finally `FileSystemResizeSuccessful`.
+
+The PV will also reflect the expanded size.
+
+```bash
+$ kubectl get pv
+NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                       STORAGECLASS       REASON   AGE
+pvc-84c89bf9-8455-4633-a8c8-cd623e155dbd   2Gi        RWO            Delete           Bound    default/example-block-pvc   example-block-sc            25m
+```
+
+This marks the completion of the online volume expansion operation.
+
+### Offline mode
+
+Consider a scenario where you deployed a PVC with a StorageClass in which `allowVolumeExpansion` is set to `true`.
+
+```bash
+$ kubectl get pvc,pv
+NAME                                      STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS       AGE
+persistentvolumeclaim/example-block-pvc   Bound    pvc-9e9a325d-ee1c-11e9-a223-005056ad1fc1   1Gi        RWO            example-block-sc   5m5s
+
+NAME                                                        CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                       STORAGECLASS       REASON   AGE
+persistentvolume/pvc-9e9a325d-ee1c-11e9-a223-005056ad1fc1   1Gi        RWO            Delete           Bound    default/example-block-pvc   example-block-sc            5m18s
+```
 
-Patch the PVC to increase its request size:
+Patch the PVC to increase its requested storage size (in this case, to `2Gi`):
 
 ```bash
-kubectl patch pvc example-block-pvc -p '{"spec": {"resources": {"requests": {"storage": "2Gi"}}}}'
+$ kubectl patch pvc example-block-pvc -p '{"spec": {"resources": {"requests": {"storage": "2Gi"}}}}'
+persistentvolumeclaim/example-block-pvc patched
 ```
 
-This will trigger an expansion in the volume associated with the PVC in vSphere Cloud Native Storage which finally gets reflected on the capacity of the corresponding PV object. Note that the capacity of PVC will not change until the PVC is attached to a node i.e used by a Pod.
+This will trigger an expansion of the volume associated with the PVC in vSphere Cloud Native Storage, which finally gets reflected in the capacity of the corresponding PV object. Note that the capacity of the PVC will not change until the PVC is used by a Pod, i.e. mounted on a node.
 
 ```bash
-kubectl get pv
+$ kubectl get pv
 NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                       STORAGECLASS       REASON   AGE
 pvc-9e9a325d-ee1c-11e9-a223-005056ad1fc1   2Gi        RWO            Delete           Bound    default/example-block-pvc   example-block-sc            6m44s
 
-kubectl get pvc
+$ kubectl get pvc
 NAME                STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS       AGE
 example-block-pvc   Bound    pvc-9e9a325d-ee1c-11e9-a223-005056ad1fc1   1Gi        RWO            example-block-sc   6m57s
 ```
@@ -78,26 +168,26 @@ spec:
 ```
 
 ```bash
-kubectl create -f example-pod.yaml
+$ kubectl create -f example-pod.yaml
 pod/example-block-pod created
 ```
 
 The Kubelet on the node will trigger the filesystem expansion on the volume when the PVC is attached to the Pod.
 
 ```bash
-kubectl get pod
+$ kubectl get pod
 NAME                READY   STATUS    RESTARTS   AGE
 example-block-pod   1/1     Running   0          65s
 ```
 
 ```bash
-kubectl get pvc
+$ kubectl get pvc
 NAME                STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS       AGE
 example-block-pvc   Bound    pvc-24114458-9753-428e-9c90-9f568cb25788   2Gi        RWO            example-block-sc   2m12s
 
-kubectl get pv
+$ kubectl get pv
 NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                       STORAGECLASS       REASON   AGE
 pvc-24114458-9753-428e-9c90-9f568cb25788   2Gi        RWO            Delete           Bound    default/example-block-pvc   example-block-sc            2m3s
 ```
 
-You will notice that the capacity of PVC has been modified and the `FilesystemResizePending` condition has been removed from the PVC. Volume expansion is complete.
+You will notice that the capacity of the PVC has been modified and the `FileSystemResizePending` condition has been removed from the PVC. Offline volume expansion is complete.
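+
+In either mode, a quick way to check where an expansion stands is to inspect the PVC status conditions directly; a condition such as `FileSystemResizePending` is present while the filesystem resize is still pending on the node. A hypothetical one-liner, using the PVC name from the examples above:
+
+```bash
+# Prints the PVC's conditions; empty output means no resize is in flight
+$ kubectl get pvc example-block-pvc -o jsonpath='{.status.conditions}'
+```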

From 607dbe6415fd91811eadfd8d610ed306cd37c970 Mon Sep 17 00:00:00 2001
From: Liping Xue
Date: Wed, 24 Feb 2021 16:27:50 -0800
Subject: [PATCH 12/36] Fix fullsync bug where CNS returns `Duplicated entity
 for each entity type in one cluster is found.` on UpdateVolumeMetadata.

---
 docs/book/releases/v2.1.0.md |  4 ++++
 docs/book/releases/v2.1.1.md |  4 ++++
 pkg/syncer/fullsync.go       | 44 +++++++++++++++++++++++++++++++++++-
 3 files changed, 51 insertions(+), 1 deletion(-)

diff --git a/docs/book/releases/v2.1.0.md b/docs/book/releases/v2.1.0.md
index 83e066d0f0..70876df97e 100644
--- a/docs/book/releases/v2.1.0.md
+++ b/docs/book/releases/v2.1.0.md
@@ -53,6 +53,10 @@ Note: For vSphere CSI Migration feature the minimum Kubernetes version requireme
 7. When a Pod is rescheduled to a new node, there may be some lock contention which causes a delay in the volume getting detached from the old node and attached to the new node.
    - Impact: Rescheduled Pods remain in `Pending` state for an varying amount of time.
    - Workaround: Upgrade CSI driver to `v2.1.1`.
+8. When a Pod using a PVC is rescheduled to another node while the metadata syncer is down, full sync might fail with the error `Duplicated entity for each entity type in one cluster is found`.
+   - Impact: CNS may hold stale volume metadata.
+   - Workaround:
+     - Upgrade CSI driver with this fix.
 
 ### Kubernetes issues
 
diff --git a/docs/book/releases/v2.1.1.md b/docs/book/releases/v2.1.1.md
index edf55d9b9d..496601958b 100644
--- a/docs/book/releases/v2.1.1.md
+++ b/docs/book/releases/v2.1.1.md
@@ -54,6 +54,10 @@ Note: For vSphere CSI Migration feature the minimum Kubernetes version requireme
    - Impact: In-tree vSphere volumes will not get migrated successfully
    - Workaround:
      - Upgrade CSI driver with this fix.
+7. When a Pod using a PVC is rescheduled to another node while the metadata syncer is down, full sync might fail with the error `Duplicated entity for each entity type in one cluster is found`.
+   - Impact: CNS may hold stale volume metadata.
+   - Workaround:
+     - Upgrade CSI driver with this fix.

### Kubernetes issues

diff --git a/pkg/syncer/fullsync.go b/pkg/syncer/fullsync.go
index 29f128a5b2..975ad0a3ef 100644
--- a/pkg/syncer/fullsync.go
+++ b/pkg/syncer/fullsync.go
@@ -470,6 +470,12 @@ func fullSyncGetVolumeSpecs(ctx context.Context, vCenterVersion string, pvList [
 		case "updateVolume":
 			// volume exist in K8S and CNS cache, but metadata is different, need to update this volume
 			log.Debugf("FullSync: Volume with id %q added to volume update list", volumeHandle)
+			var volumeType string
+			if IsMultiAttachAllowed(pv) {
+				volumeType = common.FileVolumeType
+			} else {
+				volumeType = common.BlockVolumeType
+			}
 			updateSpec := cnstypes.CnsVolumeMetadataUpdateSpec{
 				VolumeId: cnstypes.CnsVolumeId{
 					Id: volumeHandle,
@@ -501,7 +507,43 @@ func fullSyncGetVolumeSpecs(ctx context.Context, vCenterVersion string, pvList [
 					updateSpec.Metadata.EntityMetadata = append(updateSpec.Metadata.EntityMetadata, oldMetadata)
 				}
 			}
-			updateSpecArray = append(updateSpecArray, updateSpec)
+
+			if volumeType == common.BlockVolumeType {
+				// For block volumes, CNS allows only one instance of each EntityMetadata type in an
+				// UpdateVolumeMetadataSpec. If there is more than one Pod instance in the spec, we need
+				// to invoke a separate UpdateVolumeMetadata call per Pod.
+				var metadataList []cnstypes.BaseCnsEntityMetadata
+				var podMetadataList []cnstypes.BaseCnsEntityMetadata
+				for _, metadata := range updateSpec.Metadata.EntityMetadata {
+					entityType := metadata.(*cnstypes.CnsKubernetesEntityMetadata).EntityType
+					if entityType == string(cnstypes.CnsKubernetesEntityTypePOD) {
+						podMetadataList = append(podMetadataList, metadata)
+					} else {
+						metadataList = append(metadataList, metadata)
+					}
+				}
+				if len(podMetadataList) > 0 {
+					for _, podMetadata := range podMetadataList {
+						updateSpecNew := cnstypes.CnsVolumeMetadataUpdateSpec{
+							VolumeId: cnstypes.CnsVolumeId{
+								Id: volumeHandle,
+							},
+							Metadata: cnstypes.CnsVolumeMetadata{
+								ContainerCluster:      containerCluster,
+								ContainerClusterArray: []cnstypes.CnsContainerCluster{containerCluster},
+								// Update metadata in CNS with the new metadata present in K8S
+								EntityMetadata: append(metadataList, podMetadata),
+							},
+						}
+						log.Debugf("FullSync: updateSpec %+v is added to updateSpecArray\n", spew.Sdump(updateSpecNew))
+						updateSpecArray = append(updateSpecArray, updateSpecNew)
+					}
+				} else {
+					updateSpecArray = append(updateSpecArray, updateSpec)
+				}
+			} else {
+				updateSpecArray = append(updateSpecArray, updateSpec)
+			}
 		}
 	}
 	return createSpecArray, updateSpecArray

From 20c7abfac967f8f1eb9f07386ca1ea04d9b98079 Mon Sep 17 00:00:00 2001
From: Shalini Bhaskara
Date: Thu, 4 Mar 2021 17:03:44 -0800
Subject: [PATCH 13/36] Do not error out if DC does not have datastores

---
 pkg/common/cns-lib/vsphere/virtualcenter.go | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/pkg/common/cns-lib/vsphere/virtualcenter.go b/pkg/common/cns-lib/vsphere/virtualcenter.go
index 925b8ff250..1e9ba4118e 100644
--- a/pkg/common/cns-lib/vsphere/virtualcenter.go
+++ b/pkg/common/cns-lib/vsphere/virtualcenter.go
@@ -385,6 +385,10 @@ func (vc *VirtualCenter) GetVsanDatastores(ctx context.Context) (map[string]*Dat
 		finder.SetDatacenter(dc.Datacenter)
 		datastoresList, err := finder.DatastoreList(ctx, "*")
 		if err != nil {
+			if _, ok := err.(*find.NotFoundError); ok {
+				log.Debugf("No datastores found on %q datacenter", dc.Name())
+				continue
+			}
 			log.Errorf("failed to get all the datastores.
err: %+v", err) return nil, err } From 6829d4271af198e4df71975d9008a40743b03b47 Mon Sep 17 00:00:00 2001 From: Divyen Patel Date: Fri, 5 Mar 2021 13:04:19 -0800 Subject: [PATCH 14/36] updating go.mod dependencies --- go.mod | 20 ++++++------ go.sum | 100 ++++++++++++++++++++++++++++++++++++++++++++++++++------- 2 files changed, 98 insertions(+), 22 deletions(-) diff --git a/go.mod b/go.mod index 2a9d9d5200..37a456d6a3 100644 --- a/go.mod +++ b/go.mod @@ -14,27 +14,27 @@ require ( github.com/go-logr/zapr v0.1.1 // indirect github.com/golang/protobuf v1.4.3 github.com/google/go-cmp v0.5.0 // indirect - github.com/google/uuid v1.1.1 + github.com/google/uuid v1.2.0 github.com/grpc-ecosystem/go-grpc-middleware v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.14.5 // indirect github.com/kr/text v0.2.0 // indirect github.com/kubernetes-csi/csi-lib-utils v0.7.0 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect - github.com/onsi/ginkgo v1.14.1 - github.com/onsi/gomega v1.10.1 + github.com/onsi/ginkgo v1.15.0 + github.com/onsi/gomega v1.10.5 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.9.0 - github.com/rexray/gocsi v1.2.1 - github.com/spf13/cobra v1.1.1 - github.com/spf13/viper v1.7.0 + github.com/rexray/gocsi v1.2.2 + github.com/spf13/cobra v1.1.3 + github.com/spf13/viper v1.7.1 github.com/stretchr/testify v1.6.1 // indirect github.com/thecodeteam/gofsutil v0.1.2 // indirect github.com/vmware-tanzu/vm-operator-api v0.1.3 - github.com/vmware/govmomi v0.24.1-0.20210211225628-8e9d4eb7d357 - go.uber.org/zap v1.15.0 + github.com/vmware/govmomi v0.24.1 + go.uber.org/zap v1.16.0 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 // indirect - golang.org/x/net v0.0.0-20201021035429-f5854403a974 + golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 google.golang.org/appengine v1.6.6 // indirect @@ -43,7 +43,7 @@ require ( gopkg.in/gcfg.v1 v1.2.3 gopkg.in/square/go-jose.v2 v2.5.1 // indirect gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c // indirect - honnef.co/go/tools v0.1.2 // indirect + honnef.co/go/tools v0.1.3 // indirect k8s.io/api v0.18.5 k8s.io/apiextensions-apiserver v0.18.5 k8s.io/apimachinery v0.18.5 diff --git a/go.sum b/go.sum index 38c13c3e6c..d4ac0f8919 100644 --- a/go.sum +++ b/go.sum @@ -32,14 +32,17 @@ github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/hcsshim v0.0.0-20190417211021-672e52e9209d/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46 h1:lsxEuwrXEAokXB9qhlbKWPpo3KMLZQ5WB5WLQRW1uq0= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OpenPeeDeeP/depguard v1.0.0/go.mod h1:7/4sitnI9YlQgTLLk734QlzXT8DuHVnAyztLplQjk+o= github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod 
h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Rican7/retry v0.1.0/go.mod h1:FgOROf8P5bebcC1DS0PdOQiqGUridaZvikzUmkFW6gg= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= @@ -54,8 +57,10 @@ github.com/akutz/gofsutil v0.1.2/go.mod h1:09JEF8dR0bTTZMQ1m3/+O1rqQyH2lG1ET34PO github.com/akutz/gosync v0.1.0 h1:naxPT/aDYDh79PMwM3XmencmNQeYmpNFSZy4ZE9zIW0= github.com/akutz/gosync v0.1.0/go.mod h1:I8I4aiqJI1nqaeYOOB1WS+CgRJVVPqhct9Y4njywM84= github.com/akutz/memconn v0.1.0/go.mod h1:Jo8rI7m0NieZyLI5e2CDlRdRqRRB4S7Xp77ukDjH+Fw= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= @@ -82,6 +87,7 @@ github.com/bazelbuild/buildtools v0.0.0-20190731111112-f720930ceb60/go.mod h1:5J github.com/bazelbuild/buildtools v0.0.0-20190917191645-69366ca98f89/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU= github.com/bazelbuild/rules_go v0.0.0-20190719190356-6dae44dc5cab/go.mod h1:MC23Dc/wkXEyk3Wpq6lCqz0ZAYOZDw2DR5y3N1q2i7M= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= @@ -145,6 +151,7 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7 h1:6pwm8kMQKCmgUg0ZHTm5+/YvRK0s3THD/28+T6/kk4A= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= @@ -180,6 +187,7 @@ github.com/elazarl/goproxy 
v0.0.0-20200710112657-153946a5f232/go.mod h1:Ro8st/El github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8= github.com/elazarl/goproxy/ext v0.0.0-20200710112657-153946a5f232 h1:gj8NHKvd8kkOMT8gcy4gJBCXsDK2fP0tqKc/F20q73k= github.com/elazarl/goproxy/ext v0.0.0-20200710112657-153946a5f232/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633 h1:H2pdYOb3KQ1/YsqVWoWNLQO+fusocsw354rqGTZtAgw= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -191,6 +199,7 @@ github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= +github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -198,10 +207,12 @@ github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= @@ -235,11 +246,14 @@ github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+ github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference 
v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= @@ -253,6 +267,7 @@ github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nA github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= @@ -262,12 +277,14 @@ github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dp github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-ozzo/ozzo-validation v3.5.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= @@ -294,10 +311,12 @@ github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGw github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= 
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.0.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s= @@ -305,12 +324,14 @@ github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFU github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= @@ -339,28 +360,36 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/cadvisor v0.35.0/go.mod h1:1nql6U13uTHaLYB8rLS5x9IJc2qT6Xd/Tr1sTX6NE48= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= github.com/google/go-cmp v0.5.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v0.0.0-20170306145142-6a5e28554805/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.4.0 h1:BXDUo8p/DaxC+4FJY/SSx3gvnx9C1VdHNgaUkiEL5mk= @@ -420,6 +449,7 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/heketi/heketi v9.0.1-0.20190917153846-c2e2a4ab7ab9+incompatible/go.mod h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o= github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7UkZt1i4FQeQy0R2T8GLUwQhOP5M1gBhy4= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -434,8 +464,11 @@ github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok= 
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -463,6 +496,7 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= @@ -489,6 +523,7 @@ github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= @@ -530,11 +565,14 @@ github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwd github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mozilla/tls-observatory v0.0.0-20180409132520-8791a200eb40/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d h1:7PxY7LVfSZm7PEeBTyK1rj1gABdCO2mbri6GKO1cMDs= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mvdan/xurls v1.1.0/go.mod h1:tQlNn3BED8bE/15hnSL2HLkDeLWpNPAwtw7wkEq44oU= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= @@ -563,8 +601,8 @@ github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.10.2/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4= -github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.15.0 h1:1V1NfVQR87RtWAgp1lv9JZJ5Jap+XFGKPi00andXGi4= +github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -572,8 +610,9 @@ github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ= +github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -591,15 +630,18 @@ github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnh github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.1.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors 
v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -613,6 +655,7 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v0.9.4/go.mod h1:oCXIBxdI62A4cR6aTRJCgetEjecSIYzOEaeAn4iYEpM= +github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= @@ -621,6 +664,7 @@ github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66Id github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= @@ -628,6 +672,7 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= @@ -636,8 +681,10 @@ github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.0.11 h1:DhHlBtkHWPYi8O2y31JkK0TF+DGM+51OopZjH/Ia5qI= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= 
github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4= @@ -647,8 +694,8 @@ github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1: github.com/quobyte/api v0.1.2/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= -github.com/rexray/gocsi v1.2.1 h1:9e15bmlOLxgEVi2MyruU0dxLotULoE4g/zimPsqEkEM= -github.com/rexray/gocsi v1.2.1/go.mod h1:5V3YEu+6P8HFTSzUjldYM1abIb/4mLnW/3qWzu8yD0Y= +github.com/rexray/gocsi v1.2.2 h1:h9F/eSizORihN+XT+mxhq7ClZ3cYo1L9RvasN6dKz8U= +github.com/rexray/gocsi v1.2.2/go.mod h1:X9oJHHpIVGmfKdK8e+JuCXafggk7HxL9mWQOgrsoHpo= github.com/robfig/cron v1.1.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= @@ -675,6 +722,7 @@ github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjM github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= @@ -697,16 +745,20 @@ github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4= -github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= +github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.0 h1:oaPbdDe/x0UncahuwiPxW1GYJyilRAdsPnq3e1yaPcI= github.com/spf13/pflag v1.0.0/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4= github.com/spf13/pflag v1.0.1/go.mod 
h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -714,6 +766,8 @@ github.com/spf13/viper v1.0.2/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7Sr github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk= +github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/storageos/go-api v0.0.0-20180912212459-343b3eff91fc/go.mod h1:ZrLn+e0ZuF3Y65PNF6dIwbJPZqfmtCXxFm9ckv0agOY= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -724,6 +778,7 @@ github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -757,8 +812,8 @@ github.com/vishvananda/netns v0.0.0-20171111001504-be1fbeda1936/go.mod h1:ZjcWmF github.com/vmware-tanzu/vm-operator-api v0.1.3 h1:4vxewu0jAN3fSoCBI6FhjmRGJ7ci0R2WNu/I6hacTYs= github.com/vmware-tanzu/vm-operator-api v0.1.3/go.mod h1:mubK0QMyaA2TbeAmGsu2GVfiqDFppNUAUqoMPoKFgzM= github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= -github.com/vmware/govmomi v0.24.1-0.20210211225628-8e9d4eb7d357 h1:8n/rCTYyci4UqVOReJg/TeUxoPVStntNQF3Y7dlxxEA= -github.com/vmware/govmomi v0.24.1-0.20210211225628-8e9d4eb7d357/go.mod h1:Y+Wq4lst78L85Ge/F8+ORXIWiKYqaro1vhAulACy9Lc= +github.com/vmware/govmomi v0.24.1 h1:ecVvrxF28/5g738gLTiYgc62fpGfIPRKheQ1Dj1p35w= +github.com/vmware/govmomi v0.24.1/go.mod h1:Y+Wq4lst78L85Ge/F8+ORXIWiKYqaro1vhAulACy9Lc= github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= @@ -783,16 +838,18 @@ go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod 
h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM= -go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM= +go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/build v0.0.0-20190927031335-2835ba2e683f/go.mod h1:fYw7AShPAhGMdXqA9gRadk/CcMsvLlClpE5oBwnS3dM= golang.org/x/crypto v0.0.0-20180426230345-b49d69b5da94/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -811,6 +868,7 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= @@ -872,14 +930,20 @@ golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be 
h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -929,22 +993,27 @@ golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191220220014-0732a990476f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e h1:AyodaIpKjppX+cBfTASF2E1US3H2JFBj920Ot3rtDjs= golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915090833-1cbadb444a80/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time 
v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -988,11 +1057,14 @@ golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7 h1:EBZoQjiKKPaLbPrbpssUfuHtwM6KV/vb4U85g/cigFY= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1088,6 +1160,8 @@ gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c h1:grhR+C34yXImVGp7EzNk+DTIk+323eIUWOmEevy6bDo= gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -1104,6 +1178,8 @@ honnef.co/go/tools v0.0.1-2019.2.2/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.1.2 h1:SMdYLJl312RXuxXziCCHhRsp/tvct9cGKey0yv95tZM= honnef.co/go/tools v0.1.2/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +honnef.co/go/tools v0.1.3 h1:qTakTkI6ni6LFD5sBwwsdSO+AQqbSIxOauHTTQKZ/7o= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= k8s.io/api v0.18.5 h1:fKbCxr+U3fu7k6jB+QeYPD/c6xKYeSJ2KVWmyUypuWM= k8s.io/api v0.18.5/go.mod h1:tN+e/2nbdGKOAH55NMV8oGrMG+3uRlA9GaRfvnCCSNk= k8s.io/apiextensions-apiserver v0.18.5 h1:pvbXjB/BRXZiO+/Erp5Pxr+lnhDCv5uxNxHh3FLGZ/g= From ac35e9fae6a4a5640e4b5958b5dc67faf8b5962a Mon Sep 17 00:00:00 2001 From: Shalini Bhaskara Date: Tue, 9 Mar 2021 16:18:33 -0800 Subject: [PATCH 15/36] Add empty 
datacenter issue to known issues for current 2.1.x releases --- docs/book/releases/v2.1.0.md | 9 +++++---- docs/book/releases/v2.1.1.md | 9 +++++---- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/docs/book/releases/v2.1.0.md b/docs/book/releases/v2.1.0.md index 70876df97e..3583663756 100644 --- a/docs/book/releases/v2.1.0.md +++ b/docs/book/releases/v2.1.0.md @@ -48,15 +48,16 @@ Note: For vSphere CSI Migration feature the minimum Kubernetes version requireme - If the user has accidentally left orphan volumes on the datastore by not following the guideline, and has captured the volume handles or First Class Disk IDs of the deleted PVs, the storage admin can help delete those volumes using the `govc disk.rm ` command. 6. When the in-tree vSphere plugin is configured to use the default datastore in this format default-datastore: `/datastore/`, migration of the volume will fail as mentioned here: https://github.com/kubernetes-sigs/vsphere-csi-driver/issues/628. If the default datastore is configured in this format ``, this issue is not seen - Impact: In-tree vSphere volumes will not get migrated successfully - - Workaround: - - Upgrade CSI driver with this fix. + - Workaround: This is being fixed in an upcoming version. 7. When a Pod is rescheduled to a new node, there may be some lock contention which causes a delay in the volume getting detached from the old node and attached to the new node. - Impact: Rescheduled Pods remain in `Pending` state for a varying amount of time. - Workaround: Upgrade CSI driver to `v2.1.1`. 8. When a pod using a PVC is rescheduled to another node while the metadatasyncer is down, fullsync might fail with error `Duplicated entity for each entity type in one cluster is found`. - Impact: CNS may hold stale volume metadata. - - Workaround: - - Upgrade CSI driver with this fix. + - Workaround: This is being fixed in an upcoming version. +9. If there are no datastores present in any one of the datacenters in your vSphere environment, CSI file volume provisioning fails with `failed to get all the datastores` error. + - Impact: File volume provisioning keeps failing. + - Workaround: Either remove the `ReadOnly` privilege on this datacenter for the user listed in the `vsphere-config-secret` secret (see the sketch below) or add a datastore to this datacenter. Refer to the `vsphere-roles-and-privileges` section in the [prerequisites](../driver-deployment/prerequisites.md) page to change the permissions on this datacenter.
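For the privilege half of that workaround, a minimal govc sketch (the datacenter path and principal below are placeholders, and a configured GOVC_URL/GOVC_USERNAME/GOVC_PASSWORD session is assumed; this is illustrative, not part of the release notes):

```bash
# Inspect the permissions granted on the empty datacenter, then drop the
# ReadOnly role that was granted to the CSI user (names are illustrative).
govc permissions.ls /EmptyDatacenter
govc permissions.remove -principal 'csi-user@vsphere.local' /EmptyDatacenter
```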
### Kubernetes issues diff --git a/docs/book/releases/v2.1.1.md b/docs/book/releases/v2.1.1.md index 496601958b..8550e712e3 100644 --- a/docs/book/releases/v2.1.1.md +++ b/docs/book/releases/v2.1.1.md @@ -52,12 +52,13 @@ Note: For vSphere CSI Migration feature the minimum Kubernetes version requireme - If the user has accidentally left orphan volumes on the datastore by not following the guideline, and has captured the volume handles or First Class Disk IDs of the deleted PVs, the storage admin can help delete those volumes using the `govc disk.rm ` command. 6. When the in-tree vSphere plugin is configured to use the default datastore in this format default-datastore: `/datastore/`, migration of the volume will fail as mentioned here: https://github.com/kubernetes-sigs/vsphere-csi-driver/issues/628. If the default datastore is configured in this format ``, this issue is not seen - Impact: In-tree vSphere volumes will not get migrated successfully - - Workaround: - - Upgrade CSI driver with this fix. + - Workaround: This is being fixed in an upcoming version. 7. When a pod using a PVC is rescheduled to another node while the metadatasyncer is down, fullsync might fail with error `Duplicated entity for each entity type in one cluster is found`. - Impact: CNS may hold stale volume metadata. - - Workaround: - - Upgrade CSI driver with this fix. + - Workaround: This is being fixed in an upcoming version. +8. If there are no datastores present in any one of the datacenters in your vSphere environment, CSI file volume provisioning fails with `failed to get all the datastores` error. + - Impact: File volume provisioning keeps failing. + - Workaround: Either remove the `ReadOnly` privilege on this datacenter for the user listed in the `vsphere-config-secret` secret or add a datastore to this datacenter. Refer to the `vsphere-roles-and-privileges` section in the [prerequisites](../driver-deployment/prerequisites.md) page to change the permissions on this datacenter. ### Kubernetes issues From 5180e0daaa9d06e1915e917c1862cf4db77d302c Mon Sep 17 00:00:00 2001 From: Liping Xue Date: Fri, 5 Mar 2021 16:48:42 -0800 Subject: [PATCH 16/36] Change YAML file to enable CSI on VMC support. Add doc for CSI on VMC support. --- docs/book/SUMMARY.md | 1 + docs/book/features/vsphere_csi_vmc.md | 35 +++++++++++++++++++ .../vsphere-csi-controller-deployment.yaml | 2 +- .../vsphere-csi-controller-deployment.yaml | 2 +- 4 files changed, 38 insertions(+), 2 deletions(-) create mode 100644 docs/book/features/vsphere_csi_vmc.md diff --git a/docs/book/SUMMARY.md b/docs/book/SUMMARY.md index 93d53cc0ad..791971f5e0 100644 --- a/docs/book/SUMMARY.md +++ b/docs/book/SUMMARY.md @@ -29,6 +29,7 @@ * [Volume Topology](features/volume_topology.md) * [Volume Health](features/volume_health.md) * [vSphere CSI Migration](features/vsphere_csi_migration.md) + * [vSphere CSI on VMC](features/vsphere_csi_vmc.md) * [Known Issues](known_issues.md) * [Troubleshooting](troubleshooting.md) * [Development](development.md) diff --git a/docs/book/features/vsphere_csi_vmc.md b/docs/book/features/vsphere_csi_vmc.md new file mode 100644 index 0000000000..fd4bb76524 --- /dev/null +++ b/docs/book/features/vsphere_csi_vmc.md @@ -0,0 +1,35 @@ + + + +# vSphere CSI Driver - VMware Cloud on AWS (VMC) support + +- [Introduction](#introduction) +- [Deploy vSphere CSI driver on VMC](#deploy-csi-on-vmc) + +**Note:** Feature to support vSphere CSI driver on VMC is released with [v2.2.0](https://github.com/kubernetes-sigs/vsphere-csi-driver/releases/tag/v2.2.0). Currently, it only supports block volumes. The minimum SDDC version to support this feature is 1.12. + +## Introduction + +[VMware Cloud™ on AWS](https://cloud.vmware.com/vmc-aws) brings VMware’s enterprise-class SDDC software to the AWS Cloud with optimized access to AWS services. Powered by VMware Cloud Foundation, VMware Cloud on AWS integrates our compute, storage and network virtualization products (VMware vSphere®, vSAN™ and NSX®) along with VMware vCenter management, optimized to run on dedicated, elastic, bare-metal AWS infrastructure. + +VMware Cloud on AWS provides two vSAN datastores in each SDDC cluster: WorkloadDatastore, managed by the Cloud Administrator, and vsanDatastore, managed by VMware. The cloudadmin user does not have the privilege to create volumes on vsanDatastore and only has the privilege to create volumes on WorkloadDatastore. + +Without this feature, the cloudadmin user cannot provision PVs using the vSphere CSI driver on VMC because it has no privilege to create volumes on vsanDatastore. This feature enables the cloudadmin user to provision PVs using the vSphere CSI driver without specifying the datastore on which the volume is provisioned; a sample StorageClass is sketched below.
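For illustration only (the class name is a placeholder and the policy name is just the vSAN default; neither comes from this doc), such a StorageClass simply omits the `datastoreurl` parameter so CNS places the volume on a datastore the cloudadmin can write to:

```bash
# Hypothetical StorageClass for VMC: no datastoreurl parameter, only a
# storage policy, so the volume lands on a datastore the user may use.
cat <<EOF | kubectl apply -f -
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: vmc-block-sc
provisioner: csi.vsphere.vmware.com
parameters:
  storagepolicyname: "vSAN Default Storage Policy"
EOF
```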
+ +## Deploy vSphere CSI driver on VMC + +To deploy the vSphere CSI driver on VMC, please make sure to keep roles and privileges up to date as mentioned in the [roles and privileges requirement](https://vsphere-csi-driver.sigs.k8s.io/driver-deployment/prerequisites.html#roles_and_privileges). For the cloudadmin user, the CNS-DATASTORE role should only be assigned to WorkloadDatastore. + +Make sure to use the cloudadmin username and password in the `csi-vsphere.conf` file. This file will be used to create a Kubernetes secret for vSphere credentials, which is required to install the vSphere CSI driver. The following is a sample `csi-vsphere.conf` file used to deploy the vSphere CSI driver in a VMC environment. + +```bash +[Global] +cluster-id = "unique-kubernetes-cluster-id" + +[VirtualCenter "1.2.3.4"] +insecure-flag = "true" +user = "cloudadmin vcenter username" +password = "cloudadmin vcenter password" +port = "443" +datacenters = "list of comma separated datacenter paths where node VMs are present" +```
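As a usage sketch, assuming the conventional secret name and `kube-system` namespace used by vanilla deployments (not spelled out in this doc), the secret can then be created from that file with:

```bash
# Create the vSphere credentials secret from the conf file so the
# CSI controller can authenticate against the VMC vCenter.
kubectl create secret generic vsphere-config-secret \
  --from-file=csi-vsphere.conf \
  --namespace=kube-system
```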
diff --git a/manifests/dev/vsphere-7.0u1/vanilla/deploy/vsphere-csi-controller-deployment.yaml b/manifests/dev/vsphere-7.0u1/vanilla/deploy/vsphere-csi-controller-deployment.yaml index 9d75c2ed53..f46b571606 100644 --- a/manifests/dev/vsphere-7.0u1/vanilla/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/dev/vsphere-7.0u1/vanilla/deploy/vsphere-csi-controller-deployment.yaml @@ -170,7 +170,7 @@ spec: apiVersion: v1 data: "csi-migration": "false" # csi-migration feature is only available for vSphere 7.0U1 - "csi-auth-check": "false" + "csi-auth-check": "true" kind: ConfigMap metadata: name: internal-feature-states.csi.vsphere.vmware.com diff --git a/manifests/dev/vsphere-7.0u2/vanilla/deploy/vsphere-csi-controller-deployment.yaml b/manifests/dev/vsphere-7.0u2/vanilla/deploy/vsphere-csi-controller-deployment.yaml index b55383435a..d4ada8bced 100644 --- a/manifests/dev/vsphere-7.0u2/vanilla/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/dev/vsphere-7.0u2/vanilla/deploy/vsphere-csi-controller-deployment.yaml @@ -175,7 +175,7 @@ spec: apiVersion: v1 data: "csi-migration": "false" - "csi-auth-check": "false" + "csi-auth-check": "true" "online-volume-extend": "true" kind: ConfigMap metadata: From c43fffe47d7cb76df32a478891d3a8fb8ece3816 Mon Sep 17 00:00:00 2001 From: Liping Xue Date: Wed, 10 Mar 2021 13:51:45 -0800 Subject: [PATCH 17/36] Modify document to describe CSI on VMC support. --- docs/book/features/vsphere_csi_vmc.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/book/features/vsphere_csi_vmc.md b/docs/book/features/vsphere_csi_vmc.md index fd4bb76524..4cbd800638 100644 --- a/docs/book/features/vsphere_csi_vmc.md +++ b/docs/book/features/vsphere_csi_vmc.md @@ -6,7 +6,7 @@ - [Introduction](#introduction) - [Deploy vSphere CSI driver on VMC](#deploy-csi-on-vmc) -**Note:** Feature to support vSphere CSI driver on VMC is released with [v2.2.0](https://github.com/kubernetes-sigs/vsphere-csi-driver/releases/tag/v2.2.0). Currently, it only supports block volumes. The minimum SDDC version to support this feature is 1.12. +**Note:** Feature to support vSphere CSI driver on VMC will be released with v2.2.0. v2.2.0 vSphere CSI driver on VMC will only support block volumes. The minimum SDDC version to support this feature is 1.12. Please refer to the [VMC release notes](https://docs.vmware.com/en/VMware-Cloud-on-AWS/0/rn/vmc-on-aws-relnotes.html) to get more details. ## Introduction From 3eaf88dce2f12a3b8acd04a351dc459cefa3a091 Mon Sep 17 00:00:00 2001 From: S R Ashrith Date: Wed, 3 Mar 2021 13:27:23 +0530 Subject: [PATCH 18/36] fix regression caused by PR 664 --- tests/e2e/fullsync_test_for_block_volume.go | 8 ++++---- tests/e2e/labelupdates.go | 4 ++-- tests/e2e/util.go | 8 +++++++- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/tests/e2e/fullsync_test_for_block_volume.go b/tests/e2e/fullsync_test_for_block_volume.go index 8f5994c945..cbc44e1a95 100644 --- a/tests/e2e/fullsync_test_for_block_volume.go +++ b/tests/e2e/fullsync_test_for_block_volume.go @@ -186,7 +186,7 @@ var _ bool = ginkgo.Describe("[csi-block-vanilla] full-sync-test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Deleting FCD: %s", fcdID)) - err = deleteFcdWithRetriesForSpecificErr(ctx, fcdID, datastore.Reference(), []string{objOrItemNotFoundErr, disklibUnlinkErr}) + err = deleteFcdWithRetriesForSpecificErr(ctx, fcdID, datastore.Reference(), []string{disklibUnlinkErr}, []string{objOrItemNotFoundErr}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -352,7 +352,7 @@ var _ bool = ginkgo.Describe("[csi-block-vanilla] full-sync-test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Deleting FCD: %s", fcdID)) - err = deleteFcdWithRetriesForSpecificErr(ctx, fcdID, datastore.Reference(), []string{objOrItemNotFoundErr, disklibUnlinkErr}) + err = deleteFcdWithRetriesForSpecificErr(ctx, fcdID, datastore.Reference(), []string{disklibUnlinkErr}, []string{objOrItemNotFoundErr}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -631,7 +631,7 @@ var _ bool = ginkgo.Describe("[csi-block-vanilla] full-sync-test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Deleting FCD: %s", fcdID)) - err = deleteFcdWithRetriesForSpecificErr(ctx, fcdID, datastore.Reference(), []string{objOrItemNotFoundErr, disklibUnlinkErr}) + err = deleteFcdWithRetriesForSpecificErr(ctx, fcdID, datastore.Reference(), []string{disklibUnlinkErr}, []string{objOrItemNotFoundErr}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -698,7 +698,7 @@ var _ bool = ginkgo.Describe("[csi-block-vanilla] full-sync-test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Deleting FCD: %s", fcdID)) - err = deleteFcdWithRetriesForSpecificErr(ctx, fcdID, datastore.Reference(), []string{objOrItemNotFoundErr, disklibUnlinkErr}) + err = deleteFcdWithRetriesForSpecificErr(ctx, fcdID, datastore.Reference(), []string{disklibUnlinkErr}, []string{objOrItemNotFoundErr}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) diff --git a/tests/e2e/labelupdates.go b/tests/e2e/labelupdates.go index 5cb8add8ff..a5e18fa60b 100644 --- a/tests/e2e/labelupdates.go +++ b/tests/e2e/labelupdates.go @@ -436,7 +436,7 @@ var _ bool = ginkgo.Describe("[csi-block-vanilla] label-updates", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Deleting FCD: %s", fcdID)) - err = deleteFcdWithRetriesForSpecificErr(ctx, fcdID, datastore.Reference(), []string{objOrItemNotFoundErr, disklibUnlinkErr}) + err = deleteFcdWithRetriesForSpecificErr(ctx, fcdID, datastore.Reference(), []string{disklibUnlinkErr}, []string{objOrItemNotFoundErr}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Deleting the Storage Class") @@ -570,7 +570,7 @@ var _ bool = 
ginkgo.Describe("[csi-block-vanilla] label-updates", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Deleting FCD: %s", fcdID)) - err = deleteFcdWithRetriesForSpecificErr(ctx, fcdID, datastore.Reference(), []string{objOrItemNotFoundErr, disklibUnlinkErr}) + err = deleteFcdWithRetriesForSpecificErr(ctx, fcdID, datastore.Reference(), []string{disklibUnlinkErr}, []string{objOrItemNotFoundErr}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) diff --git a/tests/e2e/util.go b/tests/e2e/util.go index 8ea2ce052a..f8cd61f4e1 100644 --- a/tests/e2e/util.go +++ b/tests/e2e/util.go @@ -2214,7 +2214,7 @@ func getPersistentVolumeClaimSpecForFileShare(namespace string, labels map[strin } //deleteFcdWithRetriesForSpecificErr method to retry fcd deletion when a specific error is encountered -func deleteFcdWithRetriesForSpecificErr(ctx context.Context, fcdID string, dsRef types.ManagedObjectReference, errsToIgnore []string) error { +func deleteFcdWithRetriesForSpecificErr(ctx context.Context, fcdID string, dsRef types.ManagedObjectReference, errsToIgnore []string, errsToContinue []string) error { var err error waitErr := wait.PollImmediate(poll*15, pollTimeout, func() (bool, error) { framework.Logf("Trying to delete FCD: %s", fcdID) @@ -2227,6 +2227,12 @@ func deleteFcdWithRetriesForSpecificErr(ctx context.Context, fcdID string, dsRef return false, nil } } + for _, errToContinue := range errsToContinue { + if strings.Contains(err.Error(), errToContinue) { + framework.Logf("Hit error '%s' while trying to delete FCD: %s, will ignore this error(treat as success) and proceed to next steps...", err.Error(), fcdID) + return true, nil + } + } return false, err } return true, nil From 65fa896275200dabce7fbedd0ec740bcbdbc6b84 Mon Sep 17 00:00:00 2001 From: kavyashree-r Date: Tue, 23 Feb 2021 23:48:42 +0530 Subject: [PATCH 19/36] SVC Online volume expansion (1st set - 6 Tc's) --- tests/e2e/vsphere_volume_expansion.go | 296 +++++++++++++++++--------- 1 file changed, 197 insertions(+), 99 deletions(-) diff --git a/tests/e2e/vsphere_volume_expansion.go b/tests/e2e/vsphere_volume_expansion.go index e1c467eb55..109e32f75d 100644 --- a/tests/e2e/vsphere_volume_expansion.go +++ b/tests/e2e/vsphere_volume_expansion.go @@ -57,7 +57,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ) ginkgo.BeforeEach(func() { client = f.ClientSet - namespace = f.Namespace.Name + namespace = getNamespaceToRunTests(f) bootstrap() ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -80,6 +80,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) defaultDatastore = getDefaultDatastore(ctx) + storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) if guestCluster { svcClient, svNamespace := getSvcClientAndNamespace() @@ -87,12 +88,6 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { } }) - ginkgo.AfterEach(func() { - if guestCluster { - svcClient, svNamespace := getSvcClientAndNamespace() - setResourceQuota(svcClient, svNamespace, defaultrqLimit) - } - }) ginkgo.AfterEach(func() { var err error @@ -111,6 +106,15 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + if supervisorCluster { + ginkgo.By("Delete Resource quota") + deleteResourceQuota(client, namespace) + } + if guestCluster { + svcClient, svNamespace := getSvcClientAndNamespace() + setResourceQuota(svcClient, svNamespace, defaultrqLimit) + } + }) // Test to verify 
volume expansion is supported if allowVolumeExpansion @@ -250,14 +254,19 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { 10. Make sure file system has increased */ - ginkgo.It("[csi-block-vanilla] Verify online volume expansion on dynamic volume", func() { + ginkgo.It("[csi-block-vanilla] [csi-supervisor] Verify online volume expansion on dynamic volume", func() { ginkgo.By("Invoking Test for Volume Expansion") ctx, cancel := context.WithCancel(context.Background()) defer cancel() + var storageclass *storagev1.StorageClass + var pvclaim *v1.PersistentVolumeClaim + var pv *v1.PersistentVolume + var volHandle string ginkgo.By("Create StorageClass with allowVolumeExpansion set to true, Create PVC") sharedVSANDatastoreURL := GetAndExpectStringEnvVar(envSharedDatastoreURL) - volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC(f, client, sharedVSANDatastoreURL, namespace) + volHandle, pvclaim, pv, storageclass = createSCwithVolumeExpansionTrueAndDynamicPVC(f, client, sharedVSANDatastoreURL, storagePolicyName, namespace) + defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -268,7 +277,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { }() ginkgo.By("Create POD using the above PVC") - pod := createPODandVerifyVolumeMount(f, client, namespace, pvclaim, volHandle) + pod, vmUUID := createPODandVerifyVolumeMount(f, client, namespace, pvclaim, volHandle) defer func() { // Delete POD @@ -276,10 +285,15 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { err := fpod.DeletePodWithWait(client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Verify volume is detached from the node") - isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(client, pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(isDiskDetached).To(gomega.BeTrue(), fmt.Sprintf("Volume %q is not detached from the node %q", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)) + if supervisorCluster { + ginkgo.By(fmt.Sprintf("Verify volume: %s is detached from PodVM with vmUUID: %s", pv.Spec.CSI.VolumeHandle, vmUUID)) + _, err := e2eVSphere.getVMByUUIDWithWait(ctx, vmUUID, supervisorClusterOperationsTimeout) + gomega.Expect(err).To(gomega.HaveOccurred(), fmt.Sprintf("PodVM with vmUUID: %s still exists. So volume: %s is not detached from the PodVM", vmUUID, pv.Spec.CSI.VolumeHandle)) + } else { + isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(client, pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskDetached).To(gomega.BeTrue(), fmt.Sprintf("Volume %q is not detached from the node %q", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)) + } }() ginkgo.By("Increase PVC size and verify online volume resize") @@ -317,7 +331,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { }() ginkgo.By("Create POD") - pod := createPODandVerifyVolumeMount(f, client, namespace, pvclaim, volHandle) + pod, _ := createPODandVerifyVolumeMount(f, client, namespace, pvclaim, volHandle) defer func() { // Delete POD @@ -345,12 +359,14 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { 6. Modify PVC to be a smaller size. 7. Verify that the PVC size does not change because volume shrinking is not supported. 
*/ - ginkgo.It("[csi-block-vanilla] Verify online volume expansion shrinking volume not allowed", func() { + ginkgo.It("[csi-block-vanilla] [csi-supervisor] Verify online volume expansion shrinking volume not allowed", func() { ginkgo.By("Invoking Test for Volume Expansion") ctx, cancel := context.WithCancel(context.Background()) defer cancel() - volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC(f, client, "", namespace) + ginkgo.By("Create StorageClass with allowVolumeExpansion set to true, Create PVC") + volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC(f, client, "", storagePolicyName, namespace) + defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -361,7 +377,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { }() ginkgo.By("Create POD") - pod := createPODandVerifyVolumeMount(f, client, namespace, pvclaim, volHandle) + pod, vmUUID := createPODandVerifyVolumeMount(f, client, namespace, pvclaim, volHandle) defer func() { // Delete POD @@ -369,10 +385,15 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { err := fpod.DeletePodWithWait(client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Verify volume is detached from the node") - isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(client, pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(isDiskDetached).To(gomega.BeTrue(), fmt.Sprintf("Volume %q is not detached from the node %q", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)) + if supervisorCluster { + ginkgo.By(fmt.Sprintf("Verify volume: %s is detached from PodVM with vmUUID: %s", pv.Spec.CSI.VolumeHandle, vmUUID)) + _, err := e2eVSphere.getVMByUUIDWithWait(ctx, vmUUID, supervisorClusterOperationsTimeout) + gomega.Expect(err).To(gomega.HaveOccurred(), fmt.Sprintf("PodVM with vmUUID: %s still exists. So volume: %s is not detached from the PodVM", vmUUID, pv.Spec.CSI.VolumeHandle)) + } else { + isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(client, pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskDetached).To(gomega.BeTrue(), fmt.Sprintf("Volume %q is not detached from the node %q", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)) + } }() // Modify PVC spec to a smaller size @@ -399,13 +420,13 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { 8. Verify the PVC Size should increased by 10Gi 9. 
Make sure file system has increased */ - ginkgo.It("[csi-block-vanilla] Verify online volume expansion multiple times on the same PVC", func() { + ginkgo.It("[csi-block-vanilla] [csi-supervisor] Verify online volume expansion multiple times on the same PVC", func() { ginkgo.By("Invoking Test for Volume Expansion") ctx, cancel := context.WithCancel(context.Background()) defer cancel() - volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC(f, client, "", namespace) + volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC(f, client, "", storagePolicyName, namespace) defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -415,7 +436,8 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - pod := createPODandVerifyVolumeMount(f, client, namespace, pvclaim, volHandle) + ginkgo.By("Create POD") + pod, vmUUID := createPODandVerifyVolumeMount(f, client, namespace, pvclaim, volHandle) defer func() { // Delete POD @@ -423,12 +445,18 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { err := fpod.DeletePodWithWait(client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Verify volume is detached from the node") - isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(client, pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(isDiskDetached).To(gomega.BeTrue(), fmt.Sprintf("Volume %q is not detached from the node %q", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)) + if supervisorCluster { + ginkgo.By(fmt.Sprintf("Verify volume: %s is detached from PodVM with vmUUID: %s", pv.Spec.CSI.VolumeHandle, vmUUID)) + _, err := e2eVSphere.getVMByUUIDWithWait(ctx, vmUUID, supervisorClusterOperationsTimeout) + gomega.Expect(err).To(gomega.HaveOccurred(), fmt.Sprintf("PodVM with vmUUID: %s still exists. So volume: %s is not detached from the PodVM", vmUUID, pv.Spec.CSI.VolumeHandle)) + } else { + isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(client, pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskDetached).To(gomega.BeTrue(), fmt.Sprintf("Volume %q is not detached from the node %q", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)) + } }() + ginkgo.By("Increase PVC size and verify Volume resize") increaseOnlineVolumeMultipleTimes(ctx, f, client, namespace, volHandle, pvclaim, pod) }) @@ -447,13 +475,16 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { 11. 
Make sure file system has increased */ - ginkgo.It("[csi-block-vanilla] Verify online volume expansion when VSAN-health is down", func() { + ginkgo.It("[csi-block-vanilla] [csi-supervisor] Verify online volume expansion when VSAN-health is down", func() { ginkgo.By("Invoking Test for Volume Expansion") ctx, cancel := context.WithCancel(context.Background()) defer cancel() - volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC(f, client, "", namespace) + var originalSizeInMb, fsSize int64 + var err error + + volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC(f, client, "", storagePolicyName, namespace) defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -463,8 +494,12 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - pod := createPODandVerifyVolumeMount(f, client, namespace, pvclaim, volHandle) - originalFSSize, err := getFSSizeMb(f, pod) + ginkgo.By("Create POD") + pod, vmUUID := createPODandVerifyVolumeMount(f, client, namespace, pvclaim, volHandle) + + //Fetch original FileSystemSize + ginkgo.By("Verify filesystem size for mount point /mnt/volume1 before expansion") + originalSizeInMb, err = getFSSizeMb(f, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -473,10 +508,15 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { err := fpod.DeletePodWithWait(client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Verify volume is detached from the node") - isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(client, pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(isDiskDetached).To(gomega.BeTrue(), fmt.Sprintf("Volume %q is not detached from the node %q", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)) + if supervisorCluster { + ginkgo.By(fmt.Sprintf("Verify volume: %s is detached from PodVM with vmUUID: %s", pv.Spec.CSI.VolumeHandle, vmUUID)) + _, err := e2eVSphere.getVMByUUIDWithWait(ctx, vmUUID, supervisorClusterOperationsTimeout) + gomega.Expect(err).To(gomega.HaveOccurred(), fmt.Sprintf("PodVM with vmUUID: %s still exists. So volume: %s is not detached from the PodVM", vmUUID, pv.Spec.CSI.VolumeHandle)) + } else { + isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(client, pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskDetached).To(gomega.BeTrue(), fmt.Sprintf("Volume %q is not detached from the node %q", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)) + } }() ginkgo.By("Bring down Vsan-health service") @@ -506,7 +546,6 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ginkgo.By("File system resize should not succeed Since Vsan-health is down. 
Expect an error") expectedErrMsg := "503 Service Unavailable" framework.Logf("Expected failure message: %+q", expectedErrMsg) - err = waitForEvent(ctx, client, namespace, expectedErrMsg, pvclaim.Name) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -523,15 +562,13 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { expectEqual(len(pvcConditions), 0, "pvc should not have conditions") ginkgo.By("Verify filesystem size for mount point /mnt/volume1") - fsSize, err := getFSSizeMb(f, pod) + fsSize, err = getFSSizeMb(f, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Filesystem size may be smaller than the size of the block volume // so here we are checking if the new filesystem size is greater than // the original volume size as the filesystem is formatted for the // first time - if fsSize < originalFSSize { - framework.Failf("error updating filesystem size for %q. Resulting filesystem size is %d", pvclaim.Name, fsSize) - } + gomega.Expect(fsSize).Should(gomega.BeNumerically(">", originalSizeInMb), fmt.Sprintf("error updating filesystem size for %q. Resulting filesystem size is %d", pvclaim.Name, fsSize)) ginkgo.By("File system resize finished successfully") }) @@ -550,13 +587,16 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { 10. Make sure data is intact on the PV mounted on the pod 11. Make sure file system has increased */ - ginkgo.It("[csi-block-vanilla] Verify online volume expansion when SPS-Service is down ", func() { + ginkgo.It("[csi-block-vanilla] [csi-supervisor] Verify online volume expansion when SPS-Service is down ", func() { ginkgo.By("Invoking Test for Volume Expansion") ctx, cancel := context.WithCancel(context.Background()) defer cancel() - volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC(f, client, "", namespace) + var originalSizeInMb, fsSize int64 + var err error + + volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC(f, client, "", storagePolicyName, namespace) defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -566,8 +606,12 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - pod := createPODandVerifyVolumeMount(f, client, namespace, pvclaim, volHandle) - originalFSSize, err := getFSSizeMb(f, pod) + ginkgo.By("Create POD") + pod, vmUUID := createPODandVerifyVolumeMount(f, client, namespace, pvclaim, volHandle) + + //Fetch original FileSystemSize + ginkgo.By("Verify filesystem size for mount point /mnt/volume1 before expansion") + originalSizeInMb, err = getFSSizeMb(f, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -576,10 +620,15 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { err := fpod.DeletePodWithWait(client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Verify volume is detached from the node") - isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(client, pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(isDiskDetached).To(gomega.BeTrue(), fmt.Sprintf("Volume %q is not detached from the node %q", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)) + if supervisorCluster { + ginkgo.By(fmt.Sprintf("Verify volume: %s is detached from PodVM with vmUUID: %s", pv.Spec.CSI.VolumeHandle, vmUUID)) + _, err := e2eVSphere.getVMByUUIDWithWait(ctx, vmUUID, 
supervisorClusterOperationsTimeout) + gomega.Expect(err).To(gomega.HaveOccurred(), fmt.Sprintf("PodVM with vmUUID: %s still exists. So volume: %s is not detached from the PodVM", vmUUID, pv.Spec.CSI.VolumeHandle)) + } else { + isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(client, pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskDetached).To(gomega.BeTrue(), fmt.Sprintf("Volume %q is not detached from the node %q", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)) + } }() ginkgo.By("Bring down SPS service") @@ -625,15 +674,14 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { expectEqual(len(pvcConditions), 0, "pvc should not have conditions") ginkgo.By("Verify filesystem size for mount point /mnt/volume1") - fsSize, err := getFSSizeMb(f, pod) + fsSize, err = getFSSizeMb(f, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Filesystem size may be smaller than the size of the block volume // so here we are checking if the new filesystem size is greater than // the original volume size as the filesystem is formatted for the // first time - if fsSize < originalFSSize { - framework.Failf("error updating filesystem size for %q. Resulting filesystem size is %d mb", pvclaim.Name, fsSize) - } + gomega.Expect(fsSize).Should(gomega.BeNumerically(">", originalSizeInMb), fmt.Sprintf("error updating filesystem size for %q. Resulting filesystem size is %d", pvclaim.Name, fsSize)) + ginkgo.By("File system resize finished successfully") }) @@ -651,13 +699,17 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { 9. Make sure data is intact on the PV mounted on the pod 10. Make sure file system has increased */ - ginkgo.It("[csi-block-vanilla] Verify online volume expansion by updating PVC with different sizes concurrently", func() { + + ginkgo.It("[csi-block-vanilla] [csi-supervisor] Verify online volume expansion by updating PVC with different sizes concurrently", func() { + ginkgo.By("Invoking Test for Volume Expansion") ctx, cancel := context.WithCancel(context.Background()) defer cancel() + var originalSizeInMb, fsSize int64 + var err error ginkgo.By("Create StorageClass with allowVolumeExpansion set to true, Create PVC") - volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC(f, client, "", namespace) + volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC(f, client, "", storagePolicyName, namespace) defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -668,8 +720,11 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { }() ginkgo.By("Create POD using the above PVC") - pod := createPODandVerifyVolumeMount(f, client, namespace, pvclaim, volHandle) - originalFSSize, err := getFSSizeMb(f, pod) + pod, vmUUID := createPODandVerifyVolumeMount(f, client, namespace, pvclaim, volHandle) + + //Fetch original FileSystemSize + ginkgo.By("Verify filesystem size for mount point /mnt/volume1 before expansion") + originalSizeInMb, err = getFSSizeMb(f, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -678,10 +733,15 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { err := fpod.DeletePodWithWait(client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Verify volume is detached from the node") - isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(client, 
pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(isDiskDetached).To(gomega.BeTrue(), fmt.Sprintf("Volume %q is not detached from the node %q", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)) + if supervisorCluster { + ginkgo.By(fmt.Sprintf("Verify volume: %s is detached from PodVM with vmUUID: %s", pv.Spec.CSI.VolumeHandle, vmUUID)) + _, err := e2eVSphere.getVMByUUIDWithWait(ctx, vmUUID, supervisorClusterOperationsTimeout) + gomega.Expect(err).To(gomega.HaveOccurred(), fmt.Sprintf("PodVM with vmUUID: %s still exists. So volume: %s is not detached from the PodVM", vmUUID, pv.Spec.CSI.VolumeHandle)) + } else { + isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(client, pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskDetached).To(gomega.BeTrue(), fmt.Sprintf("Volume %q is not detached from the node %q", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)) + } }() ginkgo.By("Expanding current pvc") @@ -712,16 +772,16 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { expectEqual(len(pvcConditions), 0, "pvc should not have conditions") ginkgo.By("Verify filesystem size for mount point /mnt/volume1") - fsSize, err := getFSSizeMb(f, pod) - framework.Logf("FileSystemSize after PVC resize %d mb , FileSystemSize Before PVC resize %d mb ", fsSize, originalFSSize) + fsSize, err = getFSSizeMb(f, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("FileSystemSize after PVC resize %d mb , FileSystemSize Before PVC resize %d mb ", fsSize, originalSizeInMb) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Filesystem size may be smaller than the size of the block volume // so here we are checking if the new filesystem size is greater than // the original volume size as the filesystem is formatted for the // first time - if fsSize < originalFSSize { - framework.Failf("error updating filesystem size for %q. Resulting filesystem size is %d", pvclaim.Name, fsSize) - } + gomega.Expect(fsSize).Should(gomega.BeNumerically(">", originalSizeInMb), fmt.Sprintf("error updating filesystem size for %q. 
Resulting filesystem size is %d", pvclaim.Name, fsSize)) + ginkgo.By("File system resize finished successfully") pvcsize := pvclaim.Spec.Resources.Requests[v1.ResourceStorage] @@ -759,7 +819,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { } ginkgo.By("Create StorageClass on shared VVOL datastore with allowVolumeExpansion set to true, Create PVC") - volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC(f, client, sharedVVOLdatastoreURL, namespace) + volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC(f, client, sharedVVOLdatastoreURL, "", namespace) defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -770,7 +830,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { }() ginkgo.By("Create POD using the above PVC") - pod := createPODandVerifyVolumeMount(f, client, namespace, pvclaim, volHandle) + pod, _ := createPODandVerifyVolumeMount(f, client, namespace, pvclaim, volHandle) defer func() { // Delete POD @@ -786,7 +846,6 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ginkgo.By("Increase PVC size and verify online volume resize") increaseSizeOfPvcAttachedToPod(f, client, namespace, pvclaim, pod) - }) /* @@ -813,7 +872,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { } ginkgo.By("Create StorageClass on shared NFS datastore with allowVolumeExpansion set to true") - volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC(f, client, sharedNFSdatastoreURL, namespace) + volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC(f, client, sharedNFSdatastoreURL, "", namespace) defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -824,7 +883,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { }() ginkgo.By("Create POD using the above PVC") - pod := createPODandVerifyVolumeMount(f, client, namespace, pvclaim, volHandle) + pod, _ := createPODandVerifyVolumeMount(f, client, namespace, pvclaim, volHandle) defer func() { // Delete POD @@ -865,8 +924,9 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { if sharedVMFSdatastoreURL == "" { ginkgo.Skip("Skipping the test because SHARED_VMFS_DATASTORE_URL is not set. 
This may be because the testbed does not have a shared VMFS datastore.") } + ginkgo.By("Create StorageClass on shared VMFS datastore with allowVolumeExpansion set to true") - volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC(f, client, sharedVMFSdatastoreURL, namespace) + volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC(f, client, sharedVMFSdatastoreURL, "", namespace) defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -877,7 +937,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { }() ginkgo.By("Create POD using the above PVC") - pod := createPODandVerifyVolumeMount(f, client, namespace, pvclaim, volHandle) + pod, _ := createPODandVerifyVolumeMount(f, client, namespace, pvclaim, volHandle) defer func() { // Delete POD @@ -901,8 +961,11 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { //increaseOnlineVolumeMultipleTimes this method increases the same volume multiple times and verifies PVC and Filesystem size func increaseOnlineVolumeMultipleTimes(ctx context.Context, f *framework.Framework, client clientset.Interface, namespace string, volHandle string, pvclaim *v1.PersistentVolumeClaim, pod *v1.Pod) { - //Get original FileSystem size - originalSizeInMb, err := getFSSizeMb(f, pod) + var originalSizeInMb, fsSize int64 + var err error + //Fetch original FileSystemSize + ginkgo.By("Verify filesystem size for mount point /mnt/volume1 before expansion") + originalSizeInMb, err = getFSSizeMb(f, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Modify PVC spec to trigger volume expansion @@ -956,14 +1019,13 @@ func increaseOnlineVolumeMultipleTimes(ctx context.Context, f *framework.Framewo expectEqual(len(pvcConditions), 0, "pvc should not have conditions") ginkgo.By("Verify filesystem size for mount point /mnt/volume1") - fsSize, err := getFSSizeMb(f, pod) + fsSize, err = getFSSizeMb(f, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Filesystem size may be smaller than the size of the block volume // so here we are checking if the new filesystem size is greater than // the original volume size as the filesystem is formatted. - if fsSize < originalSizeInMb { - framework.Failf("error updating filesystem size for %q. Resulting filesystem size is %d mb", pvclaim.Name, fsSize) - } + gomega.Expect(fsSize).Should(gomega.BeNumerically(">", originalSizeInMb), fmt.Sprintf("error updating filesystem size for %q. Resulting filesystem size is %d", pvclaim.Name, fsSize)) + framework.Logf("File system resize finished successfully %d mb", fsSize) } @@ -1023,21 +1085,35 @@ func createStaticPVC(ctx context.Context, f *framework.Framework, client clients } //createSCwithVolumeExpansionTrueAndDynamicPVC creates storageClass with allowVolumeExpansion set to true and Creates PVC. Waits till PV, PVC are in bound 
Waits till PV, PVC are in bound -func createSCwithVolumeExpansionTrueAndDynamicPVC(f *framework.Framework, client clientset.Interface, dsurl string, namespace string) (string, *v1.PersistentVolumeClaim, *v1.PersistentVolume, *storagev1.StorageClass) { +func createSCwithVolumeExpansionTrueAndDynamicPVC(f *framework.Framework, client clientset.Interface, dsurl string, storagePolicyName string, namespace string) (string, *v1.PersistentVolumeClaim, *v1.PersistentVolume, *storagev1.StorageClass) { scParameters := make(map[string]string) scParameters[scParamFsType] = ext4FSType - if dsurl != "" { - scParameters[scParamDatastoreURL] = dsurl - } - // Create Storage class and PVC ginkgo.By("Creating Storage Class and PVC with allowVolumeExpansion = true") var storageclass *storagev1.StorageClass var pvclaim *v1.PersistentVolumeClaim var err error - storageclass, pvclaim, err = createPVCAndStorageClass(client, namespace, nil, scParameters, "", nil, "", true, "") + if vanillaCluster { + if dsurl != "" { + scParameters[scParamDatastoreURL] = dsurl + } + ginkgo.By("CNS_TEST: Running for vanilla k8s setup") + storageclass, pvclaim, err = createPVCAndStorageClass(client, namespace, nil, scParameters, "", nil, "", true, "") + } else if supervisorCluster { + ginkgo.By("CNS_TEST: Running for WCP setup") + framework.Logf("storagePolicyName: %s", storagePolicyName) + profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName) + scParameters[scParamStoragePolicyID] = profileID + // create resource quota + createResourceQuota(client, namespace, rqLimit, storagePolicyName) + storageclass, pvclaim, err = createPVCAndStorageClass(client, namespace, nil, scParameters, "", nil, "", true, "", storagePolicyName) + } else { + ginkgo.By("CNS_TEST: Running for GC setup") + scParameters[svStorageClassName] = storagePolicyName + storageclass, pvclaim, err = createPVCAndStorageClass(client, namespace, nil, scParameters, "", nil, "", true, "") + } gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Waiting for PVC to be bound @@ -1054,16 +1130,26 @@ func createSCwithVolumeExpansionTrueAndDynamicPVC(f *framework.Framework, client } //createPODandVerifyVolumeMount this method creates POD and verifies VolumeMount -func createPODandVerifyVolumeMount(f *framework.Framework, client clientset.Interface, namespace string, pvclaim *v1.PersistentVolumeClaim, volHandle string) *v1.Pod { +func createPODandVerifyVolumeMount(f *framework.Framework, client clientset.Interface, namespace string, pvclaim *v1.PersistentVolumeClaim, volHandle string) (*v1.Pod, string) { // Create a POD to use this PVC, and verify volume has been attached ginkgo.By("Creating pod to attach PV to the node") pod, err := createPod(client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, execCommand) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + var exists bool var vmUUID string ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle, pod.Spec.NodeName)) - vmUUID = getNodeUUID(client, pod.Spec.NodeName) - + if vanillaCluster { + vmUUID = getNodeUUID(client, pod.Spec.NodeName) + } else if guestCluster { + vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + annotations := pod.Annotations + vmUUID, exists = annotations[vmUUIDLabel] + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) + } + framework.Logf("VMUUID : %s", vmUUID) isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID) 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node volHandle: %s, vmUUID: %s", volHandle, vmUUID) @@ -1072,14 +1158,16 @@ func createPODandVerifyVolumeMount(f *framework.Framework, client clientset.Inte _, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, "", time.Minute) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - return pod + return pod, vmUUID } //increaseSizeOfPvcAttachedToPod this method increases the PVC size, which is attached to POD func increaseSizeOfPvcAttachedToPod(f *framework.Framework, client clientset.Interface, namespace string, pvclaim *v1.PersistentVolumeClaim, pod *v1.Pod) { - + var originalSizeInMb int64 + var err error //Fetch original FileSystemSize - originalSizeInMb, err := getFSSizeMb(f, pod) + ginkgo.By("Verify filesystem size for mount point /mnt/volume1 before expansion") + originalSizeInMb, err = getFSSizeMb(f, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) //resize PVC @@ -1100,16 +1188,16 @@ func increaseSizeOfPvcAttachedToPod(f *framework.Framework, client clientset.Int pvcConditions := pvclaim.Status.Conditions expectEqual(len(pvcConditions), 0, "pvc should not have conditions") + var fsSize int64 ginkgo.By("Verify filesystem size for mount point /mnt/volume1") - fsSize, err := getFSSizeMb(f, pod) + fsSize, err = getFSSizeMb(f, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("File system size after expansion : %d", fsSize) // Filesystem size may be smaller than the size of the block volume // so here we are checking if the new filesystem size is greater than // the original volume size as the filesystem is formatted for the // first time - if fsSize < originalSizeInMb { - framework.Failf("error updating filesystem size for %q. Resulting filesystem size is %d mb", pvclaim.Name, fsSize) - } + gomega.Expect(fsSize).Should(gomega.BeNumerically(">", originalSizeInMb), fmt.Sprintf("error updating filesystem size for %q. Resulting filesystem size is %d", pvclaim.Name, fsSize)) ginkgo.By("File system resize finished successfully") } @@ -1984,10 +2072,20 @@ func waitForFSResize(pvc *v1.PersistentVolumeClaim, c clientset.Interface) (*v1. // getFSSizeMb returns filesystem size in Mb func getFSSizeMb(f *framework.Framework, pod *v1.Pod) (int64, error) { - output, err := storage_utils.PodExec(f, pod, "df -T -m | grep /mnt/volume1") - if err != nil { - return -1, fmt.Errorf("unable to find mount path via `df -T`: %v", err) + var output string + var err error + if supervisorCluster { + namespace := getNamespaceToRunTests(f) + cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "df -Tkm | grep /mnt/volume1"} + output = framework.RunKubectlOrDie(namespace, cmd...) + gomega.Expect(strings.Contains(output, ext4FSType)).NotTo(gomega.BeFalse()) + } else { + output, err = storage_utils.PodExec(f, pod, "df -T -m | grep /mnt/volume1") + if err != nil { + return -1, fmt.Errorf("unable to find mount path via `df -T`: %v", err) + } } + arrMountOut := strings.Fields(string(output)) if len(arrMountOut) <= 0 { return -1, fmt.Errorf("error when parsing output of `df -T`. 
output: %s", string(output)) From 3ff7306b2173b430d52494853a8889821672de5a Mon Sep 17 00:00:00 2001 From: kavyashree-r Date: Sun, 21 Feb 2021 14:59:44 +0530 Subject: [PATCH 20/36] SVC offline volume expansion --- tests/e2e/vsphere_volume_expansion.go | 135 +++++++++++++++++++++----- 1 file changed, 110 insertions(+), 25 deletions(-) diff --git a/tests/e2e/vsphere_volume_expansion.go b/tests/e2e/vsphere_volume_expansion.go index 109e32f75d..011a02b5f5 100644 --- a/tests/e2e/vsphere_volume_expansion.go +++ b/tests/e2e/vsphere_volume_expansion.go @@ -65,6 +65,9 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { isVsanhealthServiceStopped = false isSPSServiceStopped = false + storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) + profileID = e2eVSphere.GetSpbmPolicyID(storagePolicyName) + nodeList, err := fnodes.GetReadySchedulableNodes(f.ClientSet) framework.ExpectNoError(err, "Unable to find ready and schedulable Node") if !(len(nodeList.Items) > 0) { @@ -133,7 +136,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { // 9. Delete pod and Wait for Volume Disk to be detached from the Node. // 10. Delete PVC, PV and Storage Class. - ginkgo.It("[csi-block-vanilla] [csi-guest] Verify volume expansion with no filesystem before expansion", func() { + ginkgo.It("[csi-block-vanilla] [csi-supervisor] [csi-guest] Verify volume expansion with no filesystem before expansion", func() { invokeTestForVolumeExpansion(f, client, namespace, "", storagePolicyName, profileID) }) @@ -186,7 +189,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { // 5. Modify PVC's size to a smaller size. // 6. Verify if the PVC expansion fails. - ginkgo.It("[csi-block-vanilla] [csi-guest] Verify volume shrinking not allowed", func() { + ginkgo.It("[csi-block-vanilla] [csi-guest] [csi-supervisor] Verify volume shrinking not allowed", func() { invokeTestForInvalidVolumeShrink(f, client, namespace, storagePolicyName, profileID) }) @@ -222,7 +225,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { // 9. Delete pod and Wait for Volume Disk to be detached from the Node. // 10. Delete PVC, PV and Storage Class. 
- ginkgo.It("[csi-block-vanilla] [csi-guest] Verify volume expansion can happen multiple times", func() { + ginkgo.It("[csi-block-vanilla] [csi-guest] [csi-supervisor] Verify volume expansion can happen multiple times", func() { invokeTestForExpandVolumeMultipleTimes(f, client, namespace, "", storagePolicyName, profileID) }) @@ -1215,11 +1218,18 @@ func invokeTestForVolumeExpansion(f *framework.Framework, client clientset.Inter // Create a StorageClass that sets allowVolumeExpansion to true if guestCluster { - storagePolicyNameForSharedDatastores := GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) - scParameters[svStorageClassName] = storagePolicyNameForSharedDatastores + scParameters[svStorageClassName] = storagePolicyName + storageclass, pvclaim, err = createPVCAndStorageClass(client, namespace, nil, scParameters, "", nil, "", true, "") + } else if supervisorCluster { + scParameters[scParamStoragePolicyID] = profileID + // create resource quota + createResourceQuota(client, namespace, rqLimit, storagePolicyName) + storageclass, pvclaim, err = createPVCAndStorageClass(client, namespace, nil, scParameters, "", nil, "", true, "", storagePolicyName) + } else if vanillaCluster { + storageclass, pvclaim, err = createPVCAndStorageClass(client, namespace, nil, scParameters, "", nil, "", true, "") } - storageclass, pvclaim, err = createPVCAndStorageClass(client, namespace, nil, scParameters, "", nil, "", true, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1227,6 +1237,10 @@ func invokeTestForVolumeExpansion(f *framework.Framework, client clientset.Inter defer func() { err := fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if supervisorCluster { + ginkgo.By("Delete Resource quota") + deleteResourceQuota(client, namespace) + } }() // Waiting for PVC to be bound @@ -1311,13 +1325,26 @@ func invokeTestForVolumeExpansion(f *framework.Framework, client clientset.Inter pod, err := createPod(client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, execCommand) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + ginkgo.By("Deleting the pod") + err = fpod.DeletePodWithWait(client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + var vmUUID string + var exists bool ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle, pod.Spec.NodeName)) - vmUUID = getNodeUUID(client, pod.Spec.NodeName) - if guestCluster { + if vanillaCluster { + vmUUID = getNodeUUID(client, pod.Spec.NodeName) + } else if guestCluster { vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + annotations := pod.Annotations + vmUUID, exists = annotations[vmUUIDLabel] + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) } + framework.Logf("VMUUID : %s", vmUUID) isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") @@ -1333,9 +1360,13 @@ func invokeTestForVolumeExpansion(f *framework.Framework, client clientset.Inter pvcConditions := pvclaim.Status.Conditions expectEqual(len(pvcConditions), 0, "pvc should not have conditions") + var fsSize int64 + 
ginkgo.By("Verify filesystem size for mount point /mnt/volume1") - fsSize, err := getFSSizeMb(f, pod) + fsSize, err = getFSSizeMb(f, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("File system size after expansion : %s", fsSize) + // Filesystem size may be smaller than the size of the block volume // so here we are checking if the new filesystem size is greater than // the original volume size as the filesystem is formatted for the @@ -1356,9 +1387,16 @@ func invokeTestForVolumeExpansion(f *framework.Framework, client clientset.Inter gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify volume is detached from the node") - isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(client, pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(isDiskDetached).To(gomega.BeTrue(), fmt.Sprintf("Volume %q is not detached from the node %q", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)) + if supervisorCluster { + ginkgo.By(fmt.Sprintf("Verify volume: %s is detached from PodVM with vmUUID: %s", pv.Spec.CSI.VolumeHandle, vmUUID)) + _, err := e2eVSphere.getVMByUUIDWithWait(ctx, vmUUID, supervisorClusterOperationsTimeout) + gomega.Expect(err).To(gomega.HaveOccurred(), fmt.Sprintf("PodVM with vmUUID: %s still exists. So volume: %s is not detached from the PodVM", vmUUID, pv.Spec.CSI.VolumeHandle)) + } else { + isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(client, pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskDetached).To(gomega.BeTrue(), fmt.Sprintf("Volume %q is not detached from the node %q", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)) + } + } func invokeTestForVolumeExpansionWithFilesystem(f *framework.Framework, client clientset.Interface, namespace string, expectedContent string, storagePolicyName string, profileID string) { @@ -1612,10 +1650,17 @@ func invokeTestForInvalidVolumeShrink(f *framework.Framework, client clientset.I // Create a StorageClass that sets allowVolumeExpansion to true if guestCluster { - storagePolicyNameForSharedDatastores := GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) - scParameters[svStorageClassName] = storagePolicyNameForSharedDatastores + scParameters[svStorageClassName] = storagePolicyName + storageclass, pvclaim, err = createPVCAndStorageClass(client, namespace, nil, scParameters, "", nil, "", true, "") + } else if supervisorCluster { + scParameters[scParamStoragePolicyID] = profileID + // create resource quota + createResourceQuota(client, namespace, rqLimit, storagePolicyName) + storageclass, pvclaim, err = createPVCAndStorageClass(client, namespace, nil, scParameters, "", nil, "", true, "", storagePolicyName) + } else if vanillaCluster { + storageclass, pvclaim, err = createPVCAndStorageClass(client, namespace, nil, scParameters, "", nil, "", true, "") } - storageclass, pvclaim, err = createPVCAndStorageClass(client, namespace, nil, scParameters, "", nil, "", true, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) @@ -1624,6 +1669,10 @@ func invokeTestForInvalidVolumeShrink(f *framework.Framework, client clientset.I defer func() { err := fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if supervisorCluster { + ginkgo.By("Delete Resource quota") + deleteResourceQuota(client, 
namespace) + } }() // Waiting for PVC to be bound @@ -1784,10 +1833,16 @@ func invokeTestForExpandVolumeMultipleTimes(f *framework.Framework, client clien // Create a StorageClass that sets allowVolumeExpansion to true if guestCluster { - storagePolicyNameForSharedDatastores := GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) - scParameters[svStorageClassName] = storagePolicyNameForSharedDatastores + scParameters[svStorageClassName] = storagePolicyName + storageclass, pvclaim, err = createPVCAndStorageClass(client, namespace, nil, scParameters, "", nil, "", true, "") + } else if supervisorCluster { + scParameters[scParamStoragePolicyID] = profileID + // create resource quota + createResourceQuota(client, namespace, rqLimit, storagePolicyName) + storageclass, pvclaim, err = createPVCAndStorageClass(client, namespace, nil, scParameters, "", nil, "", true, "", storagePolicyName) + } else if vanillaCluster { + storageclass, pvclaim, err = createPVCAndStorageClass(client, namespace, nil, scParameters, "", nil, "", true, "") } - storageclass, pvclaim, err = createPVCAndStorageClass(client, namespace, nil, scParameters, "", nil, "", true, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) @@ -1796,6 +1851,11 @@ func invokeTestForExpandVolumeMultipleTimes(f *framework.Framework, client clien defer func() { err := fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + if supervisorCluster { + ginkgo.By("Delete Resource quota") + deleteResourceQuota(client, namespace) + } }() // Waiting for PVC to be bound @@ -1884,13 +1944,27 @@ func invokeTestForExpandVolumeMultipleTimes(f *framework.Framework, client clien pod, err := createPod(client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, execCommand) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + ginkgo.By("Deleting the pod") + err = fpod.DeletePodWithWait(client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + var vmUUID string - ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)) - vmUUID = getNodeUUID(client, pod.Spec.NodeName) - if guestCluster { + var exists bool + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle, pod.Spec.NodeName)) + if vanillaCluster { + vmUUID = getNodeUUID(client, pod.Spec.NodeName) + } else if guestCluster { vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + annotations := pod.Annotations + vmUUID, exists = annotations[vmUUIDLabel] + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) } + framework.Logf("VMUUID : %s", vmUUID) + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") @@ -1906,9 +1980,13 @@ func invokeTestForExpandVolumeMultipleTimes(f *framework.Framework, client clien pvcConditions := pvclaim.Status.Conditions expectEqual(len(pvcConditions), 0, "pvc should not have conditions") + var fsSize int64 + ginkgo.By("Verify filesystem size for mount point /mnt/volume1") - fsSize, err := getFSSizeMb(f, pod) + fsSize, err = getFSSizeMb(f, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + 
framework.Logf("File system size after expansion : %s", fsSize) + // Filesystem size may be smaller than the size of the block volume // so here we are checking if the new filesystem size is greater than // the original volume size as the filesystem is formatted for the @@ -1928,9 +2006,16 @@ func invokeTestForExpandVolumeMultipleTimes(f *framework.Framework, client clien gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify volume is detached from the node") - isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(client, pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(isDiskDetached).To(gomega.BeTrue(), fmt.Sprintf("Volume %q is not detached from the node %q", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)) + if supervisorCluster { + ginkgo.By(fmt.Sprintf("Verify volume: %s is detached from PodVM with vmUUID: %s", pv.Spec.CSI.VolumeHandle, vmUUID)) + _, err := e2eVSphere.getVMByUUIDWithWait(ctx, vmUUID, supervisorClusterOperationsTimeout) + gomega.Expect(err).To(gomega.HaveOccurred(), fmt.Sprintf("PodVM with vmUUID: %s still exists. So volume: %s is not detached from the PodVM", vmUUID, pv.Spec.CSI.VolumeHandle)) + } else { + isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(client, pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskDetached).To(gomega.BeTrue(), fmt.Sprintf("Volume %q is not detached from the node %q", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)) + } + } func invokeTestForUnsupportedFileVolumeExpansion(f *framework.Framework, client clientset.Interface, namespace string, storagePolicyName string, profileID string) { From 380d824399127ee2414b6e0d3d724fcadfb66ce7 Mon Sep 17 00:00:00 2001 From: Shalini Bhaskara Date: Thu, 11 Mar 2021 15:59:32 -0800 Subject: [PATCH 21/36] Logout VC client if we encounter error while connecting --- pkg/common/cns-lib/vsphere/virtualcenter.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pkg/common/cns-lib/vsphere/virtualcenter.go b/pkg/common/cns-lib/vsphere/virtualcenter.go index 1e9ba4118e..83e1b43543 100644 --- a/pkg/common/cns-lib/vsphere/virtualcenter.go +++ b/pkg/common/cns-lib/vsphere/virtualcenter.go @@ -211,6 +211,14 @@ func (vc *VirtualCenter) Connect(ctx context.Context) error { err := vc.connect(ctx, false) if err != nil { log.Errorf("Cannot connect to vCenter with err: %v", err) + // Logging out of the current session to make sure we + // retry creating a new client in the next attempt + defer func() { + logoutErr := vc.Client.Logout(ctx) + if logoutErr != nil { + log.Errorf("Could not logout of VC session. 
Error: %v", logoutErr) + } + }() } return err } From e252c0eeb64cc63c5013aa1ad84907033607dc7d Mon Sep 17 00:00:00 2001 From: Chethan Venkatesh Date: Sat, 13 Mar 2021 10:56:04 -0800 Subject: [PATCH 22/36] Update rbac yamls for vSphere 7.0u1 --- .../vsphere-7.0u1/vanilla/rbac/vsphere-csi-controller-rbac.yaml | 2 +- .../v2.1.0/vsphere-7.0u1/rbac/vsphere-csi-controller-rbac.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/manifests/dev/vsphere-7.0u1/vanilla/rbac/vsphere-csi-controller-rbac.yaml b/manifests/dev/vsphere-7.0u1/vanilla/rbac/vsphere-csi-controller-rbac.yaml index c78a814e47..1e240bee3d 100644 --- a/manifests/dev/vsphere-7.0u1/vanilla/rbac/vsphere-csi-controller-rbac.yaml +++ b/manifests/dev/vsphere-7.0u1/vanilla/rbac/vsphere-csi-controller-rbac.yaml @@ -35,7 +35,7 @@ rules: verbs: ["create", "get", "list", "watch", "update", "delete"] - apiGroups: ["apiextensions.k8s.io"] resources: ["customresourcedefinitions"] - verbs: ["get", "create"] + verbs: ["get", "create", "update"] - apiGroups: ["storage.k8s.io"] resources: ["volumeattachments/status"] verbs: ["patch"] diff --git a/manifests/v2.1.0/vsphere-7.0u1/rbac/vsphere-csi-controller-rbac.yaml b/manifests/v2.1.0/vsphere-7.0u1/rbac/vsphere-csi-controller-rbac.yaml index c78a814e47..1e240bee3d 100644 --- a/manifests/v2.1.0/vsphere-7.0u1/rbac/vsphere-csi-controller-rbac.yaml +++ b/manifests/v2.1.0/vsphere-7.0u1/rbac/vsphere-csi-controller-rbac.yaml @@ -35,7 +35,7 @@ rules: verbs: ["create", "get", "list", "watch", "update", "delete"] - apiGroups: ["apiextensions.k8s.io"] resources: ["customresourcedefinitions"] - verbs: ["get", "create"] + verbs: ["get", "create", "update"] - apiGroups: ["storage.k8s.io"] resources: ["volumeattachments/status"] verbs: ["patch"] From 61b4355a93b72da84b92a6ab92ed1201540d62b4 Mon Sep 17 00:00:00 2001 From: Divyen Patel Date: Sun, 14 Mar 2021 22:00:42 -0700 Subject: [PATCH 23/36] prepared deployment yamls for v2.2.0-rc-1 --- .../vsphere-7.0/guestcluster/1.15/pvcsi.yaml | 365 --------------- .../vsphere-7.0/guestcluster/1.16/pvcsi.yaml | 386 ---------------- .../vsphere-csi-controller-deployment.yaml | 160 ------- .../rbac/vsphere-csi-controller-rbac.yaml | 64 --- .../guestcluster/1.15/pvcsi.yaml | 359 --------------- .../guestcluster/1.16/pvcsi.yaml | 384 ---------------- .../guestcluster/1.17/pvcsi.yaml | 383 ---------------- .../vsphere-csi-controller-deployment.yaml | 160 ------- .../rbac/vsphere-csi-controller-rbac.yaml | 64 --- .../vsphere-csi-controller-deployment.yaml | 189 -------- .../rbac/vsphere-csi-controller-rbac.yaml | 68 --- .../guestcluster/1.17/pvcsi.yaml | 434 ------------------ .../guestcluster/1.18/pvcsi.yaml | 434 ------------------ .../guestcluster/1.19/pvcsi.yaml | 434 ------------------ .../1.17/vsphere-csi-controller.yaml | 324 ------------- .../1.18/vsphere-csi-controller.yaml | 324 ------------- .../1.19/vsphere-csi-controller.yaml | 334 -------------- .../vsphere-csi-controller-deployment.yaml | 18 +- .../deploy/vsphere-csi-node-ds.yaml | 43 +- .../rbac/vsphere-csi-controller-rbac.yaml | 0 .../rbac/vsphere-csi-node-rbac.yaml | 0 .../vsphere-csi-controller-deployment.yaml | 22 +- .../deploy/vsphere-csi-node-ds.yaml | 41 +- .../rbac/vsphere-csi-controller-rbac.yaml | 0 .../rbac/vsphere-csi-node-rbac.yaml | 0 .../deploy/create-validation-webhook.sh | 0 .../deploy/generate-signed-webhook-certs.sh | 0 .../deploy/validatingwebhook.yaml | 4 +- .../vsphere-csi-controller-deployment.yaml | 10 +- .../deploy/vsphere-csi-node-ds.yaml | 6 +- 
.../rbac/vsphere-csi-controller-rbac.yaml | 0 .../deploy/create-validation-webhook.sh | 0 .../deploy/generate-signed-webhook-certs.sh | 0 .../deploy/validatingwebhook.yaml | 4 +- .../vsphere-csi-controller-deployment.yaml | 6 +- .../deploy/vsphere-csi-node-ds.yaml | 6 +- .../rbac/vsphere-csi-controller-rbac.yaml | 0 .../rbac/vsphere-csi-node-rbac.yaml | 0 38 files changed, 88 insertions(+), 4938 deletions(-) delete mode 100644 manifests/dev/vsphere-7.0/guestcluster/1.15/pvcsi.yaml delete mode 100644 manifests/dev/vsphere-7.0/guestcluster/1.16/pvcsi.yaml delete mode 100644 manifests/dev/vsphere-7.0/supervisorcluster/k8s-1.15/deploy/vsphere-csi-controller-deployment.yaml delete mode 100644 manifests/dev/vsphere-7.0/supervisorcluster/k8s-1.15/rbac/vsphere-csi-controller-rbac.yaml delete mode 100644 manifests/dev/vsphere-7.0u1/guestcluster/1.15/pvcsi.yaml delete mode 100644 manifests/dev/vsphere-7.0u1/guestcluster/1.16/pvcsi.yaml delete mode 100644 manifests/dev/vsphere-7.0u1/guestcluster/1.17/pvcsi.yaml delete mode 100644 manifests/dev/vsphere-7.0u1/supervisorcluster/k8s-1.15/deploy/vsphere-csi-controller-deployment.yaml delete mode 100644 manifests/dev/vsphere-7.0u1/supervisorcluster/k8s-1.15/rbac/vsphere-csi-controller-rbac.yaml delete mode 100644 manifests/dev/vsphere-7.0u1/supervisorcluster/k8s-1.16/deploy/vsphere-csi-controller-deployment.yaml delete mode 100644 manifests/dev/vsphere-7.0u1/supervisorcluster/k8s-1.16/rbac/vsphere-csi-controller-rbac.yaml delete mode 100644 manifests/dev/vsphere-7.0u2/guestcluster/1.17/pvcsi.yaml delete mode 100644 manifests/dev/vsphere-7.0u2/guestcluster/1.18/pvcsi.yaml delete mode 100644 manifests/dev/vsphere-7.0u2/guestcluster/1.19/pvcsi.yaml delete mode 100644 manifests/dev/vsphere-7.0u2/supervisorcluster/1.17/vsphere-csi-controller.yaml delete mode 100644 manifests/dev/vsphere-7.0u2/supervisorcluster/1.18/vsphere-csi-controller.yaml delete mode 100644 manifests/dev/vsphere-7.0u2/supervisorcluster/1.19/vsphere-csi-controller.yaml rename manifests/{dev/vsphere-67u3/vanilla => v2.2.0/vsphere-67u3}/deploy/vsphere-csi-controller-deployment.yaml (91%) rename manifests/{dev/vsphere-7.0/vanilla => v2.2.0/vsphere-67u3}/deploy/vsphere-csi-node-ds.yaml (80%) rename manifests/{dev/vsphere-67u3/vanilla => v2.2.0/vsphere-67u3}/rbac/vsphere-csi-controller-rbac.yaml (100%) rename manifests/{dev/vsphere-67u3/vanilla => v2.2.0/vsphere-67u3}/rbac/vsphere-csi-node-rbac.yaml (100%) rename manifests/{dev/vsphere-7.0/vanilla => v2.2.0/vsphere-7.0}/deploy/vsphere-csi-controller-deployment.yaml (91%) rename manifests/{dev/vsphere-67u3/vanilla => v2.2.0/vsphere-7.0}/deploy/vsphere-csi-node-ds.yaml (82%) rename manifests/{dev/vsphere-7.0/vanilla => v2.2.0/vsphere-7.0}/rbac/vsphere-csi-controller-rbac.yaml (100%) rename manifests/{dev/vsphere-7.0/vanilla => v2.2.0/vsphere-7.0}/rbac/vsphere-csi-node-rbac.yaml (100%) rename manifests/{dev/vsphere-7.0u1/vanilla => v2.2.0/vsphere-7.0u1}/deploy/create-validation-webhook.sh (100%) rename manifests/{dev/vsphere-7.0u1/vanilla => v2.2.0/vsphere-7.0u1}/deploy/generate-signed-webhook-certs.sh (100%) rename manifests/{dev/vsphere-7.0u2/vanilla => v2.2.0/vsphere-7.0u1}/deploy/validatingwebhook.yaml (96%) rename manifests/{dev/vsphere-7.0u1/vanilla => v2.2.0/vsphere-7.0u1}/deploy/vsphere-csi-controller-deployment.yaml (94%) rename manifests/{dev/vsphere-7.0u1/vanilla => v2.2.0/vsphere-7.0u1}/deploy/vsphere-csi-node-ds.yaml (96%) rename manifests/{dev/vsphere-7.0u1/vanilla => v2.2.0/vsphere-7.0u1}/rbac/vsphere-csi-controller-rbac.yaml (100%) 
rename manifests/{dev/vsphere-7.0u2/vanilla => v2.2.0/vsphere-7.0u2}/deploy/create-validation-webhook.sh (100%) rename manifests/{dev/vsphere-7.0u2/vanilla => v2.2.0/vsphere-7.0u2}/deploy/generate-signed-webhook-certs.sh (100%) rename manifests/{dev/vsphere-7.0u1/vanilla => v2.2.0/vsphere-7.0u2}/deploy/validatingwebhook.yaml (96%) rename manifests/{dev/vsphere-7.0u2/vanilla => v2.2.0/vsphere-7.0u2}/deploy/vsphere-csi-controller-deployment.yaml (95%) rename manifests/{dev/vsphere-7.0u2/vanilla => v2.2.0/vsphere-7.0u2}/deploy/vsphere-csi-node-ds.yaml (96%) rename manifests/{dev/vsphere-7.0u2/vanilla => v2.2.0/vsphere-7.0u2}/rbac/vsphere-csi-controller-rbac.yaml (100%) rename manifests/{dev/vsphere-7.0u2/vanilla => v2.2.0/vsphere-7.0u2}/rbac/vsphere-csi-node-rbac.yaml (100%) diff --git a/manifests/dev/vsphere-7.0/guestcluster/1.15/pvcsi.yaml b/manifests/dev/vsphere-7.0/guestcluster/1.15/pvcsi.yaml deleted file mode 100644 index a70439fe7c..0000000000 --- a/manifests/dev/vsphere-7.0/guestcluster/1.15/pvcsi.yaml +++ /dev/null @@ -1,365 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: {{ .PVCSINamespace }} ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: vsphere-csi-controller - namespace: {{ .PVCSINamespace }} ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: vsphere-csi-node - namespace: {{ .PVCSINamespace }} ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-controller-role -rules: - - apiGroups: [""] - resources: ["nodes", "persistentvolumeclaims", "pods"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "create", "update", "delete"] - - apiGroups: [""] - resources: ["events"] - verbs: ["get", "list", "watch", "create", "update", "patch"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", "create"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses", "csinodes"] - verbs: ["get", "list", "watch"] - - apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["policy"] - resources: ["podsecuritypolicies"] - verbs: ["use"] - resourceNames: ["vmware-system-privileged"] ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-node-role - namespace: {{ .PVCSINamespace }} -rules: - - apiGroups: ["policy"] - resources: ["podsecuritypolicies"] - verbs: ["use"] - resourceNames: ["vmware-system-privileged"] - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-controller-binding -subjects: - - kind: ServiceAccount - name: vsphere-csi-controller - namespace: {{ .PVCSINamespace }} -roleRef: - kind: ClusterRole - name: vsphere-csi-controller-role - apiGroup: rbac.authorization.k8s.io ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-node-binding - namespace: {{ .PVCSINamespace }} -subjects: - - kind: ServiceAccount - name: vsphere-csi-node - namespace: {{ .PVCSINamespace }} -roleRef: - kind: Role - name: vsphere-csi-node-role - apiGroup: rbac.authorization.k8s.io ---- -kind: Deployment -apiVersion: apps/v1 -metadata: - name: vsphere-csi-controller - namespace: {{ .PVCSINamespace }} -spec: - replicas: 1 - strategy: - type: Recreate - selector: - matchLabels: - app: vsphere-csi-controller - template: - 
metadata: - labels: - app: vsphere-csi-controller - role: vsphere-csi - spec: - serviceAccountName: vsphere-csi-controller - nodeSelector: - node-role.kubernetes.io/master: "" - tolerations: - - operator: "Exists" - key: node-role.kubernetes.io/master - effect: NoSchedule - containers: - - name: csi-attacher - image: vmware.io/csi-attacher/csi-attacher: - args: - - "--v=4" - - "--timeout=300s" - - "--csi-address=$(ADDRESS)" - - "--leader-election" - - "--leader-election-type=leases" - imagePullPolicy: "IfNotPresent" - env: - - name: ADDRESS - value: /csi/csi.sock - volumeMounts: - - mountPath: /csi - name: socket-dir - - name: vsphere-csi-controller - image: vmware.io/vsphere-csi: - args: - - "--supervisor-fss-name=csi-feature-states" - - "--supervisor-fss-namespace=$(CSI_NAMESPACE)" - - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - - "--fss-namespace=$(CSI_NAMESPACE)" - lifecycle: - preStop: - exec: - command: ["/bin/sh", "-c", "rm -rf /var/lib/csi/sockets/pluginproxy/csi.vsphere.vmware.com"] - imagePullPolicy: "IfNotPresent" - env: - - name: CSI_ENDPOINT - value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock - - name: CLUSTER_FLAVOR - value: "GUEST_CLUSTER" - - name: X_CSI_MODE - value: "controller" - - name: GC_CONFIG - value: /etc/cloud/pvcsi-config/cns-csi.conf - - name: PROVISION_TIMEOUT_MINUTES - value: "4" - - name: ATTACHER_TIMEOUT_MINUTES - value: "4" - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: SUPERVISOR_CLIENT_QPS - value: "50" - - name: SUPERVISOR_CLIENT_BURST - value: "50" - - name: INCLUSTER_CLIENT_QPS - value: "50" - - name: INCLUSTER_CLIENT_BURST - value: "50" - - name: CSI_NAMESPACE - value: {{ .PVCSINamespace }} - volumeMounts: - - mountPath: /etc/cloud/pvcsi-provider - name: pvcsi-provider-volume - readOnly: true - - mountPath: /etc/cloud/pvcsi-config - name: pvcsi-config-volume - readOnly: true - - mountPath: /var/lib/csi/sockets/pluginproxy/ - name: socket-dir - - name: vsphere-syncer - image: vmware.io/syncer: - args: - - "--leader-election" - - "--supervisor-fss-name=csi-feature-states" - - "--supervisor-fss-namespace=$(CSI_NAMESPACE)" - - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - - "--fss-namespace=$(CSI_NAMESPACE)" - imagePullPolicy: "IfNotPresent" - env: - - name: FULL_SYNC_INTERVAL_MINUTES - value: "30" - - name: GC_CONFIG - value: /etc/cloud/pvcsi-config/cns-csi.conf - - name: CLUSTER_FLAVOR - value: "GUEST_CLUSTER" - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: CSI_NAMESPACE - value: {{ .PVCSINamespace }} - volumeMounts: - - mountPath: /etc/cloud/pvcsi-provider - name: pvcsi-provider-volume - readOnly: true - - mountPath: /etc/cloud/pvcsi-config - name: pvcsi-config-volume - readOnly: true - - name: liveness-probe - image: vmware.io/csi-livenessprobe/csi-livenessprobe: - args: - - "--csi-address=$(ADDRESS)" - imagePullPolicy: "IfNotPresent" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - volumeMounts: - - mountPath: /var/lib/csi/sockets/pluginproxy/ - name: socket-dir - - name: csi-provisioner - image: vmware.io/csi-provisioner/csi-provisioner: - args: - - "--v=4" - - "--timeout=300s" - - "--csi-address=$(ADDRESS)" - - "--enable-leader-election" - - "--leader-election-type=leases" - imagePullPolicy: "IfNotPresent" - env: - - name: ADDRESS - value: /csi/csi.sock - volumeMounts: - - mountPath: /csi - name: socket-dir - volumes: - - name: pvcsi-provider-volume - secret: - secretName: pvcsi-provider-creds - - 
name: pvcsi-config-volume - configMap: - name: pvcsi-config - - name: socket-dir - hostPath: - path: /var/lib/csi/sockets/pluginproxy/csi.vsphere.vmware.com - type: DirectoryOrCreate ---- -apiVersion: storage.k8s.io/v1beta1 -kind: CSIDriver -metadata: - name: csi.vsphere.vmware.com -spec: - attachRequired: true - podInfoOnMount: false ---- -kind: DaemonSet -apiVersion: apps/v1 -metadata: - name: vsphere-csi-node - namespace: {{ .PVCSINamespace }} -spec: - selector: - matchLabels: - app: vsphere-csi-node - updateStrategy: - type: "RollingUpdate" - template: - metadata: - labels: - app: vsphere-csi-node - role: vsphere-csi - spec: - serviceAccountName: vsphere-csi-node - containers: - - name: node-driver-registrar - image: vmware.io/csi-node-driver-registrar: - imagePullPolicy: "IfNotPresent" - lifecycle: - preStop: - exec: - command: ["/bin/sh", "-c", "rm -rf /registration/csi.vsphere.vmware.com /var/lib/kubelet/plugins_registry/csi.vsphere.vmware.com /var/lib/kubelet/plugins_registry/csi.vsphere.vmware.com-reg.sock"] - args: - - "--v=5" - - "--csi-address=$(ADDRESS)" - - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" - env: - - name: ADDRESS - value: /csi/csi.sock - - name: DRIVER_REG_SOCK_PATH - value: /var/lib/kubelet/plugins_registry/csi.vsphere.vmware.com/csi.sock - volumeMounts: - - name: plugin-dir - mountPath: /csi - - name: registration-dir - mountPath: /registration - - name: vsphere-csi-node - image: vmware.io/vsphere-csi: - args: - - "--supervisor-fss-name=csi-feature-states" - - "--supervisor-fss-namespace=$(CSI_NAMESPACE)" - - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - - "--fss-namespace=$(CSI_NAMESPACE)" - imagePullPolicy: "IfNotPresent" - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: CSI_ENDPOINT - value: unix:///csi/csi.sock - - name: X_CSI_MODE - value: "node" - - name: X_CSI_SPEC_REQ_VALIDATION - value: "false" - - name: CLUSTER_FLAVOR - value: "GUEST_CLUSTER" - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: CSI_NAMESPACE - value: {{ .PVCSINamespace }} - securityContext: - privileged: true - capabilities: - add: ["SYS_ADMIN"] - allowPrivilegeEscalation: true - volumeMounts: - - name: plugin-dir - mountPath: /csi - - name: pods-mount-dir - mountPath: /var/lib/kubelet - mountPropagation: "Bidirectional" - - name: device-dir - mountPath: /dev - - name: liveness-probe - image: vmware.io/csi-livenessprobe/csi-livenessprobe: - args: - - "--csi-address=$(ADDRESS)" - imagePullPolicy: "IfNotPresent" - env: - - name: ADDRESS - value: /csi/csi.sock - volumeMounts: - - name: plugin-dir - mountPath: /csi - volumes: - - name: registration-dir - hostPath: - path: /var/lib/kubelet/plugins_registry - type: DirectoryOrCreate - - name: plugin-dir - hostPath: - path: /var/lib/kubelet/plugins_registry/csi.vsphere.vmware.com - type: DirectoryOrCreate - - name: pods-mount-dir - hostPath: - path: /var/lib/kubelet - type: Directory - - name: device-dir - hostPath: - path: /dev ---- -apiVersion: v1 -data: - cns-csi.conf: | - [GC] - endpoint = "{{ .SupervisorMasterEndpointHostName }}" - port = "{{ .SupervisorMasterPort }}" - tanzukubernetescluster-uid = "{{ .TanzuKubernetesClusterUID }}" - tanzukubernetescluster-name = "{{ .TanzuKubernetesClusterName }}" -kind: ConfigMap -metadata: - name: pvcsi-config - namespace: {{ .PVCSINamespace }} ---- diff --git a/manifests/dev/vsphere-7.0/guestcluster/1.16/pvcsi.yaml b/manifests/dev/vsphere-7.0/guestcluster/1.16/pvcsi.yaml deleted file mode 100644 
index a721c75884..0000000000 --- a/manifests/dev/vsphere-7.0/guestcluster/1.16/pvcsi.yaml +++ /dev/null @@ -1,386 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: {{ .PVCSINamespace }} ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: vsphere-csi-controller - namespace: {{ .PVCSINamespace }} ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: vsphere-csi-node - namespace: {{ .PVCSINamespace }} ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-controller-role -rules: - - apiGroups: [""] - resources: ["nodes", "pods", "configmaps"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "create", "update", "delete", "patch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["get", "list", "watch", "create", "update", "patch"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", "create"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses", "csinodes"] - verbs: ["get", "list", "watch"] - - apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["policy"] - resources: ["podsecuritypolicies"] - verbs: ["use"] - resourceNames: ["vmware-system-privileged"] - - apiGroups: [""] - resources: ["persistentvolumeclaims/status"] - verbs: ["update", "patch"] ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-node-role - namespace: {{ .PVCSINamespace }} -rules: - - apiGroups: ["policy"] - resources: ["podsecuritypolicies"] - verbs: ["use"] - resourceNames: ["vmware-system-privileged"] - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-controller-binding -subjects: - - kind: ServiceAccount - name: vsphere-csi-controller - namespace: {{ .PVCSINamespace }} -roleRef: - kind: ClusterRole - name: vsphere-csi-controller-role - apiGroup: rbac.authorization.k8s.io ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-node-binding - namespace: {{ .PVCSINamespace }} -subjects: - - kind: ServiceAccount - name: vsphere-csi-node - namespace: {{ .PVCSINamespace }} -roleRef: - kind: Role - name: vsphere-csi-node-role - apiGroup: rbac.authorization.k8s.io ---- -kind: Deployment -apiVersion: apps/v1 -metadata: - name: vsphere-csi-controller - namespace: {{ .PVCSINamespace }} -spec: - replicas: 1 - strategy: - type: Recreate - selector: - matchLabels: - app: vsphere-csi-controller - template: - metadata: - labels: - app: vsphere-csi-controller - role: vsphere-csi - spec: - serviceAccountName: vsphere-csi-controller - nodeSelector: - node-role.kubernetes.io/master: "" - tolerations: - - operator: "Exists" - key: node-role.kubernetes.io/master - effect: NoSchedule - containers: - - name: csi-attacher - image: vmware.io/csi-attacher/csi-attacher: - args: - - "--v=4" - - "--timeout=300s" - - "--csi-address=$(ADDRESS)" - - "--leader-election" - - "--leader-election-type=leases" - imagePullPolicy: "IfNotPresent" - env: - - name: ADDRESS - value: /csi/csi.sock - volumeMounts: - - mountPath: /csi - name: socket-dir - - name: vsphere-csi-controller - image: vmware.io/vsphere-csi: - args: - - 
"--supervisor-fss-name=csi-feature-states" - - "--supervisor-fss-namespace=$(CSI_NAMESPACE)" - - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - - "--fss-namespace=$(CSI_NAMESPACE)" - lifecycle: - preStop: - exec: - command: ["/bin/sh", "-c", "rm -rf /var/lib/csi/sockets/pluginproxy/csi.vsphere.vmware.com"] - imagePullPolicy: "IfNotPresent" - env: - - name: CSI_ENDPOINT - value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock - - name: CLUSTER_FLAVOR - value: "GUEST_CLUSTER" - - name: X_CSI_MODE - value: "controller" - - name: GC_CONFIG - value: /etc/cloud/pvcsi-config/cns-csi.conf - - name: PROVISION_TIMEOUT_MINUTES - value: "4" - - name: ATTACHER_TIMEOUT_MINUTES - value: "4" - - name: RESIZE_TIMEOUT_MINUTES - value: "4" - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: SUPERVISOR_CLIENT_QPS - value: "50" - - name: SUPERVISOR_CLIENT_BURST - value: "50" - - name: INCLUSTER_CLIENT_QPS - value: "50" - - name: INCLUSTER_CLIENT_BURST - value: "50" - - name: CSI_NAMESPACE - value: {{ .PVCSINamespace }} - volumeMounts: - - mountPath: /etc/cloud/pvcsi-provider - name: pvcsi-provider-volume - readOnly: true - - mountPath: /etc/cloud/pvcsi-config - name: pvcsi-config-volume - readOnly: true - - mountPath: /var/lib/csi/sockets/pluginproxy/ - name: socket-dir - - name: vsphere-syncer - image: vmware.io/syncer: - args: - - "--leader-election" - - "--supervisor-fss-name=csi-feature-states" - - "--supervisor-fss-namespace=$(CSI_NAMESPACE)" - - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - - "--fss-namespace=$(CSI_NAMESPACE)" - imagePullPolicy: "IfNotPresent" - env: - - name: FULL_SYNC_INTERVAL_MINUTES - value: "30" - - name: GC_CONFIG - value: /etc/cloud/pvcsi-config/cns-csi.conf - - name: CLUSTER_FLAVOR - value: "GUEST_CLUSTER" - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: CSI_NAMESPACE - value: {{ .PVCSINamespace }} - volumeMounts: - - mountPath: /etc/cloud/pvcsi-provider - name: pvcsi-provider-volume - readOnly: true - - mountPath: /etc/cloud/pvcsi-config - name: pvcsi-config-volume - readOnly: true - - name: liveness-probe - image: vmware.io/csi-livenessprobe/csi-livenessprobe: - args: - - "--csi-address=$(ADDRESS)" - imagePullPolicy: "IfNotPresent" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - volumeMounts: - - mountPath: /var/lib/csi/sockets/pluginproxy/ - name: socket-dir - - name: csi-provisioner - image: vmware.io/csi-provisioner/csi-provisioner: - args: - - "--v=4" - - "--timeout=300s" - - "--csi-address=$(ADDRESS)" - - "--enable-leader-election" - - "--leader-election-type=leases" - imagePullPolicy: "IfNotPresent" - env: - - name: ADDRESS - value: /csi/csi.sock - volumeMounts: - - mountPath: /csi - name: socket-dir - - name: csi-resizer - image: vmware.io/kubernetes-csi_external-resizer/kubernetes-csi_external-resizer: - args: - - "--v=4" - - "--csiTimeout=300s" - - "--csi-address=$(ADDRESS)" - - "--leader-election" - env: - - name: ADDRESS - value: /csi/csi.sock - volumeMounts: - - mountPath: /csi - name: socket-dir - volumes: - - name: pvcsi-provider-volume - secret: - secretName: pvcsi-provider-creds - - name: pvcsi-config-volume - configMap: - name: pvcsi-config - - name: socket-dir - hostPath: - path: /var/lib/csi/sockets/pluginproxy/csi.vsphere.vmware.com - type: DirectoryOrCreate ---- -apiVersion: storage.k8s.io/v1beta1 -kind: CSIDriver -metadata: - name: csi.vsphere.vmware.com -spec: - attachRequired: true - podInfoOnMount: false ---- 
-kind: DaemonSet -apiVersion: apps/v1 -metadata: - name: vsphere-csi-node - namespace: {{ .PVCSINamespace }} -spec: - selector: - matchLabels: - app: vsphere-csi-node - updateStrategy: - type: "RollingUpdate" - template: - metadata: - labels: - app: vsphere-csi-node - role: vsphere-csi - spec: - serviceAccountName: vsphere-csi-node - containers: - - name: node-driver-registrar - image: vmware.io/csi-node-driver-registrar: - imagePullPolicy: "IfNotPresent" - lifecycle: - preStop: - exec: - command: ["/bin/sh", "-c", "rm -rf /registration/csi.vsphere.vmware.com /var/lib/kubelet/plugins_registry/csi.vsphere.vmware.com /var/lib/kubelet/plugins_registry/csi.vsphere.vmware.com-reg.sock"] - args: - - "--v=5" - - "--csi-address=$(ADDRESS)" - - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" - env: - - name: ADDRESS - value: /csi/csi.sock - - name: DRIVER_REG_SOCK_PATH - value: /var/lib/kubelet/plugins_registry/csi.vsphere.vmware.com/csi.sock - volumeMounts: - - name: plugin-dir - mountPath: /csi - - name: registration-dir - mountPath: /registration - - name: vsphere-csi-node - image: vmware.io/vsphere-csi: - args: - - "--supervisor-fss-name=csi-feature-states" - - "--supervisor-fss-namespace=$(CSI_NAMESPACE)" - - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - - "--fss-namespace=$(CSI_NAMESPACE)" - imagePullPolicy: "IfNotPresent" - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: CSI_ENDPOINT - value: unix:///csi/csi.sock - - name: X_CSI_MODE - value: "node" - - name: X_CSI_SPEC_REQ_VALIDATION - value: "false" - - name: CLUSTER_FLAVOR - value: "GUEST_CLUSTER" - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: CSI_NAMESPACE - value: {{ .PVCSINamespace }} - securityContext: - privileged: true - capabilities: - add: ["SYS_ADMIN"] - allowPrivilegeEscalation: true - volumeMounts: - - name: plugin-dir - mountPath: /csi - - name: pods-mount-dir - mountPath: /var/lib/kubelet - mountPropagation: "Bidirectional" - - name: device-dir - mountPath: /dev - - name: liveness-probe - image: vmware.io/csi-livenessprobe/csi-livenessprobe: - args: - - "--csi-address=$(ADDRESS)" - imagePullPolicy: "IfNotPresent" - env: - - name: ADDRESS - value: /csi/csi.sock - volumeMounts: - - name: plugin-dir - mountPath: /csi - volumes: - - name: registration-dir - hostPath: - path: /var/lib/kubelet/plugins_registry - type: DirectoryOrCreate - - name: plugin-dir - hostPath: - path: /var/lib/kubelet/plugins_registry/csi.vsphere.vmware.com - type: DirectoryOrCreate - - name: pods-mount-dir - hostPath: - path: /var/lib/kubelet - type: Directory - - name: device-dir - hostPath: - path: /dev ---- -apiVersion: v1 -data: - cns-csi.conf: | - [GC] - endpoint = "{{ .SupervisorMasterEndpointHostName }}" - port = "{{ .SupervisorMasterPort }}" - tanzukubernetescluster-uid = "{{ .TanzuKubernetesClusterUID }}" - tanzukubernetescluster-name = "{{ .TanzuKubernetesClusterName }}" -kind: ConfigMap -metadata: - name: pvcsi-config - namespace: {{ .PVCSINamespace }} ---- \ No newline at end of file diff --git a/manifests/dev/vsphere-7.0/supervisorcluster/k8s-1.15/deploy/vsphere-csi-controller-deployment.yaml b/manifests/dev/vsphere-7.0/supervisorcluster/k8s-1.15/deploy/vsphere-csi-controller-deployment.yaml deleted file mode 100644 index b69ef295d9..0000000000 --- a/manifests/dev/vsphere-7.0/supervisorcluster/k8s-1.15/deploy/vsphere-csi-controller-deployment.yaml +++ /dev/null @@ -1,160 +0,0 @@ -kind: Deployment -apiVersion: apps/v1 -metadata: - name: 
vsphere-csi-controller - namespace: vmware-system-csi -spec: - strategy: - type: Recreate - replicas: 1 - selector: - matchLabels: - app: vsphere-csi-controller - template: - metadata: - labels: - app: vsphere-csi-controller - role: vsphere-csi - spec: - serviceAccount: vsphere-csi-controller - nodeSelector: - node-role.kubernetes.io/master: '' - tolerations: - - operator: "Exists" - key: "node-role.kubernetes.io/master" - effect: "NoSchedule" - hostNetwork: true - containers: - - name: csi-provisioner - image: vmware/csi-provisioner/csi-provisioner:v1.2.1_vmware.2 - args: - - "--v=4" - - "--timeout=300s" - - "--csi-address=$(ADDRESS)" - - "--feature-gates=Topology=true" - - "--strict-topology" - - "--enable-leader-election" - - "--leader-election-type=leases" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - - name: KUBERNETES_SERVICE_HOST - value: "127.0.0.1" - - name: KUBERNETES_SERVICE_PORT - value: "6443" - - name: VSPHERE_CLOUD_OPERATOR_SERVICE_PORT - value: "29000" - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/sockets/pluginproxy/ - - name: csi-attacher - image: vmware/csi-attacher/csi-attacher:v1.1.1 - args: - - "--v=4" - - "--timeout=300s" - - "--csi-address=$(ADDRESS)" - - "--leader-election" - - "--leader-election-type=leases" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - - name: KUBERNETES_SERVICE_HOST - value: "127.0.0.1" - - name: KUBERNETES_SERVICE_PORT - value: "6443" - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/sockets/pluginproxy/ - - name: vsphere-csi-controller - image: vmware/vsphere-csi: - args: - - "--supervisor-fss-name=csi-feature-states" - - "--supervisor-fss-namespace=$(CSI_NAMESPACE)" - lifecycle: - preStop: - exec: - command: ["/bin/sh", "-c", "rm -rf /var/lib/csi/sockets/pluginproxy/csi.vsphere.vmware.com"] - env: - - name: CSI_ENDPOINT - value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock - - name: CLUSTER_FLAVOR - value: "WORKLOAD" - - name: X_CSI_MODE - value: "controller" - - name: KUBERNETES_SERVICE_HOST - value: "127.0.0.1" - - name: KUBERNETES_SERVICE_PORT - value: "6443" - - name: POD_LISTENER_SERVICE_PORT - value: "29000" - - name: VSPHERE_CSI_CONFIG - value: "/etc/vmware/wcp/vsphere-cloud-provider.conf" # here vsphere-cloud-provider.conf is the name of the file used for creating secret using "--from-file" flag - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: INCLUSTER_CLIENT_QPS - value: "50" - - name: INCLUSTER_CLIENT_BURST - value: "50" - - name: CSI_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - imagePullPolicy: "IfNotPresent" - volumeMounts: - - mountPath: /etc/vmware/wcp - name: vsphere-config-volume - readOnly: true - - mountPath: /var/lib/csi/sockets/pluginproxy/ - name: socket-dir - - name: liveness-probe - image: vmware/csi-livenessprobe/csi-livenessprobe:v1.1.0 - args: - - "--csi-address=$(ADDRESS)" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - volumeMounts: - - mountPath: /var/lib/csi/sockets/pluginproxy/ - name: socket-dir - - name: vsphere-syncer - image: vmware/syncer: - args: - - "--leader-election" - - "--supervisor-fss-name=csi-feature-states" - - "--supervisor-fss-namespace=$(CSI_NAMESPACE)" - env: - - name: CLUSTER_FLAVOR - value: "WORKLOAD" - - name: KUBERNETES_SERVICE_HOST - value: "127.0.0.1" - - name: KUBERNETES_SERVICE_PORT - value: "6443" - - name: 
FULL_SYNC_INTERVAL_MINUTES - value: "30" - - name: POD_POLL_INTERVAL_SECONDS - value: "2" - - name: POD_LISTENER_SERVICE_PORT - value: "29000" - - name: VSPHERE_CSI_CONFIG - value: "/etc/vmware/wcp/vsphere-cloud-provider.conf" # here vsphere-cloud-provider.conf is the name of the file used for creating secret using "--from-file" flag - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: CSI_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - imagePullPolicy: "IfNotPresent" - volumeMounts: - - mountPath: /etc/vmware/wcp - name: vsphere-config-volume - readOnly: true - volumes: - - name: vsphere-config-volume - secret: - secretName: vsphere-config-secret - - name: socket-dir - hostPath: - path: /var/lib/csi/sockets/pluginproxy/csi.vsphere.vmware.com - type: DirectoryOrCreate \ No newline at end of file diff --git a/manifests/dev/vsphere-7.0/supervisorcluster/k8s-1.15/rbac/vsphere-csi-controller-rbac.yaml b/manifests/dev/vsphere-7.0/supervisorcluster/k8s-1.15/rbac/vsphere-csi-controller-rbac.yaml deleted file mode 100644 index 3678af52af..0000000000 --- a/manifests/dev/vsphere-7.0/supervisorcluster/k8s-1.15/rbac/vsphere-csi-controller-rbac.yaml +++ /dev/null @@ -1,64 +0,0 @@ -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-controller-role -rules: - - apiGroups: [""] - resources: ["nodes", "pods", "resourcequotas"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "create", "patch"] - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "update", "create", "delete"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses", "csinodes"] - verbs: ["get", "list", "watch", "patch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] - - apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["cns.vmware.com"] - resources: ["cnsnodevmattachments", "cnsvolumemetadatas", "cnsregistervolumes"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["cns.vmware.com"] - resources: ["storagepools"] - verbs: ["get", "watch", "list", "delete", "update", "create"] - - apiGroups: ["apiextensions.k8s.io"] - resources: ["customresourcedefinitions"] - verbs: ["get", "create", "update"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", "create"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-controller-binding -subjects: - - kind: ServiceAccount - name: vsphere-csi-controller - namespace: vmware-system-csi -roleRef: - kind: ClusterRole - name: vsphere-csi-controller-role - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: csiRole - namespace: vmware-system-csi -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: wcp-privileged-psp -subjects: - # For the kube-system nodes. 
- - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts:vmware-system-csi - diff --git a/manifests/dev/vsphere-7.0u1/guestcluster/1.15/pvcsi.yaml b/manifests/dev/vsphere-7.0u1/guestcluster/1.15/pvcsi.yaml deleted file mode 100644 index a019cdab57..0000000000 --- a/manifests/dev/vsphere-7.0u1/guestcluster/1.15/pvcsi.yaml +++ /dev/null @@ -1,359 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: {{ .PVCSINamespace }} ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: vsphere-csi-controller - namespace: {{ .PVCSINamespace }} ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-controller-role -rules: - - apiGroups: [""] - resources: ["nodes", "persistentvolumeclaims", "pods"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "create", "update", "delete"] - - apiGroups: [""] - resources: ["events"] - verbs: ["get", "list", "watch", "create", "update", "patch"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", "create"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses", "csinodes"] - verbs: ["get", "list", "watch"] - - apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["policy"] - resources: ["podsecuritypolicies"] - verbs: ["use"] - resourceNames: ["vmware-system-privileged"] ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-node-role - namespace: {{ .PVCSINamespace }} -rules: - - apiGroups: - - "policy" - resources: - - podsecuritypolicies - verbs: - - use - resourceNames: - - vmware-system-privileged ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-controller-binding -subjects: - - kind: ServiceAccount - name: vsphere-csi-controller - namespace: {{ .PVCSINamespace }} -roleRef: - kind: ClusterRole - name: vsphere-csi-controller-role - apiGroup: rbac.authorization.k8s.io ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-node-binding - namespace: {{ .PVCSINamespace }} -subjects: - - kind: ServiceAccount - name: default - namespace: {{ .PVCSINamespace }} -roleRef: - kind: Role - name: vsphere-csi-node-role - apiGroup: rbac.authorization.k8s.io ---- -kind: Deployment -apiVersion: apps/v1 -metadata: - name: vsphere-csi-controller - namespace: {{ .PVCSINamespace }} -spec: - replicas: 1 - strategy: - type: Recreate - selector: - matchLabels: - app: vsphere-csi-controller - template: - metadata: - labels: - app: vsphere-csi-controller - role: vsphere-csi - spec: - serviceAccountName: vsphere-csi-controller - nodeSelector: - node-role.kubernetes.io/master: "" - tolerations: - - operator: "Exists" - key: node-role.kubernetes.io/master - effect: NoSchedule - containers: - - name: csi-attacher - image: vmware.io/csi-attacher/csi-attacher: - args: - - "--v=4" - - "--timeout=300s" - - "--csi-address=$(ADDRESS)" - - "--leader-election" - - "--leader-election-type=leases" - imagePullPolicy: "IfNotPresent" - env: - - name: ADDRESS - value: /csi/csi.sock - volumeMounts: - - mountPath: /csi - name: socket-dir - - name: vsphere-csi-controller - image: vmware.io/vsphere-csi: - args: - - "--supervisor-fss-name=csi-feature-states" - - "--supervisor-fss-namespace=$(CSI_NAMESPACE)" - - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - - 
"--fss-namespace=$(CSI_NAMESPACE)" - lifecycle: - preStop: - exec: - command: ["/bin/sh", "-c", "rm -rf /var/lib/csi/sockets/pluginproxy/csi.vsphere.vmware.com"] - imagePullPolicy: "IfNotPresent" - env: - - name: CSI_ENDPOINT - value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock - - name: CLUSTER_FLAVOR - value: "GUEST_CLUSTER" - - name: X_CSI_MODE - value: "controller" - - name: GC_CONFIG - value: /etc/cloud/pvcsi-config/cns-csi.conf - - name: PROVISION_TIMEOUT_MINUTES - value: "4" - - name: ATTACHER_TIMEOUT_MINUTES - value: "4" - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: SUPERVISOR_CLIENT_QPS - value: "50" - - name: SUPERVISOR_CLIENT_BURST - value: "50" - - name: INCLUSTER_CLIENT_QPS - value: "50" - - name: INCLUSTER_CLIENT_BURST - value: "50" - - name: CSI_NAMESPACE - value: {{ .PVCSINamespace }} - volumeMounts: - - mountPath: /etc/cloud/pvcsi-provider - name: pvcsi-provider-volume - readOnly: true - - mountPath: /etc/cloud/pvcsi-config - name: pvcsi-config-volume - readOnly: true - - mountPath: /var/lib/csi/sockets/pluginproxy/ - name: socket-dir - - name: vsphere-syncer - image: vmware.io/syncer: - args: - - "--leader-election" - - "--supervisor-fss-name=csi-feature-states" - - "--supervisor-fss-namespace=$(CSI_NAMESPACE)" - - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - - "--fss-namespace=$(CSI_NAMESPACE)" - imagePullPolicy: "IfNotPresent" - env: - - name: FULL_SYNC_INTERVAL_MINUTES - value: "30" - - name: GC_CONFIG - value: /etc/cloud/pvcsi-config/cns-csi.conf - - name: CLUSTER_FLAVOR - value: "GUEST_CLUSTER" - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: CSI_NAMESPACE - value: {{ .PVCSINamespace }} - volumeMounts: - - mountPath: /etc/cloud/pvcsi-provider - name: pvcsi-provider-volume - readOnly: true - - mountPath: /etc/cloud/pvcsi-config - name: pvcsi-config-volume - readOnly: true - - name: liveness-probe - image: vmware.io/csi-livenessprobe/csi-livenessprobe: - args: - - "--csi-address=$(ADDRESS)" - imagePullPolicy: "IfNotPresent" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - volumeMounts: - - mountPath: /var/lib/csi/sockets/pluginproxy/ - name: socket-dir - - name: csi-provisioner - image: vmware.io/csi-provisioner/csi-provisioner: - args: - - "--v=4" - - "--timeout=300s" - - "--csi-address=$(ADDRESS)" - - "--enable-leader-election" - - "--leader-election-type=leases" - imagePullPolicy: "IfNotPresent" - env: - - name: ADDRESS - value: /csi/csi.sock - volumeMounts: - - mountPath: /csi - name: socket-dir - volumes: - - name: pvcsi-provider-volume - secret: - secretName: pvcsi-provider-creds - - name: pvcsi-config-volume - configMap: - name: pvcsi-config - - name: socket-dir - hostPath: - path: /var/lib/csi/sockets/pluginproxy/csi.vsphere.vmware.com - type: DirectoryOrCreate ---- -apiVersion: storage.k8s.io/v1beta1 -kind: CSIDriver -metadata: - name: csi.vsphere.vmware.com -spec: - attachRequired: true - podInfoOnMount: false ---- -kind: DaemonSet -apiVersion: apps/v1 -metadata: - name: vsphere-csi-node - namespace: {{ .PVCSINamespace }} -spec: - selector: - matchLabels: - app: vsphere-csi-node - updateStrategy: - type: "RollingUpdate" - template: - metadata: - labels: - app: vsphere-csi-node - role: vsphere-csi - spec: - containers: - - name: node-driver-registrar - image: vmware.io/csi-node-driver-registrar: - imagePullPolicy: "IfNotPresent" - lifecycle: - preStop: - exec: - command: ["/bin/sh", "-c", "rm -rf 
/registration/csi.vsphere.vmware.com /var/lib/kubelet/plugins_registry/csi.vsphere.vmware.com /var/lib/kubelet/plugins_registry/csi.vsphere.vmware.com-reg.sock"] - args: - - "--v=5" - - "--csi-address=$(ADDRESS)" - - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" - env: - - name: ADDRESS - value: /csi/csi.sock - - name: DRIVER_REG_SOCK_PATH - value: /var/lib/kubelet/plugins_registry/csi.vsphere.vmware.com/csi.sock - volumeMounts: - - name: plugin-dir - mountPath: /csi - - name: registration-dir - mountPath: /registration - - name: vsphere-csi-node - image: vmware.io/vsphere-csi: - args: - - "--supervisor-fss-name=csi-feature-states" - - "--supervisor-fss-namespace=$(CSI_NAMESPACE)" - - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - - "--fss-namespace=$(CSI_NAMESPACE)" - imagePullPolicy: "IfNotPresent" - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: CSI_ENDPOINT - value: unix:///csi/csi.sock - - name: X_CSI_MODE - value: "node" - - name: X_CSI_SPEC_REQ_VALIDATION - value: "false" - - name: CLUSTER_FLAVOR - value: "GUEST_CLUSTER" - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: CSI_NAMESPACE - value: {{ .PVCSINamespace }} - securityContext: - privileged: true - capabilities: - add: ["SYS_ADMIN"] - allowPrivilegeEscalation: true - volumeMounts: - - name: plugin-dir - mountPath: /csi - - name: pods-mount-dir - mountPath: /var/lib/kubelet - mountPropagation: "Bidirectional" - - name: device-dir - mountPath: /dev - - name: liveness-probe - image: vmware.io/csi-livenessprobe/csi-livenessprobe: - args: - - "--csi-address=$(ADDRESS)" - imagePullPolicy: "IfNotPresent" - env: - - name: ADDRESS - value: /csi/csi.sock - volumeMounts: - - name: plugin-dir - mountPath: /csi - volumes: - - name: registration-dir - hostPath: - path: /var/lib/kubelet/plugins_registry - type: DirectoryOrCreate - - name: plugin-dir - hostPath: - path: /var/lib/kubelet/plugins_registry/csi.vsphere.vmware.com - type: DirectoryOrCreate - - name: pods-mount-dir - hostPath: - path: /var/lib/kubelet - type: Directory - - name: device-dir - hostPath: - path: /dev ---- -apiVersion: v1 -data: - cns-csi.conf: | - [GC] - endpoint = "{{ .SupervisorMasterEndpointHostName }}" - port = "{{ .SupervisorMasterPort }}" - tanzukubernetescluster-uid = "{{ .TanzuKubernetesClusterUID }}" - tanzukubernetescluster-name = "{{ .TanzuKubernetesClusterName }}" -kind: ConfigMap -metadata: - name: pvcsi-config - namespace: {{ .PVCSINamespace }} ---- diff --git a/manifests/dev/vsphere-7.0u1/guestcluster/1.16/pvcsi.yaml b/manifests/dev/vsphere-7.0u1/guestcluster/1.16/pvcsi.yaml deleted file mode 100644 index a78003bf37..0000000000 --- a/manifests/dev/vsphere-7.0u1/guestcluster/1.16/pvcsi.yaml +++ /dev/null @@ -1,384 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: {{ .PVCSINamespace }} ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: vsphere-csi-controller - namespace: {{ .PVCSINamespace }} ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-controller-role -rules: - - apiGroups: [""] - resources: ["nodes", "pods", "configmaps"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "create", "update", "delete", "patch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["get", "list", "watch", "create", "update", 
"patch"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", "create"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses", "csinodes"] - verbs: ["get", "list", "watch"] - - apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["policy"] - resources: ["podsecuritypolicies"] - verbs: ["use"] - resourceNames: ["vmware-system-privileged"] - - apiGroups: [""] - resources: ["persistentvolumeclaims/status"] - verbs: ["update", "patch"] ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-node-role - namespace: {{ .PVCSINamespace }} -rules: - - apiGroups: - - "policy" - resources: - - podsecuritypolicies - verbs: - - use - resourceNames: - - vmware-system-privileged ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-controller-binding -subjects: - - kind: ServiceAccount - name: vsphere-csi-controller - namespace: {{ .PVCSINamespace }} -roleRef: - kind: ClusterRole - name: vsphere-csi-controller-role - apiGroup: rbac.authorization.k8s.io ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-node-binding - namespace: {{ .PVCSINamespace }} -subjects: - - kind: ServiceAccount - name: default - namespace: {{ .PVCSINamespace }} -roleRef: - kind: Role - name: vsphere-csi-node-role - apiGroup: rbac.authorization.k8s.io ---- -kind: Deployment -apiVersion: apps/v1 -metadata: - name: vsphere-csi-controller - namespace: {{ .PVCSINamespace }} -spec: - replicas: 1 - strategy: - type: Recreate - selector: - matchLabels: - app: vsphere-csi-controller - template: - metadata: - labels: - app: vsphere-csi-controller - role: vsphere-csi - spec: - serviceAccountName: vsphere-csi-controller - nodeSelector: - node-role.kubernetes.io/master: "" - tolerations: - - operator: "Exists" - key: node-role.kubernetes.io/master - effect: NoSchedule - containers: - - name: csi-attacher - image: vmware.io/csi-attacher/csi-attacher: - args: - - "--v=4" - - "--timeout=300s" - - "--csi-address=$(ADDRESS)" - - "--leader-election" - - "--leader-election-type=leases" - imagePullPolicy: "IfNotPresent" - env: - - name: ADDRESS - value: /csi/csi.sock - volumeMounts: - - mountPath: /csi - name: socket-dir - - name: vsphere-csi-controller - image: vmware.io/vsphere-csi: - args: - - "--supervisor-fss-name=csi-feature-states" - - "--supervisor-fss-namespace=$(CSI_NAMESPACE)" - - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - - "--fss-namespace=$(CSI_NAMESPACE)" - lifecycle: - preStop: - exec: - command: ["/bin/sh", "-c", "rm -rf /var/lib/csi/sockets/pluginproxy/csi.vsphere.vmware.com"] - imagePullPolicy: "IfNotPresent" - env: - - name: CSI_ENDPOINT - value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock - - name: CLUSTER_FLAVOR - value: "GUEST_CLUSTER" - - name: X_CSI_MODE - value: "controller" - - name: GC_CONFIG - value: /etc/cloud/pvcsi-config/cns-csi.conf - - name: PROVISION_TIMEOUT_MINUTES - value: "4" - - name: ATTACHER_TIMEOUT_MINUTES - value: "4" - - name: RESIZE_TIMEOUT_MINUTES - value: "4" - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: SUPERVISOR_CLIENT_QPS - value: "50" - - name: SUPERVISOR_CLIENT_BURST - value: "50" - - name: INCLUSTER_CLIENT_QPS - value: "50" - - name: INCLUSTER_CLIENT_BURST - value: "50" - - name: CSI_NAMESPACE - value: {{ .PVCSINamespace }} - volumeMounts: - - mountPath: 
/etc/cloud/pvcsi-provider - name: pvcsi-provider-volume - readOnly: true - - mountPath: /etc/cloud/pvcsi-config - name: pvcsi-config-volume - readOnly: true - - mountPath: /var/lib/csi/sockets/pluginproxy/ - name: socket-dir - - name: vsphere-syncer - image: vmware.io/syncer: - args: - - "--leader-election" - - "--supervisor-fss-name=csi-feature-states" - - "--supervisor-fss-namespace=$(CSI_NAMESPACE)" - - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - - "--fss-namespace=$(CSI_NAMESPACE)" - imagePullPolicy: "IfNotPresent" - env: - - name: FULL_SYNC_INTERVAL_MINUTES - value: "30" - - name: GC_CONFIG - value: /etc/cloud/pvcsi-config/cns-csi.conf - - name: CLUSTER_FLAVOR - value: "GUEST_CLUSTER" - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: CSI_NAMESPACE - value: {{ .PVCSINamespace }} - volumeMounts: - - mountPath: /etc/cloud/pvcsi-provider - name: pvcsi-provider-volume - readOnly: true - - mountPath: /etc/cloud/pvcsi-config - name: pvcsi-config-volume - readOnly: true - - name: liveness-probe - image: vmware.io/csi-livenessprobe/csi-livenessprobe: - args: - - "--csi-address=$(ADDRESS)" - imagePullPolicy: "IfNotPresent" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - volumeMounts: - - mountPath: /var/lib/csi/sockets/pluginproxy/ - name: socket-dir - - name: csi-provisioner - image: vmware.io/csi-provisioner/csi-provisioner: - args: - - "--v=4" - - "--timeout=300s" - - "--csi-address=$(ADDRESS)" - - "--enable-leader-election" - - "--leader-election-type=leases" - imagePullPolicy: "IfNotPresent" - env: - - name: ADDRESS - value: /csi/csi.sock - volumeMounts: - - mountPath: /csi - name: socket-dir - - name: csi-resizer - image: vmware.io/kubernetes-csi_external-resizer/kubernetes-csi_external-resizer: - args: - - "--v=4" - - "--csiTimeout=300s" - - "--csi-address=$(ADDRESS)" - - "--leader-election" - env: - - name: ADDRESS - value: /csi/csi.sock - volumeMounts: - - mountPath: /csi - name: socket-dir - volumes: - - name: pvcsi-provider-volume - secret: - secretName: pvcsi-provider-creds - - name: pvcsi-config-volume - configMap: - name: pvcsi-config - - name: socket-dir - hostPath: - path: /var/lib/csi/sockets/pluginproxy/csi.vsphere.vmware.com - type: DirectoryOrCreate ---- -apiVersion: storage.k8s.io/v1beta1 -kind: CSIDriver -metadata: - name: csi.vsphere.vmware.com -spec: - attachRequired: true - podInfoOnMount: false ---- -kind: DaemonSet -apiVersion: apps/v1 -metadata: - name: vsphere-csi-node - namespace: {{ .PVCSINamespace }} -spec: - selector: - matchLabels: - app: vsphere-csi-node - updateStrategy: - type: "RollingUpdate" - template: - metadata: - labels: - app: vsphere-csi-node - role: vsphere-csi - spec: - containers: - - name: node-driver-registrar - image: vmware.io/csi-node-driver-registrar: - imagePullPolicy: "IfNotPresent" - lifecycle: - preStop: - exec: - command: ["/bin/sh", "-c", "rm -rf /registration/csi.vsphere.vmware.com-reg.sock /csi/csi.sock"] - args: - - "--v=5" - - "--csi-address=$(ADDRESS)" - - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" - env: - - name: ADDRESS - value: /csi/csi.sock - - name: DRIVER_REG_SOCK_PATH - value: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock - volumeMounts: - - name: plugin-dir - mountPath: /csi - - name: registration-dir - mountPath: /registration - - name: vsphere-csi-node - image: vmware.io/vsphere-csi: - args: - - "--supervisor-fss-name=csi-feature-states" - - "--supervisor-fss-namespace=$(CSI_NAMESPACE)" - - 
"--fss-name=internal-feature-states.csi.vsphere.vmware.com" - - "--fss-namespace=$(CSI_NAMESPACE)" - imagePullPolicy: "IfNotPresent" - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: CSI_ENDPOINT - value: unix:///csi/csi.sock - - name: X_CSI_MODE - value: "node" - - name: X_CSI_SPEC_REQ_VALIDATION - value: "false" - - name: CLUSTER_FLAVOR - value: "GUEST_CLUSTER" - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: CSI_NAMESPACE - value: {{ .PVCSINamespace }} - securityContext: - privileged: true - capabilities: - add: ["SYS_ADMIN"] - allowPrivilegeEscalation: true - volumeMounts: - - name: plugin-dir - mountPath: /csi - - name: pods-mount-dir - mountPath: /var/lib/kubelet - mountPropagation: "Bidirectional" - - name: device-dir - mountPath: /dev - - name: liveness-probe - image: vmware.io/csi-livenessprobe/csi-livenessprobe: - args: - - --csi-address=/csi/csi.sock - imagePullPolicy: "IfNotPresent" - env: - volumeMounts: - - name: plugin-dir - mountPath: /csi - volumes: - - name: registration-dir - hostPath: - path: /var/lib/kubelet/plugins_registry - type: Directory - - name: plugin-dir - hostPath: - path: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/ - type: DirectoryOrCreate - - name: pods-mount-dir - hostPath: - path: /var/lib/kubelet - type: Directory - - name: device-dir - hostPath: - path: /dev - tolerations: - - effect: NoExecute - operator: Exists - - effect: NoSchedule - operator: Exists - ---- -apiVersion: v1 -data: - cns-csi.conf: | - [GC] - endpoint = "{{ .SupervisorMasterEndpointHostName }}" - port = "{{ .SupervisorMasterPort }}" - tanzukubernetescluster-uid = "{{ .TanzuKubernetesClusterUID }}" - tanzukubernetescluster-name = "{{ .TanzuKubernetesClusterName }}" -kind: ConfigMap -metadata: - name: pvcsi-config - namespace: {{ .PVCSINamespace }} ---- \ No newline at end of file diff --git a/manifests/dev/vsphere-7.0u1/guestcluster/1.17/pvcsi.yaml b/manifests/dev/vsphere-7.0u1/guestcluster/1.17/pvcsi.yaml deleted file mode 100644 index 133b7760ff..0000000000 --- a/manifests/dev/vsphere-7.0u1/guestcluster/1.17/pvcsi.yaml +++ /dev/null @@ -1,383 +0,0 @@ -# This yaml is the same as in k8s 1.15,1.16 -apiVersion: v1 -kind: Namespace -metadata: - name: {{ .PVCSINamespace }} ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: vsphere-csi-controller - namespace: {{ .PVCSINamespace }} ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-controller-role -rules: - - apiGroups: [""] - resources: ["nodes", "pods", "configmaps"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "create", "update", "delete", "patch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["get", "list", "watch", "create", "update", "patch"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", "create"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses", "csinodes"] - verbs: ["get", "list", "watch"] - - apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["policy"] - resources: ["podsecuritypolicies"] - verbs: ["use"] - resourceNames: ["vmware-system-privileged"] - - apiGroups: [""] - resources: ["persistentvolumeclaims/status"] - verbs: ["update", "patch"] ---- 
-kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-node-role - namespace: {{ .PVCSINamespace }} -rules: - - apiGroups: - - "policy" - resources: - - podsecuritypolicies - verbs: - - use - resourceNames: - - vmware-system-privileged ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-controller-binding -subjects: - - kind: ServiceAccount - name: vsphere-csi-controller - namespace: {{ .PVCSINamespace }} -roleRef: - kind: ClusterRole - name: vsphere-csi-controller-role - apiGroup: rbac.authorization.k8s.io ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-node-binding - namespace: {{ .PVCSINamespace }} -subjects: - - kind: ServiceAccount - name: default - namespace: {{ .PVCSINamespace }} -roleRef: - kind: Role - name: vsphere-csi-node-role - apiGroup: rbac.authorization.k8s.io ---- -kind: Deployment -apiVersion: apps/v1 -metadata: - name: vsphere-csi-controller - namespace: {{ .PVCSINamespace }} -spec: - replicas: 1 - strategy: - type: Recreate - selector: - matchLabels: - app: vsphere-csi-controller - template: - metadata: - labels: - app: vsphere-csi-controller - role: vsphere-csi - spec: - serviceAccountName: vsphere-csi-controller - nodeSelector: - node-role.kubernetes.io/master: "" - tolerations: - - operator: "Exists" - key: node-role.kubernetes.io/master - effect: NoSchedule - containers: - - name: csi-attacher - image: vmware.io/csi-attacher/csi-attacher: - args: - - "--v=4" - - "--timeout=300s" - - "--csi-address=$(ADDRESS)" - - "--leader-election" - - "--leader-election-type=leases" - imagePullPolicy: "IfNotPresent" - env: - - name: ADDRESS - value: /csi/csi.sock - volumeMounts: - - mountPath: /csi - name: socket-dir - - name: vsphere-csi-controller - image: vmware.io/vsphere-csi: - args: - - "--supervisor-fss-name=csi-feature-states" - - "--supervisor-fss-namespace=$(CSI_NAMESPACE)" - - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - - "--fss-namespace=$(CSI_NAMESPACE)" - lifecycle: - preStop: - exec: - command: ["/bin/sh", "-c", "rm -rf /var/lib/csi/sockets/pluginproxy/csi.vsphere.vmware.com"] - imagePullPolicy: "IfNotPresent" - env: - - name: CSI_ENDPOINT - value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock - - name: CLUSTER_FLAVOR - value: "GUEST_CLUSTER" - - name: X_CSI_MODE - value: "controller" - - name: GC_CONFIG - value: /etc/cloud/pvcsi-config/cns-csi.conf - - name: PROVISION_TIMEOUT_MINUTES - value: "4" - - name: ATTACHER_TIMEOUT_MINUTES - value: "4" - - name: RESIZE_TIMEOUT_MINUTES - value: "4" - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: SUPERVISOR_CLIENT_QPS - value: "50" - - name: SUPERVISOR_CLIENT_BURST - value: "50" - - name: INCLUSTER_CLIENT_QPS - value: "50" - - name: INCLUSTER_CLIENT_BURST - value: "50" - - name: CSI_NAMESPACE - value: {{ .PVCSINamespace }} - volumeMounts: - - mountPath: /etc/cloud/pvcsi-provider - name: pvcsi-provider-volume - readOnly: true - - mountPath: /etc/cloud/pvcsi-config - name: pvcsi-config-volume - readOnly: true - - mountPath: /var/lib/csi/sockets/pluginproxy/ - name: socket-dir - - name: vsphere-syncer - image: vmware.io/syncer: - args: - - "--leader-election" - - "--supervisor-fss-name=csi-feature-states" - - "--supervisor-fss-namespace=$(CSI_NAMESPACE)" - - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - - "--fss-namespace=$(CSI_NAMESPACE)" - imagePullPolicy: "IfNotPresent" - env: - - name: FULL_SYNC_INTERVAL_MINUTES - 
value: "30" - - name: GC_CONFIG - value: /etc/cloud/pvcsi-config/cns-csi.conf - - name: CLUSTER_FLAVOR - value: "GUEST_CLUSTER" - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: CSI_NAMESPACE - value: {{ .PVCSINamespace }} - volumeMounts: - - mountPath: /etc/cloud/pvcsi-provider - name: pvcsi-provider-volume - readOnly: true - - mountPath: /etc/cloud/pvcsi-config - name: pvcsi-config-volume - readOnly: true - - name: liveness-probe - image: vmware.io/csi-livenessprobe/csi-livenessprobe: - args: - - "--csi-address=$(ADDRESS)" - imagePullPolicy: "IfNotPresent" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - volumeMounts: - - mountPath: /var/lib/csi/sockets/pluginproxy/ - name: socket-dir - - name: csi-provisioner - image: vmware.io/csi-provisioner/csi-provisioner: - args: - - "--v=4" - - "--timeout=300s" - - "--csi-address=$(ADDRESS)" - - "--enable-leader-election" - - "--leader-election-type=leases" - imagePullPolicy: "IfNotPresent" - env: - - name: ADDRESS - value: /csi/csi.sock - volumeMounts: - - mountPath: /csi - name: socket-dir - - name: csi-resizer - image: vmware.io/kubernetes-csi_external-resizer/kubernetes-csi_external-resizer: - args: - - "--v=4" - - "--csiTimeout=300s" - - "--csi-address=$(ADDRESS)" - - "--leader-election" - env: - - name: ADDRESS - value: /csi/csi.sock - volumeMounts: - - mountPath: /csi - name: socket-dir - volumes: - - name: pvcsi-provider-volume - secret: - secretName: pvcsi-provider-creds - - name: pvcsi-config-volume - configMap: - name: pvcsi-config - - name: socket-dir - hostPath: - path: /var/lib/csi/sockets/pluginproxy/csi.vsphere.vmware.com - type: DirectoryOrCreate ---- -apiVersion: storage.k8s.io/v1beta1 -kind: CSIDriver -metadata: - name: csi.vsphere.vmware.com -spec: - attachRequired: true - podInfoOnMount: false ---- -kind: DaemonSet -apiVersion: apps/v1 -metadata: - name: vsphere-csi-node - namespace: {{ .PVCSINamespace }} -spec: - selector: - matchLabels: - app: vsphere-csi-node - updateStrategy: - type: "RollingUpdate" - template: - metadata: - labels: - app: vsphere-csi-node - role: vsphere-csi - spec: - containers: - - name: node-driver-registrar - image: vmware.io/csi-node-driver-registrar: - imagePullPolicy: "IfNotPresent" - lifecycle: - preStop: - exec: - command: ["/bin/sh", "-c", "rm -rf /registration/csi.vsphere.vmware.com-reg.sock /csi/csi.sock"] - args: - - "--v=5" - - "--csi-address=$(ADDRESS)" - - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" - env: - - name: ADDRESS - value: /csi/csi.sock - - name: DRIVER_REG_SOCK_PATH - value: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock - volumeMounts: - - name: plugin-dir - mountPath: /csi - - name: registration-dir - mountPath: /registration - - name: vsphere-csi-node - image: vmware.io/vsphere-csi: - args: - - "--supervisor-fss-name=csi-feature-states" - - "--supervisor-fss-namespace=$(CSI_NAMESPACE)" - - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - - "--fss-namespace=$(CSI_NAMESPACE)" - imagePullPolicy: "IfNotPresent" - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: CSI_ENDPOINT - value: unix:///csi/csi.sock - - name: X_CSI_MODE - value: "node" - - name: X_CSI_SPEC_REQ_VALIDATION - value: "false" - - name: CLUSTER_FLAVOR - value: "GUEST_CLUSTER" - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: CSI_NAMESPACE - value: {{ .PVCSINamespace }} - securityContext: - privileged: true - capabilities: - add: 
["SYS_ADMIN"] - allowPrivilegeEscalation: true - volumeMounts: - - name: plugin-dir - mountPath: /csi - - name: pods-mount-dir - mountPath: /var/lib/kubelet - mountPropagation: "Bidirectional" - - name: device-dir - mountPath: /dev - - name: liveness-probe - image: vmware.io/csi-livenessprobe/csi-livenessprobe: - args: - - --csi-address=/csi/csi.sock - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: plugin-dir - mountPath: /csi - volumes: - - name: registration-dir - hostPath: - path: /var/lib/kubelet/plugins_registry - type: Directory - - name: plugin-dir - hostPath: - path: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/ - type: DirectoryOrCreate - - name: pods-mount-dir - hostPath: - path: /var/lib/kubelet - type: Directory - - name: device-dir - hostPath: - path: /dev - tolerations: - - effect: NoExecute - operator: Exists - - effect: NoSchedule - operator: Exists ---- -apiVersion: v1 -data: - cns-csi.conf: | - [GC] - endpoint = "{{ .SupervisorMasterEndpointHostName }}" - port = "{{ .SupervisorMasterPort }}" - tanzukubernetescluster-uid = "{{ .TanzuKubernetesClusterUID }}" - tanzukubernetescluster-name = "{{ .TanzuKubernetesClusterName }}" -kind: ConfigMap -metadata: - name: pvcsi-config - namespace: {{ .PVCSINamespace }} ---- \ No newline at end of file diff --git a/manifests/dev/vsphere-7.0u1/supervisorcluster/k8s-1.15/deploy/vsphere-csi-controller-deployment.yaml b/manifests/dev/vsphere-7.0u1/supervisorcluster/k8s-1.15/deploy/vsphere-csi-controller-deployment.yaml deleted file mode 100644 index dcb715d07a..0000000000 --- a/manifests/dev/vsphere-7.0u1/supervisorcluster/k8s-1.15/deploy/vsphere-csi-controller-deployment.yaml +++ /dev/null @@ -1,160 +0,0 @@ -kind: Deployment -apiVersion: apps/v1 -metadata: - name: vsphere-csi-controller - namespace: vmware-system-csi -spec: - strategy: - type: Recreate - replicas: 1 - selector: - matchLabels: - app: vsphere-csi-controller - template: - metadata: - labels: - app: vsphere-csi-controller - role: vsphere-csi - spec: - serviceAccount: vsphere-csi-controller - nodeSelector: - node-role.kubernetes.io/master: '' - tolerations: - - operator: "Exists" - key: "node-role.kubernetes.io/master" - effect: "NoSchedule" - hostNetwork: true - containers: - - name: csi-provisioner - image: vmware/csi-provisioner/csi-provisioner:v1.2.1_vmware.2 - args: - - "--v=4" - - "--timeout=300s" - - "--csi-address=$(ADDRESS)" - - "--feature-gates=Topology=true" - - "--strict-topology" - - "--enable-leader-election" - - "--leader-election-type=leases" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - - name: KUBERNETES_SERVICE_HOST - value: "127.0.0.1" - - name: KUBERNETES_SERVICE_PORT - value: "6443" - - name: VSPHERE_CLOUD_OPERATOR_SERVICE_PORT - value: "29000" - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/sockets/pluginproxy/ - - name: csi-attacher - image: vmware/csi-attacher/csi-attacher:v1.1.1 - args: - - "--v=4" - - "--timeout=300s" - - "--csi-address=$(ADDRESS)" - - "--leader-election" - - "--leader-election-type=leases" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - - name: KUBERNETES_SERVICE_HOST - value: "127.0.0.1" - - name: KUBERNETES_SERVICE_PORT - value: "6443" - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/sockets/pluginproxy/ - - name: vsphere-csi-controller - image: vmware/vsphere-csi: - args: - - "--supervisor-fss-name=csi-feature-states" - - 
"--supervisor-fss-namespace=$(CSI_NAMESPACE)" - lifecycle: - preStop: - exec: - command: ["/bin/sh", "-c", "rm -rf /var/lib/csi/sockets/pluginproxy/csi.vsphere.vmware.com"] - env: - - name: CSI_ENDPOINT - value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock - - name: CLUSTER_FLAVOR - value: "WORKLOAD" - - name: X_CSI_MODE - value: "controller" - - name: KUBERNETES_SERVICE_HOST - value: "127.0.0.1" - - name: KUBERNETES_SERVICE_PORT - value: "6443" - - name: POD_LISTENER_SERVICE_PORT - value: "29000" - - name: VSPHERE_CSI_CONFIG - value: "/etc/vmware/wcp/vsphere-cloud-provider.conf" # here vsphere-cloud-provider.conf is the name of the file used for creating secret using "--from-file" flag - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: INCLUSTER_CLIENT_QPS - value: "50" - - name: INCLUSTER_CLIENT_BURST - value: "50" - - name: CSI_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - imagePullPolicy: "IfNotPresent" - volumeMounts: - - mountPath: /etc/vmware/wcp - name: vsphere-config-volume - readOnly: true - - mountPath: /var/lib/csi/sockets/pluginproxy/ - name: socket-dir - - name: liveness-probe - image: vmware/csi-livenessprobe/csi-livenessprobe:v1.1.0 - args: - - "--csi-address=$(ADDRESS)" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - volumeMounts: - - mountPath: /var/lib/csi/sockets/pluginproxy/ - name: socket-dir - - name: vsphere-syncer - image: vmware/syncer: - args: - - "--leader-election" - - "--supervisor-fss-name=csi-feature-states" - - "--supervisor-fss-namespace=$(CSI_NAMESPACE)" - env: - - name: CLUSTER_FLAVOR - value: "WORKLOAD" - - name: KUBERNETES_SERVICE_HOST - value: "127.0.0.1" - - name: KUBERNETES_SERVICE_PORT - value: "6443" - - name: FULL_SYNC_INTERVAL_MINUTES - value: "30" - - name: POD_POLL_INTERVAL_SECONDS - value: "2" - - name: POD_LISTENER_SERVICE_PORT - value: "29000" - - name: VSPHERE_CSI_CONFIG - value: "/etc/vmware/wcp/vsphere-cloud-provider.conf" # here vsphere-cloud-provider.conf is the name of the file used for creating secret using "--from-file" flag - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: CSI_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - imagePullPolicy: "IfNotPresent" - volumeMounts: - - mountPath: /etc/vmware/wcp - name: vsphere-config-volume - readOnly: true - volumes: - - name: vsphere-config-volume - secret: - secretName: vsphere-config-secret - - name: socket-dir - hostPath: - path: /var/lib/csi/sockets/pluginproxy/csi.vsphere.vmware.com - type: DirectoryOrCreate diff --git a/manifests/dev/vsphere-7.0u1/supervisorcluster/k8s-1.15/rbac/vsphere-csi-controller-rbac.yaml b/manifests/dev/vsphere-7.0u1/supervisorcluster/k8s-1.15/rbac/vsphere-csi-controller-rbac.yaml deleted file mode 100644 index 3678af52af..0000000000 --- a/manifests/dev/vsphere-7.0u1/supervisorcluster/k8s-1.15/rbac/vsphere-csi-controller-rbac.yaml +++ /dev/null @@ -1,64 +0,0 @@ -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-controller-role -rules: - - apiGroups: [""] - resources: ["nodes", "pods", "resourcequotas"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "create", "patch"] - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "update", "create", "delete"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses", "csinodes"] - verbs: ["get", "list", 
"watch", "patch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] - - apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["cns.vmware.com"] - resources: ["cnsnodevmattachments", "cnsvolumemetadatas", "cnsregistervolumes"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["cns.vmware.com"] - resources: ["storagepools"] - verbs: ["get", "watch", "list", "delete", "update", "create"] - - apiGroups: ["apiextensions.k8s.io"] - resources: ["customresourcedefinitions"] - verbs: ["get", "create", "update"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", "create"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-controller-binding -subjects: - - kind: ServiceAccount - name: vsphere-csi-controller - namespace: vmware-system-csi -roleRef: - kind: ClusterRole - name: vsphere-csi-controller-role - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: csiRole - namespace: vmware-system-csi -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: wcp-privileged-psp -subjects: - # For the kube-system nodes. - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts:vmware-system-csi - diff --git a/manifests/dev/vsphere-7.0u1/supervisorcluster/k8s-1.16/deploy/vsphere-csi-controller-deployment.yaml b/manifests/dev/vsphere-7.0u1/supervisorcluster/k8s-1.16/deploy/vsphere-csi-controller-deployment.yaml deleted file mode 100644 index 28f107d821..0000000000 --- a/manifests/dev/vsphere-7.0u1/supervisorcluster/k8s-1.16/deploy/vsphere-csi-controller-deployment.yaml +++ /dev/null @@ -1,189 +0,0 @@ -kind: Deployment -apiVersion: apps/v1 -metadata: - name: vsphere-csi-controller - namespace: vmware-system-csi -spec: - strategy: - type: Recreate - replicas: 1 - selector: - matchLabels: - app: vsphere-csi-controller - template: - metadata: - labels: - app: vsphere-csi-controller - role: vsphere-csi - spec: - serviceAccount: vsphere-csi-controller - nodeSelector: - node-role.kubernetes.io/master: '' - tolerations: - - operator: "Exists" - key: "node-role.kubernetes.io/master" - effect: "NoSchedule" - hostNetwork: true - containers: - - name: csi-provisioner - image: vmware/csi-provisioner/csi-provisioner:v1.2.1_vmware.2 - args: - - "--v=4" - - "--timeout=300s" - - "--csi-address=$(ADDRESS)" - - "--feature-gates=Topology=true" - - "--strict-topology" - - "--enable-leader-election" - - "--leader-election-type=leases" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - - name: KUBERNETES_SERVICE_HOST - value: "127.0.0.1" - - name: KUBERNETES_SERVICE_PORT - value: "6443" - - name: VSPHERE_CLOUD_OPERATOR_SERVICE_PORT - value: "29000" - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/sockets/pluginproxy/ - - name: csi-attacher - image: vmware/csi-attacher/csi-attacher:v1.1.1 - args: - - "--v=4" - - "--timeout=300s" - - "--csi-address=$(ADDRESS)" - - "--leader-election" - - "--leader-election-type=leases" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - - name: KUBERNETES_SERVICE_HOST - value: "127.0.0.1" - - name: KUBERNETES_SERVICE_PORT - value: "6443" - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: socket-dir - mountPath: 
/var/lib/csi/sockets/pluginproxy/ - - name: csi-resizer - image: vmware/kubernetes-csi_external-resizer/kubernetes-csi_external-resizer:v0.5.0 - imagePullPolicy: IfNotPresent - args: - - --v=4 - - --csiTimeout=300s - - --csi-address=$(ADDRESS) - - --leader-election - env: - - name: ADDRESS - value: /csi/csi.sock - resources: {} - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /csi - name: socket-dir - - name: vsphere-csi-controller - image: vmware/vsphere-csi: - args: - - "--supervisor-fss-name=csi-feature-states" - - "--supervisor-fss-namespace=$(CSI_NAMESPACE)" - lifecycle: - preStop: - exec: - command: ["/bin/sh", "-c", "rm -rf /var/lib/csi/sockets/pluginproxy/csi.vsphere.vmware.com"] - env: - - name: CSI_ENDPOINT - value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock - - name: CLUSTER_FLAVOR - value: "WORKLOAD" - - name: X_CSI_MODE - value: "controller" - - name: KUBERNETES_SERVICE_HOST - value: "127.0.0.1" - - name: KUBERNETES_SERVICE_PORT - value: "6443" - - name: POD_LISTENER_SERVICE_PORT - value: "29000" - - name: VSPHERE_CSI_CONFIG - value: "/etc/vmware/wcp/vsphere-cloud-provider.conf" # here vsphere-cloud-provider.conf is the name of the file used for creating secret using "--from-file" flag - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: INCLUSTER_CLIENT_QPS - value: "50" - - name: INCLUSTER_CLIENT_BURST - value: "50" - - name: CSI_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - imagePullPolicy: "IfNotPresent" - volumeMounts: - - mountPath: /etc/vmware/wcp - name: vsphere-config-volume - readOnly: true - - mountPath: /var/lib/csi/sockets/pluginproxy/ - name: socket-dir - - name: liveness-probe - image: vmware/csi-livenessprobe/csi-livenessprobe:v1.1.0 - args: - - "--csi-address=$(ADDRESS)" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - volumeMounts: - - mountPath: /var/lib/csi/sockets/pluginproxy/ - name: socket-dir - - name: vsphere-syncer - image: vmware/syncer: - args: - - "--leader-election" - - "--supervisor-fss-name=csi-feature-states" - - "--supervisor-fss-namespace=$(CSI_NAMESPACE)" - env: - - name: CLUSTER_FLAVOR - value: "WORKLOAD" - - name: KUBERNETES_SERVICE_HOST - value: "127.0.0.1" - - name: KUBERNETES_SERVICE_PORT - value: "6443" - - name: FULL_SYNC_INTERVAL_MINUTES - value: "30" - - name: VOLUME_HEALTH_INTERVAL_MINUTES - value: "5" - - name: POD_POLL_INTERVAL_SECONDS - value: "2" - - name: POD_LISTENER_SERVICE_PORT - value: "29000" - - name: VSPHERE_CSI_CONFIG - value: "/etc/vmware/wcp/vsphere-cloud-provider.conf" # here vsphere-cloud-provider.conf is the name of the file used for creating secret using "--from-file" flag - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: CSI_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - imagePullPolicy: "IfNotPresent" - volumeMounts: - - mountPath: /etc/vmware/wcp - name: vsphere-config-volume - readOnly: true - volumes: - - name: vsphere-config-volume - secret: - secretName: vsphere-config-secret - - name: socket-dir - hostPath: - path: /var/lib/csi/sockets/pluginproxy/csi.vsphere.vmware.com - type: DirectoryOrCreate ---- -apiVersion: v1 -data: - "volume-extend": "true" - "volume-health": "true" -kind: ConfigMap -metadata: - name: csi-feature-states - namespace: vmware-system-csi ---- \ No newline at end of file diff --git 
a/manifests/dev/vsphere-7.0u1/supervisorcluster/k8s-1.16/rbac/vsphere-csi-controller-rbac.yaml b/manifests/dev/vsphere-7.0u1/supervisorcluster/k8s-1.16/rbac/vsphere-csi-controller-rbac.yaml deleted file mode 100644 index bd140f15ab..0000000000 --- a/manifests/dev/vsphere-7.0u1/supervisorcluster/k8s-1.16/rbac/vsphere-csi-controller-rbac.yaml +++ /dev/null @@ -1,68 +0,0 @@ -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-controller-role -rules: - - apiGroups: [""] - resources: ["nodes", "persistentvolumeclaims", "pods", "resourcequotas"] - verbs: ["get", "list", "watch", "patch"] - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "update", "create", "delete", "patch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims/status"] - verbs: ["update", "patch"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses", "csinodes", "configmaps"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] - - apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["cns.vmware.com"] - resources: ["cnsnodevmattachments", "cnsvolumemetadatas"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["cns.vmware.com"] - resources: ["storagepools"] - verbs: ["get", "watch", "list", "delete", "update", "create"] - - apiGroups: ["apiextensions.k8s.io"] - resources: ["customresourcedefinitions"] - verbs: ["get", "create"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", "create"] - - apiGroups: ["vmoperator.vmware.com"] - resources: ["virtualmachines"] - verbs: ["get", "list"] - ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-controller-binding -subjects: - - kind: ServiceAccount - name: vsphere-csi-controller - namespace: vmware-system-csi -roleRef: - kind: ClusterRole - name: vsphere-csi-controller-role - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: csiRole - namespace: vmware-system-csi -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: wcp-privileged-psp -subjects: - # For the kube-system nodes. 
- - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts:vmware-system-csi - diff --git a/manifests/dev/vsphere-7.0u2/guestcluster/1.17/pvcsi.yaml b/manifests/dev/vsphere-7.0u2/guestcluster/1.17/pvcsi.yaml deleted file mode 100644 index b63192b1fb..0000000000 --- a/manifests/dev/vsphere-7.0u2/guestcluster/1.17/pvcsi.yaml +++ /dev/null @@ -1,434 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: {{ .PVCSINamespace }} ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: vsphere-csi-controller - namespace: {{ .PVCSINamespace }} ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: vsphere-csi-node - namespace: {{ .PVCSINamespace }} ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-controller-role -rules: - - apiGroups: [""] - resources: ["nodes", "pods", "configmaps"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "create", "update", "delete", "patch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["get", "list", "watch", "create", "update", "patch"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", "create"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses", "csinodes"] - verbs: ["get", "list", "watch"] - - apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update", "patch"] - - apiGroups: ["policy"] - resources: ["podsecuritypolicies"] - verbs: ["use"] - resourceNames: ["vmware-system-privileged"] - - apiGroups: [""] - resources: ["persistentvolumeclaims/status"] - verbs: ["update", "patch"] ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-node-role - namespace: {{ .PVCSINamespace }} -rules: - - apiGroups: ["policy"] - resources: ["podsecuritypolicies"] - verbs: ["use"] - resourceNames: ["vmware-system-privileged"] - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-controller-binding -subjects: - - kind: ServiceAccount - name: vsphere-csi-controller - namespace: {{ .PVCSINamespace }} -roleRef: - kind: ClusterRole - name: vsphere-csi-controller-role - apiGroup: rbac.authorization.k8s.io ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-node-binding - namespace: {{ .PVCSINamespace }} -subjects: - - kind: ServiceAccount - name: vsphere-csi-node - namespace: {{ .PVCSINamespace }} -roleRef: - kind: Role - name: vsphere-csi-node-role - apiGroup: rbac.authorization.k8s.io ---- -kind: Deployment -apiVersion: apps/v1 -metadata: - name: vsphere-csi-controller - namespace: {{ .PVCSINamespace }} -spec: - replicas: 1 - strategy: - type: Recreate - selector: - matchLabels: - app: vsphere-csi-controller - template: - metadata: - labels: - app: vsphere-csi-controller - role: vsphere-csi - spec: - serviceAccountName: vsphere-csi-controller - nodeSelector: - node-role.kubernetes.io/master: "" - tolerations: - - operator: "Exists" - key: node-role.kubernetes.io/master - effect: NoSchedule - containers: - - name: csi-attacher - image: vmware.io/csi-attacher: - args: - - "--v=4" - - "--timeout=300s" - - "--csi-address=$(ADDRESS)" - - "--leader-election" - imagePullPolicy: 
"IfNotPresent" - env: - - name: ADDRESS - value: /csi/csi.sock - volumeMounts: - - mountPath: /csi - name: socket-dir - - name: vsphere-csi-controller - image: vmware.io/vsphere-csi: - args: - - "--supervisor-fss-name=csi-feature-states" - - "--supervisor-fss-namespace=$(CSI_NAMESPACE)" - - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - - "--fss-namespace=$(CSI_NAMESPACE)" - imagePullPolicy: "IfNotPresent" - ports: - - containerPort: 2112 - name: prometheus - protocol: TCP - env: - - name: CSI_ENDPOINT - value: unix:///csi/csi.sock - - name: CLUSTER_FLAVOR - value: "GUEST_CLUSTER" - - name: X_CSI_MODE - value: "controller" - - name: GC_CONFIG - value: /etc/cloud/pvcsi-config/cns-csi.conf - - name: PROVISION_TIMEOUT_MINUTES - value: "4" - - name: ATTACHER_TIMEOUT_MINUTES - value: "4" - - name: RESIZE_TIMEOUT_MINUTES - value: "4" - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: SUPERVISOR_CLIENT_QPS - value: "50" - - name: SUPERVISOR_CLIENT_BURST - value: "50" - - name: INCLUSTER_CLIENT_QPS - value: "50" - - name: INCLUSTER_CLIENT_BURST - value: "50" - - name: CSI_NAMESPACE - value: {{ .PVCSINamespace }} - volumeMounts: - - mountPath: /etc/cloud/pvcsi-provider - name: pvcsi-provider-volume - readOnly: true - - mountPath: /etc/cloud/pvcsi-config - name: pvcsi-config-volume - readOnly: true - - mountPath: /csi - name: socket-dir - - name: vsphere-syncer - image: vmware.io/syncer: - args: - - "--leader-election" - - "--supervisor-fss-name=csi-feature-states" - - "--supervisor-fss-namespace=$(CSI_NAMESPACE)" - - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - - "--fss-namespace=$(CSI_NAMESPACE)" - imagePullPolicy: "IfNotPresent" - ports: - - containerPort: 2113 - name: prometheus - protocol: TCP - env: - - name: FULL_SYNC_INTERVAL_MINUTES - value: "30" - - name: GC_CONFIG - value: /etc/cloud/pvcsi-config/cns-csi.conf - - name: CLUSTER_FLAVOR - value: "GUEST_CLUSTER" - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: CSI_NAMESPACE - value: {{ .PVCSINamespace }} - volumeMounts: - - mountPath: /etc/cloud/pvcsi-provider - name: pvcsi-provider-volume - readOnly: true - - mountPath: /etc/cloud/pvcsi-config - name: pvcsi-config-volume - readOnly: true - - name: liveness-probe - image: vmware.io/csi-livenessprobe/csi-livenessprobe: - args: - - "--csi-address=$(ADDRESS)" - imagePullPolicy: "IfNotPresent" - env: - - name: ADDRESS - value: /csi/csi.sock - volumeMounts: - - mountPath: /csi - name: socket-dir - - name: csi-provisioner - image: vmware.io/csi-provisioner/csi-provisioner: - args: - - "--v=4" - - "--timeout=300s" - - "--csi-address=$(ADDRESS)" - - "--enable-leader-election" - - "--leader-election-type=leases" - imagePullPolicy: "IfNotPresent" - env: - - name: ADDRESS - value: /csi/csi.sock - volumeMounts: - - mountPath: /csi - name: socket-dir - - name: csi-resizer - image: vmware.io/kubernetes-csi_external-resizer/kubernetes-csi_external-resizer: - args: - - "--v=4" - - "--timeout=300s" - - "--handle-volume-inuse-error=false" # Set this to true if used in vSphere 7.0U1 - - "--csi-address=$(ADDRESS)" - - "--leader-election" - - "--kube-api-qps=100" - - "--kube-api-burst=100" - env: - - name: ADDRESS - value: /csi/csi.sock - volumeMounts: - - mountPath: /csi - name: socket-dir - volumes: - - name: pvcsi-provider-volume - secret: - secretName: pvcsi-provider-creds - - name: pvcsi-config-volume - configMap: - name: pvcsi-config - - name: socket-dir - emptyDir: {} ---- -apiVersion: 
storage.k8s.io/v1beta1 -kind: CSIDriver -metadata: - name: csi.vsphere.vmware.com -spec: - attachRequired: true - podInfoOnMount: false ---- -kind: DaemonSet -apiVersion: apps/v1 -metadata: - name: vsphere-csi-node - namespace: {{ .PVCSINamespace }} -spec: - selector: - matchLabels: - app: vsphere-csi-node - updateStrategy: - type: "RollingUpdate" - template: - metadata: - labels: - app: vsphere-csi-node - role: vsphere-csi - spec: - serviceAccountName: vsphere-csi-node - containers: - - name: node-driver-registrar - image: vmware.io/csi-node-driver-registrar: - imagePullPolicy: "IfNotPresent" - lifecycle: - preStop: - exec: - command: ["/bin/sh", "-c", "rm -rf /registration/csi.vsphere.vmware.com-reg.sock /csi/csi.sock"] - args: - - "--v=5" - - "--csi-address=$(ADDRESS)" - - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" - env: - - name: ADDRESS - value: /csi/csi.sock - - name: DRIVER_REG_SOCK_PATH - value: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock - volumeMounts: - - name: plugin-dir - mountPath: /csi - - name: registration-dir - mountPath: /registration - - name: vsphere-csi-node - image: vmware.io/vsphere-csi: - args: - - "--supervisor-fss-name=csi-feature-states" - - "--supervisor-fss-namespace=$(CSI_NAMESPACE)" - - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - - "--fss-namespace=$(CSI_NAMESPACE)" - imagePullPolicy: "IfNotPresent" - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: CSI_ENDPOINT - value: unix:///csi/csi.sock - - name: X_CSI_MODE - value: "node" - - name: X_CSI_SPEC_REQ_VALIDATION - value: "false" - - name: CLUSTER_FLAVOR - value: "GUEST_CLUSTER" - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: CSI_NAMESPACE - value: {{ .PVCSINamespace }} - securityContext: - privileged: true - capabilities: - add: ["SYS_ADMIN"] - allowPrivilegeEscalation: true - volumeMounts: - - name: plugin-dir - mountPath: /csi - - name: pods-mount-dir - mountPath: /var/lib/kubelet - mountPropagation: "Bidirectional" - - name: device-dir - mountPath: /dev - - name: blocks-dir - mountPath: /sys/block - - name: sys-devices-dir - mountPath: /sys/devices - - name: liveness-probe - image: vmware.io/csi-livenessprobe/csi-livenessprobe: - args: - - --csi-address=/csi/csi.sock - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: plugin-dir - mountPath: /csi - volumes: - - name: registration-dir - hostPath: - path: /var/lib/kubelet/plugins_registry - type: Directory - - name: plugin-dir - hostPath: - path: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/ - type: DirectoryOrCreate - - name: pods-mount-dir - hostPath: - path: /var/lib/kubelet - type: Directory - - name: device-dir - hostPath: - path: /dev - - name: blocks-dir - hostPath: - path: /sys/block - type: Directory - - name: sys-devices-dir - hostPath: - path: /sys/devices - type: Directory - tolerations: - - effect: NoExecute - operator: Exists - - effect: NoSchedule - operator: Exists ---- -apiVersion: v1 -data: - cns-csi.conf: | - [GC] - endpoint = "{{ .SupervisorMasterEndpointHostName }}" - port = "{{ .SupervisorMasterPort }}" - tanzukubernetescluster-uid = "{{ .TanzuKubernetesClusterUID }}" - tanzukubernetescluster-name = "{{ .TanzuKubernetesClusterName }}" -kind: ConfigMap -metadata: - name: pvcsi-config - namespace: {{ .PVCSINamespace }} ---- -apiVersion: v1 -data: - "volume-extend": "true" - "volume-health": "true" - "online-volume-extend": "true" - "file-volume": "false" -kind: ConfigMap -metadata: - name: 
internal-feature-states.csi.vsphere.vmware.com - namespace: {{ .PVCSINamespace }} ---- -apiVersion: v1 -kind: Service -metadata: - name: vsphere-csi-controller - namespace: {{ .PVCSINamespace }} - labels: - app: vsphere-csi-controller -spec: - ports: - - name: ctlr - port: 2112 - targetPort: 2112 - protocol: TCP - - name: syncer - port: 2113 - targetPort: 2113 - protocol: TCP - selector: - app: vsphere-csi-controller \ No newline at end of file diff --git a/manifests/dev/vsphere-7.0u2/guestcluster/1.18/pvcsi.yaml b/manifests/dev/vsphere-7.0u2/guestcluster/1.18/pvcsi.yaml deleted file mode 100644 index 7ba276bf7b..0000000000 --- a/manifests/dev/vsphere-7.0u2/guestcluster/1.18/pvcsi.yaml +++ /dev/null @@ -1,434 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: {{ .PVCSINamespace }} ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: vsphere-csi-controller - namespace: {{ .PVCSINamespace }} ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: vsphere-csi-node - namespace: {{ .PVCSINamespace }} ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-controller-role -rules: - - apiGroups: [""] - resources: ["nodes", "pods", "configmaps"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "create", "update", "delete", "patch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["get", "list", "watch", "create", "update", "patch"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", "create"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses", "csinodes"] - verbs: ["get", "list", "watch"] - - apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update", "patch"] - - apiGroups: ["policy"] - resources: ["podsecuritypolicies"] - verbs: ["use"] - resourceNames: ["vmware-system-privileged"] - - apiGroups: [""] - resources: ["persistentvolumeclaims/status"] - verbs: ["update", "patch"] ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-node-role - namespace: {{ .PVCSINamespace }} -rules: - - apiGroups: ["policy"] - resources: ["podsecuritypolicies"] - verbs: ["use"] - resourceNames: ["vmware-system-privileged"] - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-controller-binding -subjects: - - kind: ServiceAccount - name: vsphere-csi-controller - namespace: {{ .PVCSINamespace }} -roleRef: - kind: ClusterRole - name: vsphere-csi-controller-role - apiGroup: rbac.authorization.k8s.io ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-node-binding - namespace: {{ .PVCSINamespace }} -subjects: - - kind: ServiceAccount - name: vsphere-csi-node - namespace: {{ .PVCSINamespace }} -roleRef: - kind: Role - name: vsphere-csi-node-role - apiGroup: rbac.authorization.k8s.io ---- -kind: Deployment -apiVersion: apps/v1 -metadata: - name: vsphere-csi-controller - namespace: {{ .PVCSINamespace }} -spec: - replicas: 1 - strategy: - type: Recreate - selector: - matchLabels: - app: vsphere-csi-controller - template: - metadata: - labels: - app: vsphere-csi-controller - role: vsphere-csi - spec: - serviceAccountName: vsphere-csi-controller 
- nodeSelector: - node-role.kubernetes.io/master: "" - tolerations: - - operator: "Exists" - key: node-role.kubernetes.io/master - effect: NoSchedule - containers: - - name: csi-attacher - image: vmware.io/csi-attacher: - args: - - "--v=4" - - "--timeout=300s" - - "--csi-address=$(ADDRESS)" - - "--leader-election" - imagePullPolicy: "IfNotPresent" - env: - - name: ADDRESS - value: /csi/csi.sock - volumeMounts: - - mountPath: /csi - name: socket-dir - - name: vsphere-csi-controller - image: vmware.io/vsphere-csi: - args: - - "--supervisor-fss-name=csi-feature-states" - - "--supervisor-fss-namespace=$(CSI_NAMESPACE)" - - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - - "--fss-namespace=$(CSI_NAMESPACE)" - imagePullPolicy: "IfNotPresent" - ports: - - containerPort: 2112 - name: prometheus - protocol: TCP - env: - - name: CSI_ENDPOINT - value: unix:///csi/csi.sock - - name: CLUSTER_FLAVOR - value: "GUEST_CLUSTER" - - name: X_CSI_MODE - value: "controller" - - name: GC_CONFIG - value: /etc/cloud/pvcsi-config/cns-csi.conf - - name: PROVISION_TIMEOUT_MINUTES - value: "4" - - name: ATTACHER_TIMEOUT_MINUTES - value: "4" - - name: RESIZE_TIMEOUT_MINUTES - value: "4" - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: SUPERVISOR_CLIENT_QPS - value: "50" - - name: SUPERVISOR_CLIENT_BURST - value: "50" - - name: INCLUSTER_CLIENT_QPS - value: "50" - - name: INCLUSTER_CLIENT_BURST - value: "50" - - name: CSI_NAMESPACE - value: {{ .PVCSINamespace }} - volumeMounts: - - mountPath: /etc/cloud/pvcsi-provider - name: pvcsi-provider-volume - readOnly: true - - mountPath: /etc/cloud/pvcsi-config - name: pvcsi-config-volume - readOnly: true - - mountPath: /csi - name: socket-dir - - name: vsphere-syncer - image: vmware.io/syncer: - args: - - "--leader-election" - - "--supervisor-fss-name=csi-feature-states" - - "--supervisor-fss-namespace=$(CSI_NAMESPACE)" - - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - - "--fss-namespace=$(CSI_NAMESPACE)" - imagePullPolicy: "IfNotPresent" - ports: - - containerPort: 2113 - name: prometheus - protocol: TCP - env: - - name: FULL_SYNC_INTERVAL_MINUTES - value: "30" - - name: GC_CONFIG - value: /etc/cloud/pvcsi-config/cns-csi.conf - - name: CLUSTER_FLAVOR - value: "GUEST_CLUSTER" - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: CSI_NAMESPACE - value: {{ .PVCSINamespace }} - volumeMounts: - - mountPath: /etc/cloud/pvcsi-provider - name: pvcsi-provider-volume - readOnly: true - - mountPath: /etc/cloud/pvcsi-config - name: pvcsi-config-volume - readOnly: true - - name: liveness-probe - image: vmware.io/csi-livenessprobe/csi-livenessprobe: - args: - - "--csi-address=$(ADDRESS)" - imagePullPolicy: "IfNotPresent" - env: - - name: ADDRESS - value: /csi/csi.sock - volumeMounts: - - mountPath: /csi - name: socket-dir - - name: csi-provisioner - image: vmware.io/csi-provisioner/csi-provisioner: - args: - - "--v=4" - - "--timeout=300s" - - "--csi-address=$(ADDRESS)" - - "--enable-leader-election" - - "--leader-election-type=leases" - imagePullPolicy: "IfNotPresent" - env: - - name: ADDRESS - value: /csi/csi.sock - volumeMounts: - - mountPath: /csi - name: socket-dir - - name: csi-resizer - image: vmware.io/kubernetes-csi_external-resizer/kubernetes-csi_external-resizer: - args: - - "--v=4" - - "--timeout=300s" - - "--handle-volume-inuse-error=false" # Set this to true if used in vSphere 7.0U1 - - "--csi-address=$(ADDRESS)" - - "--leader-election" - - "--kube-api-qps=100" - - 
"--kube-api-burst=100" - env: - - name: ADDRESS - value: /csi/csi.sock - volumeMounts: - - mountPath: /csi - name: socket-dir - volumes: - - name: pvcsi-provider-volume - secret: - secretName: pvcsi-provider-creds - - name: pvcsi-config-volume - configMap: - name: pvcsi-config - - name: socket-dir - emptyDir: {} ---- -apiVersion: storage.k8s.io/v1beta1 -kind: CSIDriver -metadata: - name: csi.vsphere.vmware.com -spec: - attachRequired: true - podInfoOnMount: false ---- -kind: DaemonSet -apiVersion: apps/v1 -metadata: - name: vsphere-csi-node - namespace: {{ .PVCSINamespace }} -spec: - selector: - matchLabels: - app: vsphere-csi-node - updateStrategy: - type: "RollingUpdate" - template: - metadata: - labels: - app: vsphere-csi-node - role: vsphere-csi - spec: - serviceAccountName: vsphere-csi-node - containers: - - name: node-driver-registrar - image: vmware.io/csi-node-driver-registrar: - imagePullPolicy: "IfNotPresent" - lifecycle: - preStop: - exec: - command: ["/bin/sh", "-c", "rm -rf /registration/csi.vsphere.vmware.com-reg.sock /csi/csi.sock"] - args: - - "--v=5" - - "--csi-address=$(ADDRESS)" - - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" - env: - - name: ADDRESS - value: /csi/csi.sock - - name: DRIVER_REG_SOCK_PATH - value: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock - volumeMounts: - - name: plugin-dir - mountPath: /csi - - name: registration-dir - mountPath: /registration - - name: vsphere-csi-node - image: vmware.io/vsphere-csi: - args: - - "--supervisor-fss-name=csi-feature-states" - - "--supervisor-fss-namespace=$(CSI_NAMESPACE)" - - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - - "--fss-namespace=$(CSI_NAMESPACE)" - imagePullPolicy: "IfNotPresent" - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: CSI_ENDPOINT - value: unix:///csi/csi.sock - - name: X_CSI_MODE - value: "node" - - name: X_CSI_SPEC_REQ_VALIDATION - value: "false" - - name: CLUSTER_FLAVOR - value: "GUEST_CLUSTER" - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: CSI_NAMESPACE - value: {{ .PVCSINamespace }} - securityContext: - privileged: true - capabilities: - add: ["SYS_ADMIN"] - allowPrivilegeEscalation: true - volumeMounts: - - name: plugin-dir - mountPath: /csi - - name: pods-mount-dir - mountPath: /var/lib/kubelet - mountPropagation: "Bidirectional" - - name: device-dir - mountPath: /dev - - name: blocks-dir - mountPath: /sys/block - - name: sys-devices-dir - mountPath: /sys/devices - - name: liveness-probe - image: vmware.io/csi-livenessprobe/csi-livenessprobe: - args: - - --csi-address=/csi/csi.sock - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: plugin-dir - mountPath: /csi - volumes: - - name: registration-dir - hostPath: - path: /var/lib/kubelet/plugins_registry - type: Directory - - name: plugin-dir - hostPath: - path: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/ - type: DirectoryOrCreate - - name: pods-mount-dir - hostPath: - path: /var/lib/kubelet - type: Directory - - name: device-dir - hostPath: - path: /dev - - name: blocks-dir - hostPath: - path: /sys/block - type: Directory - - name: sys-devices-dir - hostPath: - path: /sys/devices - type: Directory - tolerations: - - effect: NoExecute - operator: Exists - - effect: NoSchedule - operator: Exists ---- -apiVersion: v1 -data: - cns-csi.conf: | - [GC] - endpoint = "{{ .SupervisorMasterEndpointHostName }}" - port = "{{ .SupervisorMasterPort }}" - tanzukubernetescluster-uid = "{{ .TanzuKubernetesClusterUID }}" - 
tanzukubernetescluster-name = "{{ .TanzuKubernetesClusterName }}" -kind: ConfigMap -metadata: - name: pvcsi-config - namespace: {{ .PVCSINamespace }} ---- -apiVersion: v1 -data: - "volume-extend": "true" - "volume-health": "true" - "online-volume-extend": "true" - "file-volume": "false" -kind: ConfigMap -metadata: - name: internal-feature-states.csi.vsphere.vmware.com - namespace: {{ .PVCSINamespace }} ---- -apiVersion: v1 -kind: Service -metadata: - name: vsphere-csi-controller - namespace: {{ .PVCSINamespace }} - labels: - app: vsphere-csi-controller -spec: - ports: - - name: ctlr - port: 2112 - targetPort: 2112 - protocol: TCP - - name: syncer - port: 2113 - targetPort: 2113 - protocol: TCP - selector: - app: vsphere-csi-controller \ No newline at end of file diff --git a/manifests/dev/vsphere-7.0u2/guestcluster/1.19/pvcsi.yaml b/manifests/dev/vsphere-7.0u2/guestcluster/1.19/pvcsi.yaml deleted file mode 100644 index 91ec2570c8..0000000000 --- a/manifests/dev/vsphere-7.0u2/guestcluster/1.19/pvcsi.yaml +++ /dev/null @@ -1,434 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: {{ .PVCSINamespace }} ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: vsphere-csi-controller - namespace: {{ .PVCSINamespace }} ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: vsphere-csi-node - namespace: {{ .PVCSINamespace }} ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-controller-role -rules: - - apiGroups: [""] - resources: ["nodes", "pods", "configmaps"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "create", "update", "delete", "patch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["get", "list", "watch", "create", "update", "patch"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", "create"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses", "csinodes"] - verbs: ["get", "list", "watch"] - - apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update", "patch"] - - apiGroups: ["policy"] - resources: ["podsecuritypolicies"] - verbs: ["use"] - resourceNames: ["vmware-system-privileged"] - - apiGroups: [""] - resources: ["persistentvolumeclaims/status"] - verbs: ["update", "patch"] ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-node-role - namespace: {{ .PVCSINamespace }} -rules: - - apiGroups: ["policy"] - resources: ["podsecuritypolicies"] - verbs: ["use"] - resourceNames: ["vmware-system-privileged"] - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-controller-binding -subjects: - - kind: ServiceAccount - name: vsphere-csi-controller - namespace: {{ .PVCSINamespace }} -roleRef: - kind: ClusterRole - name: vsphere-csi-controller-role - apiGroup: rbac.authorization.k8s.io ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-node-binding - namespace: {{ .PVCSINamespace }} -subjects: - - kind: ServiceAccount - name: vsphere-csi-node - namespace: {{ .PVCSINamespace }} -roleRef: - kind: Role - name: vsphere-csi-node-role - apiGroup: rbac.authorization.k8s.io ---- -kind: Deployment -apiVersion: 
apps/v1 -metadata: - name: vsphere-csi-controller - namespace: {{ .PVCSINamespace }} -spec: - replicas: 1 - strategy: - type: Recreate - selector: - matchLabels: - app: vsphere-csi-controller - template: - metadata: - labels: - app: vsphere-csi-controller - role: vsphere-csi - spec: - serviceAccountName: vsphere-csi-controller - nodeSelector: - node-role.kubernetes.io/master: "" - tolerations: - - operator: "Exists" - key: node-role.kubernetes.io/master - effect: NoSchedule - containers: - - name: csi-attacher - image: vmware.io/csi-attacher: - args: - - "--v=4" - - "--timeout=300s" - - "--csi-address=$(ADDRESS)" - - "--leader-election" - imagePullPolicy: "IfNotPresent" - env: - - name: ADDRESS - value: /csi/csi.sock - volumeMounts: - - mountPath: /csi - name: socket-dir - - name: vsphere-csi-controller - image: vmware.io/vsphere-csi: - args: - - "--supervisor-fss-name=csi-feature-states" - - "--supervisor-fss-namespace=$(CSI_NAMESPACE)" - - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - - "--fss-namespace=$(CSI_NAMESPACE)" - imagePullPolicy: "IfNotPresent" - ports: - - containerPort: 2112 - name: prometheus - protocol: TCP - env: - - name: CSI_ENDPOINT - value: unix:///csi/csi.sock - - name: CLUSTER_FLAVOR - value: "GUEST_CLUSTER" - - name: X_CSI_MODE - value: "controller" - - name: GC_CONFIG - value: /etc/cloud/pvcsi-config/cns-csi.conf - - name: PROVISION_TIMEOUT_MINUTES - value: "4" - - name: ATTACHER_TIMEOUT_MINUTES - value: "4" - - name: RESIZE_TIMEOUT_MINUTES - value: "4" - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: SUPERVISOR_CLIENT_QPS - value: "50" - - name: SUPERVISOR_CLIENT_BURST - value: "50" - - name: INCLUSTER_CLIENT_QPS - value: "50" - - name: INCLUSTER_CLIENT_BURST - value: "50" - - name: CSI_NAMESPACE - value: {{ .PVCSINamespace }} - volumeMounts: - - mountPath: /etc/cloud/pvcsi-provider - name: pvcsi-provider-volume - readOnly: true - - mountPath: /etc/cloud/pvcsi-config - name: pvcsi-config-volume - readOnly: true - - mountPath: /csi - name: socket-dir - - name: vsphere-syncer - image: vmware.io/syncer: - args: - - "--leader-election" - - "--supervisor-fss-name=csi-feature-states" - - "--supervisor-fss-namespace=$(CSI_NAMESPACE)" - - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - - "--fss-namespace=$(CSI_NAMESPACE)" - imagePullPolicy: "IfNotPresent" - ports: - - containerPort: 2113 - name: prometheus - protocol: TCP - env: - - name: FULL_SYNC_INTERVAL_MINUTES - value: "30" - - name: GC_CONFIG - value: /etc/cloud/pvcsi-config/cns-csi.conf - - name: CLUSTER_FLAVOR - value: "GUEST_CLUSTER" - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: CSI_NAMESPACE - value: {{ .PVCSINamespace }} - volumeMounts: - - mountPath: /etc/cloud/pvcsi-provider - name: pvcsi-provider-volume - readOnly: true - - mountPath: /etc/cloud/pvcsi-config - name: pvcsi-config-volume - readOnly: true - - name: liveness-probe - image: vmware.io/csi-livenessprobe/csi-livenessprobe: - args: - - "--csi-address=$(ADDRESS)" - imagePullPolicy: "IfNotPresent" - env: - - name: ADDRESS - value: /csi/csi.sock - volumeMounts: - - mountPath: /csi - name: socket-dir - - name: csi-provisioner - image: vmware.io/csi-provisioner/csi-provisioner: - args: - - "--v=4" - - "--timeout=300s" - - "--csi-address=$(ADDRESS)" - - "--enable-leader-election" - - "--leader-election-type=leases" - imagePullPolicy: "IfNotPresent" - env: - - name: ADDRESS - value: /csi/csi.sock - volumeMounts: - - mountPath: /csi - name: 
socket-dir - - name: csi-resizer - image: vmware.io/kubernetes-csi_external-resizer/kubernetes-csi_external-resizer: - args: - - "--v=4" - - "--timeout=300s" - - "--handle-volume-inuse-error=false" # Set this to true if used in vSphere 7.0U1 - - "--csi-address=$(ADDRESS)" - - "--leader-election" - - "--kube-api-qps=100" - - "--kube-api-burst=100" - env: - - name: ADDRESS - value: /csi/csi.sock - volumeMounts: - - mountPath: /csi - name: socket-dir - volumes: - - name: pvcsi-provider-volume - secret: - secretName: pvcsi-provider-creds - - name: pvcsi-config-volume - configMap: - name: pvcsi-config - - name: socket-dir - emptyDir: {} ---- -apiVersion: storage.k8s.io/v1 -kind: CSIDriver -metadata: - name: csi.vsphere.vmware.com -spec: - attachRequired: true - podInfoOnMount: false ---- -kind: DaemonSet -apiVersion: apps/v1 -metadata: - name: vsphere-csi-node - namespace: {{ .PVCSINamespace }} -spec: - selector: - matchLabels: - app: vsphere-csi-node - updateStrategy: - type: "RollingUpdate" - template: - metadata: - labels: - app: vsphere-csi-node - role: vsphere-csi - spec: - serviceAccountName: vsphere-csi-node - containers: - - name: node-driver-registrar - image: vmware.io/csi-node-driver-registrar: - imagePullPolicy: "IfNotPresent" - lifecycle: - preStop: - exec: - command: ["/bin/sh", "-c", "rm -rf /registration/csi.vsphere.vmware.com-reg.sock /csi/csi.sock"] - args: - - "--v=5" - - "--csi-address=$(ADDRESS)" - - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" - env: - - name: ADDRESS - value: /csi/csi.sock - - name: DRIVER_REG_SOCK_PATH - value: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock - volumeMounts: - - name: plugin-dir - mountPath: /csi - - name: registration-dir - mountPath: /registration - - name: vsphere-csi-node - image: vmware.io/vsphere-csi: - args: - - "--supervisor-fss-name=csi-feature-states" - - "--supervisor-fss-namespace=$(CSI_NAMESPACE)" - - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - - "--fss-namespace=$(CSI_NAMESPACE)" - imagePullPolicy: "IfNotPresent" - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: CSI_ENDPOINT - value: unix:///csi/csi.sock - - name: X_CSI_MODE - value: "node" - - name: X_CSI_SPEC_REQ_VALIDATION - value: "false" - - name: CLUSTER_FLAVOR - value: "GUEST_CLUSTER" - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: CSI_NAMESPACE - value: {{ .PVCSINamespace }} - securityContext: - privileged: true - capabilities: - add: ["SYS_ADMIN"] - allowPrivilegeEscalation: true - volumeMounts: - - name: plugin-dir - mountPath: /csi - - name: pods-mount-dir - mountPath: /var/lib/kubelet - mountPropagation: "Bidirectional" - - name: device-dir - mountPath: /dev - - name: blocks-dir - mountPath: /sys/block - - name: sys-devices-dir - mountPath: /sys/devices - - name: liveness-probe - image: vmware.io/csi-livenessprobe/csi-livenessprobe: - args: - - --csi-address=/csi/csi.sock - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: plugin-dir - mountPath: /csi - volumes: - - name: registration-dir - hostPath: - path: /var/lib/kubelet/plugins_registry - type: Directory - - name: plugin-dir - hostPath: - path: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/ - type: DirectoryOrCreate - - name: pods-mount-dir - hostPath: - path: /var/lib/kubelet - type: Directory - - name: device-dir - hostPath: - path: /dev - - name: blocks-dir - hostPath: - path: /sys/block - type: Directory - - name: sys-devices-dir - hostPath: - path: /sys/devices - type: Directory 
- tolerations: - - effect: NoExecute - operator: Exists - - effect: NoSchedule - operator: Exists ---- -apiVersion: v1 -data: - cns-csi.conf: | - [GC] - endpoint = "{{ .SupervisorMasterEndpointHostName }}" - port = "{{ .SupervisorMasterPort }}" - tanzukubernetescluster-uid = "{{ .TanzuKubernetesClusterUID }}" - tanzukubernetescluster-name = "{{ .TanzuKubernetesClusterName }}" -kind: ConfigMap -metadata: - name: pvcsi-config - namespace: {{ .PVCSINamespace }} ---- -apiVersion: v1 -data: - "volume-extend": "true" - "volume-health": "true" - "online-volume-extend": "true" - "file-volume": "false" -kind: ConfigMap -metadata: - name: internal-feature-states.csi.vsphere.vmware.com - namespace: {{ .PVCSINamespace }} ---- -apiVersion: v1 -kind: Service -metadata: - name: vsphere-csi-controller - namespace: {{ .PVCSINamespace }} - labels: - app: vsphere-csi-controller -spec: - ports: - - name: ctlr - port: 2112 - targetPort: 2112 - protocol: TCP - - name: syncer - port: 2113 - targetPort: 2113 - protocol: TCP - selector: - app: vsphere-csi-controller diff --git a/manifests/dev/vsphere-7.0u2/supervisorcluster/1.17/vsphere-csi-controller.yaml b/manifests/dev/vsphere-7.0u2/supervisorcluster/1.17/vsphere-csi-controller.yaml deleted file mode 100644 index c64fb7b471..0000000000 --- a/manifests/dev/vsphere-7.0u2/supervisorcluster/1.17/vsphere-csi-controller.yaml +++ /dev/null @@ -1,324 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: vmware-system-csi ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: vsphere-csi-controller - namespace: vmware-system-csi ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-controller-role -rules: -- apiGroups: [""] - resources: ["nodes", "pods", "configmaps", "resourcequotas"] - verbs: ["get", "list", "watch"] -- apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "create", "update", "patch"] -- apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "update", "create", "delete", "patch"] -- apiGroups: [""] - resources: ["persistentvolumeclaims/status"] - verbs: ["update", "patch"] -- apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get", "list", "watch", "patch"] -- apiGroups: ["storage.k8s.io"] - resources: ["csinodes"] - verbs: ["get", "list", "watch"] -- apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] -- apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update", "patch"] -- apiGroups: ["cns.vmware.com"] - resources: ["cnsnodevmattachments", "cnsvolumemetadatas"] - verbs: ["get", "list", "watch", "update"] -- apiGroups: ["cns.vmware.com"] - resources: ["cnsregistervolumes"] - verbs: ["get", "list", "watch", "update", "delete"] -- apiGroups: ["cns.vmware.com"] - resources: ["storagepools"] - verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] -- apiGroups: ["apiextensions.k8s.io"] - resources: ["customresourcedefinitions"] - verbs: ["get", "create", "update"] -- apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", "create"] -- apiGroups: ["vmoperator.vmware.com"] - resources: ["virtualmachines"] - verbs: ["get", "list"] ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - namespace: vmware-system-csi - name: vsphere-csi-secret-reader -rules: -- apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "watch", "list"] ---- 
-kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-controller-binding -subjects: -- kind: ServiceAccount - name: vsphere-csi-controller - namespace: vmware-system-csi -roleRef: - kind: ClusterRole - name: vsphere-csi-controller-role - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: csiRole - namespace: vmware-system-csi -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: wcp-privileged-psp -subjects: -# For the kube-system nodes. -- apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts:vmware-system-csi ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-provisioner-secret-binding - namespace: vmware-system-csi -subjects: -- kind: ServiceAccount - name: vsphere-csi-controller - namespace: vmware-system-csi -roleRef: - kind: Role - name: vsphere-csi-secret-reader - apiGroup: rbac.authorization.k8s.io ---- -kind: Deployment -apiVersion: apps/v1 -metadata: - name: vsphere-csi-controller - namespace: vmware-system-csi -spec: - strategy: - type: Recreate - replicas: 1 - selector: - matchLabels: - app: vsphere-csi-controller - template: - metadata: - labels: - app: vsphere-csi-controller - role: vsphere-csi - spec: - serviceAccount: vsphere-csi-controller - nodeSelector: - node-role.kubernetes.io/master: '' - tolerations: - - operator: "Exists" - key: "node-role.kubernetes.io/master" - effect: "NoSchedule" - - operator: "Equal" - key: "kubeadmNode" - effect: "NoSchedule" - value: "master" - hostNetwork: true - containers: - - name: csi-provisioner - image: localhost:5000/vmware/csi-provisioner/csi-provisioner:v1.2.1_vmware.11 - args: - - "--v=4" - - "--timeout=300s" - - "--csi-address=$(ADDRESS)" - - "--feature-gates=Topology=true" - - "--strict-topology" - - "--enable-leader-election" - - "--leader-election-type=leases" - - "--enable-hostlocal-placement=true" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - - name: KUBERNETES_SERVICE_HOST - value: "127.0.0.1" - - name: KUBERNETES_SERVICE_PORT - value: "6443" - - name: VSPHERE_CLOUD_OPERATOR_SERVICE_PORT - value: "29000" - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/sockets/pluginproxy/ - - name: csi-attacher - image: localhost:5000/vmware.io/csi-attacher:v2.0.0_vmware.1 - args: - - "--v=4" - - "--timeout=300s" - - "--csi-address=$(ADDRESS)" - - "--leader-election" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - - name: KUBERNETES_SERVICE_HOST - value: "127.0.0.1" - - name: KUBERNETES_SERVICE_PORT - value: "6443" - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/sockets/pluginproxy/ - - name: csi-resizer - image: localhost:5000/vmware/kubernetes-csi_external-resizer/kubernetes-csi_external-resizer:v1.0.0_vmware.1 - imagePullPolicy: IfNotPresent - args: - - --v=4 - - --timeout=300s - - --csi-address=$(ADDRESS) - - --leader-election - - --kube-api-qps=100 - - --kube-api-burst=100 - env: - - name: ADDRESS - value: /csi/csi.sock - resources: {} - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /csi - name: socket-dir - - name: vsphere-csi-controller - image: localhost:5000/vmware/vsphere-csi: - ports: - - containerPort: 2112 - name: prometheus - protocol: TCP - lifecycle: - preStop: - exec: - command: ["/bin/sh", 
"-c", "rm -rf /var/lib/csi/sockets/pluginproxy/csi.vsphere.vmware.com"] - env: - - name: CSI_ENDPOINT - value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock - - name: CLUSTER_FLAVOR - value: "WORKLOAD" - - name: X_CSI_MODE - value: "controller" - - name: KUBERNETES_SERVICE_HOST - value: "127.0.0.1" - - name: KUBERNETES_SERVICE_PORT - value: "6443" - - name: POD_LISTENER_SERVICE_PORT - value: "29000" - - name: VSPHERE_CSI_CONFIG - value: "/etc/vmware/wcp/vsphere-cloud-provider.conf" # here vsphere-cloud-provider.conf is the name of the file used for creating secret using "--from-file" flag - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - imagePullPolicy: "IfNotPresent" - volumeMounts: - - mountPath: /etc/vmware/wcp - name: vsphere-config-volume - readOnly: true - - mountPath: /var/lib/csi/sockets/pluginproxy/ - name: socket-dir - - name: liveness-probe - image: localhost:5000/vmware/csi-livenessprobe/csi-livenessprobe:v1.1.0 - args: - - "--csi-address=$(ADDRESS)" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - volumeMounts: - - mountPath: /var/lib/csi/sockets/pluginproxy/ - name: socket-dir - - name: vsphere-syncer - image: localhost:5000/vmware/syncer: - args: - - "--leader-election" - env: - - name: CLUSTER_FLAVOR - value: "WORKLOAD" - - name: KUBERNETES_SERVICE_HOST - value: "127.0.0.1" - - name: KUBERNETES_SERVICE_PORT - value: "6443" - - name: FULL_SYNC_INTERVAL_MINUTES - value: "30" - - name: VOLUME_HEALTH_INTERVAL_MINUTES - value: "5" - - name: POD_POLL_INTERVAL_SECONDS - value: "2" - - name: POD_LISTENER_SERVICE_PORT - value: "29000" - - name: VSPHERE_CSI_CONFIG - value: "/etc/vmware/wcp/vsphere-cloud-provider.conf" # here vsphere-cloud-provider.conf is the name of the file used for creating secret using "--from-file" flag - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - imagePullPolicy: "IfNotPresent" - ports: - - containerPort: 2113 - name: prometheus - protocol: TCP - volumeMounts: - - mountPath: /etc/vmware/wcp - name: vsphere-config-volume - readOnly: true - volumes: - - name: vsphere-config-volume - secret: - secretName: vsphere-config-secret - - name: socket-dir - hostPath: - path: /var/lib/csi/sockets/pluginproxy/csi.vsphere.vmware.com - type: DirectoryOrCreate ---- -apiVersion: storage.k8s.io/v1beta1 -kind: CSIDriver -metadata: - name: csi.vsphere.vmware.com -spec: - attachRequired: true - podInfoOnMount: false ---- -apiVersion: v1 -data: - "volume-extend": "true" - "volume-health": "true" -kind: ConfigMap -metadata: - name: csi-feature-states - namespace: vmware-system-csi ---- -apiVersion: v1 -kind: Service -metadata: - name: vsphere-csi-controller - namespace: vmware-system-csi - labels: - app: vsphere-csi-controller -spec: - ports: - - name: ctlr - port: 2112 - targetPort: 2112 - protocol: TCP - - name: syncer - port: 2113 - targetPort: 2113 - protocol: TCP - selector: - app: vsphere-csi-controller diff --git a/manifests/dev/vsphere-7.0u2/supervisorcluster/1.18/vsphere-csi-controller.yaml b/manifests/dev/vsphere-7.0u2/supervisorcluster/1.18/vsphere-csi-controller.yaml deleted file mode 100644 index 61cbf54fa8..0000000000 --- a/manifests/dev/vsphere-7.0u2/supervisorcluster/1.18/vsphere-csi-controller.yaml +++ /dev/null @@ -1,324 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: vmware-system-csi ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: vsphere-csi-controller - namespace: vmware-system-csi ---- -kind: ClusterRole -apiVersion: 
rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-controller-role -rules: -- apiGroups: [""] - resources: ["nodes", "pods", "configmaps", "resourcequotas"] - verbs: ["get", "list", "watch"] -- apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "create", "update", "patch"] -- apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "update", "create", "delete", "patch"] -- apiGroups: [""] - resources: ["persistentvolumeclaims/status"] - verbs: ["update", "patch"] -- apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get", "list", "watch", "patch"] -- apiGroups: ["storage.k8s.io"] - resources: ["csinodes"] - verbs: ["get", "list", "watch"] -- apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] -- apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update", "patch"] -- apiGroups: ["cns.vmware.com"] - resources: ["cnsnodevmattachments", "cnsvolumemetadatas"] - verbs: ["get", "list", "watch", "update"] -- apiGroups: ["cns.vmware.com"] - resources: ["cnsregistervolumes"] - verbs: ["get", "list", "watch", "update", "delete"] -- apiGroups: ["cns.vmware.com"] - resources: ["storagepools"] - verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] -- apiGroups: ["apiextensions.k8s.io"] - resources: ["customresourcedefinitions"] - verbs: ["get", "create", "update"] -- apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", "create"] -- apiGroups: ["vmoperator.vmware.com"] - resources: ["virtualmachines"] - verbs: ["get", "list"] ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - namespace: vmware-system-csi - name: vsphere-csi-secret-reader -rules: -- apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "watch", "list"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-controller-binding -subjects: -- kind: ServiceAccount - name: vsphere-csi-controller - namespace: vmware-system-csi -roleRef: - kind: ClusterRole - name: vsphere-csi-controller-role - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: csiRole - namespace: vmware-system-csi -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: wcp-privileged-psp -subjects: -# For the kube-system nodes. 
-- apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts:vmware-system-csi ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: vsphere-csi-provisioner-secret-binding - namespace: vmware-system-csi -subjects: -- kind: ServiceAccount - name: vsphere-csi-controller - namespace: vmware-system-csi -roleRef: - kind: Role - name: vsphere-csi-secret-reader - apiGroup: rbac.authorization.k8s.io ---- -kind: Deployment -apiVersion: apps/v1 -metadata: - name: vsphere-csi-controller - namespace: vmware-system-csi -spec: - strategy: - type: Recreate - replicas: 1 - selector: - matchLabels: - app: vsphere-csi-controller - template: - metadata: - labels: - app: vsphere-csi-controller - role: vsphere-csi - spec: - serviceAccount: vsphere-csi-controller - nodeSelector: - node-role.kubernetes.io/master: '' - tolerations: - - operator: "Exists" - key: "node-role.kubernetes.io/master" - effect: "NoSchedule" - - operator: "Equal" - key: "kubeadmNode" - effect: "NoSchedule" - value: "master" - hostNetwork: true - containers: - - name: csi-provisioner - image: localhost:5000/vmware/csi-provisioner/csi-provisioner:v1.2.1_vmware.11 - args: - - "--v=4" - - "--timeout=300s" - - "--csi-address=$(ADDRESS)" - - "--feature-gates=Topology=true" - - "--strict-topology" - - "--enable-leader-election" - - "--leader-election-type=leases" - - "--enable-hostlocal-placement=true" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - - name: KUBERNETES_SERVICE_HOST - value: "127.0.0.1" - - name: KUBERNETES_SERVICE_PORT - value: "6443" - - name: VSPHERE_CLOUD_OPERATOR_SERVICE_PORT - value: "29000" - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/sockets/pluginproxy/ - - name: csi-attacher - image: localhost:5000/vmware.io/csi-attacher:v2.0.0_vmware.1 - args: - - "--v=4" - - "--timeout=300s" - - "--csi-address=$(ADDRESS)" - - "--leader-election" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - - name: KUBERNETES_SERVICE_HOST - value: "127.0.0.1" - - name: KUBERNETES_SERVICE_PORT - value: "6443" - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/sockets/pluginproxy/ - - name: csi-resizer - image: localhost:5000/vmware/kubernetes-csi_external-resizer/kubernetes-csi_external-resizer:v1.0.0_vmware.1 - imagePullPolicy: IfNotPresent - args: - - --v=4 - - --timeout=300s - - --csi-address=$(ADDRESS) - - --leader-election - - --kube-api-qps=100 - - --kube-api-burst=100 - env: - - name: ADDRESS - value: /csi/csi.sock - resources: {} - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /csi - name: socket-dir - - name: vsphere-csi-controller - image: localhost:5000/vmware/vsphere-csi: - ports: - - containerPort: 2112 - name: prometheus - protocol: TCP - lifecycle: - preStop: - exec: - command: ["/bin/sh", "-c", "rm -rf /var/lib/csi/sockets/pluginproxy/csi.vsphere.vmware.com"] - env: - - name: CSI_ENDPOINT - value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock - - name: CLUSTER_FLAVOR - value: "WORKLOAD" - - name: X_CSI_MODE - value: "controller" - - name: KUBERNETES_SERVICE_HOST - value: "127.0.0.1" - - name: KUBERNETES_SERVICE_PORT - value: "6443" - - name: POD_LISTENER_SERVICE_PORT - value: "29000" - - name: VSPHERE_CSI_CONFIG - value: "/etc/vmware/wcp/vsphere-cloud-provider.conf" # here vsphere-cloud-provider.conf is the name of the file used for creating secret using 
"--from-file" flag - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - imagePullPolicy: "IfNotPresent" - volumeMounts: - - mountPath: /etc/vmware/wcp - name: vsphere-config-volume - readOnly: true - - mountPath: /var/lib/csi/sockets/pluginproxy/ - name: socket-dir - - name: liveness-probe - image: localhost:5000/vmware/csi-livenessprobe/csi-livenessprobe:v1.1.0 - args: - - "--csi-address=$(ADDRESS)" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - volumeMounts: - - mountPath: /var/lib/csi/sockets/pluginproxy/ - name: socket-dir - - name: vsphere-syncer - image: localhost:5000/vmware/syncer: - args: - - "--leader-election" - env: - - name: CLUSTER_FLAVOR - value: "WORKLOAD" - - name: KUBERNETES_SERVICE_HOST - value: "127.0.0.1" - - name: KUBERNETES_SERVICE_PORT - value: "6443" - - name: FULL_SYNC_INTERVAL_MINUTES - value: "30" - - name: VOLUME_HEALTH_INTERVAL_MINUTES - value: "5" - - name: POD_POLL_INTERVAL_SECONDS - value: "2" - - name: POD_LISTENER_SERVICE_PORT - value: "29000" - - name: VSPHERE_CSI_CONFIG - value: "/etc/vmware/wcp/vsphere-cloud-provider.conf" # here vsphere-cloud-provider.conf is the name of the file used for creating secret using "--from-file" flag - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - imagePullPolicy: "IfNotPresent" - ports: - - containerPort: 2113 - name: prometheus - protocol: TCP - volumeMounts: - - mountPath: /etc/vmware/wcp - name: vsphere-config-volume - readOnly: true - volumes: - - name: vsphere-config-volume - secret: - secretName: vsphere-config-secret - - name: socket-dir - hostPath: - path: /var/lib/csi/sockets/pluginproxy/csi.vsphere.vmware.com - type: DirectoryOrCreate ---- -apiVersion: storage.k8s.io/v1beta1 -kind: CSIDriver -metadata: - name: csi.vsphere.vmware.com -spec: - attachRequired: true - podInfoOnMount: false ---- -apiVersion: v1 -data: - "volume-extend": "true" - "volume-health": "true" -kind: ConfigMap -metadata: - name: csi-feature-states - namespace: vmware-system-csi ---- -apiVersion: v1 -kind: Service -metadata: - name: vsphere-csi-controller - namespace: vmware-system-csi - labels: - app: vsphere-csi-controller -spec: - ports: - - name: ctlr - port: 2112 - targetPort: 2112 - protocol: TCP - - name: syncer - port: 2113 - targetPort: 2113 - protocol: TCP - selector: - app: vsphere-csi-controller diff --git a/manifests/dev/vsphere-7.0u2/supervisorcluster/1.19/vsphere-csi-controller.yaml b/manifests/dev/vsphere-7.0u2/supervisorcluster/1.19/vsphere-csi-controller.yaml deleted file mode 100644 index 6603bc2122..0000000000 --- a/manifests/dev/vsphere-7.0u2/supervisorcluster/1.19/vsphere-csi-controller.yaml +++ /dev/null @@ -1,334 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: vmware-system-csi ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: vsphere-csi-controller - namespace: vmware-system-csi ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-controller-role -rules: -- apiGroups: [""] - resources: ["nodes", "pods", "configmaps", "resourcequotas"] - verbs: ["get", "list", "watch"] -- apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "create", "update", "patch"] -- apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "update", "create", "delete", "patch"] -- apiGroups: [""] - resources: ["persistentvolumeclaims/status"] - verbs: ["update", "patch"] -- apiGroups: ["storage.k8s.io"] - resources: 
["storageclasses"] - verbs: ["get", "list", "watch", "patch"] -- apiGroups: ["storage.k8s.io"] - resources: ["csinodes"] - verbs: ["get", "list", "watch"] -- apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] -- apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update", "patch"] -- apiGroups: ["cns.vmware.com"] - resources: ["cnsnodevmattachments", "cnsvolumemetadatas", "cnsfileaccessconfigs"] - verbs: ["get", "list", "watch", "update"] -- apiGroups: ["cns.vmware.com"] - resources: ["cnsregistervolumes"] - verbs: ["get", "list", "watch", "update", "delete"] -- apiGroups: ["cns.vmware.com"] - resources: ["storagepools"] - verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] -- apiGroups: ["apiextensions.k8s.io"] - resources: ["customresourcedefinitions"] - verbs: ["get", "create", "update"] -- apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", "create"] -- apiGroups: ["vmoperator.vmware.com"] - resources: ["virtualmachines"] - verbs: ["get", "list"] ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - namespace: vmware-system-csi - name: vsphere-csi-secret-reader -rules: -- apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "watch", "list"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-controller-binding -subjects: -- kind: ServiceAccount - name: vsphere-csi-controller - namespace: vmware-system-csi -roleRef: - kind: ClusterRole - name: vsphere-csi-controller-role - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: csiRole - namespace: vmware-system-csi -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: wcp-privileged-psp -subjects: -# For the kube-system nodes. 
-- apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts:vmware-system-csi ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: vsphere-csi-provisioner-secret-binding - namespace: vmware-system-csi -subjects: -- kind: ServiceAccount - name: vsphere-csi-controller - namespace: vmware-system-csi -roleRef: - kind: Role - name: vsphere-csi-secret-reader - apiGroup: rbac.authorization.k8s.io ---- -kind: Deployment -apiVersion: apps/v1 -metadata: - name: vsphere-csi-controller - namespace: vmware-system-csi -spec: - strategy: - type: Recreate - replicas: 1 - selector: - matchLabels: - app: vsphere-csi-controller - template: - metadata: - labels: - app: vsphere-csi-controller - role: vsphere-csi - spec: - serviceAccount: vsphere-csi-controller - nodeSelector: - node-role.kubernetes.io/master: '' - tolerations: - - operator: "Exists" - key: "node-role.kubernetes.io/master" - effect: "NoSchedule" - - operator: "Equal" - key: "kubeadmNode" - effect: "NoSchedule" - value: "master" - hostNetwork: true - containers: - - name: csi-provisioner - image: localhost:5000/vmware/csi-provisioner/csi-provisioner:v1.2.1_vmware.11 - args: - - "--v=4" - - "--timeout=300s" - - "--csi-address=$(ADDRESS)" - - "--feature-gates=Topology=true" - - "--strict-topology" - - "--enable-leader-election" - - "--leader-election-type=leases" - - "--enable-hostlocal-placement=true" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - - name: KUBERNETES_SERVICE_HOST - value: "127.0.0.1" - - name: KUBERNETES_SERVICE_PORT - value: "6443" - - name: VSPHERE_CLOUD_OPERATOR_SERVICE_PORT - value: "29000" - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/sockets/pluginproxy/ - - name: csi-attacher - image: localhost:5000/vmware.io/csi-attacher:v2.0.0_vmware.1 - args: - - "--v=4" - - "--timeout=300s" - - "--csi-address=$(ADDRESS)" - - "--leader-election" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - - name: KUBERNETES_SERVICE_HOST - value: "127.0.0.1" - - name: KUBERNETES_SERVICE_PORT - value: "6443" - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/sockets/pluginproxy/ - - name: csi-resizer - image: localhost:5000/vmware/kubernetes-csi_external-resizer/kubernetes-csi_external-resizer:v1.0.0_vmware.1 - imagePullPolicy: IfNotPresent - args: - - --v=4 - - --timeout=300s - - --handle-volume-inuse-error=false # Set this to true if used in vSphere 7.0U1 - - --csi-address=$(ADDRESS) - - --leader-election - - --kube-api-qps=100 - - --kube-api-burst=100 - env: - - name: ADDRESS - value: /csi/csi.sock - resources: {} - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /csi - name: socket-dir - - name: vsphere-csi-controller - image: localhost:5000/vmware/vsphere-csi: - ports: - - containerPort: 2112 - name: prometheus - protocol: TCP - lifecycle: - preStop: - exec: - command: ["/bin/sh", "-c", "rm -rf /var/lib/csi/sockets/pluginproxy/csi.vsphere.vmware.com"] - env: - - name: CSI_ENDPOINT - value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock - - name: CLUSTER_FLAVOR - value: "WORKLOAD" - - name: X_CSI_MODE - value: "controller" - - name: KUBERNETES_SERVICE_HOST - value: "127.0.0.1" - - name: KUBERNETES_SERVICE_PORT - value: "6443" - - name: POD_LISTENER_SERVICE_PORT - value: "29000" - - name: VSPHERE_CSI_CONFIG - value: "/etc/vmware/wcp/vsphere-cloud-provider.conf" # here 
vsphere-cloud-provider.conf is the name of the file used for creating secret using "--from-file" flag - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: INCLUSTER_CLIENT_QPS - value: "50" - - name: INCLUSTER_CLIENT_BURST - value: "50" - imagePullPolicy: "IfNotPresent" - volumeMounts: - - mountPath: /etc/vmware/wcp - name: vsphere-config-volume - readOnly: true - - mountPath: /var/lib/csi/sockets/pluginproxy/ - name: socket-dir - - name: liveness-probe - image: localhost:5000/vmware/csi-livenessprobe/csi-livenessprobe:v1.1.0 - args: - - "--csi-address=$(ADDRESS)" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - volumeMounts: - - mountPath: /var/lib/csi/sockets/pluginproxy/ - name: socket-dir - - name: vsphere-syncer - image: localhost:5000/vmware/syncer: - args: - - "--leader-election" - env: - - name: CLUSTER_FLAVOR - value: "WORKLOAD" - - name: KUBERNETES_SERVICE_HOST - value: "127.0.0.1" - - name: KUBERNETES_SERVICE_PORT - value: "6443" - - name: FULL_SYNC_INTERVAL_MINUTES - value: "30" - - name: VOLUME_HEALTH_INTERVAL_MINUTES - value: "5" - - name: POD_POLL_INTERVAL_SECONDS - value: "2" - - name: POD_LISTENER_SERVICE_PORT - value: "29000" - - name: VSPHERE_CSI_CONFIG - value: "/etc/vmware/wcp/vsphere-cloud-provider.conf" # here vsphere-cloud-provider.conf is the name of the file used for creating secret using "--from-file" flag - - name: LOGGER_LEVEL - value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - - name: INCLUSTER_CLIENT_QPS - value: "50" - - name: INCLUSTER_CLIENT_BURST - value: "50" - imagePullPolicy: "IfNotPresent" - ports: - - containerPort: 2113 - name: prometheus - protocol: TCP - volumeMounts: - - mountPath: /etc/vmware/wcp - name: vsphere-config-volume - readOnly: true - volumes: - - name: vsphere-config-volume - secret: - secretName: vsphere-config-secret - - name: socket-dir - hostPath: - path: /var/lib/csi/sockets/pluginproxy/csi.vsphere.vmware.com - type: DirectoryOrCreate ---- -apiVersion: storage.k8s.io/v1beta1 -kind: CSIDriver -metadata: - name: csi.vsphere.vmware.com -spec: - attachRequired: true - podInfoOnMount: false ---- -apiVersion: v1 -data: - "volume-extend": "true" - "volume-health": "true" - "online-volume-extend": "true" -kind: ConfigMap -metadata: - name: csi-feature-states - namespace: vmware-system-csi ---- -apiVersion: v1 -kind: Service -metadata: - name: vsphere-csi-controller - namespace: vmware-system-csi - labels: - app: vsphere-csi-controller -spec: - ports: - - name: ctlr - port: 2112 - targetPort: 2112 - protocol: TCP - - name: syncer - port: 2113 - targetPort: 2113 - protocol: TCP - selector: - app: vsphere-csi-controller diff --git a/manifests/dev/vsphere-67u3/vanilla/deploy/vsphere-csi-controller-deployment.yaml b/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-controller-deployment.yaml similarity index 91% rename from manifests/dev/vsphere-67u3/vanilla/deploy/vsphere-csi-controller-deployment.yaml rename to manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-controller-deployment.yaml index 6440dce879..82b1305911 100644 --- a/manifests/dev/vsphere-67u3/vanilla/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-controller-deployment.yaml @@ -1,5 +1,3 @@ -# Minimum Kubernetes version - 1.16 -# For prior releases make sure to add required --feature-gates flags kind: Deployment apiVersion: apps/v1 metadata: @@ -49,7 +47,7 @@ spec: - mountPath: /csi name: socket-dir - name: vsphere-csi-controller - image: 
gcr.io/cloud-provider-vsphere/csi/ci/driver:latest + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc-1 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" @@ -93,15 +91,13 @@ spec: - name: liveness-probe image: quay.io/k8scsi/livenessprobe:v2.2.0 args: - - "--csi-address=$(ADDRESS)" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock + - "--v=4" + - "--csi-address=/csi/csi.sock" volumeMounts: - - mountPath: /var/lib/csi/sockets/pluginproxy/ - name: socket-dir + - name: socket-dir + mountPath: /csi - name: vsphere-syncer - image: gcr.io/cloud-provider-vsphere/csi/ci/syncer:latest + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc-1 args: - "--leader-election" - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" @@ -132,6 +128,8 @@ spec: - "--v=4" - "--timeout=300s" - "--csi-address=$(ADDRESS)" + - "--kube-api-qps=100" + - "--kube-api-burst=100" - "--leader-election" - "--default-fstype=ext4" # needed only for topology aware setup diff --git a/manifests/dev/vsphere-7.0/vanilla/deploy/vsphere-csi-node-ds.yaml b/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-node-ds.yaml similarity index 80% rename from manifests/dev/vsphere-7.0/vanilla/deploy/vsphere-csi-node-ds.yaml rename to manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-node-ds.yaml index 7017f69bea..64de33b167 100644 --- a/manifests/dev/vsphere-7.0/vanilla/deploy/vsphere-csi-node-ds.yaml +++ b/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-node-ds.yaml @@ -1,5 +1,3 @@ -# Minimum Kubernetes version - 1.16 -# For prior releases make sure to add required --feature-gates flags kind: DaemonSet apiVersion: apps/v1 metadata: @@ -22,26 +20,32 @@ spec: containers: - name: node-driver-registrar image: quay.io/k8scsi/csi-node-driver-registrar:v2.1.0 - lifecycle: - preStop: - exec: - command: ["/bin/sh", "-c", "rm -rf /registration/csi.vsphere.vmware.com-reg.sock /csi/csi.sock"] args: - - "--v=5" - - "--csi-address=$(ADDRESS)" - - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + - "--v=5" + - "--csi-address=$(ADDRESS)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + - "--health-port=9809" env: - - name: ADDRESS - value: /csi/csi.sock - - name: DRIVER_REG_SOCK_PATH - value: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock volumeMounts: - - name: plugin-dir - mountPath: /csi - - name: registration-dir - mountPath: /registration + - name: plugin-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + ports: + - containerPort: 9809 + name: healthz + livenessProbe: + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 5 + timeoutSeconds: 5 - name: vsphere-csi-node - image: gcr.io/cloud-provider-vsphere/csi/ci/driver:latest + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc-1 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" @@ -102,7 +106,8 @@ spec: - name: liveness-probe image: quay.io/k8scsi/livenessprobe:v2.2.0 args: - - --csi-address=/csi/csi.sock + - "--v=4" + - "--csi-address=/csi/csi.sock" volumeMounts: - name: plugin-dir mountPath: /csi diff --git a/manifests/dev/vsphere-67u3/vanilla/rbac/vsphere-csi-controller-rbac.yaml b/manifests/v2.2.0/vsphere-67u3/rbac/vsphere-csi-controller-rbac.yaml similarity index 100% rename from 
manifests/dev/vsphere-67u3/vanilla/rbac/vsphere-csi-controller-rbac.yaml rename to manifests/v2.2.0/vsphere-67u3/rbac/vsphere-csi-controller-rbac.yaml diff --git a/manifests/dev/vsphere-67u3/vanilla/rbac/vsphere-csi-node-rbac.yaml b/manifests/v2.2.0/vsphere-67u3/rbac/vsphere-csi-node-rbac.yaml similarity index 100% rename from manifests/dev/vsphere-67u3/vanilla/rbac/vsphere-csi-node-rbac.yaml rename to manifests/v2.2.0/vsphere-67u3/rbac/vsphere-csi-node-rbac.yaml diff --git a/manifests/dev/vsphere-7.0/vanilla/deploy/vsphere-csi-controller-deployment.yaml b/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-controller-deployment.yaml similarity index 91% rename from manifests/dev/vsphere-7.0/vanilla/deploy/vsphere-csi-controller-deployment.yaml rename to manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-controller-deployment.yaml index 2422890299..c504190fe7 100644 --- a/manifests/dev/vsphere-7.0/vanilla/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-controller-deployment.yaml @@ -1,5 +1,3 @@ -# Minimum Kubernetes version - 1.16 -# For prior releases make sure to add required --feature-gates flags kind: Deployment apiVersion: apps/v1 metadata: @@ -53,7 +51,10 @@ spec: args: - "--v=4" - "--timeout=300s" + - "--handle-volume-inuse-error=true" - "--csi-address=$(ADDRESS)" + - "--kube-api-qps=100" + - "--kube-api-burst=100" - "--leader-election" env: - name: ADDRESS @@ -62,7 +63,7 @@ spec: - mountPath: /csi name: socket-dir - name: vsphere-csi-controller - image: gcr.io/cloud-provider-vsphere/csi/ci/driver:latest + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc-1 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" @@ -106,15 +107,13 @@ spec: - name: liveness-probe image: quay.io/k8scsi/livenessprobe:v2.2.0 args: - - "--csi-address=$(ADDRESS)" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock + - "--v=4" + - "--csi-address=/csi/csi.sock" volumeMounts: - - mountPath: /var/lib/csi/sockets/pluginproxy/ - name: socket-dir + - name: socket-dir + mountPath: /csi - name: vsphere-syncer - image: gcr.io/cloud-provider-vsphere/csi/ci/syncer:latest + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc-1 args: - "--leader-election" - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" @@ -145,6 +144,8 @@ spec: - "--v=4" - "--timeout=300s" - "--csi-address=$(ADDRESS)" + - "--kube-api-qps=100" + - "--kube-api-burst=100" - "--leader-election" - "--default-fstype=ext4" # needed only for topology aware setup @@ -167,6 +168,7 @@ apiVersion: v1 data: "csi-migration": "false" "csi-auth-check": "false" + "online-volume-extend": "false" kind: ConfigMap metadata: name: internal-feature-states.csi.vsphere.vmware.com diff --git a/manifests/dev/vsphere-67u3/vanilla/deploy/vsphere-csi-node-ds.yaml b/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-node-ds.yaml similarity index 82% rename from manifests/dev/vsphere-67u3/vanilla/deploy/vsphere-csi-node-ds.yaml rename to manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-node-ds.yaml index 7017f69bea..0b92d0593f 100644 --- a/manifests/dev/vsphere-67u3/vanilla/deploy/vsphere-csi-node-ds.yaml +++ b/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-node-ds.yaml @@ -1,5 +1,3 @@ -# Minimum Kubernetes version - 1.16 -# For prior releases make sure to add required --feature-gates flags kind: DaemonSet apiVersion: apps/v1 metadata: @@ -22,26 +20,32 @@ spec: containers: - name: node-driver-registrar image: 
quay.io/k8scsi/csi-node-driver-registrar:v2.1.0 - lifecycle: - preStop: - exec: - command: ["/bin/sh", "-c", "rm -rf /registration/csi.vsphere.vmware.com-reg.sock /csi/csi.sock"] args: - "--v=5" - "--csi-address=$(ADDRESS)" - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + - "--health-port=9809" env: - - name: ADDRESS - value: /csi/csi.sock - - name: DRIVER_REG_SOCK_PATH - value: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock volumeMounts: - - name: plugin-dir - mountPath: /csi - - name: registration-dir - mountPath: /registration + - name: plugin-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + ports: + - containerPort: 9809 + name: healthz + livenessProbe: + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 5 + timeoutSeconds: 5 - name: vsphere-csi-node - image: gcr.io/cloud-provider-vsphere/csi/ci/driver:latest + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc-1 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" @@ -102,10 +106,11 @@ spec: - name: liveness-probe image: quay.io/k8scsi/livenessprobe:v2.2.0 args: - - --csi-address=/csi/csi.sock + - "--v=4" + - "--csi-address=/csi/csi.sock" volumeMounts: - - name: plugin-dir - mountPath: /csi + - name: plugin-dir + mountPath: /csi volumes: # needed only for topology aware setups #- name: vsphere-config-volume diff --git a/manifests/dev/vsphere-7.0/vanilla/rbac/vsphere-csi-controller-rbac.yaml b/manifests/v2.2.0/vsphere-7.0/rbac/vsphere-csi-controller-rbac.yaml similarity index 100% rename from manifests/dev/vsphere-7.0/vanilla/rbac/vsphere-csi-controller-rbac.yaml rename to manifests/v2.2.0/vsphere-7.0/rbac/vsphere-csi-controller-rbac.yaml diff --git a/manifests/dev/vsphere-7.0/vanilla/rbac/vsphere-csi-node-rbac.yaml b/manifests/v2.2.0/vsphere-7.0/rbac/vsphere-csi-node-rbac.yaml similarity index 100% rename from manifests/dev/vsphere-7.0/vanilla/rbac/vsphere-csi-node-rbac.yaml rename to manifests/v2.2.0/vsphere-7.0/rbac/vsphere-csi-node-rbac.yaml diff --git a/manifests/dev/vsphere-7.0u1/vanilla/deploy/create-validation-webhook.sh b/manifests/v2.2.0/vsphere-7.0u1/deploy/create-validation-webhook.sh similarity index 100% rename from manifests/dev/vsphere-7.0u1/vanilla/deploy/create-validation-webhook.sh rename to manifests/v2.2.0/vsphere-7.0u1/deploy/create-validation-webhook.sh diff --git a/manifests/dev/vsphere-7.0u1/vanilla/deploy/generate-signed-webhook-certs.sh b/manifests/v2.2.0/vsphere-7.0u1/deploy/generate-signed-webhook-certs.sh similarity index 100% rename from manifests/dev/vsphere-7.0u1/vanilla/deploy/generate-signed-webhook-certs.sh rename to manifests/v2.2.0/vsphere-7.0u1/deploy/generate-signed-webhook-certs.sh diff --git a/manifests/dev/vsphere-7.0u2/vanilla/deploy/validatingwebhook.yaml b/manifests/v2.2.0/vsphere-7.0u1/deploy/validatingwebhook.yaml similarity index 96% rename from manifests/dev/vsphere-7.0u2/vanilla/deploy/validatingwebhook.yaml rename to manifests/v2.2.0/vsphere-7.0u1/deploy/validatingwebhook.yaml index 4726e29d1a..7c927ea0a6 100644 --- a/manifests/dev/vsphere-7.0u2/vanilla/deploy/validatingwebhook.yaml +++ b/manifests/v2.2.0/vsphere-7.0u1/deploy/validatingwebhook.yaml @@ -1,5 +1,3 @@ -# Requires k8s 1.19+ ---- apiVersion: v1 kind: Service metadata: @@ -101,7 +99,7 @@ spec: dnsPolicy: "Default" containers: - name: vsphere-webhook - image: 
gcr.io/cloud-provider-vsphere/csi/ci/syncer:latest + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc-1 args: - "--operation-mode=WEBHOOK_SERVER" - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" diff --git a/manifests/dev/vsphere-7.0u1/vanilla/deploy/vsphere-csi-controller-deployment.yaml b/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-controller-deployment.yaml similarity index 94% rename from manifests/dev/vsphere-7.0u1/vanilla/deploy/vsphere-csi-controller-deployment.yaml rename to manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-controller-deployment.yaml index f46b571606..97bfd0fe85 100644 --- a/manifests/dev/vsphere-7.0u1/vanilla/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-controller-deployment.yaml @@ -51,7 +51,10 @@ spec: args: - "--v=4" - "--timeout=300s" + - "--handle-volume-inuse-error=true" - "--csi-address=$(ADDRESS)" + - "--kube-api-qps=100" + - "--kube-api-burst=100" - "--leader-election" env: - name: ADDRESS @@ -60,7 +63,7 @@ spec: - mountPath: /csi name: socket-dir - name: vsphere-csi-controller - image: gcr.io/cloud-provider-vsphere/csi/ci/driver:latest + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc-1 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" @@ -114,7 +117,7 @@ spec: - name: socket-dir mountPath: /csi - name: vsphere-syncer - image: gcr.io/cloud-provider-vsphere/csi/ci/syncer:latest + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc-1 args: - "--leader-election" - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" @@ -149,6 +152,8 @@ spec: - "--v=4" - "--timeout=300s" - "--csi-address=$(ADDRESS)" + - "--kube-api-qps=100" + - "--kube-api-burst=100" - "--leader-election" - "--default-fstype=ext4" # needed only for topology aware setup @@ -171,6 +176,7 @@ apiVersion: v1 data: "csi-migration": "false" # csi-migration feature is only available for vSphere 7.0U1 "csi-auth-check": "true" + "online-volume-extend": "false" kind: ConfigMap metadata: name: internal-feature-states.csi.vsphere.vmware.com diff --git a/manifests/dev/vsphere-7.0u1/vanilla/deploy/vsphere-csi-node-ds.yaml b/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-node-ds.yaml similarity index 96% rename from manifests/dev/vsphere-7.0u1/vanilla/deploy/vsphere-csi-node-ds.yaml rename to manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-node-ds.yaml index 3f38fd84df..5136402bf8 100644 --- a/manifests/dev/vsphere-7.0u1/vanilla/deploy/vsphere-csi-node-ds.yaml +++ b/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-node-ds.yaml @@ -46,7 +46,7 @@ spec: initialDelaySeconds: 5 timeoutSeconds: 5 - name: vsphere-csi-node - image: gcr.io/cloud-provider-vsphere/csi/ci/driver:latest + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc-1 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" @@ -100,7 +100,9 @@ spec: path: /healthz port: healthz initialDelaySeconds: 10 - timeoutSeconds: 5 + timeoutSeconds: 3 + periodSeconds: 5 + failureThreshold: 3 - name: liveness-probe image: quay.io/k8scsi/livenessprobe:v2.2.0 args: diff --git a/manifests/dev/vsphere-7.0u1/vanilla/rbac/vsphere-csi-controller-rbac.yaml b/manifests/v2.2.0/vsphere-7.0u1/rbac/vsphere-csi-controller-rbac.yaml similarity index 100% rename from manifests/dev/vsphere-7.0u1/vanilla/rbac/vsphere-csi-controller-rbac.yaml rename to manifests/v2.2.0/vsphere-7.0u1/rbac/vsphere-csi-controller-rbac.yaml diff 
--git a/manifests/dev/vsphere-7.0u2/vanilla/deploy/create-validation-webhook.sh b/manifests/v2.2.0/vsphere-7.0u2/deploy/create-validation-webhook.sh similarity index 100% rename from manifests/dev/vsphere-7.0u2/vanilla/deploy/create-validation-webhook.sh rename to manifests/v2.2.0/vsphere-7.0u2/deploy/create-validation-webhook.sh diff --git a/manifests/dev/vsphere-7.0u2/vanilla/deploy/generate-signed-webhook-certs.sh b/manifests/v2.2.0/vsphere-7.0u2/deploy/generate-signed-webhook-certs.sh similarity index 100% rename from manifests/dev/vsphere-7.0u2/vanilla/deploy/generate-signed-webhook-certs.sh rename to manifests/v2.2.0/vsphere-7.0u2/deploy/generate-signed-webhook-certs.sh diff --git a/manifests/dev/vsphere-7.0u1/vanilla/deploy/validatingwebhook.yaml b/manifests/v2.2.0/vsphere-7.0u2/deploy/validatingwebhook.yaml similarity index 96% rename from manifests/dev/vsphere-7.0u1/vanilla/deploy/validatingwebhook.yaml rename to manifests/v2.2.0/vsphere-7.0u2/deploy/validatingwebhook.yaml index 4726e29d1a..7c927ea0a6 100644 --- a/manifests/dev/vsphere-7.0u1/vanilla/deploy/validatingwebhook.yaml +++ b/manifests/v2.2.0/vsphere-7.0u2/deploy/validatingwebhook.yaml @@ -1,5 +1,3 @@ -# Requires k8s 1.19+ ---- apiVersion: v1 kind: Service metadata: @@ -101,7 +99,7 @@ spec: dnsPolicy: "Default" containers: - name: vsphere-webhook - image: gcr.io/cloud-provider-vsphere/csi/ci/syncer:latest + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc-1 args: - "--operation-mode=WEBHOOK_SERVER" - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" diff --git a/manifests/dev/vsphere-7.0u2/vanilla/deploy/vsphere-csi-controller-deployment.yaml b/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-controller-deployment.yaml similarity index 95% rename from manifests/dev/vsphere-7.0u2/vanilla/deploy/vsphere-csi-controller-deployment.yaml rename to manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-controller-deployment.yaml index d4ada8bced..15bea4224c 100644 --- a/manifests/dev/vsphere-7.0u2/vanilla/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-controller-deployment.yaml @@ -51,7 +51,7 @@ spec: args: - "--v=4" - "--timeout=300s" - - "--handle-volume-inuse-error=false" # Set this to true if used in vSphere 7.0U1 + - "--handle-volume-inuse-error=false" - "--csi-address=$(ADDRESS)" - "--kube-api-qps=100" - "--kube-api-burst=100" @@ -63,7 +63,7 @@ spec: - mountPath: /csi name: socket-dir - name: vsphere-csi-controller - image: gcr.io/cloud-provider-vsphere/csi/ci/driver:latest + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc-1 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" @@ -117,7 +117,7 @@ spec: - name: socket-dir mountPath: /csi - name: vsphere-syncer - image: gcr.io/cloud-provider-vsphere/csi/ci/syncer:latest + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc-1 args: - "--leader-election" - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" diff --git a/manifests/dev/vsphere-7.0u2/vanilla/deploy/vsphere-csi-node-ds.yaml b/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-node-ds.yaml similarity index 96% rename from manifests/dev/vsphere-7.0u2/vanilla/deploy/vsphere-csi-node-ds.yaml rename to manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-node-ds.yaml index 782427fac7..f8bc7f416a 100644 --- a/manifests/dev/vsphere-7.0u2/vanilla/deploy/vsphere-csi-node-ds.yaml +++ b/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-node-ds.yaml @@ -47,7 
+47,7 @@ spec: initialDelaySeconds: 5 timeoutSeconds: 5 - name: vsphere-csi-node - image: gcr.io/cloud-provider-vsphere/csi/ci/driver:latest + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc-1 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" @@ -105,7 +105,9 @@ spec: path: /healthz port: healthz initialDelaySeconds: 10 - timeoutSeconds: 5 + timeoutSeconds: 3 + periodSeconds: 5 + failureThreshold: 3 - name: liveness-probe image: quay.io/k8scsi/livenessprobe:v2.2.0 args: diff --git a/manifests/dev/vsphere-7.0u2/vanilla/rbac/vsphere-csi-controller-rbac.yaml b/manifests/v2.2.0/vsphere-7.0u2/rbac/vsphere-csi-controller-rbac.yaml similarity index 100% rename from manifests/dev/vsphere-7.0u2/vanilla/rbac/vsphere-csi-controller-rbac.yaml rename to manifests/v2.2.0/vsphere-7.0u2/rbac/vsphere-csi-controller-rbac.yaml diff --git a/manifests/dev/vsphere-7.0u2/vanilla/rbac/vsphere-csi-node-rbac.yaml b/manifests/v2.2.0/vsphere-7.0u2/rbac/vsphere-csi-node-rbac.yaml similarity index 100% rename from manifests/dev/vsphere-7.0u2/vanilla/rbac/vsphere-csi-node-rbac.yaml rename to manifests/v2.2.0/vsphere-7.0u2/rbac/vsphere-csi-node-rbac.yaml From eac71e1f4113c85dc2bd61565e10b7497d8652c0 Mon Sep 17 00:00:00 2001 From: Divyen Patel Date: Mon, 15 Mar 2021 11:58:49 -0700 Subject: [PATCH 24/36] fixed rc images tag --- .../deploy/vsphere-csi-controller-deployment.yaml | 4 ++-- manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-node-ds.yaml | 2 +- .../vsphere-7.0/deploy/vsphere-csi-controller-deployment.yaml | 4 ++-- manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-node-ds.yaml | 2 +- manifests/v2.2.0/vsphere-7.0u1/deploy/validatingwebhook.yaml | 2 +- .../deploy/vsphere-csi-controller-deployment.yaml | 4 ++-- .../v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-node-ds.yaml | 2 +- manifests/v2.2.0/vsphere-7.0u2/deploy/validatingwebhook.yaml | 2 +- .../deploy/vsphere-csi-controller-deployment.yaml | 4 ++-- .../v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-node-ds.yaml | 2 +- 10 files changed, 14 insertions(+), 14 deletions(-) diff --git a/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-controller-deployment.yaml b/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-controller-deployment.yaml index 82b1305911..3a8c341994 100644 --- a/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-controller-deployment.yaml @@ -47,7 +47,7 @@ spec: - mountPath: /csi name: socket-dir - name: vsphere-csi-controller - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc-1 + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.1 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" @@ -97,7 +97,7 @@ spec: - name: socket-dir mountPath: /csi - name: vsphere-syncer - image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc-1 + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc.1 args: - "--leader-election" - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" diff --git a/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-node-ds.yaml b/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-node-ds.yaml index 64de33b167..8b7fc0fd87 100644 --- a/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-node-ds.yaml +++ b/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-node-ds.yaml @@ -45,7 +45,7 @@ spec: initialDelaySeconds: 5 timeoutSeconds: 5 - name: vsphere-csi-node - image: 
gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc-1 + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.1 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" diff --git a/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-controller-deployment.yaml b/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-controller-deployment.yaml index c504190fe7..b501cf7766 100644 --- a/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-controller-deployment.yaml @@ -63,7 +63,7 @@ spec: - mountPath: /csi name: socket-dir - name: vsphere-csi-controller - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc-1 + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.1 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" @@ -113,7 +113,7 @@ spec: - name: socket-dir mountPath: /csi - name: vsphere-syncer - image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc-1 + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc.1 args: - "--leader-election" - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" diff --git a/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-node-ds.yaml b/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-node-ds.yaml index 0b92d0593f..3d65a83938 100644 --- a/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-node-ds.yaml +++ b/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-node-ds.yaml @@ -45,7 +45,7 @@ spec: initialDelaySeconds: 5 timeoutSeconds: 5 - name: vsphere-csi-node - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc-1 + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.1 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" diff --git a/manifests/v2.2.0/vsphere-7.0u1/deploy/validatingwebhook.yaml b/manifests/v2.2.0/vsphere-7.0u1/deploy/validatingwebhook.yaml index 7c927ea0a6..bd2fdb268a 100644 --- a/manifests/v2.2.0/vsphere-7.0u1/deploy/validatingwebhook.yaml +++ b/manifests/v2.2.0/vsphere-7.0u1/deploy/validatingwebhook.yaml @@ -99,7 +99,7 @@ spec: dnsPolicy: "Default" containers: - name: vsphere-webhook - image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc-1 + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc.1 args: - "--operation-mode=WEBHOOK_SERVER" - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" diff --git a/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-controller-deployment.yaml b/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-controller-deployment.yaml index 97bfd0fe85..6bc5c0401a 100644 --- a/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-controller-deployment.yaml @@ -63,7 +63,7 @@ spec: - mountPath: /csi name: socket-dir - name: vsphere-csi-controller - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc-1 + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.1 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" @@ -117,7 +117,7 @@ spec: - name: socket-dir mountPath: /csi - name: vsphere-syncer - image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc-1 + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc.1 args: - "--leader-election" - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" diff --git 
a/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-node-ds.yaml b/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-node-ds.yaml index 5136402bf8..aa5ac87335 100644 --- a/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-node-ds.yaml +++ b/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-node-ds.yaml @@ -46,7 +46,7 @@ spec: initialDelaySeconds: 5 timeoutSeconds: 5 - name: vsphere-csi-node - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc-1 + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.1 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" diff --git a/manifests/v2.2.0/vsphere-7.0u2/deploy/validatingwebhook.yaml b/manifests/v2.2.0/vsphere-7.0u2/deploy/validatingwebhook.yaml index 7c927ea0a6..bd2fdb268a 100644 --- a/manifests/v2.2.0/vsphere-7.0u2/deploy/validatingwebhook.yaml +++ b/manifests/v2.2.0/vsphere-7.0u2/deploy/validatingwebhook.yaml @@ -99,7 +99,7 @@ spec: dnsPolicy: "Default" containers: - name: vsphere-webhook - image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc-1 + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc.1 args: - "--operation-mode=WEBHOOK_SERVER" - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" diff --git a/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-controller-deployment.yaml b/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-controller-deployment.yaml index 15bea4224c..d448969299 100644 --- a/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-controller-deployment.yaml @@ -63,7 +63,7 @@ spec: - mountPath: /csi name: socket-dir - name: vsphere-csi-controller - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc-1 + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.1 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" @@ -117,7 +117,7 @@ spec: - name: socket-dir mountPath: /csi - name: vsphere-syncer - image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc-1 + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc.1 args: - "--leader-election" - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" diff --git a/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-node-ds.yaml b/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-node-ds.yaml index f8bc7f416a..e0e32688fe 100644 --- a/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-node-ds.yaml +++ b/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-node-ds.yaml @@ -47,7 +47,7 @@ spec: initialDelaySeconds: 5 timeoutSeconds: 5 - name: vsphere-csi-node - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc-1 + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.1 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" From 06988408d03895b193cca80b3f461f8f4b9ee7bb Mon Sep 17 00:00:00 2001 From: Shalini Bhaskara Date: Mon, 15 Mar 2021 17:26:32 -0700 Subject: [PATCH 25/36] Resolve inconsistencies in the vanilla YAMLs for different vSphere versions --- .../vsphere-csi-controller-deployment.yaml | 12 ++++++-- .../deploy/vsphere-csi-node-ds.yaml | 4 ++- .../vsphere-csi-controller-deployment.yaml | 12 ++++++-- .../deploy/vsphere-csi-node-ds.yaml | 4 ++- .../deploy/vsphere-csi-node-ds.yaml | 2 +- .../rbac/vsphere-csi-node-rbac.yaml | 29 +++++++++++++++++++ .../deploy/vsphere-csi-node-ds.yaml | 2 +- 7 files changed, 57 
insertions(+), 8 deletions(-) create mode 100644 manifests/v2.2.0/vsphere-7.0u1/rbac/vsphere-csi-node-rbac.yaml diff --git a/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-controller-deployment.yaml b/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-controller-deployment.yaml index 3a8c341994..fd77733ae5 100644 --- a/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-controller-deployment.yaml @@ -54,13 +54,17 @@ spec: imagePullPolicy: "Always" env: - name: CSI_ENDPOINT - value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + value: unix:///csi/csi.sock - name: X_CSI_MODE value: "controller" - name: VSPHERE_CSI_CONFIG value: "/etc/cloud/csi-vsphere.conf" - name: LOGGER_LEVEL value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION + - name: INCLUSTER_CLIENT_QPS + value: "100" + - name: INCLUSTER_CLIENT_BURST + value: "100" - name: CSI_NAMESPACE valueFrom: fieldRef: @@ -71,7 +75,7 @@ spec: - mountPath: /etc/cloud name: vsphere-config-volume readOnly: true - - mountPath: /var/lib/csi/sockets/pluginproxy/ + - mountPath: /csi name: socket-dir ports: - name: healthz @@ -114,6 +118,10 @@ spec: value: "/etc/cloud/csi-vsphere.conf" - name: LOGGER_LEVEL value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION + - name: INCLUSTER_CLIENT_QPS + value: "100" + - name: INCLUSTER_CLIENT_BURST + value: "100" - name: CSI_NAMESPACE valueFrom: fieldRef: diff --git a/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-node-ds.yaml b/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-node-ds.yaml index 8b7fc0fd87..7031fdcfbe 100644 --- a/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-node-ds.yaml +++ b/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-node-ds.yaml @@ -9,6 +9,8 @@ spec: app: vsphere-csi-node updateStrategy: type: "RollingUpdate" + rollingUpdate: + maxUnavailable: 1 template: metadata: labels: @@ -100,7 +102,7 @@ spec: path: /healthz port: healthz initialDelaySeconds: 10 - timeoutSeconds: 3 + timeoutSeconds: 5 periodSeconds: 5 failureThreshold: 3 - name: liveness-probe diff --git a/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-controller-deployment.yaml b/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-controller-deployment.yaml index b501cf7766..7bd2770bcf 100644 --- a/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-controller-deployment.yaml @@ -70,13 +70,17 @@ spec: imagePullPolicy: "Always" env: - name: CSI_ENDPOINT - value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + value: unix:///csi/csi.sock - name: X_CSI_MODE value: "controller" - name: VSPHERE_CSI_CONFIG value: "/etc/cloud/csi-vsphere.conf" - name: LOGGER_LEVEL value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION + - name: INCLUSTER_CLIENT_QPS + value: "100" + - name: INCLUSTER_CLIENT_BURST + value: "100" - name: CSI_NAMESPACE valueFrom: fieldRef: @@ -87,7 +91,7 @@ spec: - mountPath: /etc/cloud name: vsphere-config-volume readOnly: true - - mountPath: /var/lib/csi/sockets/pluginproxy/ + - mountPath: /csi name: socket-dir ports: - name: healthz @@ -130,6 +134,10 @@ spec: value: "/etc/cloud/csi-vsphere.conf" - name: LOGGER_LEVEL value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION + - name: INCLUSTER_CLIENT_QPS + value: "100" + - name: INCLUSTER_CLIENT_BURST + value: "100" - name: CSI_NAMESPACE valueFrom: fieldRef: diff --git a/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-node-ds.yaml b/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-node-ds.yaml index 
3d65a83938..878857508d 100644 --- a/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-node-ds.yaml +++ b/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-node-ds.yaml @@ -9,6 +9,8 @@ spec: app: vsphere-csi-node updateStrategy: type: "RollingUpdate" + rollingUpdate: + maxUnavailable: 1 template: metadata: labels: @@ -100,7 +102,7 @@ spec: path: /healthz port: healthz initialDelaySeconds: 10 - timeoutSeconds: 3 + timeoutSeconds: 5 periodSeconds: 5 failureThreshold: 3 - name: liveness-probe diff --git a/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-node-ds.yaml b/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-node-ds.yaml index aa5ac87335..5db384a231 100644 --- a/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-node-ds.yaml +++ b/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-node-ds.yaml @@ -100,7 +100,7 @@ spec: path: /healthz port: healthz initialDelaySeconds: 10 - timeoutSeconds: 3 + timeoutSeconds: 5 periodSeconds: 5 failureThreshold: 3 - name: liveness-probe diff --git a/manifests/v2.2.0/vsphere-7.0u1/rbac/vsphere-csi-node-rbac.yaml b/manifests/v2.2.0/vsphere-7.0u1/rbac/vsphere-csi-node-rbac.yaml new file mode 100644 index 0000000000..79d865582a --- /dev/null +++ b/manifests/v2.2.0/vsphere-7.0u1/rbac/vsphere-csi-node-rbac.yaml @@ -0,0 +1,29 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: vsphere-csi-node + namespace: kube-system +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-node-role + namespace: kube-system +rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch"] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-node-binding + namespace: kube-system +subjects: + - kind: ServiceAccount + name: vsphere-csi-node + namespace: kube-system +roleRef: + kind: Role + name: vsphere-csi-node-role + apiGroup: rbac.authorization.k8s.io diff --git a/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-node-ds.yaml b/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-node-ds.yaml index e0e32688fe..e006844865 100644 --- a/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-node-ds.yaml +++ b/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-node-ds.yaml @@ -105,7 +105,7 @@ spec: path: /healthz port: healthz initialDelaySeconds: 10 - timeoutSeconds: 3 + timeoutSeconds: 5 periodSeconds: 5 failureThreshold: 3 - name: liveness-probe From 87bdb73cf21f2136444112d4de96cc0c243bee4c Mon Sep 17 00:00:00 2001 From: Divyen Patel Date: Tue, 9 Mar 2021 18:12:39 -0800 Subject: [PATCH 26/36] fail volume creation when unable to retrieve AccessibleTopology of the volume --- pkg/csi/service/vanilla/controller.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/pkg/csi/service/vanilla/controller.go b/pkg/csi/service/vanilla/controller.go index c587f016d6..544346aa56 100644 --- a/pkg/csi/service/vanilla/controller.go +++ b/pkg/csi/service/vanilla/controller.go @@ -472,7 +472,7 @@ func (c *controller) createBlockVolume(ctx context.Context, req *csi.CreateVolum // If CNS CreateVolume API does not return datastoreURL, retrieve this by calling QueryVolume // otherwise, retrieve this from PlacementResults from the response of CreateVolume API var volumeAccessibleTopology = make(map[string]string) - var datastoreAccessibleTopology = make([]map[string]string, 0) + var datastoreAccessibleTopology []map[string]string var datastoreURL string if len(datastoreTopologyMap) > 0 { if volumeInfo.DatastoreURL == "" { @@ -487,9 +487,18 @@ func (c *controller) 
createBlockVolume(ctx context.Context, req *csi.CreateVolum } if len(queryResult.Volumes) > 0 { // Find datastore topology from the retrieved datastoreURL + if queryResult.Volumes[0].DatastoreUrl == "" { + msg := fmt.Sprintf("could not retrieve datastore of volume: %q", volumeInfo.VolumeID.Id) + log.Error(msg) + return nil, status.Error(codes.Internal, msg) + } datastoreAccessibleTopology = datastoreTopologyMap[queryResult.Volumes[0].DatastoreUrl] datastoreURL = queryResult.Volumes[0].DatastoreUrl log.Debugf("Volume: %s is provisioned on the datastore: %s ", volumeInfo.VolumeID.Id, datastoreURL) + } else { + msg := fmt.Sprintf("QueryVolume could not retrieve volume information for volume: %q", volumeInfo.VolumeID.Id) + log.Error(msg) + return nil, status.Error(codes.Internal, msg) } } else { // retrieve datastoreURL from placementResults From b315f7686adb29f127cff6fc24aefac141c5155f Mon Sep 17 00:00:00 2001 From: BaluDontu Date: Wed, 17 Mar 2021 12:55:14 -0700 Subject: [PATCH 27/36] Not run auth manager for file volumes if vSAN file services is not enabled --- pkg/common/cns-lib/vsphere/utils.go | 14 ++++ .../cns-lib/vsphere/virtualcentermanager.go | 26 +++++++ pkg/csi/service/common/authmanager.go | 15 +++- pkg/csi/service/vanilla/controller.go | 70 ++++++------------- 4 files changed, 76 insertions(+), 49 deletions(-) diff --git a/pkg/common/cns-lib/vsphere/utils.go b/pkg/common/cns-lib/vsphere/utils.go index 058a35e81a..ea846b22e5 100644 --- a/pkg/common/cns-lib/vsphere/utils.go +++ b/pkg/common/cns-lib/vsphere/utils.go @@ -15,6 +15,7 @@ import ( "github.com/davecgh/go-spew/spew" + "github.com/vmware/govmomi/cns" cnstypes "github.com/vmware/govmomi/cns/types" "github.com/vmware/govmomi/sts" "github.com/vmware/govmomi/vapi/rest" @@ -356,3 +357,16 @@ func GetDatastoreInfoByURL(ctx context.Context, vc *VirtualCenter, clusterID, ds } return nil, fmt.Errorf("datastore corresponding to URL %v not found in cluster %v", dsURL, clusterID) } + +// isVsan67u3Release returns true if it is vSAN 67u3 Release of vCenter. +func isVsan67u3Release(ctx context.Context, m *defaultVirtualCenterManager, host string) (bool, error) { + log := logger.GetLogger(ctx) + log.Debug("Checking if vCenter version is of vsan 67u3 release") + vc, err := m.GetVirtualCenter(ctx, host) + if err != nil || vc == nil { + log.Errorf("failed to get vcenter version. Err: %v", err) + return false, err + } + log.Debugf("vCenter version is :%q", vc.Client.Version) + return vc.Client.Version == cns.ReleaseVSAN67u3, nil +} diff --git a/pkg/common/cns-lib/vsphere/virtualcentermanager.go b/pkg/common/cns-lib/vsphere/virtualcentermanager.go index 30c1362152..f2387364f5 100644 --- a/pkg/common/cns-lib/vsphere/virtualcentermanager.go +++ b/pkg/common/cns-lib/vsphere/virtualcentermanager.go @@ -48,6 +48,10 @@ type VirtualCenterManager interface { UnregisterVirtualCenter(ctx context.Context, host string) error // UnregisterAllVirtualCenters disconnects and unregisters all virtual centers. UnregisterAllVirtualCenters(ctx context.Context) error + // IsvSANFileServicesSupported checks if vSAN file services is supported or not. + IsvSANFileServicesSupported(ctx context.Context, host string) (bool, error) + // IsExtendVolumeSupported checks if extend volume is supported or not. 
+ IsExtendVolumeSupported(ctx context.Context, host string) (bool, error) } var ( @@ -145,3 +149,25 @@ func (m *defaultVirtualCenterManager) UnregisterAllVirtualCenters(ctx context.Co }) return err } + +// IsvSANFileServicesSupported checks if vSAN file services is supported or not. +func (m *defaultVirtualCenterManager) IsvSANFileServicesSupported(ctx context.Context, host string) (bool, error) { + log := logger.GetLogger(ctx) + is67u3Release, err := isVsan67u3Release(ctx, m, host) + if err != nil { + log.Errorf("Failed to identify the vCenter release with error: %+v", err) + return false, err + } + return !is67u3Release, nil +} + +// IsExtendVolumeSupported checks if extend volume is supported or not. +func (m *defaultVirtualCenterManager) IsExtendVolumeSupported(ctx context.Context, host string) (bool, error) { + log := logger.GetLogger(ctx) + is67u3Release, err := isVsan67u3Release(ctx, m, host) + if err != nil { + log.Errorf("Failed to identify the vCenter release with error: %+v", err) + return false, err + } + return !is67u3Release, nil +} diff --git a/pkg/csi/service/common/authmanager.go b/pkg/csi/service/common/authmanager.go index a48c593e51..4468c2a4a3 100644 --- a/pkg/csi/service/common/authmanager.go +++ b/pkg/csi/service/common/authmanager.go @@ -201,12 +201,18 @@ func GenerateDatastoreMapForBlockVolumes(ctx context.Context, vc *vsphere.Virtua // It will return datastores which has the privileges for creating file volume func GenerateDatastoreMapForFileVolumes(ctx context.Context, vc *vsphere.VirtualCenter) (map[string]*cnsvsphere.DatastoreInfo, error) { log := logger.GetLogger(ctx) + dsURLToInfoMap := make(map[string]*cnsvsphere.DatastoreInfo) // get all vSAN datastores from VC vsanDsURLToInfoMap, err := vc.GetVsanDatastores(ctx) if err != nil { log.Errorf("failed to get vSAN datastores with error %+v", err) return nil, err } + // Return empty map if no vSAN datastores are found. + if len(vsanDsURLToInfoMap) == 0 { + log.Debug("No vSAN datastores found") + return dsURLToInfoMap, nil + } var allvsanDatastoreUrls []string for dsURL := range vsanDsURLToInfoMap { allvsanDatastoreUrls = append(allvsanDatastoreUrls, dsURL) @@ -217,7 +223,6 @@ func GenerateDatastoreMapForFileVolumes(ctx context.Context, vc *vsphere.Virtual return nil, err } - dsURLToInfoMap := make(map[string]*cnsvsphere.DatastoreInfo) for dsURL, dsInfo := range vsanDsURLToInfoMap { if val, ok := fsEnabledMap[dsURL]; ok { if val { @@ -384,6 +389,14 @@ func getDsToFileServiceEnabledMap(ctx context.Context, vc *vsphere.VirtualCenter log.Errorf("failed to get the vsan cluster config. error: %+v", err) return nil, err } + if !(*config.Enabled) { + log.Debugf("cluster: %+v is a non-vSAN cluster. Skipping this cluster", cluster) + continue + } else if config.FileServiceConfig == nil { + log.Debugf("VsanClusterGetConfig.FileServiceConfig is empty. 
Skipping this cluster: %+v with config: %+v", + cluster, config) + continue + } log.Debugf("cluster: %+v has vSAN file services enabled: %t", cluster, config.FileServiceConfig.Enabled) var dsList []vim25types.ManagedObjectReference var dsMoList []mo.Datastore diff --git a/pkg/csi/service/vanilla/controller.go b/pkg/csi/service/vanilla/controller.go index 544346aa56..0c61362c2e 100644 --- a/pkg/csi/service/vanilla/controller.go +++ b/pkg/csi/service/vanilla/controller.go @@ -31,7 +31,6 @@ import ( "github.com/container-storage-interface/spec/lib/go/csi" "github.com/fsnotify/fsnotify" - "github.com/vmware/govmomi/cns" cnstypes "github.com/vmware/govmomi/cns/types" "github.com/vmware/govmomi/units" "github.com/vmware/govmomi/vapi/tags" @@ -66,23 +65,6 @@ type controller struct { // volumeMigrationService holds the pointer to VolumeMigration instance var volumeMigrationService migration.VolumeMigrationService -var ( - // VSAN67u3ControllerServiceCapability represents the capability of controller service - // for VSAN67u3 release - VSAN67u3ControllerServiceCapability = []csi.ControllerServiceCapability_RPC_Type{ - csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, - csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME, - } - - // VSAN7ControllerServiceCapability represents the capability of controller service - // for VSAN 7.0 release - VSAN7ControllerServiceCapability = []csi.ControllerServiceCapability_RPC_Type{ - csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, - csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME, - csi.ControllerServiceCapability_RPC_EXPAND_VOLUME, - } -) - // New creates a CNS controller func New() csitypes.CnsController { return &controller{} @@ -167,8 +149,15 @@ func (c *controller) Init(config *cnsconfig.Config, version string) error { c.authMgr = authMgr go common.ComputeDatastoreMapForBlockVolumes(authMgr.(*common.AuthManager), config.Global.CSIAuthCheckIntervalInMin) - go common.ComputeDatastoreMapForFileVolumes(authMgr.(*common.AuthManager), - config.Global.CSIAuthCheckIntervalInMin) + isvSANFileServicesSupported, err := c.manager.VcenterManager.IsvSANFileServicesSupported(ctx, c.manager.VcenterConfig.Host) + if err != nil { + log.Errorf("failed to verify if vSAN file services is supported or not. Error:%+v", err) + return err + } + if isvSANFileServicesSupported { + go common.ComputeDatastoreMapForFileVolumes(authMgr.(*common.AuthManager), + config.Global.CSIAuthCheckIntervalInMin) + } } watcher, err := fsnotify.NewWatcher() @@ -612,12 +601,12 @@ func (c *controller) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequ } if common.IsFileVolumeRequest(ctx, volumeCapabilities) { volumeType = prometheus.PrometheusFileVolumeType - vsan67u3Release, err := isVsan67u3Release(ctx, c) + isvSANFileServicesSupported, err := c.manager.VcenterManager.IsvSANFileServicesSupported(ctx, c.manager.VcenterConfig.Host) if err != nil { - log.Error("failed to get vcenter version to help identify if fileshare volume creation should be permitted or not. Error:%v", err) + log.Errorf("failed to verify if vSAN file services is supported or not. 
Error:%+v", err) return nil, status.Error(codes.Internal, err.Error()) } - if vsan67u3Release { + if !isvSANFileServicesSupported { msg := "fileshare volume creation is not supported on vSAN 67u3 release" log.Error(msg) return nil, status.Error(codes.FailedPrecondition, msg) @@ -998,22 +987,6 @@ func (c *controller) GetCapacity(ctx context.Context, req *csi.GetCapacityReques return nil, status.Error(codes.Unimplemented, "") } -// isVsan67u3Release returns true if controller is dealing with vSAN 67u3 Release of vCenter. -func isVsan67u3Release(ctx context.Context, c *controller) (bool, error) { - log := logger.GetLogger(ctx) - log.Debug("Checking if vCenter version is of vsan 67u3 release") - if c.manager == nil || c.manager.VolumeManager == nil { - return false, errors.New("cannot retrieve vcenter version. controller manager is not initialized") - } - vc, err := c.manager.VcenterManager.GetVirtualCenter(ctx, c.manager.VcenterConfig.Host) - if err != nil || vc == nil { - log.Errorf("failed to get vcenter version. Err: %v", err) - return false, err - } - log.Debugf("vCenter version is :%q", vc.Client.Version) - return vc.Client.Version == cns.ReleaseVSAN67u3, nil -} - // initVolumeMigrationService is a helper method to initialize volumeMigrationService in controller func initVolumeMigrationService(ctx context.Context, c *controller) error { log := logger.GetLogger(ctx) @@ -1038,19 +1011,20 @@ func (c *controller) ControllerGetCapabilities(ctx context.Context, req *csi.Con log := logger.GetLogger(ctx) log.Infof("ControllerGetCapabilities: called with args %+v", *req) - var controllerCaps []csi.ControllerServiceCapability_RPC_Type - - vsan67u3Release, err := isVsan67u3Release(ctx, c) + isExtendSupported, err := c.manager.VcenterManager.IsExtendVolumeSupported(ctx, c.manager.VcenterConfig.Host) if err != nil { - log.Error("failed to get vcenter version to help identify controller service capabilities") + log.Errorf("failed to verify if extend volume is supported or not. 
Error:%+v", err) return nil, status.Error(codes.FailedPrecondition, err.Error()) } - if vsan67u3Release { - controllerCaps = VSAN67u3ControllerServiceCapability - } else { - controllerCaps = VSAN7ControllerServiceCapability + controllerCaps := []csi.ControllerServiceCapability_RPC_Type{ + csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, + csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME, + } + if isExtendSupported { + log.Debug("Adding extend volume capability to default capabilities") + controllerCaps = append(controllerCaps, + csi.ControllerServiceCapability_RPC_EXPAND_VOLUME) } - var caps []*csi.ControllerServiceCapability for _, cap := range controllerCaps { c := &csi.ControllerServiceCapability{ From c8c3732813dff0b5de32af2c09c07def4ab4a859 Mon Sep 17 00:00:00 2001 From: BaluDontu Date: Thu, 18 Mar 2021 12:25:14 -0700 Subject: [PATCH 28/36] Enable CSI auth check --- .../vsphere-67u3/deploy/vsphere-csi-controller-deployment.yaml | 2 +- .../vsphere-7.0/deploy/vsphere-csi-controller-deployment.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-controller-deployment.yaml b/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-controller-deployment.yaml index fd77733ae5..261bc00786 100644 --- a/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-controller-deployment.yaml @@ -159,7 +159,7 @@ spec: apiVersion: v1 data: "csi-migration": "false" - "csi-auth-check": "false" + "csi-auth-check": "true" kind: ConfigMap metadata: name: internal-feature-states.csi.vsphere.vmware.com diff --git a/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-controller-deployment.yaml b/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-controller-deployment.yaml index 7bd2770bcf..20ac8476bd 100644 --- a/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-controller-deployment.yaml @@ -175,7 +175,7 @@ spec: apiVersion: v1 data: "csi-migration": "false" - "csi-auth-check": "false" + "csi-auth-check": "true" "online-volume-extend": "false" kind: ConfigMap metadata: From 78627a1707fbb525c10c0e77acc1e67ec5b3729f Mon Sep 17 00:00:00 2001 From: Liping Xue Date: Fri, 19 Mar 2021 16:03:35 -0700 Subject: [PATCH 29/36] Refactor VMC related document. 
--- docs/book/SUMMARY.md | 1 - docs/book/driver-deployment/installation.md | 4 ++- docs/book/driver-deployment/prerequisites.md | 6 ++-- docs/book/features/vsphere_csi_vmc.md | 35 -------------------- 4 files changed, 6 insertions(+), 40 deletions(-) delete mode 100644 docs/book/features/vsphere_csi_vmc.md diff --git a/docs/book/SUMMARY.md b/docs/book/SUMMARY.md index 791971f5e0..93d53cc0ad 100644 --- a/docs/book/SUMMARY.md +++ b/docs/book/SUMMARY.md @@ -29,7 +29,6 @@ * [Volume Topology](features/volume_topology.md) * [Volume Health](features/volume_health.md) * [vSphere CSI Migration](features/vsphere_csi_migration.md) - * [vSphere CSI on VMC](features/vsphere_csi_vmc.md) * [Known Issues](known_issues.md) * [Troubleshooting](troubleshooting.md) * [Development](development.md) diff --git a/docs/book/driver-deployment/installation.md b/docs/book/driver-deployment/installation.md index dba8fa4c37..45cde4907b 100644 --- a/docs/book/driver-deployment/installation.md +++ b/docs/book/driver-deployment/installation.md @@ -86,6 +86,8 @@ Where the entries have the following meaning: - `datacenters` - list of all comma separated datacenter paths where kubernetes node VMs are present. When datacenter is located at the root, the name of datacenter is enough but when datacenter is placed in the folder, path needs to be specified as `folder/datacenter-name`. Please note since comma is used as a delimiter, the datacenter name itself must not contain a comma. +**Note:** To deploy the CSI driver for block volumes in a VMC environment, specify the cloudadmin user in the `user` field and the cloudadmin password in the `password` field of the vSphere configuration file. ### vSphere configuration file for file volumes For file volumes, there are some extra parameters added to the config to help specify network permissions and placement of volumes. A sample config file for file volumes is shown below. @@ -126,7 +128,7 @@ datacenters = ", , ..." targetvSANFileShareDatastoreURLs = "ds:///vmfs/volumes/vsan:52635b9067079319-95a7473222c4c9cd/" # Optional ``` -Some of the parameters have been explained in the previous section for block volumes. +Some of the parameters have been explained in the previous section for block volumes. `targetvSANFileShareDatastoreURLs` and the `NetPermissions` section are exclusive to file volumes and are optional. diff --git a/docs/book/driver-deployment/prerequisites.md b/docs/book/driver-deployment/prerequisites.md index 1c56df8ab2..8408c1e0aa 100644 --- a/docs/book/driver-deployment/prerequisites.md +++ b/docs/book/driver-deployment/prerequisites.md @@ -21,7 +21,7 @@ The following roles need to be created with sets of privileges; an illustrative govc sketch for creating one of these roles follows the table.
| Role | Privileges for the role | Required on |
|-------------------------|-------------------------|-------------|
-| CNS-DATASTORE | ![ROLE-CNS-DATASTORE](https://raw.githubusercontent.com/kubernetes-sigs/vsphere-csi-driver/master/docs/images/ROLE-CNS-DATASTORE.png)<br>govc role.ls CNS-DATASTORE<br>Datastore.FileManagement<br>System.Anonymous<br>System.Read<br>System.View | Shared datastores where persistent volumes need to be provisioned. |
+| CNS-DATASTORE | ![ROLE-CNS-DATASTORE](https://raw.githubusercontent.com/kubernetes-sigs/vsphere-csi-driver/master/docs/images/ROLE-CNS-DATASTORE.png)<br>govc role.ls CNS-DATASTORE<br>Datastore.FileManagement<br>System.Anonymous<br>System.Read<br>System.View | Shared datastores where persistent volumes need to be provisioned.<br><br>Note: Before CSI v2.2.0, all shared datastores were required to have the Datastore.FileManagement privilege. From CSI v2.2.0 this requirement has been relaxed: shared datastores that lack the Datastore.FileManagement privilege are skipped during volume provisioning, and no volumes are provisioned on them. |
| CNS-HOST-CONFIG-STORAGE | ![ROLE-CNS-HOST-CONFIG-STORAGE](https://raw.githubusercontent.com/kubernetes-sigs/vsphere-csi-driver/master/docs/images/ROLE-CNS-HOST-CONFIG-STORAGE.png)<br>% govc role.ls CNS-HOST-CONFIG-STORAGE<br>Host.Config.Storage<br>System.Anonymous<br>System.Read<br>System.View | Required on vSAN file service enabled vSAN cluster. Required for file volume only. |
| CNS-VM | ![ROLE-CNS-VM](https://raw.githubusercontent.com/kubernetes-sigs/vsphere-csi-driver/master/docs/images/ROLE-CNS-VM.png)<br>% govc role.ls CNS-VM<br>System.Anonymous<br>System.Read<br>System.View<br>VirtualMachine.Config.AddExistingDisk<br>VirtualMachine.Config.AddRemoveDevice | All node VMs. |
| CNS-SEARCH-AND-SPBM | ![ROLE-CNS-SEARCH-AND-SPBM](https://raw.githubusercontent.com/kubernetes-sigs/vsphere-csi-driver/master/docs/images/ROLE-CNS-SEARCH-AND-SPBM.png)<br>% govc role.ls CNS-SEARCH-AND-SPBM<br>Cns.Searchable<br>StorageProfile.View<br>System.Anonymous<br>System.Read<br>System.View | Root vCenter Server. |
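For illustration only, and not part of this patch: a role such as CNS-DATASTORE can be created and granted with `govc` roughly as sketched below. The principal name and datastore inventory path are placeholders, and the System.Anonymous, System.Read, and System.View privileges are implicit in every vCenter role, so only Datastore.FileManagement needs to be listed explicitly.

```bash
# Create the CNS-DATASTORE role; the System.* privileges from the table
# are added to every role automatically by vCenter.
govc role.create CNS-DATASTORE Datastore.FileManagement

# Grant the role to the CSI service account (placeholder principal and
# datastore path) on each shared datastore used for volume provisioning.
govc permissions.set -principal 'k8s-csi@vsphere.local' \
  -role CNS-DATASTORE '/datacenter-1/datastore/shared-vmfs-1'
```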
@@ -120,7 +120,7 @@ The VMs can also be configured by using the `govc` command-line tool. ```bash govc vm.change -vm '//vm/' -e="disk.enableUUID=1" ``` - + - Upgrade VM hardware version of node VMs to 15 or higher. Run the below command for all Node VMs that are part of the Kubernetes cluster. @@ -136,7 +136,7 @@ Follow the steps described under “Install the vSphere Cloud Provider Interface Installation steps for vSphere CPI are briefly described here Step-1: Taint nodes. - + Before installing CPI, verify all nodes (including master nodes) are tainted with "node.cloudprovider.kubernetes.io/uninitialized=true:NoSchedule". To taint nodes, use the following command diff --git a/docs/book/features/vsphere_csi_vmc.md b/docs/book/features/vsphere_csi_vmc.md deleted file mode 100644 index 4cbd800638..0000000000 --- a/docs/book/features/vsphere_csi_vmc.md +++ /dev/null @@ -1,35 +0,0 @@ - - - -# vSphere CSI Driver - VMware Cloud on AWS (VMC) support - -- [Introduction](#introduction) -- [Deploy vSphere CSI driver on VMC](#deploy-csi-on-vmc) - -**Note:** Feature to support vSphere CSI driver on VMC will be released with v2.2.0. v2.2.0 vSphere CSI driver on VMC will only support block volume. The minimum SDDC version to support this feature is 1.12. Please refer to [VMC release notes](https://docs.vmware.com/en/VMware-Cloud-on-AWS/0/rn/vmc-on-aws-relnotes.html) to get more details. - -## Introduction - -[VMware Cloud™ on AWS](https://cloud.vmware.com/vmc-aws) brings VMware’s enterprise-class SDDC software to the AWS Cloud with optimized access to AWS services. Powered by VMware Cloud Foundation, VMware Cloud on AWS integrates our compute, storage and network virtualization products (VMware vSphere®, vSAN™ and NSX®) along with VMware vCenter management, optimized to run on dedicated, elastic, bare-metal AWS infrastructure. - -VMware Cloud on AWS provides two vSAN datastores in each SDDC cluster: WorkloadDatastore, managed by the Cloud Administrator, and vsanDatastore, managed by VMware. The cloudadmin user does not have the privilege to create volumes on vsanDatastore and only has the privilege to create volumes on WorkloadDatastore. - -Without this feature, the cloudadmin user cannot provision PVs using the vSphere CSI driver on VMC because it has no privilege to create volumes on vsanDatastore. This feature enables the cloudadmin user to provision PVs using the vSphere CSI driver without specifying the datastore where the volume is provisioned. - -## Deploy vSphere CSI driver on VMC - -To deploy the vSphere CSI driver on VMC, please make sure to keep roles and privileges up to date as mentioned in the [roles and privileges requirement](https://vsphere-csi-driver.sigs.k8s.io/driver-deployment/prerequisites.html#roles_and_privileges). For the cloudadmin user, the CNS-DATASTORE role should only be assigned to WorkloadDatastore. - -Make sure to use the cloudadmin user and password in the `csi-vsphere.conf` file. This file will be used to create a Kubernetes secret for vSphere credentials, which is required to install the vSphere CSI driver. The following is a sample `csi-vsphere.conf` file used to deploy the vSphere CSI driver in a VMC environment.
- -```bash -[Global] -cluster-id = "unique-kubernetes-cluster-id" - -[VirtualCenter "1.2.3.4"] -insecure-flag = "true" -user = "cloudadmin vcenter username" -password = "cloudadmin vcenter password" -port = "443" -datacenters = "list of comma separated datacenter paths where node VMs are present" -``` From d5a87a7d31dbd5cf84e070c588b51c5742cdd88b Mon Sep 17 00:00:00 2001 From: Divyen Patel Date: Fri, 19 Mar 2021 15:44:02 -0700 Subject: [PATCH 30/36] re-try reload config operation --- cmd/syncer/main.go | 6 +- pkg/common/cns-lib/vsphere/virtualcenter.go | 67 ++++++++++- .../cns-lib/vsphere/virtualcentermanager.go | 29 +++-- pkg/common/config/types.go | 5 + pkg/csi/service/common/authmanager.go | 10 ++ pkg/csi/service/common/util.go | 14 +++ pkg/csi/service/vanilla/controller.go | 79 ++++++++----- pkg/csi/service/vanilla/controller_test.go | 4 + .../cnsfileaccessconfig_controller.go | 8 +- .../cnsnodevmattachment_controller.go | 12 +- .../cnsregistervolume_controller.go | 13 ++- .../cnsvolumemetadata_controller.go | 12 +- .../cnsoperator/controller/controller.go | 6 +- pkg/syncer/cnsoperator/manager/init.go | 11 +- pkg/syncer/fullsync.go | 3 +- pkg/syncer/metadatasyncer.go | 32 +++--- pkg/syncer/storagepool/service.go | 8 +- pkg/syncer/syncer_test.go | 3 +- pkg/syncer/types.go | 4 +- pkg/syncer/types/commontypes.go | 105 ------------------ 20 files changed, 225 insertions(+), 206 deletions(-) delete mode 100644 pkg/syncer/types/commontypes.go diff --git a/cmd/syncer/main.go b/cmd/syncer/main.go index 618be90e11..54c33f76af 100644 --- a/cmd/syncer/main.go +++ b/cmd/syncer/main.go @@ -25,6 +25,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promhttp" "sigs.k8s.io/vsphere-csi-driver/pkg/common/prometheus" + "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/common" "github.com/kubernetes-csi/csi-lib-utils/leaderelection" cnstypes "github.com/vmware/govmomi/cns/types" @@ -38,7 +39,6 @@ import ( "sigs.k8s.io/vsphere-csi-driver/pkg/syncer/cnsoperator/manager" "sigs.k8s.io/vsphere-csi-driver/pkg/syncer/k8scloudoperator" "sigs.k8s.io/vsphere-csi-driver/pkg/syncer/storagepool" - "sigs.k8s.io/vsphere-csi-driver/pkg/syncer/types" ) // OperationModeWebHookServer starts container for webhook server @@ -88,7 +88,7 @@ func main() { } else if *operationMode == operationModeMetaDataSync { log.Infof("Starting container with operation mode: %v", operationModeMetaDataSync) var err error - configInfo, err := types.InitConfigInfo(ctx) + configInfo, err := common.InitConfigInfo(ctx) if err != nil { log.Errorf("failed to initialize the configInfo. Err: %+v", err) os.Exit(1) @@ -151,7 +151,7 @@ func main() { // initSyncerComponents initializes syncer components that are dependant on the leader election algorithm. // This function is only called by the leader instance of vsphere-syncer, if enabled. 
// TODO: Change name from initSyncerComponents to initComponents where will be the name of this container -func initSyncerComponents(ctx context.Context, clusterFlavor cnstypes.CnsClusterFlavor, configInfo *types.ConfigInfo, coInitParams *interface{}) func(ctx context.Context) { +func initSyncerComponents(ctx context.Context, clusterFlavor cnstypes.CnsClusterFlavor, configInfo *config.ConfigurationInfo, coInitParams *interface{}) func(ctx context.Context) { return func(ctx context.Context) { log := logger.GetLogger(ctx) // Initialize CNS Operator for Supervisor clusters diff --git a/pkg/common/cns-lib/vsphere/virtualcenter.go b/pkg/common/cns-lib/vsphere/virtualcenter.go index 83e1b43543..e2bee5ac10 100644 --- a/pkg/common/cns-lib/vsphere/virtualcenter.go +++ b/pkg/common/cns-lib/vsphere/virtualcenter.go @@ -31,6 +31,7 @@ import ( "github.com/vmware/govmomi/property" "github.com/vmware/govmomi/vsan" + "sigs.k8s.io/vsphere-csi-driver/pkg/common/config" "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/logger" "github.com/vmware/govmomi" @@ -66,6 +67,15 @@ type VirtualCenter struct { VsanClient *vsan.Client } +var ( + // VirtualCenter instance + vCenterInstance *VirtualCenter + // Ensure vcenter is a singleton + vCenterInitialized bool + // vCenterInstanceLock is used for handling race conditions while initializing a vCenter instance + vCenterInstanceLock = &sync.RWMutex{} +) + func (vc *VirtualCenter) String() string { return fmt.Sprintf("VirtualCenter [Config: %v, Client: %v, PbmClient: %v]", vc.Config, vc.Client, vc.PbmClient) @@ -214,9 +224,11 @@ func (vc *VirtualCenter) Connect(ctx context.Context) error { // Logging out of the current session to make sure we // retry creating a new client in the next attempt defer func() { - logoutErr := vc.Client.Logout(ctx) - if logoutErr != nil { - log.Errorf("Could not logout of VC session. Error: %v", logoutErr) + if vc.Client != nil { + logoutErr := vc.Client.Logout(ctx) + if logoutErr != nil { + log.Errorf("Could not logout of VC session. Error: %v", logoutErr) + } } }() } @@ -458,3 +470,52 @@ func (vc *VirtualCenter) GetDatastoresByCluster(ctx context.Context, clusterMore } return dsList, nil } + +// GetVirtualCenterInstance returns the vcenter object singleton. +// It is thread safe. +// Takes in a boolean paramater reloadConfig. +// If reinitialize is true, the vcenter object is instantiated again and the old object becomes eligible for garbage collection. +// If reinitialize is false and instance was already initialized, the previous instance is returned. +func GetVirtualCenterInstance(ctx context.Context, config *config.ConfigurationInfo, reinitialize bool) (*VirtualCenter, error) { + log := logger.GetLogger(ctx) + vCenterInstanceLock.Lock() + defer vCenterInstanceLock.Unlock() + + if !vCenterInitialized || reinitialize { + log.Infof("Initializing new vCenterInstance.") + + var vcconfig *VirtualCenterConfig + vcconfig, err := GetVirtualCenterConfig(ctx, config.Cfg) + if err != nil { + log.Errorf("failed to get VirtualCenterConfig. 
Err: %+v", err) + return nil, err + } + + // Initialize the virtual center manager + virtualcentermanager := GetVirtualCenterManager(ctx) + + //Unregister all VCs from virtual center manager + if err = virtualcentermanager.UnregisterAllVirtualCenters(ctx); err != nil { + log.Errorf("failed to unregister vcenter with virtualCenterManager.") + return nil, err + } + + // Register with virtual center manager + vCenterInstance, err = virtualcentermanager.RegisterVirtualCenter(ctx, vcconfig) + if err != nil { + log.Errorf("failed to register VirtualCenter . Err: %+v", err) + return nil, err + } + + // Connect to VC + err = vCenterInstance.Connect(ctx) + if err != nil { + log.Errorf("failed to connect to VirtualCenter host: %q. Err: %+v", vcconfig.Host, err) + return nil, err + } + + vCenterInitialized = true + log.Info("vCenterInstance initialized") + } + return vCenterInstance, nil +} diff --git a/pkg/common/cns-lib/vsphere/virtualcentermanager.go b/pkg/common/cns-lib/vsphere/virtualcentermanager.go index f2387364f5..ed34aab087 100644 --- a/pkg/common/cns-lib/vsphere/virtualcentermanager.go +++ b/pkg/common/cns-lib/vsphere/virtualcentermanager.go @@ -119,23 +119,22 @@ func (m *defaultVirtualCenterManager) UnregisterVirtualCenter(ctx context.Contex log := logger.GetLogger(ctx) vc, err := m.GetVirtualCenter(ctx, host) if err != nil { - log.Errorf("failed to find VC %s, couldn't unregister", host) + if err == ErrVCNotFound { + log.Warnf("failed to find vCenter: %q Assuming vCenter is already unregistered.", host) + return nil + } return err } - if vc != nil { - if err = vc.DisconnectPbm(ctx); err != nil { - log.Warnf("failed to disconnect VC pbm %s, couldn't unregister", host) - } - if err = vc.Disconnect(ctx); err != nil { - log.Warnf("failed to disconnect VC %s, couldn't unregister", host) - } - vc.DisconnectCns(ctx) - m.virtualCenters.Delete(host) - log.Infof("Successfully unregistered VC %s", host) - return nil + if err = vc.DisconnectPbm(ctx); err != nil { + log.Warnf("failed to disconnect VC pbm %s, couldn't unregister", host) } - log.Warnf("failed to find VC %s, couldn't unregister", host) - return err + if err = vc.Disconnect(ctx); err != nil { + log.Warnf("failed to disconnect VC %s, couldn't unregister", host) + } + vc.DisconnectCns(ctx) + m.virtualCenters.Delete(host) + log.Infof("Successfully unregistered VC %s", host) + return nil } func (m *defaultVirtualCenterManager) UnregisterAllVirtualCenters(ctx context.Context) error { @@ -143,7 +142,7 @@ func (m *defaultVirtualCenterManager) UnregisterAllVirtualCenters(ctx context.Co log := logger.GetLogger(ctx) m.virtualCenters.Range(func(hostInf, _ interface{}) bool { if err = m.UnregisterVirtualCenter(ctx, hostInf.(string)); err != nil { - log.Warnf("failed to unregister VC %v", hostInf) + log.Warnf("failed to unregister vCenter: %q, err: %+v", hostInf.(string), err) } return true }) diff --git a/pkg/common/config/types.go b/pkg/common/config/types.go index db044167b4..53c7e901dc 100644 --- a/pkg/common/config/types.go +++ b/pkg/common/config/types.go @@ -76,6 +76,11 @@ type Config struct { } } +// ConfigurationInfo is a struct that used to capture config param details +type ConfigurationInfo struct { + Cfg *Config +} + // FeatureStatesConfigInfo contains the details about feature states configmap type FeatureStatesConfigInfo struct { Name string diff --git a/pkg/csi/service/common/authmanager.go b/pkg/csi/service/common/authmanager.go index 4468c2a4a3..f22ceaa675 100644 --- a/pkg/csi/service/common/authmanager.go +++ 
b/pkg/csi/service/common/authmanager.go @@ -46,6 +46,9 @@ type AuthorizationService interface { // GetDatastoreMapForFileVolumes returns a map of datastore URL to datastore info for only those // datastores the CSI VC user has Host.Config.Storage privilege on vSAN cluster with vSAN FS enabled. GetDatastoreMapForFileVolumes(ctx context.Context) map[string]*cnsvsphere.DatastoreInfo + + // ResetvCenterInstance sets new vCenter instance for AuthorizationService + ResetvCenterInstance(ctx context.Context, vCenter *cnsvsphere.VirtualCenter) } // AuthManager maintains an internal map to track the datastores that need to be used by create volume @@ -108,6 +111,13 @@ func (authManager *AuthManager) GetDatastoreMapForFileVolumes(ctx context.Contex return datastoreMapForFileVolumes } +// ResetvCenterInstance sets new vCenter instance for AuthorizationService +func (authManager *AuthManager) ResetvCenterInstance(ctx context.Context, vCenter *cnsvsphere.VirtualCenter) { + log := logger.GetLogger(ctx) + log.Info("Resetting vCenter Instance in the AuthManager") + authManager.vcenter = vCenter +} + // refreshDatastoreMapForBlockVolumes scans all datastores in vCenter to check privileges, and compute the // datastoreMapForBlockVolumes func (authManager *AuthManager) refreshDatastoreMapForBlockVolumes() { diff --git a/pkg/csi/service/common/util.go b/pkg/csi/service/common/util.go index 9e3bd535af..ff70530184 100644 --- a/pkg/csi/service/common/util.go +++ b/pkg/csi/service/common/util.go @@ -303,6 +303,20 @@ func GetConfig(ctx context.Context) (*cnsconfig.Config, error) { return cfg, err } +// InitConfigInfo initializes the ConfigurationInfo struct +func InitConfigInfo(ctx context.Context) (*cnsconfig.ConfigurationInfo, error) { + log := logger.GetLogger(ctx) + cfg, err := GetConfig(ctx) + if err != nil { + log.Errorf("failed to read config. Error: %+v", err) + return nil, err + } + configInfo := &cnsconfig.ConfigurationInfo{ + Cfg: cfg, + } + return configInfo, nil +} + // GetK8sCloudOperatorServicePort return the port to connect the K8sCloudOperator gRPC service. 
// If environment variable POD_LISTENER_SERVICE_PORT is set and valid, // return the interval value read from environment variable diff --git a/pkg/csi/service/vanilla/controller.go b/pkg/csi/service/vanilla/controller.go index 0c61362c2e..d319a1986b 100644 --- a/pkg/csi/service/vanilla/controller.go +++ b/pkg/csi/service/vanilla/controller.go @@ -26,11 +26,9 @@ import ( "strings" "time" - "github.com/prometheus/client_golang/prometheus/promhttp" - "sigs.k8s.io/vsphere-csi-driver/pkg/common/prometheus" - "github.com/container-storage-interface/spec/lib/go/csi" "github.com/fsnotify/fsnotify" + "github.com/prometheus/client_golang/prometheus/promhttp" cnstypes "github.com/vmware/govmomi/cns/types" "github.com/vmware/govmomi/units" "github.com/vmware/govmomi/vapi/tags" @@ -41,6 +39,7 @@ import ( cnsvolume "sigs.k8s.io/vsphere-csi-driver/pkg/common/cns-lib/volume" cnsvsphere "sigs.k8s.io/vsphere-csi-driver/pkg/common/cns-lib/vsphere" cnsconfig "sigs.k8s.io/vsphere-csi-driver/pkg/common/config" + "sigs.k8s.io/vsphere-csi-driver/pkg/common/prometheus" "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/common" "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/common/commonco" "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/logger" @@ -175,9 +174,15 @@ func (c *controller) Init(config *cnsconfig.Config, version string) error { } log.Debugf("fsnotify event: %q", event.String()) if event.Op&fsnotify.Remove == fsnotify.Remove { - log.Infof("Reloading Configuration") - c.ReloadConfiguration(ctx) - log.Infof("Successfully reloaded configuration from: %q", cfgPath) + for { + reloadConfigErr := c.ReloadConfiguration() + if reloadConfigErr == nil { + log.Infof("Successfully reloaded configuration from: %q", cfgPath) + break + } + log.Errorf("failed to reload configuration. will retry again in 5 seconds. err: %+v", reloadConfigErr) + time.Sleep(5 * time.Second) + } } case err, ok := <-watcher.Errors: if !ok { @@ -221,52 +226,74 @@ func (c *controller) Init(config *cnsconfig.Config, version string) error { // ReloadConfiguration reloads configuration from the secret, and update controller's config cache // and VolumeManager's VC Config cache. -func (c *controller) ReloadConfiguration(ctx context.Context) { - log := logger.GetLogger(ctx) +func (c *controller) ReloadConfiguration() error { + ctx, log := logger.GetNewContextWithLogger() + log.Info("Reloading Configuration") cfg, err := common.GetConfig(ctx) if err != nil { - log.Errorf("failed to read config. Error: %+v", err) - return + msg := fmt.Sprintf("failed to read config. Error: %+v", err) + log.Error(msg) + return errors.New(msg) } newVCConfig, err := cnsvsphere.GetVirtualCenterConfig(ctx, cfg) if err != nil { log.Errorf("failed to get VirtualCenterConfig. err=%v", err) - return + return err } if newVCConfig != nil { var vcenter *cnsvsphere.VirtualCenter if c.manager.VcenterConfig.Host != newVCConfig.Host || c.manager.VcenterConfig.Username != newVCConfig.Username || c.manager.VcenterConfig.Password != newVCConfig.Password { - log.Debugf("Unregistering virtual center: %q from virtualCenterManager", c.manager.VcenterConfig.Host) - err = c.manager.VcenterManager.UnregisterAllVirtualCenters(ctx) - if err != nil { - log.Errorf("failed to unregister vcenter with virtualCenterManager.") - return + + // Verify if new configuration has valid credentials by connecting to vCenter. + // Proceed only if the connection succeeds, else return error. 
+ newVC := &cnsvsphere.VirtualCenter{Config: newVCConfig} + if err = newVC.Connect(ctx); err != nil { + msg := fmt.Sprintf("failed to connect to VirtualCenter host: %q, Err: %+v", newVCConfig.Host, err) + log.Error(msg) + return errors.New(msg) } - log.Debugf("Registering virtual center: %q with virtualCenterManager", newVCConfig.Host) - vcenter, err = c.manager.VcenterManager.RegisterVirtualCenter(ctx, newVCConfig) + + // Reset virtual center singleton instance by passing reload flag as true + log.Info("Obtaining new vCenterInstance using new credentials") + vcenter, err = cnsvsphere.GetVirtualCenterInstance(ctx, &cnsconfig.ConfigurationInfo{Cfg: cfg}, true) if err != nil { - log.Errorf("failed to register VC with virtualCenterManager. err=%v", err) - return + msg := fmt.Sprintf("failed to get VirtualCenter. err=%v", err) + log.Error(msg) + return errors.New(msg) } - c.manager.VcenterManager = cnsvsphere.GetVirtualCenterManager(ctx) } else { - vcenter, err = c.manager.VcenterManager.GetVirtualCenter(ctx, newVCConfig.Host) + // If it's not a VC host or VC credentials update, same singleton instance can be used + // and it's Config field can be updated + vcenter, err = cnsvsphere.GetVirtualCenterInstance(ctx, &cnsconfig.ConfigurationInfo{Cfg: cfg}, false) if err != nil { - log.Errorf("failed to get VirtualCenter. err=%v", err) - return + msg := fmt.Sprintf("failed to get VirtualCenter. err=%v", err) + log.Error(msg) + return errors.New(msg) } vcenter.Config = newVCConfig } c.manager.VolumeManager.ResetManager(ctx, vcenter) - c.manager.VolumeManager = cnsvolume.GetManager(ctx, vcenter) c.manager.VcenterConfig = newVCConfig + c.manager.VolumeManager = cnsvolume.GetManager(ctx, vcenter) + // Re-Initialize Node Manager to cache latest vCenter config + c.nodeMgr = &Nodes{} + err = c.nodeMgr.Initialize(ctx) + if err != nil { + log.Errorf("failed to re-initialize nodeMgr. 
err=%v", err) + return err + } + if c.authMgr != nil { + c.authMgr.ResetvCenterInstance(ctx, vcenter) + log.Debugf("Updated vCenter in auth manager") + } } if cfg != nil { - log.Debugf("Updating manager.CnsConfig") c.manager.CnsConfig = cfg + log.Debugf("Updated manager.CnsConfig") } + return nil } func (c *controller) filterDatastores(ctx context.Context, sharedDatastores []*cnsvsphere.DatastoreInfo) []*cnsvsphere.DatastoreInfo { diff --git a/pkg/csi/service/vanilla/controller_test.go b/pkg/csi/service/vanilla/controller_test.go index c28de19e4e..cd6f025f19 100644 --- a/pkg/csi/service/vanilla/controller_test.go +++ b/pkg/csi/service/vanilla/controller_test.go @@ -250,6 +250,10 @@ func (f *FakeAuthManager) GetDatastoreMapForFileVolumes(ctx context.Context) map return datastoreMapForFileVolumes } +func (f *FakeAuthManager) ResetvCenterInstance(ctx context.Context, vCenter *cnsvsphere.VirtualCenter) { + f.vcenter = vCenter +} + func getControllerTest(t *testing.T) *controllerTest { onceForControllerTest.Do(func() { // Create context diff --git a/pkg/syncer/cnsoperator/controller/cnsfileaccessconfig/cnsfileaccessconfig_controller.go b/pkg/syncer/cnsoperator/controller/cnsfileaccessconfig/cnsfileaccessconfig_controller.go index 26b5511474..a9804ff516 100644 --- a/pkg/syncer/cnsoperator/controller/cnsfileaccessconfig/cnsfileaccessconfig_controller.go +++ b/pkg/syncer/cnsoperator/controller/cnsfileaccessconfig/cnsfileaccessconfig_controller.go @@ -45,6 +45,7 @@ import ( cnsoperatorapis "sigs.k8s.io/vsphere-csi-driver/pkg/apis/cnsoperator" cnsfileaccessconfigv1alpha1 "sigs.k8s.io/vsphere-csi-driver/pkg/apis/cnsoperator/cnsfileaccessconfig/v1alpha1" volumes "sigs.k8s.io/vsphere-csi-driver/pkg/common/cns-lib/volume" + commonconfig "sigs.k8s.io/vsphere-csi-driver/pkg/common/config" "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/common" "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/common/commonco" "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/logger" @@ -53,7 +54,6 @@ import ( "sigs.k8s.io/vsphere-csi-driver/pkg/syncer" cnsoperatortypes "sigs.k8s.io/vsphere-csi-driver/pkg/syncer/cnsoperator/types" cnsoperatorutil "sigs.k8s.io/vsphere-csi-driver/pkg/syncer/cnsoperator/util" - "sigs.k8s.io/vsphere-csi-driver/pkg/syncer/types" ) const ( @@ -72,7 +72,7 @@ var ( // Add creates a new CnsFileAccessConfig Controller and adds it to the Manager. The Manager will set fields on the Controller // and Start it when the Manager is Started. 
-func Add(mgr manager.Manager, configInfo *types.ConfigInfo, volumeManager volumes.Manager) error { +func Add(mgr manager.Manager, configInfo *commonconfig.ConfigurationInfo, volumeManager volumes.Manager) error { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ctx = logger.NewContextWithLogger(ctx) @@ -134,7 +134,7 @@ func Add(mgr manager.Manager, configInfo *types.ConfigInfo, volumeManager volume } // newReconciler returns a new reconcile.Reconciler -func newReconciler(mgr manager.Manager, configInfo *types.ConfigInfo, volumeManager volumes.Manager, vmOperatorClient client.Client, dynamicClient dynamic.Interface, recorder record.EventRecorder) reconcile.Reconciler { +func newReconciler(mgr manager.Manager, configInfo *commonconfig.ConfigurationInfo, volumeManager volumes.Manager, vmOperatorClient client.Client, dynamicClient dynamic.Interface, recorder record.EventRecorder) reconcile.Reconciler { return &ReconcileCnsFileAccessConfig{client: mgr.GetClient(), scheme: mgr.GetScheme(), configInfo: configInfo, volumeManager: volumeManager, vmOperatorClient: vmOperatorClient, dynamicClient: dynamicClient, recorder: recorder} } @@ -171,7 +171,7 @@ type ReconcileCnsFileAccessConfig struct { // that reads objects from the cache and writes to the apiserver client client.Client scheme *runtime.Scheme - configInfo *types.ConfigInfo + configInfo *commonconfig.ConfigurationInfo volumeManager volumes.Manager vmOperatorClient client.Client dynamicClient dynamic.Interface diff --git a/pkg/syncer/cnsoperator/controller/cnsnodevmattachment/cnsnodevmattachment_controller.go b/pkg/syncer/cnsoperator/controller/cnsnodevmattachment/cnsnodevmattachment_controller.go index 82cd9b9abb..d3b639fb39 100644 --- a/pkg/syncer/cnsoperator/controller/cnsnodevmattachment/cnsnodevmattachment_controller.go +++ b/pkg/syncer/cnsoperator/controller/cnsnodevmattachment/cnsnodevmattachment_controller.go @@ -50,9 +50,9 @@ import ( cnsnode "sigs.k8s.io/vsphere-csi-driver/pkg/common/cns-lib/node" volumes "sigs.k8s.io/vsphere-csi-driver/pkg/common/cns-lib/volume" "sigs.k8s.io/vsphere-csi-driver/pkg/common/cns-lib/vsphere" + cnsvsphere "sigs.k8s.io/vsphere-csi-driver/pkg/common/cns-lib/vsphere" "sigs.k8s.io/vsphere-csi-driver/pkg/common/config" cnsoperatortypes "sigs.k8s.io/vsphere-csi-driver/pkg/syncer/cnsoperator/types" - "sigs.k8s.io/vsphere-csi-driver/pkg/syncer/types" ) const ( @@ -69,10 +69,10 @@ var ( backOffDurationMapMutex = sync.Mutex{} ) -// Add creates a new CnsNodeVmAttachment Controller and adds it to the Manager, ConfigInfo +// Add creates a new CnsNodeVmAttachment Controller and adds it to the Manager, vSphereSecretConfigInfo // and VirtualCenterTypes. The Manager will set fields on the Controller // and Start it when the Manager is Started. 
-func Add(mgr manager.Manager, configInfo *types.ConfigInfo, volumeManager volumes.Manager) error { +func Add(mgr manager.Manager, configInfo *config.ConfigurationInfo, volumeManager volumes.Manager) error { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ctx = logger.NewContextWithLogger(ctx) @@ -96,7 +96,7 @@ func Add(mgr manager.Manager, configInfo *types.ConfigInfo, volumeManager volume } // newReconciler returns a new reconcile.Reconciler -func newReconciler(mgr manager.Manager, configInfo *types.ConfigInfo, volumeManager volumes.Manager, recorder record.EventRecorder) reconcile.Reconciler { +func newReconciler(mgr manager.Manager, configInfo *config.ConfigurationInfo, volumeManager volumes.Manager, recorder record.EventRecorder) reconcile.Reconciler { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ctx = logger.NewContextWithLogger(ctx) @@ -138,7 +138,7 @@ type ReconcileCnsNodeVMAttachment struct { // that reads objects from the cache and writes to the apiserver client client.Client scheme *runtime.Scheme - configInfo *types.ConfigInfo + configInfo *config.ConfigurationInfo volumeManager volumes.Manager nodeManager cnsnode.Manager recorder record.EventRecorder @@ -208,7 +208,7 @@ func (r *ReconcileCnsNodeVMAttachment) Reconcile(request reconcile.Request) (rec } // Get node VM by nodeUUID var dc *vsphere.Datacenter - vcenter, err := types.GetVirtualCenterInstance(ctx, r.configInfo, false) + vcenter, err := cnsvsphere.GetVirtualCenterInstance(ctx, r.configInfo, false) if err != nil { msg := fmt.Sprintf("failed to get virtual center instance with error: %v", err) instance.Status.Error = err.Error() diff --git a/pkg/syncer/cnsoperator/controller/cnsregistervolume/cnsregistervolume_controller.go b/pkg/syncer/cnsoperator/controller/cnsregistervolume/cnsregistervolume_controller.go index 44e4d37fb9..62e6e39003 100644 --- a/pkg/syncer/cnsoperator/controller/cnsregistervolume/cnsregistervolume_controller.go +++ b/pkg/syncer/cnsoperator/controller/cnsregistervolume/cnsregistervolume_controller.go @@ -42,10 +42,11 @@ import ( apis "sigs.k8s.io/vsphere-csi-driver/pkg/apis/cnsoperator" cnsregistervolumev1alpha1 "sigs.k8s.io/vsphere-csi-driver/pkg/apis/cnsoperator/cnsregistervolume/v1alpha1" volumes "sigs.k8s.io/vsphere-csi-driver/pkg/common/cns-lib/volume" + cnsvsphere "sigs.k8s.io/vsphere-csi-driver/pkg/common/cns-lib/vsphere" + commonconfig "sigs.k8s.io/vsphere-csi-driver/pkg/common/config" "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/common" "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/logger" k8s "sigs.k8s.io/vsphere-csi-driver/pkg/kubernetes" - "sigs.k8s.io/vsphere-csi-driver/pkg/syncer/types" ) const ( @@ -63,10 +64,10 @@ var ( backOffDurationMapMutex = sync.Mutex{} ) -// Add creates a new CnsRegisterVolume Controller and adds it to the Manager, ConfigInfo +// Add creates a new CnsRegisterVolume Controller and adds it to the Manager, ConfigurationInfo // and VirtualCenterTypes. The Manager will set fields on the Controller // and Start it when the Manager is Started. 
-func Add(mgr manager.Manager, configInfo *types.ConfigInfo, volumeManager volumes.Manager) error { +func Add(mgr manager.Manager, configInfo *commonconfig.ConfigurationInfo, volumeManager volumes.Manager) error { ctx, log := logger.GetNewContextWithLogger() // Initializes kubernetes client k8sclient, err := k8s.NewClient(ctx) @@ -87,7 +88,7 @@ func Add(mgr manager.Manager, configInfo *types.ConfigInfo, volumeManager volume } // newReconciler returns a new reconcile.Reconciler -func newReconciler(mgr manager.Manager, configInfo *types.ConfigInfo, volumeManager volumes.Manager, recorder record.EventRecorder) reconcile.Reconciler { +func newReconciler(mgr manager.Manager, configInfo *commonconfig.ConfigurationInfo, volumeManager volumes.Manager, recorder record.EventRecorder) reconcile.Reconciler { return &ReconcileCnsRegisterVolume{client: mgr.GetClient(), scheme: mgr.GetScheme(), configInfo: configInfo, volumeManager: volumeManager, recorder: recorder} } @@ -123,7 +124,7 @@ type ReconcileCnsRegisterVolume struct { // that reads objects from the cache and writes to the apiserver client client.Client scheme *runtime.Scheme - configInfo *types.ConfigInfo + configInfo *commonconfig.ConfigurationInfo volumeManager volumes.Manager recorder record.EventRecorder } @@ -183,7 +184,7 @@ func (r *ReconcileCnsRegisterVolume) Reconcile(request reconcile.Request) (recon return reconcile.Result{RequeueAfter: timeout}, nil } - vc, err := types.GetVirtualCenterInstance(ctx, r.configInfo, false) + vc, err := cnsvsphere.GetVirtualCenterInstance(ctx, r.configInfo, false) if err != nil { msg := fmt.Sprintf("Failed to get virtual center instance with error: %+v", err) log.Error(msg) diff --git a/pkg/syncer/cnsoperator/controller/cnsvolumemetadata/cnsvolumemetadata_controller.go b/pkg/syncer/cnsoperator/controller/cnsvolumemetadata/cnsvolumemetadata_controller.go index d26fdd20f2..442911d123 100644 --- a/pkg/syncer/cnsoperator/controller/cnsvolumemetadata/cnsvolumemetadata_controller.go +++ b/pkg/syncer/cnsoperator/controller/cnsvolumemetadata/cnsvolumemetadata_controller.go @@ -25,6 +25,7 @@ import ( "sync" "time" + commonconfig "sigs.k8s.io/vsphere-csi-driver/pkg/common/config" "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/logger" "github.com/davecgh/go-spew/spew" @@ -53,7 +54,6 @@ import ( csitypes "sigs.k8s.io/vsphere-csi-driver/pkg/csi/types" k8s "sigs.k8s.io/vsphere-csi-driver/pkg/kubernetes" cnsoperatortypes "sigs.k8s.io/vsphere-csi-driver/pkg/syncer/cnsoperator/types" - "sigs.k8s.io/vsphere-csi-driver/pkg/syncer/types" ) const ( @@ -70,10 +70,10 @@ var ( backOffDurationMapMutex = sync.Mutex{} ) -// Add creates a new CnsVolumeMetadata Controller and adds it to the Manager, ConfigInfo, +// Add creates a new CnsVolumeMetadata Controller and adds it to the Manager, ConfigurationInfo, // volumeManager and k8sclient. The Manager will set fields on the Controller // and Start it when the Manager is Started. 
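+// Note: reconcilers in this package now resolve their vCenter handle through
+// cnsvsphere.GetVirtualCenterInstance, which caches a singleton connection;
+// passing reloadConfig=false reuses an already-initialized instance, e.g.
+//
+//	vCenter, err := cnsvsphere.GetVirtualCenterInstance(ctx, r.configInfo, false)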
-func Add(mgr manager.Manager, configInfo *types.ConfigInfo, volumeManager volumes.Manager) error { +func Add(mgr manager.Manager, configInfo *commonconfig.ConfigurationInfo, volumeManager volumes.Manager) error { // Initializes kubernetes client ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -97,7 +97,7 @@ func Add(mgr manager.Manager, configInfo *types.ConfigInfo, volumeManager volume } // newReconciler returns a new reconcile.Reconciler -func newReconciler(mgr manager.Manager, configInfo *types.ConfigInfo, volumeManager volumes.Manager, k8sclient kubernetes.Interface, recorder record.EventRecorder) reconcile.Reconciler { +func newReconciler(mgr manager.Manager, configInfo *commonconfig.ConfigurationInfo, volumeManager volumes.Manager, k8sclient kubernetes.Interface, recorder record.EventRecorder) reconcile.Reconciler { return &ReconcileCnsVolumeMetadata{client: mgr.GetClient(), scheme: mgr.GetScheme(), configInfo: configInfo, volumeManager: volumeManager, k8sclient: k8sclient, recorder: recorder} } @@ -163,7 +163,7 @@ var _ reconcile.Reconciler = &ReconcileCnsVolumeMetadata{} type ReconcileCnsVolumeMetadata struct { client client.Client scheme *runtime.Scheme - configInfo *types.ConfigInfo + configInfo *commonconfig.ConfigurationInfo volumeManager volumes.Manager k8sclient kubernetes.Interface recorder record.EventRecorder @@ -304,7 +304,7 @@ func (r *ReconcileCnsVolumeMetadata) Reconcile(request reconcile.Request) (recon func (r *ReconcileCnsVolumeMetadata) updateCnsMetadata(ctx context.Context, instance *cnsv1alpha1.CnsVolumeMetadata, deleteFlag bool) bool { log := logger.GetLogger(ctx) log.Debugf("ReconcileCnsVolumeMetadata: Calling updateCnsMetadata for instance %q with delete flag %v", instance.Name, deleteFlag) - vCenter, err := types.GetVirtualCenterInstance(ctx, r.configInfo, false) + vCenter, err := cnsvsphere.GetVirtualCenterInstance(ctx, r.configInfo, false) if err != nil { log.Errorf("ReconcileCnsVolumeMetadata: Failed to get virtual center instance. 
Err: %v", err) return false diff --git a/pkg/syncer/cnsoperator/controller/controller.go b/pkg/syncer/cnsoperator/controller/controller.go index 20f0fb4f83..f17a290221 100644 --- a/pkg/syncer/cnsoperator/controller/controller.go +++ b/pkg/syncer/cnsoperator/controller/controller.go @@ -19,14 +19,14 @@ package controller import ( "sigs.k8s.io/controller-runtime/pkg/manager" volumes "sigs.k8s.io/vsphere-csi-driver/pkg/common/cns-lib/volume" - "sigs.k8s.io/vsphere-csi-driver/pkg/syncer/types" + "sigs.k8s.io/vsphere-csi-driver/pkg/common/config" ) // AddToManagerFuncs is a list of functions to add all Controllers to the Manager -var AddToManagerFuncs []func(manager.Manager, *types.ConfigInfo, volumes.Manager) error +var AddToManagerFuncs []func(manager.Manager, *config.ConfigurationInfo, volumes.Manager) error // AddToManager adds all Controllers to the Manager -func AddToManager(manager manager.Manager, configInfo *types.ConfigInfo, volumeManager volumes.Manager) error { +func AddToManager(manager manager.Manager, configInfo *config.ConfigurationInfo, volumeManager volumes.Manager) error { for _, f := range AddToManagerFuncs { if err := f(manager, configInfo, volumeManager); err != nil { return err diff --git a/pkg/syncer/cnsoperator/manager/init.go b/pkg/syncer/cnsoperator/manager/init.go index 24f138b2f6..dd483e1b11 100644 --- a/pkg/syncer/cnsoperator/manager/init.go +++ b/pkg/syncer/cnsoperator/manager/init.go @@ -34,13 +34,14 @@ import ( cnsnodevmattachmentv1alpha1 "sigs.k8s.io/vsphere-csi-driver/pkg/apis/cnsoperator/cnsnodevmattachment/v1alpha1" cnsvolumemetadatav1alpha1 "sigs.k8s.io/vsphere-csi-driver/pkg/apis/cnsoperator/cnsvolumemetadata/v1alpha1" volumes "sigs.k8s.io/vsphere-csi-driver/pkg/common/cns-lib/volume" + "sigs.k8s.io/vsphere-csi-driver/pkg/common/cns-lib/vsphere" + commonconfig "sigs.k8s.io/vsphere-csi-driver/pkg/common/config" "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/common" "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/common/commonco" "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/logger" internal "sigs.k8s.io/vsphere-csi-driver/pkg/internal/cnsoperator/cnsfilevolumeclient/v1alpha1" k8s "sigs.k8s.io/vsphere-csi-driver/pkg/kubernetes" "sigs.k8s.io/vsphere-csi-driver/pkg/syncer/cnsoperator/controller" - "sigs.k8s.io/vsphere-csi-driver/pkg/syncer/types" ) var ( @@ -50,12 +51,12 @@ var ( ) type cnsOperator struct { - configInfo *types.ConfigInfo + configInfo *commonconfig.ConfigurationInfo coCommonInterface commonco.COCommonInterface } // InitCnsOperator initializes the Cns Operator -func InitCnsOperator(configInfo *types.ConfigInfo, coInitParams *interface{}) error { +func InitCnsOperator(configInfo *commonconfig.ConfigurationInfo, coInitParams *interface{}) error { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ctx = logger.NewContextWithLogger(ctx) @@ -64,7 +65,7 @@ func InitCnsOperator(configInfo *types.ConfigInfo, coInitParams *interface{}) er log.Infof("Initializing CNS Operator") cnsOperator := &cnsOperator{} cnsOperator.configInfo = configInfo - vCenter, err := types.GetVirtualCenterInstance(ctx, cnsOperator.configInfo, false) + vCenter, err := vsphere.GetVirtualCenterInstance(ctx, cnsOperator.configInfo, false) if err != nil { return err } @@ -235,7 +236,7 @@ func reloadConfiguration(ctx context.Context, cnsOperator *cnsOperator) error { log.Errorf("Failed to read config. 
Error: %+v", err) return err } - cnsOperator.configInfo = &types.ConfigInfo{Cfg: cfg} + cnsOperator.configInfo = &commonconfig.ConfigurationInfo{Cfg: cfg} log.Infof("Reloaded the value for CnsRegisterVolumesCleanupIntervalInMin to %d", cnsOperator.configInfo.Cfg.Global.CnsRegisterVolumesCleanupIntervalInMin) return nil } diff --git a/pkg/syncer/fullsync.go b/pkg/syncer/fullsync.go index 975ad0a3ef..67d3256d91 100644 --- a/pkg/syncer/fullsync.go +++ b/pkg/syncer/fullsync.go @@ -30,7 +30,6 @@ import ( cnsvsphere "sigs.k8s.io/vsphere-csi-driver/pkg/common/cns-lib/vsphere" "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/common" "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/logger" - "sigs.k8s.io/vsphere-csi-driver/pkg/syncer/types" ) // csiFullSync reconciles volume metadata on a vanilla k8s cluster @@ -109,7 +108,7 @@ func csiFullSync(ctx context.Context, metadataSyncer *metadataSyncInformer) { log.Debugf("FullSync: pvToCnsEntityMetadataMap %+v \n pvToK8sEntityMetadataMap: %+v \n", spew.Sdump(volumeToCnsEntityMetadataMap), spew.Sdump(volumeToK8sEntityMetadataMap)) log.Debugf("FullSync: volumes where clusterDistribution is set: %+v", volumeClusterDistributionMap) - vcenter, err := types.GetVirtualCenterInstance(ctx, metadataSyncer.configInfo, false) + vcenter, err := cnsvsphere.GetVirtualCenterInstance(ctx, metadataSyncer.configInfo, false) if err != nil { log.Errorf("FullSync: failed to get vcenter with error %+v", err) return diff --git a/pkg/syncer/metadatasyncer.go b/pkg/syncer/metadatasyncer.go index 31e3e29211..5849d58aac 100644 --- a/pkg/syncer/metadatasyncer.go +++ b/pkg/syncer/metadatasyncer.go @@ -36,9 +36,9 @@ import ( "k8s.io/client-go/informers" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/util/workqueue" - "sigs.k8s.io/vsphere-csi-driver/pkg/apis/migration" cnsoperatorv1alpha1 "sigs.k8s.io/vsphere-csi-driver/pkg/apis/cnsoperator" + "sigs.k8s.io/vsphere-csi-driver/pkg/apis/migration" volumes "sigs.k8s.io/vsphere-csi-driver/pkg/common/cns-lib/volume" cnsvsphere "sigs.k8s.io/vsphere-csi-driver/pkg/common/cns-lib/vsphere" cnsconfig "sigs.k8s.io/vsphere-csi-driver/pkg/common/config" @@ -48,7 +48,6 @@ import ( csitypes "sigs.k8s.io/vsphere-csi-driver/pkg/csi/types" k8s "sigs.k8s.io/vsphere-csi-driver/pkg/kubernetes" "sigs.k8s.io/vsphere-csi-driver/pkg/syncer/storagepool" - "sigs.k8s.io/vsphere-csi-driver/pkg/syncer/types" ) var ( @@ -111,7 +110,7 @@ func getVolumeHealthIntervalInMin(ctx context.Context) int { } // InitMetadataSyncer initializes the Metadata Sync Informer -func InitMetadataSyncer(ctx context.Context, clusterFlavor cnstypes.CnsClusterFlavor, configInfo *types.ConfigInfo) error { +func InitMetadataSyncer(ctx context.Context, clusterFlavor cnstypes.CnsClusterFlavor, configInfo *cnsconfig.ConfigurationInfo) error { log := logger.GetLogger(ctx) var err error log.Infof("Initializing MetadataSyncer") @@ -151,7 +150,7 @@ func InitMetadataSyncer(ctx context.Context, clusterFlavor cnstypes.CnsClusterFl } else { // Initialize volume manager with vcenter credentials // if metadata syncer is being intialized for Vanilla or Supervisor clusters - vCenter, err := types.GetVirtualCenterInstance(ctx, configInfo, false) + vCenter, err := cnsvsphere.GetVirtualCenterInstance(ctx, configInfo, false) if err != nil { return err } @@ -180,11 +179,14 @@ func InitMetadataSyncer(ctx context.Context, clusterFlavor cnstypes.CnsClusterFl } log.Debugf("fsnotify event: %q", event.String()) if event.Op&fsnotify.Remove == fsnotify.Remove { - log.Infof("Reloading Configuration") - if err := 
ReloadConfiguration(ctx, metadataSyncer); err != nil {
-					log.Errorf("failed to reload configuration from: %q. Current configuration unchanged.", cfgPath)
-				} else {
-					log.Infof("Successfully reloaded configuration from: %q", cfgPath)
+				for {
+					reloadConfigErr := ReloadConfiguration(metadataSyncer)
+					if reloadConfigErr == nil {
+						log.Infof("Successfully reloaded configuration from: %q", cfgPath)
+						break
+					}
+					log.Errorf("failed to reload configuration; will retry in 5 seconds. err: %+v", reloadConfigErr)
+					time.Sleep(5 * time.Second)
+				}
 				}
 			case err, ok := <-watcher.Errors:
@@ -325,8 +327,9 @@ func InitMetadataSyncer(ctx context.Context, clusterFlavor cnstypes.CnsClusterFl
 }
 
 // ReloadConfiguration reloads configuration from the secret, and update controller's cached configs
-func ReloadConfiguration(ctx context.Context, metadataSyncer *metadataSyncInformer) error {
-	log := logger.GetLogger(ctx)
+func ReloadConfiguration(metadataSyncer *metadataSyncInformer) error {
+	ctx, log := logger.GetNewContextWithLogger()
+	log.Info("Reloading Configuration")
 	cfg, err := common.GetConfig(ctx)
 	if err != nil {
 		msg := fmt.Sprintf("failed to read config. Error: %+v", err)
@@ -373,7 +376,7 @@ func ReloadConfiguration(ctx context.Context, metadataSyncer *metadataSyncInform
 
 		// Reset virtual center singleton instance by passing reload flag as true
 		log.Info("Obtaining new vCenterInstance using new credentials")
-		vcenter, err = types.GetVirtualCenterInstance(ctx, &types.ConfigInfo{Cfg: cfg}, true)
+		vcenter, err = cnsvsphere.GetVirtualCenterInstance(ctx, &cnsconfig.ConfigurationInfo{Cfg: cfg}, true)
 		if err != nil {
 			msg := fmt.Sprintf("failed to get VirtualCenter. err=%v", err)
 			log.Error(msg)
@@ -382,7 +385,7 @@ func ReloadConfiguration(ctx context.Context, metadataSyncer *metadataSyncInform
 	} else {
 		// If it's not a VC host or VC credentials update, same singleton instance can be used
 		// and it's Config field can be updated
-		vcenter, err = types.GetVirtualCenterInstance(ctx, &types.ConfigInfo{Cfg: cfg}, false)
+		vcenter, err = cnsvsphere.GetVirtualCenterInstance(ctx, &cnsconfig.ConfigurationInfo{Cfg: cfg}, false)
 		if err != nil {
 			msg := fmt.Sprintf("failed to get VirtualCenter.
err=%v", err) log.Error(msg) @@ -395,9 +398,10 @@ func ReloadConfiguration(ctx context.Context, metadataSyncer *metadataSyncInform if metadataSyncer.clusterFlavor == cnstypes.CnsClusterFlavorWorkload { storagepool.ResetVC(ctx, vcenter) } + metadataSyncer.host = newVCConfig.Host } if cfg != nil { - metadataSyncer.configInfo = &types.ConfigInfo{Cfg: cfg} + metadataSyncer.configInfo = &cnsconfig.ConfigurationInfo{Cfg: cfg} log.Infof("updated metadataSyncer.configInfo") } } diff --git a/pkg/syncer/storagepool/service.go b/pkg/syncer/storagepool/service.go index 457e2e3b43..abfd4b4e30 100644 --- a/pkg/syncer/storagepool/service.go +++ b/pkg/syncer/storagepool/service.go @@ -28,11 +28,11 @@ import ( spv1alpha1 "sigs.k8s.io/vsphere-csi-driver/pkg/apis/storagepool/cns/v1alpha1" cnsvsphere "sigs.k8s.io/vsphere-csi-driver/pkg/common/cns-lib/vsphere" + commonconfig "sigs.k8s.io/vsphere-csi-driver/pkg/common/config" "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/common" "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/common/commonco" "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/logger" k8s "sigs.k8s.io/vsphere-csi-driver/pkg/kubernetes" - commontypes "sigs.k8s.io/vsphere-csi-driver/pkg/syncer/types" ) // Service holds the controllers needed to manage StoragePools @@ -50,7 +50,7 @@ var ( // InitStoragePoolService initializes the StoragePool service that updates // vSphere Datastore information into corresponding k8s StoragePool resources. -func InitStoragePoolService(ctx context.Context, configInfo *commontypes.ConfigInfo, coInitParams *interface{}) error { +func InitStoragePoolService(ctx context.Context, configInfo *commonconfig.ConfigurationInfo, coInitParams *interface{}) error { log := logger.GetLogger(ctx) log.Infof("Initializing Storage Pool Service") @@ -74,9 +74,9 @@ func InitStoragePoolService(ctx context.Context, configInfo *commontypes.ConfigI } // Get VC connection - vc, err := commontypes.GetVirtualCenterInstance(ctx, configInfo, false) + vc, err := cnsvsphere.GetVirtualCenterInstance(ctx, configInfo, false) if err != nil { - log.Errorf("Failed to get vCenter from ConfigInfo. Err: %+v", err) + log.Errorf("Failed to get vCenter from vSphereSecretConfigInfo. 
Err: %+v", err) return err } diff --git a/pkg/syncer/syncer_test.go b/pkg/syncer/syncer_test.go index 4541c3772f..7b8f0f1e99 100644 --- a/pkg/syncer/syncer_test.go +++ b/pkg/syncer/syncer_test.go @@ -46,7 +46,6 @@ import ( "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/common" csitypes "sigs.k8s.io/vsphere-csi-driver/pkg/csi/types" k8s "sigs.k8s.io/vsphere-csi-driver/pkg/kubernetes" - "sigs.k8s.io/vsphere-csi-driver/pkg/syncer/types" ) const ( @@ -196,7 +195,7 @@ func TestSyncerWorkflows(t *testing.T) { // Initialize metadata syncer object metadataSyncer = &metadataSyncInformer{} - configInfo := &types.ConfigInfo{} + configInfo := &cnsconfig.ConfigurationInfo{} configInfo.Cfg = config metadataSyncer.configInfo = configInfo metadataSyncer.volumeManager = volumes.GetManager(ctx, virtualCenter) diff --git a/pkg/syncer/types.go b/pkg/syncer/types.go index e128e2f276..d75baab982 100644 --- a/pkg/syncer/types.go +++ b/pkg/syncer/types.go @@ -26,9 +26,9 @@ import ( corelisters "k8s.io/client-go/listers/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" volumes "sigs.k8s.io/vsphere-csi-driver/pkg/common/cns-lib/volume" + commonconfig "sigs.k8s.io/vsphere-csi-driver/pkg/common/config" "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/common/commonco" k8s "sigs.k8s.io/vsphere-csi-driver/pkg/kubernetes" - "sigs.k8s.io/vsphere-csi-driver/pkg/syncer/types" ) // Version of the syncer. This should be set via ldflags. @@ -95,7 +95,7 @@ type metadataSyncInformer struct { host string cnsOperatorClient client.Client supervisorClient clientset.Interface - configInfo *types.ConfigInfo + configInfo *commonconfig.ConfigurationInfo k8sInformerManager *k8s.InformerManager pvLister corelisters.PersistentVolumeLister pvcLister corelisters.PersistentVolumeClaimLister diff --git a/pkg/syncer/types/commontypes.go b/pkg/syncer/types/commontypes.go deleted file mode 100644 index c8f6ca4237..0000000000 --- a/pkg/syncer/types/commontypes.go +++ /dev/null @@ -1,105 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package types - -import ( - "context" - "sync" - - "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/common" - "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/logger" - - cnsvsphere "sigs.k8s.io/vsphere-csi-driver/pkg/common/cns-lib/vsphere" - cnsconfig "sigs.k8s.io/vsphere-csi-driver/pkg/common/config" -) - -// ConfigInfo is a struct that used to capture config param details -type ConfigInfo struct { - Cfg *cnsconfig.Config -} - -var ( - // VirtualCenter instance for syncer - vCenterInstance *cnsvsphere.VirtualCenter - // Ensure vcenter is a singleton - vCenterInitialized bool - // vCenterInstanceLock is used for handling race conditions while initializing a vCenter instance - vCenterInstanceLock = &sync.RWMutex{} -) - -// InitConfigInfo initializes the ConfigInfo struct -func InitConfigInfo(ctx context.Context) (*ConfigInfo, error) { - log := logger.GetLogger(ctx) - cfg, err := common.GetConfig(ctx) - if err != nil { - log.Errorf("failed to read config. 
Error: %+v", err)
-		return nil, err
-	}
-	configInfo := &ConfigInfo{
-		cfg,
-	}
-	return configInfo, nil
-}
-
-// GetVirtualCenterInstance returns the vcenter object singleton.
-// It is thread safe.
-// Takes in a boolean paramater reloadConfig.
-// If reloadConfig is true, the vcenter object is instantiated again and the old object becomes eligible for garbage collection.
-// If reloadConfig is false and instance was already initialized, the previous instance is returned.
-func GetVirtualCenterInstance(ctx context.Context, configTypes *ConfigInfo, reloadConfig bool) (*cnsvsphere.VirtualCenter, error) {
-	log := logger.GetLogger(ctx)
-	vCenterInstanceLock.Lock()
-	defer vCenterInstanceLock.Unlock()
-
-	if !vCenterInitialized || reloadConfig {
-		log.Infof("Initializing new vCenterInstance.")
-
-		var vcconfig *cnsvsphere.VirtualCenterConfig
-		vcconfig, err := cnsvsphere.GetVirtualCenterConfig(ctx, configTypes.Cfg)
-		if err != nil {
-			log.Errorf("failed to get VirtualCenterConfig. Err: %+v", err)
-			return nil, err
-		}
-
-		// Initialize the virtual center manager
-		virtualcentermanager := cnsvsphere.GetVirtualCenterManager(ctx)
-
-		//Unregister all VCs from virtual center manager
-		if err = virtualcentermanager.UnregisterAllVirtualCenters(ctx); err != nil {
-			log.Errorf("failed to unregister vcenter with virtualCenterManager.")
-			return nil, err
-		}
-
-		// Register with virtual center manager
-		vCenterInstance, err = virtualcentermanager.RegisterVirtualCenter(ctx, vcconfig)
-		if err != nil {
-			log.Errorf("failed to register VirtualCenter . Err: %+v", err)
-			return nil, err
-		}
-
-		// Connect to VC
-		err = vCenterInstance.Connect(ctx)
-		if err != nil {
-			log.Errorf("failed to connect to VirtualCenter host: %q. Err: %+v", vcconfig.Host, err)
-			return nil, err
-		}
-
-		vCenterInitialized = true
-		log.Info("vCenterInstance initialized")
-	}
-	return vCenterInstance, nil
-}

From 9ab6567cffade3c0daad42e830a2b0c0947d0e21 Mon Sep 17 00:00:00 2001
From: Shalini Bhaskara
Date: Fri, 19 Mar 2021 17:03:32 -0700
Subject: [PATCH 31/36] VC check for online volume expansion workflow

---
 .../vsphere-csi-controller-deployment.yaml    |  5 ++++
 .../vsphere-csi-controller-deployment.yaml    |  5 +++-
 .../vsphere-csi-controller-deployment.yaml    |  5 +++-
 .../cns-lib/vsphere/virtualcentermanager.go   | 22 ++++++++++++++++
 .../common/common_controller_helper.go        |  2 +-
 pkg/csi/service/vanilla/controller.go         |  9 ++++++-
 pkg/csi/service/vanilla/controller_helper.go  | 25 +++++++++++--------
 7 files changed, 58 insertions(+), 15 deletions(-)

diff --git a/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-controller-deployment.yaml b/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-controller-deployment.yaml
index 261bc00786..43c1ba60fb 100644
--- a/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-controller-deployment.yaml
+++ b/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-controller-deployment.yaml
@@ -160,6 +160,11 @@ apiVersion: v1
 data:
   "csi-migration": "false"
   "csi-auth-check": "true"
+  # Internal FSS for online volume expansion is enabled in the v2.2.0 version of the driver.
+  # The vSphere version needs to be at least 7.0U2 for this feature to work.
+  # If the vSphere version is lower, the online volume expansion will fail
+  # with an appropriate error message at runtime.
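+  # The version gate itself is implemented in this patch as
+  # VirtualCenterManager.IsOnlineExtendVolumeSupported
+  # (pkg/common/cns-lib/vsphere/virtualcentermanager.go), which rejects online
+  # resize requests against vCenter releases older than 7.0U2 at request time.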
+ "online-volume-extend": "true" kind: ConfigMap metadata: name: internal-feature-states.csi.vsphere.vmware.com diff --git a/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-controller-deployment.yaml b/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-controller-deployment.yaml index 20ac8476bd..2ca1a3fd39 100644 --- a/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-controller-deployment.yaml @@ -56,6 +56,9 @@ spec: - "--kube-api-qps=100" - "--kube-api-burst=100" - "--leader-election" + # Uncomment the line below to increase performance of online + # resize in vSphere versions 7.0U2 or above + # - "--handle-volume-inuse-error=false" env: - name: ADDRESS value: /csi/csi.sock @@ -176,7 +179,7 @@ apiVersion: v1 data: "csi-migration": "false" "csi-auth-check": "true" - "online-volume-extend": "false" + "online-volume-extend": "true" kind: ConfigMap metadata: name: internal-feature-states.csi.vsphere.vmware.com diff --git a/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-controller-deployment.yaml b/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-controller-deployment.yaml index 6bc5c0401a..789c5e5651 100644 --- a/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-controller-deployment.yaml @@ -56,6 +56,9 @@ spec: - "--kube-api-qps=100" - "--kube-api-burst=100" - "--leader-election" + # Uncomment the line below to increase performance of online + # resize in vSphere versions 7.0U2 or above + # - "--handle-volume-inuse-error=false" env: - name: ADDRESS value: /csi/csi.sock @@ -176,7 +179,7 @@ apiVersion: v1 data: "csi-migration": "false" # csi-migration feature is only available for vSphere 7.0U1 "csi-auth-check": "true" - "online-volume-extend": "false" + "online-volume-extend": "true" kind: ConfigMap metadata: name: internal-feature-states.csi.vsphere.vmware.com diff --git a/pkg/common/cns-lib/vsphere/virtualcentermanager.go b/pkg/common/cns-lib/vsphere/virtualcentermanager.go index ed34aab087..f8fc0778c3 100644 --- a/pkg/common/cns-lib/vsphere/virtualcentermanager.go +++ b/pkg/common/cns-lib/vsphere/virtualcentermanager.go @@ -21,6 +21,8 @@ import ( "errors" "sync" + "github.com/vmware/govmomi/cns" + "sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/logger" ) @@ -52,6 +54,8 @@ type VirtualCenterManager interface { IsvSANFileServicesSupported(ctx context.Context, host string) (bool, error) // IsExtendVolumeSupported checks if extend volume is supported or not. IsExtendVolumeSupported(ctx context.Context, host string) (bool, error) + // IsOnlineExtendVolumeSupported checks if online extend volume is supported or not on the vCenter Host + IsOnlineExtendVolumeSupported(ctx context.Context, host string) (bool, error) } var ( @@ -170,3 +174,21 @@ func (m *defaultVirtualCenterManager) IsExtendVolumeSupported(ctx context.Contex } return !is67u3Release, nil } + +// IsOnlineExtendVolumeSupported checks if online extend volume is supported or not. +func (m *defaultVirtualCenterManager) IsOnlineExtendVolumeSupported(ctx context.Context, host string) (bool, error) { + log := logger.GetLogger(ctx) + + // Get VC instance + vcenter, err := m.GetVirtualCenter(ctx, host) + if err != nil { + log.Errorf("Failed to get vCenter. 
Err: %v", err) + return false, err + } + vCenterVersion := vcenter.Client.Version + if vCenterVersion != cns.ReleaseVSAN67u3 && vCenterVersion != cns.ReleaseVSAN70 && vCenterVersion != cns.ReleaseVSAN70u1 { + return true, nil + } + log.Infof("Online volume expansion is not supported on vCenter version %q", vCenterVersion) + return false, nil +} diff --git a/pkg/csi/service/common/common_controller_helper.go b/pkg/csi/service/common/common_controller_helper.go index 897fdbe604..99ebe7c07d 100644 --- a/pkg/csi/service/common/common_controller_helper.go +++ b/pkg/csi/service/common/common_controller_helper.go @@ -189,7 +189,7 @@ func IsOnlineExpansion(ctx context.Context, volumeID string, nodes []*cnsvsphere log.Error(msg) return status.Errorf(codes.Internal, msg) } else if diskUUID != "" { - msg := fmt.Sprintf("failed to expand volume: %q. Volume is attached to node. Only offline volume expansion is supported", volumeID) + msg := fmt.Sprintf("failed to expand volume: %q. Volume is attached to node. Online volume expansion is not supported in this version", volumeID) log.Error(msg) return status.Errorf(codes.FailedPrecondition, msg) } diff --git a/pkg/csi/service/vanilla/controller.go b/pkg/csi/service/vanilla/controller.go index d319a1986b..3b0c520475 100644 --- a/pkg/csi/service/vanilla/controller.go +++ b/pkg/csi/service/vanilla/controller.go @@ -946,8 +946,15 @@ func (c *controller) ControllerExpandVolume(ctx context.Context, req *csi.Contro log.Error(msg) return nil, status.Errorf(codes.Unimplemented, msg) } + + isOnlineExpansionSupported, err := c.manager.VcenterManager.IsOnlineExtendVolumeSupported(ctx, c.manager.VcenterConfig.Host) + if err != nil { + msg := fmt.Sprintf("failed to check if online expansion is supported due to error: %v", err) + log.Error(msg) + return nil, status.Errorf(codes.Internal, msg) + } isOnlineExpansionEnabled := commonco.ContainerOrchestratorUtility.IsFSSEnabled(ctx, common.OnlineVolumeExtend) - err := validateVanillaControllerExpandVolumeRequest(ctx, req, isOnlineExpansionEnabled) + err = validateVanillaControllerExpandVolumeRequest(ctx, req, isOnlineExpansionEnabled, isOnlineExpansionSupported) if err != nil { msg := fmt.Sprintf("validation for ExpandVolume Request: %+v has failed. Error: %v", *req, err) log.Error(msg) diff --git a/pkg/csi/service/vanilla/controller_helper.go b/pkg/csi/service/vanilla/controller_helper.go index 76379318b7..7d0ba01ae0 100644 --- a/pkg/csi/service/vanilla/controller_helper.go +++ b/pkg/csi/service/vanilla/controller_helper.go @@ -53,21 +53,24 @@ func validateVanillaControllerUnpublishVolumeRequest(ctx context.Context, req *c // ExpandVolumeRequest for Vanilla CSI driver. // Function returns error if validation fails otherwise returns nil. func validateVanillaControllerExpandVolumeRequest(ctx context.Context, req *csi.ControllerExpandVolumeRequest, - isOnlineExpansionEnabled bool) error { + isOnlineExpansionEnabled, isOnlineExpansionSupported bool) error { log := logger.GetLogger(ctx) if err := common.ValidateControllerExpandVolumeRequest(ctx, req); err != nil { return err } - if !isOnlineExpansionEnabled { - nodeManager := node.GetManager(ctx) - nodes, err := nodeManager.GetAllNodes(ctx) - if err != nil { - msg := fmt.Sprintf("failed to find VirtualMachines for all registered nodes. 
Error: %v", err) - log.Error(msg) - return status.Error(codes.Internal, msg) - } - return common.IsOnlineExpansion(ctx, req.GetVolumeId(), nodes) + // Check online extend FSS and vCenter support + if isOnlineExpansionEnabled && isOnlineExpansionSupported { + return nil } - return nil + + // Check if it is an online expansion scenario and raise error + nodeManager := node.GetManager(ctx) + nodes, err := nodeManager.GetAllNodes(ctx) + if err != nil { + msg := fmt.Sprintf("failed to find VirtualMachines for all registered nodes. Error: %v", err) + log.Error(msg) + return status.Error(codes.Internal, msg) + } + return common.IsOnlineExpansion(ctx, req.GetVolumeId(), nodes) } From 61ed203554679fc00ef2865e4dd8a3dea573875a Mon Sep 17 00:00:00 2001 From: Shalini Bhaskara Date: Mon, 22 Mar 2021 19:23:07 -0700 Subject: [PATCH 32/36] Fail gracefully if volume expansion is attempted on vSphere 6.7U3 --- pkg/csi/service/vanilla/controller.go | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/pkg/csi/service/vanilla/controller.go b/pkg/csi/service/vanilla/controller.go index 3b0c520475..d0a4c6fb06 100644 --- a/pkg/csi/service/vanilla/controller.go +++ b/pkg/csi/service/vanilla/controller.go @@ -947,6 +947,17 @@ func (c *controller) ControllerExpandVolume(ctx context.Context, req *csi.Contro return nil, status.Errorf(codes.Unimplemented, msg) } + isExtendSupported, err := c.manager.VcenterManager.IsExtendVolumeSupported(ctx, c.manager.VcenterConfig.Host) + if err != nil { + log.Errorf("failed to verify if extend volume is supported or not. Error: %+v", err) + return nil, status.Error(codes.Internal, err.Error()) + } + if !isExtendSupported { + msg := "Volume Expansion is not supported in this vSphere release. Kindly upgrade to vSphere 7.0 for offline expansion and vSphere 7.0U2 for online expansion support." + log.Error(msg) + return nil, status.Error(codes.Internal, msg) + } + isOnlineExpansionSupported, err := c.manager.VcenterManager.IsOnlineExtendVolumeSupported(ctx, c.manager.VcenterConfig.Host) if err != nil { msg := fmt.Sprintf("failed to check if online expansion is supported due to error: %v", err) @@ -1045,20 +1056,12 @@ func (c *controller) ControllerGetCapabilities(ctx context.Context, req *csi.Con log := logger.GetLogger(ctx) log.Infof("ControllerGetCapabilities: called with args %+v", *req) - isExtendSupported, err := c.manager.VcenterManager.IsExtendVolumeSupported(ctx, c.manager.VcenterConfig.Host) - if err != nil { - log.Errorf("failed to verify if extend volume is supported or not. 
Error:%+v", err) - return nil, status.Error(codes.FailedPrecondition, err.Error()) - } controllerCaps := []csi.ControllerServiceCapability_RPC_Type{ csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME, + csi.ControllerServiceCapability_RPC_EXPAND_VOLUME, } - if isExtendSupported { - log.Debug("Adding extend volume capability to default capabilities") - controllerCaps = append(controllerCaps, - csi.ControllerServiceCapability_RPC_EXPAND_VOLUME) - } + var caps []*csi.ControllerServiceCapability for _, cap := range controllerCaps { c := &csi.ControllerServiceCapability{ From 1f802dcd99e7198a577eb1075db0cbe394753de7 Mon Sep 17 00:00:00 2001 From: Shalini Bhaskara Date: Mon, 22 Mar 2021 22:03:38 -0700 Subject: [PATCH 33/36] Update RC images --- .../deploy/vsphere-csi-controller-deployment.yaml | 4 ++-- manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-node-ds.yaml | 2 +- .../vsphere-7.0/deploy/vsphere-csi-controller-deployment.yaml | 4 ++-- manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-node-ds.yaml | 2 +- manifests/v2.2.0/vsphere-7.0u1/deploy/validatingwebhook.yaml | 2 +- .../deploy/vsphere-csi-controller-deployment.yaml | 4 ++-- .../v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-node-ds.yaml | 2 +- manifests/v2.2.0/vsphere-7.0u2/deploy/validatingwebhook.yaml | 2 +- .../deploy/vsphere-csi-controller-deployment.yaml | 4 ++-- .../v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-node-ds.yaml | 2 +- 10 files changed, 14 insertions(+), 14 deletions(-) diff --git a/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-controller-deployment.yaml b/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-controller-deployment.yaml index 43c1ba60fb..0826e6de25 100644 --- a/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-controller-deployment.yaml @@ -47,7 +47,7 @@ spec: - mountPath: /csi name: socket-dir - name: vsphere-csi-controller - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.1 + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.2 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" @@ -101,7 +101,7 @@ spec: - name: socket-dir mountPath: /csi - name: vsphere-syncer - image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc.1 + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc.2 args: - "--leader-election" - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" diff --git a/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-node-ds.yaml b/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-node-ds.yaml index 7031fdcfbe..945add2bbf 100644 --- a/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-node-ds.yaml +++ b/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-node-ds.yaml @@ -47,7 +47,7 @@ spec: initialDelaySeconds: 5 timeoutSeconds: 5 - name: vsphere-csi-node - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.1 + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.2 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" diff --git a/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-controller-deployment.yaml b/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-controller-deployment.yaml index 2ca1a3fd39..ff4f9e768b 100644 --- a/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-controller-deployment.yaml @@ -66,7 
+66,7 @@ spec: - mountPath: /csi name: socket-dir - name: vsphere-csi-controller - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.1 + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.2 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" @@ -120,7 +120,7 @@ spec: - name: socket-dir mountPath: /csi - name: vsphere-syncer - image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc.1 + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc.2 args: - "--leader-election" - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" diff --git a/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-node-ds.yaml b/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-node-ds.yaml index 878857508d..288f5d7c9c 100644 --- a/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-node-ds.yaml +++ b/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-node-ds.yaml @@ -47,7 +47,7 @@ spec: initialDelaySeconds: 5 timeoutSeconds: 5 - name: vsphere-csi-node - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.1 + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.2 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" diff --git a/manifests/v2.2.0/vsphere-7.0u1/deploy/validatingwebhook.yaml b/manifests/v2.2.0/vsphere-7.0u1/deploy/validatingwebhook.yaml index bd2fdb268a..04984ba8d6 100644 --- a/manifests/v2.2.0/vsphere-7.0u1/deploy/validatingwebhook.yaml +++ b/manifests/v2.2.0/vsphere-7.0u1/deploy/validatingwebhook.yaml @@ -99,7 +99,7 @@ spec: dnsPolicy: "Default" containers: - name: vsphere-webhook - image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc.1 + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc.2 args: - "--operation-mode=WEBHOOK_SERVER" - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" diff --git a/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-controller-deployment.yaml b/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-controller-deployment.yaml index 789c5e5651..410f2c94b1 100644 --- a/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-controller-deployment.yaml @@ -66,7 +66,7 @@ spec: - mountPath: /csi name: socket-dir - name: vsphere-csi-controller - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.1 + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.2 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" @@ -120,7 +120,7 @@ spec: - name: socket-dir mountPath: /csi - name: vsphere-syncer - image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc.1 + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc.2 args: - "--leader-election" - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" diff --git a/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-node-ds.yaml b/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-node-ds.yaml index 5db384a231..1f59aff229 100644 --- a/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-node-ds.yaml +++ b/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-node-ds.yaml @@ -46,7 +46,7 @@ spec: initialDelaySeconds: 5 timeoutSeconds: 5 - name: vsphere-csi-node - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.1 + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.2 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - 
"--fss-namespace=$(CSI_NAMESPACE)" diff --git a/manifests/v2.2.0/vsphere-7.0u2/deploy/validatingwebhook.yaml b/manifests/v2.2.0/vsphere-7.0u2/deploy/validatingwebhook.yaml index bd2fdb268a..04984ba8d6 100644 --- a/manifests/v2.2.0/vsphere-7.0u2/deploy/validatingwebhook.yaml +++ b/manifests/v2.2.0/vsphere-7.0u2/deploy/validatingwebhook.yaml @@ -99,7 +99,7 @@ spec: dnsPolicy: "Default" containers: - name: vsphere-webhook - image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc.1 + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc.2 args: - "--operation-mode=WEBHOOK_SERVER" - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" diff --git a/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-controller-deployment.yaml b/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-controller-deployment.yaml index d448969299..c8581d0f6c 100644 --- a/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-controller-deployment.yaml @@ -63,7 +63,7 @@ spec: - mountPath: /csi name: socket-dir - name: vsphere-csi-controller - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.1 + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.2 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" @@ -117,7 +117,7 @@ spec: - name: socket-dir mountPath: /csi - name: vsphere-syncer - image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc.1 + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc.2 args: - "--leader-election" - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" diff --git a/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-node-ds.yaml b/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-node-ds.yaml index e006844865..292f7e62ec 100644 --- a/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-node-ds.yaml +++ b/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-node-ds.yaml @@ -47,7 +47,7 @@ spec: initialDelaySeconds: 5 timeoutSeconds: 5 - name: vsphere-csi-node - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.1 + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.2 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" From d1605f0a7fa0a37b4e1eebffe396194209250cec Mon Sep 17 00:00:00 2001 From: Sandeep Pissay Srinivasa Rao Date: Tue, 23 Mar 2021 14:36:35 -0700 Subject: [PATCH 34/36] Replacing the usage of CNS Query with QueryAll(with selection) to avoid SPBM workflows. --- pkg/csi/service/common/vsphereutil.go | 8 +++++++- pkg/csi/service/vanilla/controller.go | 16 ++++++++++++++-- pkg/syncer/metadatasyncer.go | 6 ++++-- 3 files changed, 25 insertions(+), 5 deletions(-) diff --git a/pkg/csi/service/common/vsphereutil.go b/pkg/csi/service/common/vsphereutil.go index dbbc875522..90833f1e5c 100644 --- a/pkg/csi/service/common/vsphereutil.go +++ b/pkg/csi/service/common/vsphereutil.go @@ -553,7 +553,13 @@ func isExpansionRequired(ctx context.Context, volumeID string, requestedSize int queryFilter := cnstypes.CnsQueryFilter{ VolumeIds: volumeIds, } - queryResult, err := manager.VolumeManager.QueryVolume(ctx, queryFilter) + querySelection := cnstypes.CnsQuerySelection{ + Names: []string{ + string(cnstypes.QuerySelectionNameTypeBackingObjectDetails), + }, + } + // Query only the backing object details. 
+	queryResult, err := manager.VolumeManager.QueryAllVolume(ctx, queryFilter, querySelection)
 	if err != nil {
 		log.Errorf("failed to call QueryVolume for volumeID: %q: %v", volumeID, err)
 		return false, err
diff --git a/pkg/csi/service/vanilla/controller.go b/pkg/csi/service/vanilla/controller.go
index d0a4c6fb06..b5bd2cd0a8 100644
--- a/pkg/csi/service/vanilla/controller.go
+++ b/pkg/csi/service/vanilla/controller.go
@@ -751,7 +751,13 @@ func (c *controller) ControllerPublishVolume(ctx context.Context, req *csi.Contr
 	queryFilter := cnstypes.CnsQueryFilter{
 		VolumeIds: []cnstypes.CnsVolumeId{{Id: req.VolumeId}},
 	}
-	queryResult, err := c.manager.VolumeManager.QueryVolume(ctx, queryFilter)
+	querySelection := cnstypes.CnsQuerySelection{
+		Names: []string{
+			string(cnstypes.QuerySelectionNameTypeBackingObjectDetails),
+		},
+	}
+	// Select only the backing object details.
+	queryResult, err := c.manager.VolumeManager.QueryAllVolume(ctx, queryFilter, querySelection)
 	if err != nil {
 		msg := fmt.Sprintf("QueryVolume failed for volumeID: %q. %+v", req.VolumeId, err.Error())
 		log.Error(msg)
@@ -860,7 +866,13 @@ func (c *controller) ControllerUnpublishVolume(ctx context.Context, req *csi.Con
 	queryFilter := cnstypes.CnsQueryFilter{
 		VolumeIds: []cnstypes.CnsVolumeId{{Id: req.VolumeId}},
 	}
-	queryResult, err := c.manager.VolumeManager.QueryVolume(ctx, queryFilter)
+	querySelection := cnstypes.CnsQuerySelection{
+		Names: []string{
+			string(cnstypes.QuerySelectionNameTypeVolumeType),
+		},
+	}
+	// Select only the volume type.
+	queryResult, err := c.manager.VolumeManager.QueryAllVolume(ctx, queryFilter, querySelection)
 	if err != nil {
 		msg := fmt.Sprintf("QueryVolume failed for volumeID: %q. %+v", req.VolumeId, err.Error())
 		log.Error(msg)
diff --git a/pkg/syncer/metadatasyncer.go b/pkg/syncer/metadatasyncer.go
index 5849d58aac..4b387a8c69 100644
--- a/pkg/syncer/metadatasyncer.go
+++ b/pkg/syncer/metadatasyncer.go
@@ -755,7 +755,8 @@ func csiPVCUpdated(ctx context.Context, pvc *v1.PersistentVolumeClaim, pv *v1.Pe
 	queryFilter := cnstypes.CnsQueryFilter{
 		VolumeIds: []cnstypes.CnsVolumeId{{Id: volumeHandle}},
 	}
-	queryResult, err := metadataSyncer.volumeManager.QueryVolume(ctx, queryFilter)
+	// Query with empty selection. CNS returns only the volume ID from its cache.
+	queryResult, err := metadataSyncer.volumeManager.QueryAllVolume(ctx, queryFilter, cnstypes.CnsQuerySelection{})
 	if err != nil {
 		log.Warnf("PVCUpdated: Failed to query volume metadata for volume %q with error %+v", volumeHandle, err)
 		return false, err
@@ -897,7 +898,8 @@ func csiPVUpdated(ctx context.Context, newPv *v1.PersistentVolume, oldPv *v1.Per
 	}
 	volumeOperationsLock.Lock()
 	defer volumeOperationsLock.Unlock()
-	queryResult, err := metadataSyncer.volumeManager.QueryVolume(ctx, queryFilter)
+	// QueryAll with no selection will return only the volume ID.
+	queryResult, err := metadataSyncer.volumeManager.QueryAllVolume(ctx, queryFilter, cnstypes.CnsQuerySelection{})
 	if err != nil {
 		log.Errorf("PVUpdated: QueryVolume failed.
error: %+v", err) return From 6fd0a65be51c70368c22397ca3bc050435bc9af7 Mon Sep 17 00:00:00 2001 From: Divyen Patel Date: Wed, 24 Mar 2021 21:36:01 -0700 Subject: [PATCH 35/36] update images tag to v2.2.0-rc.3 --- .../deploy/vsphere-csi-controller-deployment.yaml | 4 ++-- manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-node-ds.yaml | 2 +- .../vsphere-7.0/deploy/vsphere-csi-controller-deployment.yaml | 4 ++-- manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-node-ds.yaml | 2 +- manifests/v2.2.0/vsphere-7.0u1/deploy/validatingwebhook.yaml | 2 +- .../deploy/vsphere-csi-controller-deployment.yaml | 4 ++-- .../v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-node-ds.yaml | 2 +- manifests/v2.2.0/vsphere-7.0u2/deploy/validatingwebhook.yaml | 2 +- .../deploy/vsphere-csi-controller-deployment.yaml | 4 ++-- .../v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-node-ds.yaml | 2 +- 10 files changed, 14 insertions(+), 14 deletions(-) diff --git a/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-controller-deployment.yaml b/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-controller-deployment.yaml index 0826e6de25..9b1ccbf681 100644 --- a/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-controller-deployment.yaml @@ -47,7 +47,7 @@ spec: - mountPath: /csi name: socket-dir - name: vsphere-csi-controller - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.2 + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.3 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" @@ -101,7 +101,7 @@ spec: - name: socket-dir mountPath: /csi - name: vsphere-syncer - image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc.2 + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc.3 args: - "--leader-election" - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" diff --git a/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-node-ds.yaml b/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-node-ds.yaml index 945add2bbf..424608ec0a 100644 --- a/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-node-ds.yaml +++ b/manifests/v2.2.0/vsphere-67u3/deploy/vsphere-csi-node-ds.yaml @@ -47,7 +47,7 @@ spec: initialDelaySeconds: 5 timeoutSeconds: 5 - name: vsphere-csi-node - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.2 + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.3 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" diff --git a/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-controller-deployment.yaml b/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-controller-deployment.yaml index ff4f9e768b..118834080e 100644 --- a/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-controller-deployment.yaml @@ -66,7 +66,7 @@ spec: - mountPath: /csi name: socket-dir - name: vsphere-csi-controller - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.2 + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.3 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" @@ -120,7 +120,7 @@ spec: - name: socket-dir mountPath: /csi - name: vsphere-syncer - image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc.2 + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc.3 args: - "--leader-election" - 
"--fss-name=internal-feature-states.csi.vsphere.vmware.com" diff --git a/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-node-ds.yaml b/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-node-ds.yaml index 288f5d7c9c..ca44465e14 100644 --- a/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-node-ds.yaml +++ b/manifests/v2.2.0/vsphere-7.0/deploy/vsphere-csi-node-ds.yaml @@ -47,7 +47,7 @@ spec: initialDelaySeconds: 5 timeoutSeconds: 5 - name: vsphere-csi-node - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.2 + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.3 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" diff --git a/manifests/v2.2.0/vsphere-7.0u1/deploy/validatingwebhook.yaml b/manifests/v2.2.0/vsphere-7.0u1/deploy/validatingwebhook.yaml index 04984ba8d6..c073b25748 100644 --- a/manifests/v2.2.0/vsphere-7.0u1/deploy/validatingwebhook.yaml +++ b/manifests/v2.2.0/vsphere-7.0u1/deploy/validatingwebhook.yaml @@ -99,7 +99,7 @@ spec: dnsPolicy: "Default" containers: - name: vsphere-webhook - image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc.2 + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc.3 args: - "--operation-mode=WEBHOOK_SERVER" - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" diff --git a/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-controller-deployment.yaml b/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-controller-deployment.yaml index 410f2c94b1..cc48adce2c 100644 --- a/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-controller-deployment.yaml @@ -66,7 +66,7 @@ spec: - mountPath: /csi name: socket-dir - name: vsphere-csi-controller - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.2 + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.3 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" @@ -120,7 +120,7 @@ spec: - name: socket-dir mountPath: /csi - name: vsphere-syncer - image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc.2 + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc.3 args: - "--leader-election" - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" diff --git a/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-node-ds.yaml b/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-node-ds.yaml index 1f59aff229..211c0ae4d0 100644 --- a/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-node-ds.yaml +++ b/manifests/v2.2.0/vsphere-7.0u1/deploy/vsphere-csi-node-ds.yaml @@ -46,7 +46,7 @@ spec: initialDelaySeconds: 5 timeoutSeconds: 5 - name: vsphere-csi-node - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.2 + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.3 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" diff --git a/manifests/v2.2.0/vsphere-7.0u2/deploy/validatingwebhook.yaml b/manifests/v2.2.0/vsphere-7.0u2/deploy/validatingwebhook.yaml index 04984ba8d6..c073b25748 100644 --- a/manifests/v2.2.0/vsphere-7.0u2/deploy/validatingwebhook.yaml +++ b/manifests/v2.2.0/vsphere-7.0u2/deploy/validatingwebhook.yaml @@ -99,7 +99,7 @@ spec: dnsPolicy: "Default" containers: - name: vsphere-webhook - image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc.2 + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc.3 args: - 
"--operation-mode=WEBHOOK_SERVER" - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" diff --git a/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-controller-deployment.yaml b/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-controller-deployment.yaml index c8581d0f6c..f001e37ae9 100644 --- a/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-controller-deployment.yaml +++ b/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-controller-deployment.yaml @@ -63,7 +63,7 @@ spec: - mountPath: /csi name: socket-dir - name: vsphere-csi-controller - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.2 + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.3 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" @@ -117,7 +117,7 @@ spec: - name: socket-dir mountPath: /csi - name: vsphere-syncer - image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc.2 + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.2.0-rc.3 args: - "--leader-election" - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" diff --git a/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-node-ds.yaml b/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-node-ds.yaml index 292f7e62ec..6eaa36031c 100644 --- a/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-node-ds.yaml +++ b/manifests/v2.2.0/vsphere-7.0u2/deploy/vsphere-csi-node-ds.yaml @@ -47,7 +47,7 @@ spec: initialDelaySeconds: 5 timeoutSeconds: 5 - name: vsphere-csi-node - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.2 + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.2.0-rc.3 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" From 1b2276903ebe45bbf050352d564df2e6213c5000 Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Thu, 8 Apr 2021 22:39:14 -0400 Subject: [PATCH 36/36] Add new vendored dependencies --- go.sum | 70 - vendor/github.com/google/uuid/README.md | 2 +- vendor/github.com/google/uuid/hash.go | 4 +- vendor/github.com/google/uuid/marshal.go | 7 +- vendor/github.com/google/uuid/sql.go | 2 +- vendor/github.com/google/uuid/uuid.go | 10 +- vendor/github.com/google/uuid/version1.go | 12 +- vendor/github.com/google/uuid/version4.go | 15 +- vendor/github.com/onsi/ginkgo/.travis.yml | 11 +- vendor/github.com/onsi/ginkgo/CHANGELOG.md | 17 + vendor/github.com/onsi/ginkgo/README.md | 2 +- .../github.com/onsi/ginkgo/config/config.go | 33 +- vendor/github.com/onsi/ginkgo/ginkgo_dsl.go | 18 +- vendor/github.com/onsi/ginkgo/go.mod | 4 +- vendor/github.com/onsi/ginkgo/go.sum | 39 +- .../remote/output_interceptor_darwin.go | 11 - .../remote/output_interceptor_dragonfly.go | 11 - .../remote/output_interceptor_freebsd.go | 11 - .../remote/output_interceptor_linux.go | 12 - .../output_interceptor_linux_mips64le.go | 12 - .../remote/output_interceptor_netbsd.go | 11 - .../remote/output_interceptor_openbsd.go | 11 - .../remote/output_interceptor_solaris.go | 11 - .../remote/output_interceptor_unix.go | 7 +- .../onsi/ginkgo/internal/spec/specs.go | 20 +- .../onsi/ginkgo/internal/suite/suite.go | 2 +- .../internal/testingtproxy/testing_t_proxy.go | 42 +- vendor/github.com/onsi/gomega/.travis.yml | 5 +- vendor/github.com/onsi/gomega/CHANGELOG.md | 24 + .../github.com/onsi/gomega/format/format.go | 8 +- vendor/github.com/onsi/gomega/go.mod | 5 +- vendor/github.com/onsi/gomega/go.sum | 14 +- vendor/github.com/onsi/gomega/gomega_dsl.go | 35 +- .../onsi/gomega/matchers/consist_of.go | 53 +- 
.../matchers/contain_elements_matcher.go | 4 +- .../gomega/matchers/match_error_matcher.go | 4 +- vendor/github.com/rexray/gocsi/go.mod | 2 +- vendor/github.com/rexray/gocsi/go.sum | 4 +- vendor/github.com/spf13/cobra/.golangci.yml | 48 + vendor/github.com/spf13/cobra/.travis.yml | 9 +- vendor/github.com/spf13/cobra/CHANGELOG.md | 35 +- vendor/github.com/spf13/cobra/CONDUCT.md | 37 + vendor/github.com/spf13/cobra/Makefile | 18 +- vendor/github.com/spf13/cobra/README.md | 32 +- .../spf13/cobra/bash_completions.go | 133 +- .../spf13/cobra/bash_completions.md | 2 +- vendor/github.com/spf13/cobra/cobra.go | 15 + vendor/github.com/spf13/cobra/command.go | 116 +- .../spf13/cobra/custom_completions.go | 4 +- .../spf13/cobra/fish_completions.go | 6 +- vendor/github.com/spf13/cobra/go.mod | 2 +- vendor/github.com/spf13/cobra/go.sum | 4 +- .../spf13/cobra/powershell_completions.go | 323 +- .../spf13/cobra/powershell_completions.md | 15 +- .../spf13/cobra/projects_using_cobra.md | 3 + .../spf13/cobra/shell_completions.md | 119 +- .../github.com/spf13/cobra/zsh_completions.go | 4 +- vendor/github.com/spf13/viper/util.go | 13 +- vendor/github.com/spf13/viper/viper.go | 24 +- .../github.com/vmware/govmomi/.goreleaser.yml | 2 + .../github.com/vmware/govmomi/cns/client.go | 1 + .../github.com/vmware/govmomi/cns/cns_util.go | 30 +- .../vmware/govmomi/object/virtual_machine.go | 87 +- .../github.com/vmware/govmomi/ovf/envelope.go | 8 + .../vmware/govmomi/session/manager.go | 11 + .../simulator/cluster_compute_resource.go | 47 + .../vmware/govmomi/simulator/snapshot.go | 2 +- .../govmomi/simulator/virtual_machine.go | 1 + .../github.com/vmware/govmomi/vim25/retry.go | 68 +- .../vmware/govmomi/vim25/soap/client.go | 2 +- .../vmware/govmomi/vim25/soap/error.go | 10 + vendor/go.uber.org/zap/CHANGELOG.md | 35 +- vendor/go.uber.org/zap/FAQ.md | 1 + vendor/go.uber.org/zap/config.go | 2 + vendor/go.uber.org/zap/field.go | 8 +- vendor/go.uber.org/zap/go.mod | 1 + vendor/go.uber.org/zap/logger.go | 41 +- vendor/go.uber.org/zap/options.go | 19 +- vendor/go.uber.org/zap/sink.go | 2 +- vendor/go.uber.org/zap/stacktrace.go | 47 +- .../zap/zapcore/console_encoder.go | 30 +- vendor/go.uber.org/zap/zapcore/encoder.go | 42 + vendor/go.uber.org/zap/zapcore/entry.go | 14 +- vendor/go.uber.org/zap/zapcore/field.go | 18 +- .../go.uber.org/zap/zapcore/json_encoder.go | 30 +- vendor/go.uber.org/zap/zapcore/marshaler.go | 8 + vendor/golang.org/x/net/html/parse.go | 15 +- vendor/golang.org/x/net/http2/transport.go | 25 +- .../idna/{tables12.00.go => tables12.0.0.go} | 2 +- vendor/golang.org/x/net/idna/tables13.0.0.go | 4839 +++++++++++++++++ vendor/golang.org/x/xerrors/LICENSE | 27 - vendor/golang.org/x/xerrors/PATENTS | 22 - vendor/golang.org/x/xerrors/README | 2 - vendor/golang.org/x/xerrors/adaptor.go | 193 - vendor/golang.org/x/xerrors/codereview.cfg | 1 - vendor/golang.org/x/xerrors/doc.go | 22 - vendor/golang.org/x/xerrors/errors.go | 33 - vendor/golang.org/x/xerrors/fmt.go | 187 - vendor/golang.org/x/xerrors/format.go | 34 - vendor/golang.org/x/xerrors/frame.go | 56 - vendor/golang.org/x/xerrors/go.mod | 3 - .../golang.org/x/xerrors/internal/internal.go | 8 - vendor/golang.org/x/xerrors/wrap.go | 106 - vendor/gopkg.in/yaml.v2/.travis.yml | 1 + vendor/gopkg.in/yaml.v2/apic.go | 6 +- vendor/gopkg.in/yaml.v2/go.mod | 8 +- vendor/gopkg.in/yaml.v2/yaml.go | 14 +- vendor/modules.txt | 25 +- 108 files changed, 6304 insertions(+), 1419 deletions(-) delete mode 100644 
vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_darwin.go delete mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_dragonfly.go delete mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_freebsd.go delete mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux.go delete mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux_mips64le.go delete mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_netbsd.go delete mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_openbsd.go delete mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_solaris.go create mode 100644 vendor/github.com/spf13/cobra/.golangci.yml create mode 100644 vendor/github.com/spf13/cobra/CONDUCT.md rename vendor/golang.org/x/net/idna/{tables12.00.go => tables12.0.0.go} (99%) create mode 100644 vendor/golang.org/x/net/idna/tables13.0.0.go delete mode 100644 vendor/golang.org/x/xerrors/LICENSE delete mode 100644 vendor/golang.org/x/xerrors/PATENTS delete mode 100644 vendor/golang.org/x/xerrors/README delete mode 100644 vendor/golang.org/x/xerrors/adaptor.go delete mode 100644 vendor/golang.org/x/xerrors/codereview.cfg delete mode 100644 vendor/golang.org/x/xerrors/doc.go delete mode 100644 vendor/golang.org/x/xerrors/errors.go delete mode 100644 vendor/golang.org/x/xerrors/fmt.go delete mode 100644 vendor/golang.org/x/xerrors/format.go delete mode 100644 vendor/golang.org/x/xerrors/frame.go delete mode 100644 vendor/golang.org/x/xerrors/go.mod delete mode 100644 vendor/golang.org/x/xerrors/internal/internal.go delete mode 100644 vendor/golang.org/x/xerrors/wrap.go diff --git a/go.sum b/go.sum index d4ac0f8919..ff4fe62ae2 100644 --- a/go.sum +++ b/go.sum @@ -32,17 +32,14 @@ github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/hcsshim v0.0.0-20190417211021-672e52e9209d/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46 h1:lsxEuwrXEAokXB9qhlbKWPpo3KMLZQ5WB5WLQRW1uq0= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OpenPeeDeeP/depguard v1.0.0/go.mod h1:7/4sitnI9YlQgTLLk734QlzXT8DuHVnAyztLplQjk+o= github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Rican7/retry v0.1.0/go.mod 
h1:FgOROf8P5bebcC1DS0PdOQiqGUridaZvikzUmkFW6gg= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= @@ -57,10 +54,8 @@ github.com/akutz/gofsutil v0.1.2/go.mod h1:09JEF8dR0bTTZMQ1m3/+O1rqQyH2lG1ET34PO github.com/akutz/gosync v0.1.0 h1:naxPT/aDYDh79PMwM3XmencmNQeYmpNFSZy4ZE9zIW0= github.com/akutz/gosync v0.1.0/go.mod h1:I8I4aiqJI1nqaeYOOB1WS+CgRJVVPqhct9Y4njywM84= github.com/akutz/memconn v0.1.0/go.mod h1:Jo8rI7m0NieZyLI5e2CDlRdRqRRB4S7Xp77ukDjH+Fw= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= @@ -87,7 +82,6 @@ github.com/bazelbuild/buildtools v0.0.0-20190731111112-f720930ceb60/go.mod h1:5J github.com/bazelbuild/buildtools v0.0.0-20190917191645-69366ca98f89/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU= github.com/bazelbuild/rules_go v0.0.0-20190719190356-6dae44dc5cab/go.mod h1:MC23Dc/wkXEyk3Wpq6lCqz0ZAYOZDw2DR5y3N1q2i7M= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= @@ -151,7 +145,6 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.7 h1:6pwm8kMQKCmgUg0ZHTm5+/YvRK0s3THD/28+T6/kk4A= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= @@ -187,7 +180,6 @@ github.com/elazarl/goproxy v0.0.0-20200710112657-153946a5f232/go.mod h1:Ro8st/El github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8= github.com/elazarl/goproxy/ext v0.0.0-20200710112657-153946a5f232 h1:gj8NHKvd8kkOMT8gcy4gJBCXsDK2fP0tqKc/F20q73k= github.com/elazarl/goproxy/ext v0.0.0-20200710112657-153946a5f232/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633 h1:H2pdYOb3KQ1/YsqVWoWNLQO+fusocsw354rqGTZtAgw= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= 
github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -199,7 +191,6 @@ github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= -github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -207,12 +198,10 @@ github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= @@ -246,14 +235,11 @@ github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+ github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= 
github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= @@ -267,7 +253,6 @@ github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nA github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= -github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= @@ -277,14 +262,12 @@ github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dp github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-ozzo/ozzo-validation v3.5.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= @@ -311,12 +294,10 @@ github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGw github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.0.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 
github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s= @@ -324,14 +305,12 @@ github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFU github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= @@ -360,33 +339,26 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/cadvisor v0.35.0/go.mod h1:1nql6U13uTHaLYB8rLS5x9IJc2qT6Xd/Tr1sTX6NE48= -github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian 
v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v0.0.0-20170306145142-6a5e28554805/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -449,7 +421,6 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/heketi/heketi v9.0.1-0.20190917153846-c2e2a4ab7ab9+incompatible/go.mod h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o= github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7UkZt1i4FQeQy0R2T8GLUwQhOP5M1gBhy4= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -464,11 +435,8 @@ github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -496,7 +464,6 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= @@ -523,7 +490,6 @@ github.com/mailru/easyjson 
v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= @@ -565,14 +531,11 @@ github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwd github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mozilla/tls-observatory v0.0.0-20180409132520-8791a200eb40/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d h1:7PxY7LVfSZm7PEeBTyK1rj1gABdCO2mbri6GKO1cMDs= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mvdan/xurls v1.1.0/go.mod h1:tQlNn3BED8bE/15hnSL2HLkDeLWpNPAwtw7wkEq44oU= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= @@ -630,18 +593,15 @@ github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnh github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.1.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= -github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv 
v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -655,7 +615,6 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v0.9.4/go.mod h1:oCXIBxdI62A4cR6aTRJCgetEjecSIYzOEaeAn4iYEpM= -github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= @@ -664,7 +623,6 @@ github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66Id github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= @@ -672,7 +630,6 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= @@ -681,10 +638,8 @@ github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs 
v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.0.11 h1:DhHlBtkHWPYi8O2y31JkK0TF+DGM+51OopZjH/Ia5qI= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4= @@ -722,7 +677,6 @@ github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjM github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= @@ -745,7 +699,6 @@ github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= @@ -754,17 +707,13 @@ github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb6 github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.0 h1:oaPbdDe/x0UncahuwiPxW1GYJyilRAdsPnq3e1yaPcI= github.com/spf13/pflag v1.0.0/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.0.2/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.7.1 
h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk= github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= @@ -778,7 +727,6 @@ github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -838,14 +786,12 @@ go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM= @@ -868,7 +814,6 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= @@ -930,20 +875,15 @@ golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -994,10 +934,8 @@ golang.org/x/sys v0.0.0-20191220220014-0732a990476f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e h1:AyodaIpKjppX+cBfTASF2E1US3H2JFBj920Ot3rtDjs= golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k= @@ -1006,14 +944,11 @@ golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fq golang.org/x/text v0.0.0-20170915090833-1cbadb444a80/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/time 
v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1057,14 +992,12 @@ golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7 h1:EBZoQjiKKPaLbPrbpssUfuHtwM6KV/vb4U85g/cigFY= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1158,7 +1091,6 @@ gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= @@ -1176,8 +1108,6 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.2/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.1.2 h1:SMdYLJl312RXuxXziCCHhRsp/tvct9cGKey0yv95tZM= -honnef.co/go/tools v0.1.2/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= honnef.co/go/tools v0.1.3 h1:qTakTkI6ni6LFD5sBwwsdSO+AQqbSIxOauHTTQKZ/7o= 
honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= k8s.io/api v0.18.5 h1:fKbCxr+U3fu7k6jB+QeYPD/c6xKYeSJ2KVWmyUypuWM= diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md index 9d92c11f16..f765a46f91 100644 --- a/vendor/github.com/google/uuid/README.md +++ b/vendor/github.com/google/uuid/README.md @@ -16,4 +16,4 @@ change is the ability to represent an invalid UUID (vs a NIL UUID). Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here: -http://godoc.org/github.com/google/uuid +http://pkg.go.dev/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go index b174616315..b404f4bec2 100644 --- a/vendor/github.com/google/uuid/hash.go +++ b/vendor/github.com/google/uuid/hash.go @@ -26,8 +26,8 @@ var ( // NewMD5 and NewSHA1. func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { h.Reset() - h.Write(space[:]) - h.Write(data) + h.Write(space[:]) //nolint:errcheck + h.Write(data) //nolint:errcheck s := h.Sum(nil) var uuid UUID copy(uuid[:], s) diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go index 7f9e0c6c0e..14bd34072b 100644 --- a/vendor/github.com/google/uuid/marshal.go +++ b/vendor/github.com/google/uuid/marshal.go @@ -16,10 +16,11 @@ func (uuid UUID) MarshalText() ([]byte, error) { // UnmarshalText implements encoding.TextUnmarshaler. func (uuid *UUID) UnmarshalText(data []byte) error { id, err := ParseBytes(data) - if err == nil { - *uuid = id + if err != nil { + return err } - return err + *uuid = id + return nil } // MarshalBinary implements encoding.BinaryMarshaler. diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go index f326b54db3..2e02ec06c0 100644 --- a/vendor/github.com/google/uuid/sql.go +++ b/vendor/github.com/google/uuid/sql.go @@ -9,7 +9,7 @@ import ( "fmt" ) -// Scan implements sql.Scanner so UUIDs can be read from databases transparently +// Scan implements sql.Scanner so UUIDs can be read from databases transparently. // Currently, database types that map to string and []byte are supported. Please // consult database-specific driver documentation for matching types. func (uuid *UUID) Scan(src interface{}) error { diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go index 524404cc52..60d26bb50c 100644 --- a/vendor/github.com/google/uuid/uuid.go +++ b/vendor/github.com/google/uuid/uuid.go @@ -35,6 +35,12 @@ const ( var rander = rand.Reader // random function +type invalidLengthError struct{ len int } + +func (err invalidLengthError) Error() string { + return fmt.Sprintf("invalid UUID length: %d", err.len) +} + // Parse decodes s into a UUID or returns an error. 
Both the standard UUID // forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the @@ -68,7 +74,7 @@ func Parse(s string) (UUID, error) { } return uuid, nil default: - return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) + return uuid, invalidLengthError{len(s)} } // s is now at least 36 bytes long // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx @@ -112,7 +118,7 @@ func ParseBytes(b []byte) (UUID, error) { } return uuid, nil default: - return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) + return uuid, invalidLengthError{len(b)} } // s is now at least 36 bytes long // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go index 199a1ac654..463109629e 100644 --- a/vendor/github.com/google/uuid/version1.go +++ b/vendor/github.com/google/uuid/version1.go @@ -17,12 +17,6 @@ import ( // // In most cases, New should be used. func NewUUID() (UUID, error) { - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - nodeMu.Unlock() - var uuid UUID now, seq, err := GetTime() if err != nil { @@ -38,7 +32,13 @@ func NewUUID() (UUID, error) { binary.BigEndian.PutUint16(uuid[4:], timeMid) binary.BigEndian.PutUint16(uuid[6:], timeHi) binary.BigEndian.PutUint16(uuid[8:], seq) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() return uuid, nil } diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go index 84af91c9f5..86160fbd07 100644 --- a/vendor/github.com/google/uuid/version4.go +++ b/vendor/github.com/google/uuid/version4.go @@ -14,6 +14,14 @@ func New() UUID { return Must(NewRandom()) } +// NewString creates a new random UUID and returns it as a string or panics. +// NewString is equivalent to the expression +// +// uuid.New().String() +func NewString() string { + return Must(NewRandom()).String() +} + // NewRandom returns a Random (Version 4) UUID. // // The strength of the UUIDs is based on the strength of the crypto/rand @@ -27,8 +35,13 @@ func New() UUID { // equivalent to the odds of creating a few tens of trillions of UUIDs in a // year and having one duplicate. func NewRandom() (UUID, error) { + return NewRandomFromReader(rander) +} + +// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader. 
+func NewRandomFromReader(r io.Reader) (UUID, error) { var uuid UUID - _, err := io.ReadFull(rander, uuid[:]) + _, err := io.ReadFull(r, uuid[:]) if err != nil { return Nil, err } diff --git a/vendor/github.com/onsi/ginkgo/.travis.yml b/vendor/github.com/onsi/ginkgo/.travis.yml index 079af24318..8b2883f976 100644 --- a/vendor/github.com/onsi/ginkgo/.travis.yml +++ b/vendor/github.com/onsi/ginkgo/.travis.yml @@ -1,7 +1,7 @@ language: go go: - - 1.13.x - 1.14.x + - 1.15.x - tip cache: @@ -16,10 +16,9 @@ install: - GO111MODULE="off" go get golang.org/x/tools/cmd/cover - GO111MODULE="off" go get github.com/onsi/gomega - GO111MODULE="off" go install github.com/onsi/ginkgo/ginkgo - - export PATH=$PATH:$HOME/gopath/bin + - export PATH=$GOPATH/bin:$PATH script: - - GO111MODULE="on" go mod tidy - - diff -u <(echo -n) <(git diff go.mod) - - diff -u <(echo -n) <(git diff go.sum) - - $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace && go vet + - GO111MODULE="on" go mod tidy && git diff --exit-code go.mod go.sum + - go vet + - ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace diff --git a/vendor/github.com/onsi/ginkgo/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/CHANGELOG.md index 6092fcb633..bf51fe9cd2 100644 --- a/vendor/github.com/onsi/ginkgo/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/CHANGELOG.md @@ -1,3 +1,20 @@ +## 1.15.0 + +### Features +- Adds 'outline' command to print the outline of specs/containers in a file (#754) [071c369] [6803cc3] [935b538] [06744e8] [0c40583] +- Add support for using template to generate tests (#752) [efb9e69] +- Add a Chinese Doc #755 (#756) [5207632] +- cli: allow multiple -focus and -skip flags (#736) [9a782fb] + +### Fixes +- Add _internal to filename of tests created with internal flag (#751) [43c12da] + +## 1.14.2 + +### Fixes +- correct handling windows backslash in import path (#721) [97f3d51] +- Add additional methods to GinkgoT() to improve compatibility with the testing.TB interface [b5fe44d] + ## 1.14.1 ### Fixes diff --git a/vendor/github.com/onsi/ginkgo/README.md b/vendor/github.com/onsi/ginkgo/README.md index 475e04994f..64e85eee0b 100644 --- a/vendor/github.com/onsi/ginkgo/README.md +++ b/vendor/github.com/onsi/ginkgo/README.md @@ -2,7 +2,7 @@ [![Build Status](https://travis-ci.org/onsi/ginkgo.svg?branch=master)](https://travis-ci.org/onsi/ginkgo) -Jump to the [docs](https://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)! +Jump to the [docs](https://onsi.github.io/ginkgo/) | [中文文档](https://ke-chain.github.io/ginkgodoc) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)! If you have a question, comment, bug report, feature request, etc. please open a GitHub issue, or visit the [Ginkgo Slack channel](https://app.slack.com/client/T029RQSE6/CQQ50BBNW). 
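A brief aside on the google/uuid hunks earlier in this patch: they vendor two new constructors, NewString and NewRandomFromReader. A minimal sketch of how downstream code could exercise them follows; the fixed 16-byte seed and the predicted output are illustrative assumptions for a deterministic test, not part of the patch:

package main

import (
	"bytes"
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// NewString is shorthand for uuid.New().String() and, like New, panics
	// only if reading from crypto/rand fails.
	fmt.Println(uuid.NewString())

	// NewRandomFromReader draws the 16 random bytes from a caller-supplied
	// io.Reader instead of the package-level rander, which makes version-4
	// UUIDs reproducible in tests. The seed below is a hypothetical example.
	seed := bytes.NewReader([]byte("0123456789abcdef"))
	id, err := uuid.NewRandomFromReader(seed)
	if err != nil {
		panic(err)
	}
	// The version (4) and variant bits are still forced on the raw bytes, so
	// with this seed the result should be 30313233-3435-4637-b839-616263646566.
	fmt.Println(id)
}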
diff --git a/vendor/github.com/onsi/ginkgo/config/config.go b/vendor/github.com/onsi/ginkgo/config/config.go index feef2bcd6c..8c177811e9 100644 --- a/vendor/github.com/onsi/ginkgo/config/config.go +++ b/vendor/github.com/onsi/ginkgo/config/config.go @@ -20,14 +20,14 @@ import ( "fmt" ) -const VERSION = "1.14.1" +const VERSION = "1.15.0" type GinkgoConfigType struct { RandomSeed int64 RandomizeAllSpecs bool RegexScansFilePath bool - FocusString string - SkipString string + FocusStrings []string + SkipStrings []string SkipMeasurements bool FailOnPending bool FailFast bool @@ -65,6 +65,11 @@ func processPrefix(prefix string) string { return prefix } +type flagFunc func(string) + +func (f flagFunc) String() string { return "" } +func (f flagFunc) Set(s string) error { f(s); return nil } + func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) { prefix = processPrefix(prefix) flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.") @@ -75,8 +80,8 @@ func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) { flagSet.BoolVar(&(GinkgoConfig.DryRun), prefix+"dryRun", false, "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v.") - flagSet.StringVar(&(GinkgoConfig.FocusString), prefix+"focus", "", "If set, ginkgo will only run specs that match this regular expression.") - flagSet.StringVar(&(GinkgoConfig.SkipString), prefix+"skip", "", "If set, ginkgo will only run specs that do not match this regular expression.") + flagSet.Var(flagFunc(flagFocus), prefix+"focus", "If set, ginkgo will only run specs that match this regular expression. Can be specified multiple times, values are ORed.") + flagSet.Var(flagFunc(flagSkip), prefix+"skip", "If set, ginkgo will only run specs that do not match this regular expression. Can be specified multiple times, values are ORed.") flagSet.BoolVar(&(GinkgoConfig.RegexScansFilePath), prefix+"regexScansFilePath", false, "If set, ginkgo regex matching also will look at the file path (code location).") @@ -133,12 +138,12 @@ func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultRepor result = append(result, fmt.Sprintf("--%sdryRun", prefix)) } - if ginkgo.FocusString != "" { - result = append(result, fmt.Sprintf("--%sfocus=%s", prefix, ginkgo.FocusString)) + for _, s := range ginkgo.FocusStrings { + result = append(result, fmt.Sprintf("--%sfocus=%s", prefix, s)) } - if ginkgo.SkipString != "" { - result = append(result, fmt.Sprintf("--%sskip=%s", prefix, ginkgo.SkipString)) + for _, s := range ginkgo.SkipStrings { + result = append(result, fmt.Sprintf("--%sskip=%s", prefix, s)) } if ginkgo.FlakeAttempts > 1 { @@ -211,3 +216,13 @@ func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultRepor return result } + +// flagFocus implements the -focus flag. +func flagFocus(arg string) { + GinkgoConfig.FocusStrings = append(GinkgoConfig.FocusStrings, arg) +} + +// flagSkip implements the -skip flag. 
+func flagSkip(arg string) { + GinkgoConfig.SkipStrings = append(GinkgoConfig.SkipStrings, arg) +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go index 30ff86f59f..7e8a487082 100644 --- a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go +++ b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go @@ -93,26 +93,36 @@ func GinkgoT(optionalOffset ...int) GinkgoTInterface { if len(optionalOffset) > 0 { offset = optionalOffset[0] } - return testingtproxy.New(GinkgoWriter, Fail, offset) + failedFunc := func() bool { + return CurrentGinkgoTestDescription().Failed + } + nameFunc := func() string { + return CurrentGinkgoTestDescription().FullTestText + } + return testingtproxy.New(GinkgoWriter, Fail, Skip, failedFunc, nameFunc, offset) } //The interface returned by GinkgoT(). This covers most of the methods //in the testing package's T. type GinkgoTInterface interface { - Fail() + Cleanup(func()) Error(args ...interface{}) Errorf(format string, args ...interface{}) + Fail() FailNow() + Failed() bool Fatal(args ...interface{}) Fatalf(format string, args ...interface{}) + Helper() Log(args ...interface{}) Logf(format string, args ...interface{}) - Failed() bool + Name() string Parallel() Skip(args ...interface{}) - Skipf(format string, args ...interface{}) SkipNow() + Skipf(format string, args ...interface{}) Skipped() bool + TempDir() string } //Custom Ginkgo test reporters must implement the Reporter interface. diff --git a/vendor/github.com/onsi/ginkgo/go.mod b/vendor/github.com/onsi/ginkgo/go.mod index 1f71252280..655060cf74 100644 --- a/vendor/github.com/onsi/ginkgo/go.mod +++ b/vendor/github.com/onsi/ginkgo/go.mod @@ -4,8 +4,8 @@ require ( github.com/fsnotify/fsnotify v1.4.9 // indirect github.com/nxadm/tail v1.4.4 github.com/onsi/gomega v1.10.1 - golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 - golang.org/x/text v0.3.2 // indirect + golang.org/x/sys v0.0.0-20210112080510-489259a85091 + golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e ) go 1.13 diff --git a/vendor/github.com/onsi/ginkgo/go.sum b/vendor/github.com/onsi/ginkgo/go.sum index 2b774f3e82..56a493f9da 100644 --- a/vendor/github.com/onsi/ginkgo/go.sum +++ b/vendor/github.com/onsi/ginkgo/go.sum @@ -1,8 +1,6 @@ -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= @@ -15,39 +13,50 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/nxadm/tail v1.4.4 
h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e h1:N7DeIrjYszNmSW409R3frPPwglRwMkXSBzwVbkOjLLA= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/sys 
v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091 h1:DMyOG0U+gKfu8JZzg2UQe9MeaC1X+xQWlAKcRnjxjCw= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e h1:4nW4NLDYnU28ojHaHO8OVxFHk/aQ33U01a9cjED+pzE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -57,11 +66,9 @@ google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyz google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_darwin.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_darwin.go deleted file mode 100644 index e3d09eadb8..0000000000 --- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_darwin.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build darwin - 
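These per-platform `interceptorDupx` shims (darwin here, plus the dragonfly, freebsd, linux, netbsd, openbsd, and solaris variants deleted below) are no longer needed: `golang.org/x/sys/unix` exposes `Dup2` on all of these targets, emulating it via `dup3` where the `dup2` syscall is absent. A rough sketch of the redirection trick itself, assuming a Unix build (error handling simplified; not ginkgo's code):

```go
// +build linux darwin

package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := ioutil.TempFile("", "redirect")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())

	// Point fd 1 (stdout) at the temp file, as the output interceptor does.
	if err := unix.Dup2(int(f.Fd()), 1); err != nil {
		panic(err)
	}
	fmt.Println("captured") // written to f, not the terminal
}
```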
-package remote - -import ( - "golang.org/x/sys/unix" -) - -func interceptorDupx(oldfd int, newfd int) { - unix.Dup2(oldfd, newfd) -} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_dragonfly.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_dragonfly.go deleted file mode 100644 index 72d38686a0..0000000000 --- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_dragonfly.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build dragonfly - -package remote - -import ( - "golang.org/x/sys/unix" -) - -func interceptorDupx(oldfd int, newfd int) { - unix.Dup2(oldfd, newfd) -} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_freebsd.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_freebsd.go deleted file mode 100644 index 497d548d99..0000000000 --- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_freebsd.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build freebsd - -package remote - -import ( - "golang.org/x/sys/unix" -) - -func interceptorDupx(oldfd int, newfd int) { - unix.Dup2(oldfd, newfd) -} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux.go deleted file mode 100644 index 29add0d330..0000000000 --- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build linux -// +build !mips64le - -package remote - -import ( - "golang.org/x/sys/unix" -) - -func interceptorDupx(oldfd int, newfd int) { - unix.Dup2(oldfd, newfd) -} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux_mips64le.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux_mips64le.go deleted file mode 100644 index 09bd062606..0000000000 --- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux_mips64le.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build linux -// +build mips64le - -package remote - -import ( - "golang.org/x/sys/unix" -) - -func interceptorDupx(oldfd int, newfd int) { - unix.Dup3(oldfd, newfd, 0) -} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_netbsd.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_netbsd.go deleted file mode 100644 index 16ad6aeb29..0000000000 --- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_netbsd.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build netbsd - -package remote - -import ( - "golang.org/x/sys/unix" -) - -func interceptorDupx(oldfd int, newfd int) { - unix.Dup2(oldfd, newfd) -} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_openbsd.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_openbsd.go deleted file mode 100644 index 4275f84210..0000000000 --- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_openbsd.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build openbsd - -package remote - -import ( - "golang.org/x/sys/unix" -) - -func interceptorDupx(oldfd int, newfd int) { - unix.Dup2(oldfd, newfd) -} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_solaris.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_solaris.go deleted file mode 100644 index 882a38a9e0..0000000000 --- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_solaris.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build solaris - -package remote - -import ( - "golang.org/x/sys/unix" -) - -func 
interceptorDupx(oldfd int, newfd int) { - unix.Dup2(oldfd, newfd) -} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go index 80614d0ce5..774967db66 100644 --- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go @@ -8,6 +8,7 @@ import ( "os" "github.com/nxadm/tail" + "golang.org/x/sys/unix" ) func NewOutputInterceptor() OutputInterceptor { @@ -35,8 +36,10 @@ func (interceptor *outputInterceptor) StartInterceptingOutput() error { return err } - interceptorDupx(int(interceptor.redirectFile.Fd()), 1) - interceptorDupx(int(interceptor.redirectFile.Fd()), 2) + // This might call Dup3 if the dup2 syscall is not available, e.g. on + // linux/arm64 or linux/riscv64 + unix.Dup2(int(interceptor.redirectFile.Fd()), 1) + unix.Dup2(int(interceptor.redirectFile.Fd()), 2) if interceptor.streamTarget != nil { interceptor.tailer, _ = tail.TailFile(interceptor.redirectFile.Name(), tail.Config{Follow: true}) diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/specs.go b/vendor/github.com/onsi/ginkgo/internal/spec/specs.go index 8a20071375..0a24139fb1 100644 --- a/vendor/github.com/onsi/ginkgo/internal/spec/specs.go +++ b/vendor/github.com/onsi/ginkgo/internal/spec/specs.go @@ -4,6 +4,7 @@ import ( "math/rand" "regexp" "sort" + "strings" ) type Specs struct { @@ -46,11 +47,11 @@ func (e *Specs) Shuffle(r *rand.Rand) { e.names = names } -func (e *Specs) ApplyFocus(description string, focusString string, skipString string) { - if focusString == "" && skipString == "" { +func (e *Specs) ApplyFocus(description string, focus, skip []string) { + if len(focus)+len(skip) == 0 { e.applyProgrammaticFocus() } else { - e.applyRegExpFocusAndSkip(description, focusString, skipString) + e.applyRegExpFocusAndSkip(description, focus, skip) } } @@ -90,14 +91,13 @@ func (e *Specs) toMatch(description string, i int) []byte { } } -func (e *Specs) applyRegExpFocusAndSkip(description string, focusString string, skipString string) { - var focusFilter *regexp.Regexp - if focusString != "" { - focusFilter = regexp.MustCompile(focusString) +func (e *Specs) applyRegExpFocusAndSkip(description string, focus, skip []string) { + var focusFilter, skipFilter *regexp.Regexp + if len(focus) > 0 { + focusFilter = regexp.MustCompile(strings.Join(focus, "|")) } - var skipFilter *regexp.Regexp - if skipString != "" { - skipFilter = regexp.MustCompile(skipString) + if len(skip) > 0 { + skipFilter = regexp.MustCompile(strings.Join(skip, "|")) } for i, spec := range e.specs { diff --git a/vendor/github.com/onsi/ginkgo/internal/suite/suite.go b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go index e75da1f896..b4a83c432d 100644 --- a/vendor/github.com/onsi/ginkgo/internal/suite/suite.go +++ b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go @@ -97,7 +97,7 @@ func (suite *Suite) generateSpecsIterator(description string, config config.Gink specs.Shuffle(rand.New(rand.NewSource(config.RandomSeed))) } - specs.ApplyFocus(description, config.FocusString, config.SkipString) + specs.ApplyFocus(description, config.FocusStrings, config.SkipStrings) if config.SkipMeasurements { specs.SkipMeasurements() diff --git a/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go b/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go index 090445d084..d7bbb7a96b 100644 --- 
a/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go +++ b/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go @@ -6,21 +6,34 @@ import ( ) type failFunc func(message string, callerSkip ...int) +type skipFunc func(message string, callerSkip ...int) +type failedFunc func() bool +type nameFunc func() string -func New(writer io.Writer, fail failFunc, offset int) *ginkgoTestingTProxy { +func New(writer io.Writer, fail failFunc, skip skipFunc, failed failedFunc, name nameFunc, offset int) *ginkgoTestingTProxy { return &ginkgoTestingTProxy{ fail: fail, offset: offset, writer: writer, + skip: skip, + failed: failed, + name: name, } } type ginkgoTestingTProxy struct { fail failFunc + skip skipFunc + failed failedFunc + name nameFunc offset int writer io.Writer } +func (t *ginkgoTestingTProxy) Cleanup(func()) { + // No-op +} + func (t *ginkgoTestingTProxy) Error(args ...interface{}) { t.fail(fmt.Sprintln(args...), t.offset) } @@ -37,6 +50,10 @@ func (t *ginkgoTestingTProxy) FailNow() { t.fail("failed", t.offset) } +func (t *ginkgoTestingTProxy) Failed() bool { + return t.failed() +} + func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) { t.fail(fmt.Sprintln(args...), t.offset) } @@ -45,6 +62,10 @@ func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) { t.fail(fmt.Sprintf(format, args...), t.offset) } +func (t *ginkgoTestingTProxy) Helper() { + // No-op +} + func (t *ginkgoTestingTProxy) Log(args ...interface{}) { fmt.Fprintln(t.writer, args...) } @@ -53,24 +74,31 @@ func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) { t.Log(fmt.Sprintf(format, args...)) } -func (t *ginkgoTestingTProxy) Failed() bool { - return false +func (t *ginkgoTestingTProxy) Name() string { + return t.name() } func (t *ginkgoTestingTProxy) Parallel() { + // No-op } func (t *ginkgoTestingTProxy) Skip(args ...interface{}) { - fmt.Println(args...) 
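With `Skip`, `Failed`, and `Name` now wired to real ginkgo state (the change completed just below), the proxy behaves much more like a genuine `*testing.T`. One practical consequence, sketched here with illustrative names: helpers written against a narrow testing interface work unchanged under `go test` and under ginkgo via `GinkgoT()`.

```go
package helpers // illustrative package name

import "os"

// minimalT is the small slice of testing.T that requireEnv needs.
// Both *testing.T and the value returned by GinkgoT() satisfy it
// as of this change.
type minimalT interface {
	Helper()
	Name() string
	Skipf(format string, args ...interface{})
}

// requireEnv skips the calling test when the variable is unset.
func requireEnv(t minimalT, key string) string {
	t.Helper()
	v, ok := os.LookupEnv(key)
	if !ok {
		t.Skipf("%s: %s not set, skipping", t.Name(), key)
	}
	return v
}
```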
+ t.skip(fmt.Sprintln(args...), t.offset) } -func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) { - t.Skip(fmt.Sprintf(format, args...)) +func (t *ginkgoTestingTProxy) SkipNow() { + t.skip("skip", t.offset) } -func (t *ginkgoTestingTProxy) SkipNow() { +func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) { + t.skip(fmt.Sprintf(format, args...), t.offset) } func (t *ginkgoTestingTProxy) Skipped() bool { return false } + +func (t *ginkgoTestingTProxy) TempDir() string { + // No-op + return "" +} diff --git a/vendor/github.com/onsi/gomega/.travis.yml b/vendor/github.com/onsi/gomega/.travis.yml index 072fdd2db3..348e3014c6 100644 --- a/vendor/github.com/onsi/gomega/.travis.yml +++ b/vendor/github.com/onsi/gomega/.travis.yml @@ -1,8 +1,11 @@ language: go +arch: + - amd64 + - ppc64le go: - - 1.13.x - 1.14.x + - 1.15.x - gotip env: diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index 3aafdbcfcc..16095fa3c2 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,27 @@ +## 1.10.5 + +### Fixes +- fix: collections matchers should display type of expectation (#408) [6b4eb5a] +- fix(ContainElements): consistently flatten expected values [073b880] +- fix(ConsistOf): consistently flatten expected values [7266efe] + +## 1.10.4 + +### Fixes +- update golang net library to more recent version without vulnerability (#406) [817a8b9] +- Correct spelling: alloted -> allotted (#403) [0bae715] +- fix a panic in MessageWithDiff with long message (#402) [ea06b9b] + +## 1.10.3 + +### Fixes +- updates golang/x/net to fix vulnerability detected by snyk (#394) [c479356] + +## 1.10.2 + +### Fixes +- Add ExpectWithOffset, EventuallyWithOffset and ConsistentlyWithOffset to WithT (#391) [990941a] + ## 1.10.1 ### Fixes diff --git a/vendor/github.com/onsi/gomega/format/format.go b/vendor/github.com/onsi/gomega/format/format.go index fae25adceb..e59d7d75b6 100644 --- a/vendor/github.com/onsi/gomega/format/format.go +++ b/vendor/github.com/onsi/gomega/format/format.go @@ -105,7 +105,13 @@ func MessageWithDiff(actual, message, expected string) string { tabLength := 4 spaceFromMessageToActual := tabLength + len(": ") - len(message) - padding := strings.Repeat(" ", spaceFromMessageToActual+spacesBeforeFormattedMismatch) + "|" + + paddingCount := spaceFromMessageToActual + spacesBeforeFormattedMismatch + if paddingCount < 0 { + return Message(formattedActual, message, formattedExpected) + } + + padding := strings.Repeat(" ", paddingCount) + "|" return Message(formattedActual, message+padding, formattedExpected) } diff --git a/vendor/github.com/onsi/gomega/go.mod b/vendor/github.com/onsi/gomega/go.mod index 7789351417..6f853a5797 100644 --- a/vendor/github.com/onsi/gomega/go.mod +++ b/vendor/github.com/onsi/gomega/go.mod @@ -1,9 +1,10 @@ module github.com/onsi/gomega +go 1.14 + require ( github.com/golang/protobuf v1.4.2 github.com/onsi/ginkgo v1.12.1 - golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 - golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 + golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb gopkg.in/yaml.v2 v2.3.0 ) diff --git a/vendor/github.com/onsi/gomega/go.sum b/vendor/github.com/onsi/gomega/go.sum index 610b09beef..54eeacd2ba 100644 --- a/vendor/github.com/onsi/gomega/go.sum +++ b/vendor/github.com/onsi/gomega/go.sum @@ -23,22 +23,28 @@ github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ= github.com/onsi/ginkgo v1.12.1/go.mod 
h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e h1:N7DeIrjYszNmSW409R3frPPwglRwMkXSBzwVbkOjLLA= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index 8ff9611d53..1bc5288b8b 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -24,7 +24,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.10.1" +const GOMEGA_VERSION 
= "1.10.5" const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil. If you're using Ginkgo then you probably forgot to put your assertion in an It(). @@ -376,13 +376,13 @@ func NewGomegaWithT(t types.GomegaTestingT) *GomegaWithT { return NewWithT(t) } -// Expect is used to make assertions. See documentation for Expect. -func (g *WithT) Expect(actual interface{}, extra ...interface{}) Assertion { - return assertion.New(actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), 0, extra...) +// ExpectWithOffset is used to make assertions. See documentation for ExpectWithOffset. +func (g *WithT) ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion { + return assertion.New(actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), offset, extra...) } -// Eventually is used to make asynchronous assertions. See documentation for Eventually. -func (g *WithT) Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion { +// EventuallyWithOffset is used to make asynchronous assertions. See documentation for EventuallyWithOffset. +func (g *WithT) EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion { timeoutInterval := defaultEventuallyTimeout pollingInterval := defaultEventuallyPollingInterval if len(intervals) > 0 { @@ -391,11 +391,11 @@ func (g *WithT) Eventually(actual interface{}, intervals ...interface{}) AsyncAs if len(intervals) > 1 { pollingInterval = toDuration(intervals[1]) } - return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), timeoutInterval, pollingInterval, 0) + return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), timeoutInterval, pollingInterval, offset) } -// Consistently is used to make asynchronous assertions. See documentation for Consistently. -func (g *WithT) Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion { +// ConsistentlyWithOffset is used to make asynchronous assertions. See documentation for ConsistentlyWithOffset. +func (g *WithT) ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion { timeoutInterval := defaultConsistentlyDuration pollingInterval := defaultConsistentlyPollingInterval if len(intervals) > 0 { @@ -404,7 +404,22 @@ func (g *WithT) Consistently(actual interface{}, intervals ...interface{}) Async if len(intervals) > 1 { pollingInterval = toDuration(intervals[1]) } - return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), timeoutInterval, pollingInterval, 0) + return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), timeoutInterval, pollingInterval, offset) +} + +// Expect is used to make assertions. See documentation for Expect. +func (g *WithT) Expect(actual interface{}, extra ...interface{}) Assertion { + return g.ExpectWithOffset(0, actual, extra...) +} + +// Eventually is used to make asynchronous assertions. See documentation for Eventually. +func (g *WithT) Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion { + return g.EventuallyWithOffset(0, actual, intervals...) +} + +// Consistently is used to make asynchronous assertions. See documentation for Consistently. 
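The point of the new `*WithOffset` variants is stack-frame attribution: a shared assertion helper can pass an offset of 1 so that a failure is reported at the helper's caller rather than inside the helper. A minimal sketch (helper and test names are illustrative):

```go
package mypkg_test

import (
	"testing"

	"github.com/onsi/gomega"
)

// expectNoError reports failures at its caller's line, not here.
func expectNoError(g *gomega.WithT, err error) {
	g.ExpectWithOffset(1, err).NotTo(gomega.HaveOccurred())
}

func TestSomething(t *testing.T) {
	g := gomega.NewWithT(t)
	expectNoError(g, nil) // a non-nil err would be attributed to this line
}
```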
+func (g *WithT) Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion { + return g.ConsistentlyWithOffset(0, actual, intervals...) } func toDuration(input interface{}) time.Duration { diff --git a/vendor/github.com/onsi/gomega/matchers/consist_of.go b/vendor/github.com/onsi/gomega/matchers/consist_of.go index e453b22d1c..e8ef0dee1f 100644 --- a/vendor/github.com/onsi/gomega/matchers/consist_of.go +++ b/vendor/github.com/onsi/gomega/matchers/consist_of.go @@ -57,17 +57,21 @@ func equalMatchersToElements(matchers []interface{}) (elements []interface{}) { return } -func matchers(expectedElems []interface{}) (matchers []interface{}) { - elems := expectedElems - if len(expectedElems) == 1 && isArrayOrSlice(expectedElems[0]) { - elems = []interface{}{} - value := reflect.ValueOf(expectedElems[0]) - for i := 0; i < value.Len(); i++ { - elems = append(elems, value.Index(i).Interface()) - } +func flatten(elems []interface{}) []interface{} { + if len(elems) != 1 || !isArrayOrSlice(elems[0]) { + return elems } - for _, e := range elems { + value := reflect.ValueOf(elems[0]) + flattened := make([]interface{}, value.Len()) + for i := 0; i < value.Len(); i++ { + flattened[i] = value.Index(i).Interface() + } + return flattened +} + +func matchers(expectedElems []interface{}) (matchers []interface{}) { + for _, e := range flatten(expectedElems) { matcher, isMatcher := e.(omegaMatcher) if !isMatcher { matcher = &EqualMatcher{Expected: e} @@ -77,6 +81,29 @@ func matchers(expectedElems []interface{}) (matchers []interface{}) { return } +func presentable(elems []interface{}) interface{} { + elems = flatten(elems) + + if len(elems) == 0 { + return []interface{}{} + } + + sv := reflect.ValueOf(elems) + tt := sv.Index(0).Elem().Type() + for i := 1; i < sv.Len(); i++ { + if sv.Index(i).Elem().Type() != tt { + return elems + } + } + + ss := reflect.MakeSlice(reflect.SliceOf(tt), sv.Len(), sv.Len()) + for i := 0; i < sv.Len(); i++ { + ss.Index(i).Set(sv.Index(i).Elem()) + } + + return ss.Interface() +} + func valuesOf(actual interface{}) []interface{} { value := reflect.ValueOf(actual) values := []interface{}{} @@ -95,11 +122,11 @@ func valuesOf(actual interface{}) []interface{} { } func (matcher *ConsistOfMatcher) FailureMessage(actual interface{}) (message string) { - message = format.Message(actual, "to consist of", matcher.Elements) + message = format.Message(actual, "to consist of", presentable(matcher.Elements)) message = appendMissingElements(message, matcher.missingElements) if len(matcher.extraElements) > 0 { message = fmt.Sprintf("%s\nthe extra elements were\n%s", message, - format.Object(matcher.extraElements, 1)) + format.Object(presentable(matcher.extraElements), 1)) } return } @@ -109,9 +136,9 @@ func appendMissingElements(message string, missingElements []interface{}) string return message } return fmt.Sprintf("%s\nthe missing elements were\n%s", message, - format.Object(missingElements, 1)) + format.Object(presentable(missingElements), 1)) } func (matcher *ConsistOfMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to consist of", matcher.Elements) + return format.Message(actual, "not to consist of", presentable(matcher.Elements)) } diff --git a/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go index 19a9e78f89..946cd8bea5 100644 --- a/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go +++ 
b/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go @@ -35,10 +35,10 @@ func (matcher *ContainElementsMatcher) Match(actual interface{}) (success bool, } func (matcher *ContainElementsMatcher) FailureMessage(actual interface{}) (message string) { - message = format.Message(actual, "to contain elements", matcher.Elements) + message = format.Message(actual, "to contain elements", presentable(matcher.Elements)) return appendMissingElements(message, matcher.missingElements) } func (matcher *ContainElementsMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to contain elements", matcher.Elements) + return format.Message(actual, "not to contain elements", presentable(matcher.Elements)) } diff --git a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go index 4e09239fff..c8993a86d9 100644 --- a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go @@ -1,11 +1,11 @@ package matchers import ( + "errors" "fmt" "reflect" "github.com/onsi/gomega/format" - "golang.org/x/xerrors" ) type MatchErrorMatcher struct { @@ -25,7 +25,7 @@ func (matcher *MatchErrorMatcher) Match(actual interface{}) (success bool, err e expected := matcher.Expected if isError(expected) { - return reflect.DeepEqual(actualErr, expected) || xerrors.Is(actualErr, expected.(error)), nil + return reflect.DeepEqual(actualErr, expected) || errors.Is(actualErr, expected.(error)), nil } if isString(expected) { diff --git a/vendor/github.com/rexray/gocsi/go.mod b/vendor/github.com/rexray/gocsi/go.mod index f1cb050abb..c38a797b30 100644 --- a/vendor/github.com/rexray/gocsi/go.mod +++ b/vendor/github.com/rexray/gocsi/go.mod @@ -15,7 +15,7 @@ require ( github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef // indirect github.com/golang/protobuf v1.3.1 github.com/google/btree v1.0.0 // indirect - github.com/gorilla/websocket v1.4.0 // indirect + github.com/gorilla/websocket v1.4.2 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.9.1 // indirect diff --git a/vendor/github.com/rexray/gocsi/go.sum b/vendor/github.com/rexray/gocsi/go.sum index 55b3c814f0..22fa5d3db4 100644 --- a/vendor/github.com/rexray/gocsi/go.sum +++ b/vendor/github.com/rexray/gocsi/go.sum @@ -46,8 +46,8 @@ github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= diff --git 
a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml new file mode 100644 index 0000000000..0d6e61793a --- /dev/null +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -0,0 +1,48 @@ +run: + deadline: 5m + +linters: + disable-all: true + enable: + #- bodyclose + - deadcode + #- depguard + #- dogsled + #- dupl + - errcheck + #- exhaustive + #- funlen + - gas + #- gochecknoinits + - goconst + #- gocritic + #- gocyclo + #- gofmt + - goimports + - golint + #- gomnd + #- goprintffuncname + #- gosec + #- gosimple + - govet + - ineffassign + - interfacer + #- lll + - maligned + - megacheck + #- misspell + #- nakedret + #- noctx + #- nolintlint + #- rowserrcheck + #- scopelint + #- staticcheck + - structcheck + #- stylecheck + #- typecheck + - unconvert + #- unparam + #- unused + - varcheck + #- whitespace + fast: false diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml index a9bd4e5478..e0a3b50043 100644 --- a/vendor/github.com/spf13/cobra/.travis.yml +++ b/vendor/github.com/spf13/cobra/.travis.yml @@ -1,7 +1,6 @@ language: go stages: - - diff - test - build @@ -10,20 +9,20 @@ go: - 1.13.x - tip +env: GO111MODULE=on + before_install: - go get -u github.com/kyoh86/richgo - go get -u github.com/mitchellh/gox + - curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin latest matrix: allow_failures: - go: tip include: - - stage: diff - go: 1.13.x - script: make fmt - stage: build go: 1.13.x script: make cobra_generator -script: +script: - make test diff --git a/vendor/github.com/spf13/cobra/CHANGELOG.md b/vendor/github.com/spf13/cobra/CHANGELOG.md index 742d6d6e24..8a23b4f851 100644 --- a/vendor/github.com/spf13/cobra/CHANGELOG.md +++ b/vendor/github.com/spf13/cobra/CHANGELOG.md @@ -1,11 +1,40 @@ # Cobra Changelog -## Pending -* Fix man page doc generation - no auto generated tag when `cmd.DisableAutoGenTag = true` @jpmcb +## v1.1.3 + +* **Fix:** release-branch.cobra1.1 only: Revert "Deprecate Go < 1.14" to maintain backward compatibility + +## v1.1.2 + +### Notable Changes + +* Bump license year to 2021 in golden files (#1309) @Bowbaq +* Enhance PowerShell completion with custom comp (#1208) @Luap99 +* Update gopkg.in/yaml.v2 to v2.4.0: The previous breaking change in yaml.v2 v2.3.0 has been reverted, see go-yaml/yaml#670 +* Documentation readability improvements (#1228 etc.) @zaataylor etc. +* Use golangci-lint: Repair warnings and errors resulting from linting (#1044) @umarcor + +## v1.1.1 + +* **Fix:** yaml.v2 2.3.0 contained a unintended breaking change. This release reverts to yaml.v2 v2.2.8 which has recent critical CVE fixes, but does not have the breaking changes. See https://github.com/spf13/cobra/pull/1259 for context. +* **Fix:** correct internal formatting for go-md2man v2 (which caused man page generation to be broken). See https://github.com/spf13/cobra/issues/1049 for context. + +## v1.1.0 + +### Notable Changes + +* Extend Go completions and revamp zsh comp (#1070) +* Fix man page doc generation - no auto generated tag when `cmd.DisableAutoGenTag = true` (#1104) @jpmcb +* Add completion for help command (#1136) +* Complete subcommands when TraverseChildren is set (#1171) +* Fix stderr printing functions (#894) +* fix: fish output redirection (#1247) ## v1.0.0 + Announcing v1.0.0 of Cobra. 
🎉
-**Notable Changes**
+
+### Notable Changes
 * Fish completion (including support for Go custom completion) @marckhouzam
 * API (urgent): Rename BashCompDirectives to ShellCompDirectives @marckhouzam
 * Remove/replace SetOutput on Command - deprecated @jpmcb
diff --git a/vendor/github.com/spf13/cobra/CONDUCT.md b/vendor/github.com/spf13/cobra/CONDUCT.md
new file mode 100644
index 0000000000..9d16f88fd1
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/CONDUCT.md
@@ -0,0 +1,37 @@
+## Cobra User Contract
+
+### Versioning
+Cobra will follow a steady release cadence. Non-breaking changes will be released as minor versions quarterly. Patch bug releases are at the discretion of the maintainers. Users can expect security patch fixes to be released within relatively short order of a CVE becoming known. For more information on security patch fixes, see the CVE section below. Releases will follow [Semantic Versioning](https://semver.org/). Users tracking the master branch should expect unpredictable breaking changes as the project continues to move forward. For stability, it is highly recommended to use a release.
+
+### Backward Compatibility
+We will maintain two major releases in a moving window. The N-1 release will only receive bug fixes and security updates and will be dropped once N+1 is released.
+
+### Deprecation
+Deprecation of Go versions or dependent packages will only occur in major releases. To reduce the chance of this taking users by surprise, any large deprecation will be preceded by an announcement in the [#cobra slack channel](https://gophers.slack.com/archives/CD3LP1199) and an issue on GitHub.
+
+### CVE
+Maintainers will make every effort to release security patches in the case of a medium- to high-severity CVE directly impacting the library. The speed with which these patches reach a release is up to the discretion of the maintainers. A low-severity CVE may be a lower priority than a high-severity one.
+
+### Communication
+Cobra maintainers will use GitHub issues and the [#cobra slack channel](https://gophers.slack.com/archives/CD3LP1199) as the primary means of communication with the community. This is to foster open communication with all users and contributors.
+
+### Breaking Changes
+Breaking changes are generally allowed in the master branch, as this is the branch used to develop the next release of Cobra.
+
+There may be times, however, when master is closed for breaking changes. This is likely to happen as we near the release of a new version.
+
+Breaking changes are not allowed in release branches, as these represent minor versions that have already been released. These versions have consumers who expect the APIs, behaviors, etc., to remain stable during the lifetime of the patch stream for the minor release.
+
+Examples of breaking changes include:
+- Removing or renaming an exported constant, variable, type, or function.
+- Updating the version of critical libraries such as `spf13/pflag`, `spf13/viper`, etc.
+  - Some version updates may be acceptable for picking up bug fixes, but maintainers must exercise caution when reviewing.
+
+There may, at times, need to be exceptions where breaking changes are allowed in release branches. These are at the discretion of the project's maintainers, and must be carefully considered before merging.
+
+### CI Testing
+Maintainers will ensure the Cobra test suite utilizes the currently supported versions of Go.
+
+### Disclaimer
+Changes to this document and the contents therein are at the discretion of the maintainers.
+None of the contents of this document are legally binding in any way to the maintainers or the users. diff --git a/vendor/github.com/spf13/cobra/Makefile b/vendor/github.com/spf13/cobra/Makefile index e9740d1e17..472c73bf16 100644 --- a/vendor/github.com/spf13/cobra/Makefile +++ b/vendor/github.com/spf13/cobra/Makefile @@ -1,21 +1,29 @@ BIN="./bin" SRC=$(shell find . -name "*.go") +ifeq (, $(shell which golangci-lint)) +$(warning "could not find golangci-lint in $(PATH), run: curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh") +endif + ifeq (, $(shell which richgo)) $(warning "could not find richgo in $(PATH), run: go get github.com/kyoh86/richgo") endif -.PHONY: fmt vet test cobra_generator install_deps clean +.PHONY: fmt lint test cobra_generator install_deps clean default: all -all: fmt vet test cobra_generator +all: fmt test cobra_generator fmt: $(info ******************** checking formatting ********************) @test -z $(shell gofmt -l $(SRC)) || (gofmt -d $(SRC); exit 1) -test: install_deps vet +lint: + $(info ******************** running lint tools ********************) + golangci-lint run -v + +test: install_deps lint $(info ******************** running tests ********************) richgo test -v ./... @@ -28,9 +36,5 @@ install_deps: $(info ******************** downloading dependencies ********************) go get -v ./... -vet: - $(info ******************** vetting ********************) - go vet ./... - clean: rm -rf $(BIN) diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md index 3cf1b25d8e..a1b13ddda6 100644 --- a/vendor/github.com/spf13/cobra/README.md +++ b/vendor/github.com/spf13/cobra/README.md @@ -6,6 +6,7 @@ Cobra is used in many Go projects such as [Kubernetes](http://kubernetes.io/), [Hugo](https://gohugo.io), and [Github CLI](https://github.com/cli/cli) to name a few. [This list](./projects_using_cobra.md) contains a more extensive list of projects using Cobra. +[![](https://img.shields.io/github/workflow/status/spf13/cobra/Test?longCache=tru&label=Test&logo=github%20actions&logoColor=fff)](https://github.com/spf13/cobra/actions?query=workflow%3ATest) [![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra) [![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra) @@ -62,8 +63,8 @@ Cobra is built on a structure of commands, arguments & flags. **Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions. -The best applications will read like sentences when used. Users will know how -to use the application because they will natively understand how to use it. +The best applications read like sentences when used, and as a result, users +intuitively know how to interact with them. The pattern to follow is `APPNAME VERB NOUN --ADJECTIVE.` @@ -234,11 +235,6 @@ func init() { rootCmd.AddCommand(initCmd) } -func er(msg interface{}) { - fmt.Println("Error:", msg) - os.Exit(1) -} - func initConfig() { if cfgFile != "" { // Use config file from the flag. @@ -246,9 +242,7 @@ func initConfig() { } else { // Find home directory. home, err := homedir.Dir() - if err != nil { - er(err) - } + cobra.CheckErr(err) // Search config in home directory with name ".cobra" (without extension). 
viper.AddConfigPath(home) @@ -268,7 +262,7 @@ func initConfig() { With the root command you need to have your main function execute it. Execute should be run on the root for clarity, though it can be called on any command. -In a Cobra app, typically the main.go file is very bare. It serves, one purpose, to initialize Cobra. +In a Cobra app, typically the main.go file is very bare. It serves one purpose: to initialize Cobra. ```go package main @@ -363,7 +357,7 @@ There are two different approaches to assign a flag. ### Persistent Flags -A flag can be 'persistent' meaning that this flag will be available to the +A flag can be 'persistent', meaning that this flag will be available to the command it's assigned to as well as every command under that command. For global flags, assign a flag as a persistent flag on the root. @@ -373,7 +367,7 @@ rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose out ### Local Flags -A flag can also be assigned locally which will only apply to that specific command. +A flag can also be assigned locally, which will only apply to that specific command. ```go localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") @@ -381,8 +375,8 @@ localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to rea ### Local Flag on Parent Commands -By default Cobra only parses local flags on the target command, any local flags on -parent commands are ignored. By enabling `Command.TraverseChildren` Cobra will +By default, Cobra only parses local flags on the target command, and any local flags on +parent commands are ignored. By enabling `Command.TraverseChildren`, Cobra will parse local flags on each command before executing the target command. ```go @@ -404,8 +398,8 @@ func init() { } ``` -In this example the persistent flag `author` is bound with `viper`. -**Note**, that the variable `author` will not be set to the value from config, +In this example, the persistent flag `author` is bound with `viper`. +**Note**: the variable `author` will not be set to the value from config, when the `--author` flag is not provided by user. More in [viper documentation](https://github.com/spf13/viper#working-with-flags). @@ -465,7 +459,7 @@ var cmd = &cobra.Command{ In the example below, we have defined three commands. Two are at the top level and one (cmdTimes) is a child of one of the top commands. In this case the root -is not executable meaning that a subcommand is required. This is accomplished +is not executable, meaning that a subcommand is required. This is accomplished by not providing a 'Run' for the 'rootCmd'. We have only defined one flag for a single command. @@ -759,7 +753,7 @@ Cobra can generate documentation based on subcommands, flags, etc. Read more abo ## Generating shell completions -Cobra can generate a shell-completion file for the following shells: Bash, Zsh, Fish, Powershell. If you add more information to your commands, these completions can be amazingly powerful and flexible. Read more about it in [Shell Completions](shell_completions.md). +Cobra can generate a shell-completion file for the following shells: bash, zsh, fish, PowerShell. If you add more information to your commands, these completions can be amazingly powerful and flexible. Read more about it in [Shell Completions](shell_completions.md). 
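The `cobra.CheckErr(err)` call that replaces the hand-rolled `er()` helper in the README snippet above is defined later in this patch (see the `cobra.go` hunk). With it, the typical `main.go` for this Cobra version shrinks to a sketch like the following (the command definition itself is illustrative):

```go
package main

import "github.com/spf13/cobra"

var rootCmd = &cobra.Command{
	Use:   "app",
	Short: "An example application",
	RunE: func(cmd *cobra.Command, args []string) error {
		return nil
	},
}

func main() {
	// CheckErr prints "Error: <msg>" to stderr and exits 1 on a non-nil value.
	cobra.CheckErr(rootCmd.Execute())
}
```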
# License diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go index 846636d75b..7106147937 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.go +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -19,9 +19,9 @@ const ( BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir" ) -func writePreamble(buf *bytes.Buffer, name string) { - buf.WriteString(fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name)) - buf.WriteString(fmt.Sprintf(` +func writePreamble(buf io.StringWriter, name string) { + WriteStringAndCheck(buf, fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name)) + WriteStringAndCheck(buf, fmt.Sprintf(` __%[1]s_debug() { if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then @@ -380,10 +380,10 @@ __%[1]s_handle_word() ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs)) } -func writePostscript(buf *bytes.Buffer, name string) { +func writePostscript(buf io.StringWriter, name string) { name = strings.Replace(name, ":", "__", -1) - buf.WriteString(fmt.Sprintf("__start_%s()\n", name)) - buf.WriteString(fmt.Sprintf(`{ + WriteStringAndCheck(buf, fmt.Sprintf("__start_%s()\n", name)) + WriteStringAndCheck(buf, fmt.Sprintf(`{ local cur prev words cword declare -A flaghash 2>/dev/null || : declare -A aliashash 2>/dev/null || : @@ -410,33 +410,33 @@ func writePostscript(buf *bytes.Buffer, name string) { } `, name)) - buf.WriteString(fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then + WriteStringAndCheck(buf, fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then complete -o default -F __start_%s %s else complete -o default -o nospace -F __start_%s %s fi `, name, name, name, name)) - buf.WriteString("# ex: ts=4 sw=4 et filetype=sh\n") + WriteStringAndCheck(buf, "# ex: ts=4 sw=4 et filetype=sh\n") } -func writeCommands(buf *bytes.Buffer, cmd *Command) { - buf.WriteString(" commands=()\n") +func writeCommands(buf io.StringWriter, cmd *Command) { + WriteStringAndCheck(buf, " commands=()\n") for _, c := range cmd.Commands() { if !c.IsAvailableCommand() && c != cmd.helpCommand { continue } - buf.WriteString(fmt.Sprintf(" commands+=(%q)\n", c.Name())) + WriteStringAndCheck(buf, fmt.Sprintf(" commands+=(%q)\n", c.Name())) writeCmdAliases(buf, c) } - buf.WriteString("\n") + WriteStringAndCheck(buf, "\n") } -func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]string, cmd *Command) { +func writeFlagHandler(buf io.StringWriter, name string, annotations map[string][]string, cmd *Command) { for key, value := range annotations { switch key { case BashCompFilenameExt: - buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) + WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) var ext string if len(value) > 0 { @@ -444,17 +444,18 @@ func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]s } else { ext = "_filedir" } - buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext)) + WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext)) case BashCompCustom: - buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) + WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) + if len(value) > 0 { handlers := strings.Join(value, "; ") - buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", handlers)) + WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", handlers)) } else { - buf.WriteString(" flags_completion+=(:)\n") + 
WriteStringAndCheck(buf, " flags_completion+=(:)\n") } case BashCompSubdirsInDir: - buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) + WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) var ext string if len(value) == 1 { @@ -462,46 +463,48 @@ func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]s } else { ext = "_filedir -d" } - buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext)) + WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext)) } } } -func writeShortFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) { +const cbn = "\")\n" + +func writeShortFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) { name := flag.Shorthand format := " " if len(flag.NoOptDefVal) == 0 { format += "two_word_" } - format += "flags+=(\"-%s\")\n" - buf.WriteString(fmt.Sprintf(format, name)) + format += "flags+=(\"-%s" + cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, name)) writeFlagHandler(buf, "-"+name, flag.Annotations, cmd) } -func writeFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) { +func writeFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) { name := flag.Name format := " flags+=(\"--%s" if len(flag.NoOptDefVal) == 0 { format += "=" } - format += "\")\n" - buf.WriteString(fmt.Sprintf(format, name)) + format += cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, name)) if len(flag.NoOptDefVal) == 0 { - format = " two_word_flags+=(\"--%s\")\n" - buf.WriteString(fmt.Sprintf(format, name)) + format = " two_word_flags+=(\"--%s" + cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, name)) } writeFlagHandler(buf, "--"+name, flag.Annotations, cmd) } -func writeLocalNonPersistentFlag(buf *bytes.Buffer, flag *pflag.Flag) { +func writeLocalNonPersistentFlag(buf io.StringWriter, flag *pflag.Flag) { name := flag.Name - format := " local_nonpersistent_flags+=(\"--%[1]s\")\n" + format := " local_nonpersistent_flags+=(\"--%[1]s" + cbn if len(flag.NoOptDefVal) == 0 { - format += " local_nonpersistent_flags+=(\"--%[1]s=\")\n" + format += " local_nonpersistent_flags+=(\"--%[1]s=" + cbn } - buf.WriteString(fmt.Sprintf(format, name)) + WriteStringAndCheck(buf, fmt.Sprintf(format, name)) if len(flag.Shorthand) > 0 { - buf.WriteString(fmt.Sprintf(" local_nonpersistent_flags+=(\"-%s\")\n", flag.Shorthand)) + WriteStringAndCheck(buf, fmt.Sprintf(" local_nonpersistent_flags+=(\"-%s\")\n", flag.Shorthand)) } } @@ -519,9 +522,9 @@ func prepareCustomAnnotationsForFlags(cmd *Command) { } } -func writeFlags(buf *bytes.Buffer, cmd *Command) { +func writeFlags(buf io.StringWriter, cmd *Command) { prepareCustomAnnotationsForFlags(cmd) - buf.WriteString(` flags=() + WriteStringAndCheck(buf, ` flags=() two_word_flags=() local_nonpersistent_flags=() flags_with_completion=() @@ -553,11 +556,11 @@ func writeFlags(buf *bytes.Buffer, cmd *Command) { } }) - buf.WriteString("\n") + WriteStringAndCheck(buf, "\n") } -func writeRequiredFlag(buf *bytes.Buffer, cmd *Command) { - buf.WriteString(" must_have_one_flag=()\n") +func writeRequiredFlag(buf io.StringWriter, cmd *Command) { + WriteStringAndCheck(buf, " must_have_one_flag=()\n") flags := cmd.NonInheritedFlags() flags.VisitAll(func(flag *pflag.Flag) { if nonCompletableFlag(flag) { @@ -570,55 +573,55 @@ func writeRequiredFlag(buf *bytes.Buffer, cmd *Command) { if flag.Value.Type() != "bool" { format += "=" } - format += "\")\n" - buf.WriteString(fmt.Sprintf(format, flag.Name)) + format += cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name)) if 
len(flag.Shorthand) > 0 { - buf.WriteString(fmt.Sprintf(" must_have_one_flag+=(\"-%s\")\n", flag.Shorthand)) + WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand)) } } } }) } -func writeRequiredNouns(buf *bytes.Buffer, cmd *Command) { - buf.WriteString(" must_have_one_noun=()\n") - sort.Sort(sort.StringSlice(cmd.ValidArgs)) +func writeRequiredNouns(buf io.StringWriter, cmd *Command) { + WriteStringAndCheck(buf, " must_have_one_noun=()\n") + sort.Strings(cmd.ValidArgs) for _, value := range cmd.ValidArgs { // Remove any description that may be included following a tab character. // Descriptions are not supported by bash completion. value = strings.Split(value, "\t")[0] - buf.WriteString(fmt.Sprintf(" must_have_one_noun+=(%q)\n", value)) + WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_noun+=(%q)\n", value)) } if cmd.ValidArgsFunction != nil { - buf.WriteString(" has_completion_function=1\n") + WriteStringAndCheck(buf, " has_completion_function=1\n") } } -func writeCmdAliases(buf *bytes.Buffer, cmd *Command) { +func writeCmdAliases(buf io.StringWriter, cmd *Command) { if len(cmd.Aliases) == 0 { return } - sort.Sort(sort.StringSlice(cmd.Aliases)) + sort.Strings(cmd.Aliases) - buf.WriteString(fmt.Sprint(` if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then`, "\n")) + WriteStringAndCheck(buf, fmt.Sprint(` if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then`, "\n")) for _, value := range cmd.Aliases { - buf.WriteString(fmt.Sprintf(" command_aliases+=(%q)\n", value)) - buf.WriteString(fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name())) + WriteStringAndCheck(buf, fmt.Sprintf(" command_aliases+=(%q)\n", value)) + WriteStringAndCheck(buf, fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name())) } - buf.WriteString(` fi`) - buf.WriteString("\n") + WriteStringAndCheck(buf, ` fi`) + WriteStringAndCheck(buf, "\n") } -func writeArgAliases(buf *bytes.Buffer, cmd *Command) { - buf.WriteString(" noun_aliases=()\n") - sort.Sort(sort.StringSlice(cmd.ArgAliases)) +func writeArgAliases(buf io.StringWriter, cmd *Command) { + WriteStringAndCheck(buf, " noun_aliases=()\n") + sort.Strings(cmd.ArgAliases) for _, value := range cmd.ArgAliases { - buf.WriteString(fmt.Sprintf(" noun_aliases+=(%q)\n", value)) + WriteStringAndCheck(buf, fmt.Sprintf(" noun_aliases+=(%q)\n", value)) } } -func gen(buf *bytes.Buffer, cmd *Command) { +func gen(buf io.StringWriter, cmd *Command) { for _, c := range cmd.Commands() { if !c.IsAvailableCommand() && c != cmd.helpCommand { continue @@ -630,22 +633,22 @@ func gen(buf *bytes.Buffer, cmd *Command) { commandName = strings.Replace(commandName, ":", "__", -1) if cmd.Root() == cmd { - buf.WriteString(fmt.Sprintf("_%s_root_command()\n{\n", commandName)) + WriteStringAndCheck(buf, fmt.Sprintf("_%s_root_command()\n{\n", commandName)) } else { - buf.WriteString(fmt.Sprintf("_%s()\n{\n", commandName)) + WriteStringAndCheck(buf, fmt.Sprintf("_%s()\n{\n", commandName)) } - buf.WriteString(fmt.Sprintf(" last_command=%q\n", commandName)) - buf.WriteString("\n") - buf.WriteString(" command_aliases=()\n") - buf.WriteString("\n") + WriteStringAndCheck(buf, fmt.Sprintf(" last_command=%q\n", commandName)) + WriteStringAndCheck(buf, "\n") + WriteStringAndCheck(buf, " command_aliases=()\n") + WriteStringAndCheck(buf, "\n") writeCommands(buf, cmd) writeFlags(buf, cmd) writeRequiredFlag(buf, cmd) writeRequiredNouns(buf, cmd) writeArgAliases(buf, cmd) - buf.WriteString("}\n\n") + WriteStringAndCheck(buf, "}\n\n") } // GenBashCompletion 
generates bash completion file and writes to the passed writer. diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md index a82d5bb8b4..130f99b923 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.md +++ b/vendor/github.com/spf13/cobra/bash_completions.md @@ -4,7 +4,7 @@ Please refer to [Shell Completions](shell_completions.md) for details. ## Bash legacy dynamic completions -For backwards-compatibility, Cobra still supports its legacy dynamic completion solution (described below). Unlike the `ValidArgsFunction` solution, the legacy solution will only work for Bash shell-completion and not for other shells. This legacy solution can be used along-side `ValidArgsFunction` and `RegisterFlagCompletionFunc()`, as long as both solutions are not used for the same command. This provides a path to gradually migrate from the legacy solution to the new solution. +For backward compatibility, Cobra still supports its legacy dynamic completion solution (described below). Unlike the `ValidArgsFunction` solution, the legacy solution will only work for Bash shell-completion and not for other shells. This legacy solution can be used along-side `ValidArgsFunction` and `RegisterFlagCompletionFunc()`, as long as both solutions are not used for the same command. This provides a path to gradually migrate from the legacy solution to the new solution. The legacy solution allows you to inject bash functions into the bash completion script. Those bash functions are responsible for providing the completion choices for your own completions. diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go index d01becc8fa..d6cbfd7198 100644 --- a/vendor/github.com/spf13/cobra/cobra.go +++ b/vendor/github.com/spf13/cobra/cobra.go @@ -19,6 +19,7 @@ package cobra import ( "fmt" "io" + "os" "reflect" "strconv" "strings" @@ -205,3 +206,17 @@ func stringInSlice(a string, list []string) bool { } return false } + +// CheckErr prints the msg with the prefix 'Error:' and exits with error code 1. If the msg is nil, it does nothing. +func CheckErr(msg interface{}) { + if msg != nil { + fmt.Fprintln(os.Stderr, "Error:", msg) + os.Exit(1) + } +} + +// WriteStringAndCheck writes a string into a buffer, and checks if the error is not nil. +func WriteStringAndCheck(b io.StringWriter, s string) { + _, err := b.WriteString(s) + CheckErr(err) +} diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index 77b399e02e..d6732ad115 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -84,9 +84,6 @@ type Command struct { // Deprecated defines, if this command is deprecated and should print this string when used. Deprecated string - // Hidden defines, if this command is hidden and should NOT show up in the list of available commands. - Hidden bool - // Annotations are key/value pairs that can be used by applications to identify or // group commands. Annotations map[string]string @@ -126,55 +123,6 @@ type Command struct { // PersistentPostRunE: PersistentPostRun but returns an error. PersistentPostRunE func(cmd *Command, args []string) error - // SilenceErrors is an option to quiet errors down stream. - SilenceErrors bool - - // SilenceUsage is an option to silence usage when an error occurs. - SilenceUsage bool - - // DisableFlagParsing disables the flag parsing. - // If this is true all flags will be passed to the command as arguments. 
- DisableFlagParsing bool - - // DisableAutoGenTag defines, if gen tag ("Auto generated by spf13/cobra...") - // will be printed by generating docs for this command. - DisableAutoGenTag bool - - // DisableFlagsInUseLine will disable the addition of [flags] to the usage - // line of a command when printing help or generating docs - DisableFlagsInUseLine bool - - // DisableSuggestions disables the suggestions based on Levenshtein distance - // that go along with 'unknown command' messages. - DisableSuggestions bool - // SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions. - // Must be > 0. - SuggestionsMinimumDistance int - - // TraverseChildren parses flags on all parents before executing child command. - TraverseChildren bool - - // FParseErrWhitelist flag parse errors to be ignored - FParseErrWhitelist FParseErrWhitelist - - ctx context.Context - - // commands is the list of commands supported by this program. - commands []*Command - // parent is a parent command for this command. - parent *Command - // Max lengths of commands' string lengths for use in padding. - commandsMaxUseLen int - commandsMaxCommandPathLen int - commandsMaxNameLen int - // commandsAreSorted defines, if command slice are sorted or not. - commandsAreSorted bool - // commandCalledAs is the name or alias value used to call this command. - commandCalledAs struct { - name string - called bool - } - // args is actual args parsed from flags. args []string // flagErrorBuf contains all error messages from pflag. @@ -216,6 +164,60 @@ type Command struct { outWriter io.Writer // errWriter is a writer defined by the user that replaces stderr errWriter io.Writer + + //FParseErrWhitelist flag parse errors to be ignored + FParseErrWhitelist FParseErrWhitelist + + // commandsAreSorted defines, if command slice are sorted or not. + commandsAreSorted bool + // commandCalledAs is the name or alias value used to call this command. + commandCalledAs struct { + name string + called bool + } + + ctx context.Context + + // commands is the list of commands supported by this program. + commands []*Command + // parent is a parent command for this command. + parent *Command + // Max lengths of commands' string lengths for use in padding. + commandsMaxUseLen int + commandsMaxCommandPathLen int + commandsMaxNameLen int + + // TraverseChildren parses flags on all parents before executing child command. + TraverseChildren bool + + // Hidden defines, if this command is hidden and should NOT show up in the list of available commands. + Hidden bool + + // SilenceErrors is an option to quiet errors down stream. + SilenceErrors bool + + // SilenceUsage is an option to silence usage when an error occurs. + SilenceUsage bool + + // DisableFlagParsing disables the flag parsing. + // If this is true all flags will be passed to the command as arguments. + DisableFlagParsing bool + + // DisableAutoGenTag defines, if gen tag ("Auto generated by spf13/cobra...") + // will be printed by generating docs for this command. + DisableAutoGenTag bool + + // DisableFlagsInUseLine will disable the addition of [flags] to the usage + // line of a command when printing help or generating docs + DisableFlagsInUseLine bool + + // DisableSuggestions disables the suggestions based on Levenshtein distance + // that go along with 'unknown command' messages. + DisableSuggestions bool + + // SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions. + // Must be > 0. 
+ SuggestionsMinimumDistance int } // Context returns underlying command context. If command wasn't @@ -418,7 +420,7 @@ func (c *Command) UsageString() string { c.outWriter = bb c.errWriter = bb - c.Usage() + CheckErr(c.Usage()) // Setting things back to normal c.outWriter = tmpOutput @@ -964,13 +966,13 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { return cmd, nil } - // If root command has SilentErrors flagged, + // If root command has SilenceErrors flagged, // all subcommands should respect it if !cmd.SilenceErrors && !c.SilenceErrors { c.PrintErrln("Error:", err.Error()) } - // If root command has SilentUsage flagged, + // If root command has SilenceUsage flagged, // all subcommands should respect it if !cmd.SilenceUsage && !c.SilenceUsage { c.Println(cmd.UsageString()) @@ -1087,10 +1089,10 @@ Simply type ` + c.Name() + ` help [path to command] for full details.`, cmd, _, e := c.Root().Find(args) if cmd == nil || e != nil { c.Printf("Unknown help topic %#q\n", args) - c.Root().Usage() + CheckErr(c.Root().Usage()) } else { cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown - cmd.Help() + CheckErr(cmd.Help()) } }, } diff --git a/vendor/github.com/spf13/cobra/custom_completions.go b/vendor/github.com/spf13/cobra/custom_completions.go index f9e88e081f..fa060c147b 100644 --- a/vendor/github.com/spf13/cobra/custom_completions.go +++ b/vendor/github.com/spf13/cobra/custom_completions.go @@ -527,13 +527,13 @@ func CompDebug(msg string, printToStdErr bool) { os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) if err == nil { defer f.Close() - f.WriteString(msg) + WriteStringAndCheck(f, msg) } } if printToStdErr { // Must print to stderr for this not to be read by the completion script. - fmt.Fprintf(os.Stderr, msg) + fmt.Fprint(os.Stderr, msg) } } diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go index eaae9bca86..3e112347d7 100644 --- a/vendor/github.com/spf13/cobra/fish_completions.go +++ b/vendor/github.com/spf13/cobra/fish_completions.go @@ -8,7 +8,7 @@ import ( "strings" ) -func genFishComp(buf *bytes.Buffer, name string, includeDesc bool) { +func genFishComp(buf io.StringWriter, name string, includeDesc bool) { // Variables should not contain a '-' or ':' character nameForVar := name nameForVar = strings.Replace(nameForVar, "-", "_", -1) @@ -18,8 +18,8 @@ func genFishComp(buf *bytes.Buffer, name string, includeDesc bool) { if !includeDesc { compCmd = ShellCompNoDescRequestCmd } - buf.WriteString(fmt.Sprintf("# fish completion for %-36s -*- shell-script -*-\n", name)) - buf.WriteString(fmt.Sprintf(` + WriteStringAndCheck(buf, fmt.Sprintf("# fish completion for %-36s -*- shell-script -*-\n", name)) + WriteStringAndCheck(buf, fmt.Sprintf(` function __%[1]s_debug set file "$BASH_COMP_DEBUG_FILE" if test -n "$file" diff --git a/vendor/github.com/spf13/cobra/go.mod b/vendor/github.com/spf13/cobra/go.mod index 57e3244d5e..ff56144056 100644 --- a/vendor/github.com/spf13/cobra/go.mod +++ b/vendor/github.com/spf13/cobra/go.mod @@ -8,5 +8,5 @@ require ( github.com/mitchellh/go-homedir v1.1.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.7.0 - gopkg.in/yaml.v2 v2.2.8 + gopkg.in/yaml.v2 v2.4.0 ) diff --git a/vendor/github.com/spf13/cobra/go.sum b/vendor/github.com/spf13/cobra/go.sum index 0aae738631..9328ee3ee7 100644 --- a/vendor/github.com/spf13/cobra/go.sum +++ b/vendor/github.com/spf13/cobra/go.sum @@ -304,8 +304,8 @@ gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= 
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go index 756c61b9dc..c55be71cd1 100644 --- a/vendor/github.com/spf13/cobra/powershell_completions.go +++ b/vendor/github.com/spf13/cobra/powershell_completions.go @@ -1,6 +1,3 @@ -// PowerShell completions are based on the amazing work from clap: -// https://github.com/clap-rs/clap/blob/3294d18efe5f264d12c9035f404c7d189d4824e1/src/completions/powershell.rs -// // The generated scripts require PowerShell v5.0+ (which comes Windows 10, but // can be downloaded separately for windows 7 or 8.1). @@ -11,90 +8,278 @@ import ( "fmt" "io" "os" - "strings" - - "github.com/spf13/pflag" ) -var powerShellCompletionTemplate = `using namespace System.Management.Automation -using namespace System.Management.Automation.Language -Register-ArgumentCompleter -Native -CommandName '%s' -ScriptBlock { - param($wordToComplete, $commandAst, $cursorPosition) - $commandElements = $commandAst.CommandElements - $command = @( - '%s' - for ($i = 1; $i -lt $commandElements.Count; $i++) { - $element = $commandElements[$i] - if ($element -isnot [StringConstantExpressionAst] -or - $element.StringConstantType -ne [StringConstantType]::BareWord -or - $element.Value.StartsWith('-')) { - break - } - $element.Value - } - ) -join ';' - $completions = @(switch ($command) {%s - }) - $completions.Where{ $_.CompletionText -like "$wordToComplete*" } | - Sort-Object -Property ListItemText -}` - -func generatePowerShellSubcommandCases(out io.Writer, cmd *Command, previousCommandName string) { - var cmdName string - if previousCommandName == "" { - cmdName = cmd.Name() - } else { - cmdName = fmt.Sprintf("%s;%s", previousCommandName, cmd.Name()) - } - - fmt.Fprintf(out, "\n '%s' {", cmdName) - - cmd.Flags().VisitAll(func(flag *pflag.Flag) { - if nonCompletableFlag(flag) { - return - } - usage := escapeStringForPowerShell(flag.Usage) - if len(flag.Shorthand) > 0 { - fmt.Fprintf(out, "\n [CompletionResult]::new('-%s', '%s', [CompletionResultType]::ParameterName, '%s')", flag.Shorthand, flag.Shorthand, usage) - } - fmt.Fprintf(out, "\n [CompletionResult]::new('--%s', '%s', [CompletionResultType]::ParameterName, '%s')", flag.Name, flag.Name, usage) - }) - - for _, subCmd := range cmd.Commands() { - usage := escapeStringForPowerShell(subCmd.Short) - fmt.Fprintf(out, "\n [CompletionResult]::new('%s', '%s', [CompletionResultType]::ParameterValue, '%s')", subCmd.Name(), subCmd.Name(), usage) +func genPowerShellComp(buf io.StringWriter, name string, includeDesc bool) { + compCmd := ShellCompRequestCmd + if !includeDesc { + compCmd = 
ShellCompNoDescRequestCmd } + WriteStringAndCheck(buf, fmt.Sprintf(`# powershell completion for %-36[1]s -*- shell-script -*- - fmt.Fprint(out, "\n break\n }") - - for _, subCmd := range cmd.Commands() { - generatePowerShellSubcommandCases(out, subCmd, cmdName) - } +function __%[1]s_debug { + if ($env:BASH_COMP_DEBUG_FILE) { + "$args" | Out-File -Append -FilePath "$env:BASH_COMP_DEBUG_FILE" + } } -func escapeStringForPowerShell(s string) string { - return strings.Replace(s, "'", "''", -1) +filter __%[1]s_escapeStringWithSpecialChars { +`+" $_ -replace '\\s|#|@|\\$|;|,|''|\\{|\\}|\\(|\\)|\"|`|\\||<|>|&','`$&'"+` } -// GenPowerShellCompletion generates PowerShell completion file and writes to the passed writer. -func (c *Command) GenPowerShellCompletion(w io.Writer) error { - buf := new(bytes.Buffer) +Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock { + param( + $WordToComplete, + $CommandAst, + $CursorPosition + ) + + # Get the current command line and convert into a string + $Command = $CommandAst.CommandElements + $Command = "$Command" + + __%[1]s_debug "" + __%[1]s_debug "========= starting completion logic ==========" + __%[1]s_debug "WordToComplete: $WordToComplete Command: $Command CursorPosition: $CursorPosition" + + # The user could have moved the cursor backwards on the command-line. + # We need to trigger completion from the $CursorPosition location, so we need + # to truncate the command-line ($Command) up to the $CursorPosition location. + # Make sure the $Command is longer then the $CursorPosition before we truncate. + # This happens because the $Command does not include the last space. + if ($Command.Length -gt $CursorPosition) { + $Command=$Command.Substring(0,$CursorPosition) + } + __%[1]s_debug "Truncated command: $Command" + + $ShellCompDirectiveError=%[3]d + $ShellCompDirectiveNoSpace=%[4]d + $ShellCompDirectiveNoFileComp=%[5]d + $ShellCompDirectiveFilterFileExt=%[6]d + $ShellCompDirectiveFilterDirs=%[7]d + + # Prepare the command to request completions for the program. + # Split the command at the first space to separate the program and arguments. + $Program,$Arguments = $Command.Split(" ",2) + $RequestComp="$Program %[2]s $Arguments" + __%[1]s_debug "RequestComp: $RequestComp" + + # we cannot use $WordToComplete because it + # has the wrong values if the cursor was moved + # so use the last argument + if ($WordToComplete -ne "" ) { + $WordToComplete = $Arguments.Split(" ")[-1] + } + __%[1]s_debug "New WordToComplete: $WordToComplete" + + + # Check for flag with equal sign + $IsEqualFlag = ($WordToComplete -Like "--*=*" ) + if ( $IsEqualFlag ) { + __%[1]s_debug "Completing equal sign flag" + # Remove the flag part + $Flag,$WordToComplete = $WordToComplete.Split("=",2) + } + + if ( $WordToComplete -eq "" -And ( -Not $IsEqualFlag )) { + # If the last parameter is complete (there is a space following it) + # We add an extra empty parameter so we can indicate this to the go method. 
+ __%[1]s_debug "Adding extra empty parameter" +`+" # We need to use `\"`\" to pass an empty argument a \"\" or '' does not work!!!"+` +`+" $RequestComp=\"$RequestComp\" + ' `\"`\"' "+` + } + + __%[1]s_debug "Calling $RequestComp" + #call the command store the output in $out and redirect stderr and stdout to null + # $Out is an array contains each line per element + Invoke-Expression -OutVariable out "$RequestComp" 2>&1 | Out-Null + + + # get directive from last line + [int]$Directive = $Out[-1].TrimStart(':') + if ($Directive -eq "") { + # There is no directive specified + $Directive = 0 + } + __%[1]s_debug "The completion directive is: $Directive" + + # remove directive (last element) from out + $Out = $Out | Where-Object { $_ -ne $Out[-1] } + __%[1]s_debug "The completions are: $Out" + + if (($Directive -band $ShellCompDirectiveError) -ne 0 ) { + # Error code. No completion. + __%[1]s_debug "Received error from custom completion go code" + return + } + + $Longest = 0 + $Values = $Out | ForEach-Object { + #Split the output in name and description +`+" $Name, $Description = $_.Split(\"`t\",2)"+` + __%[1]s_debug "Name: $Name Description: $Description" + + # Look for the longest completion so that we can format things nicely + if ($Longest -lt $Name.Length) { + $Longest = $Name.Length + } + + # Set the description to a one space string if there is none set. + # This is needed because the CompletionResult does not accept an empty string as argument + if (-Not $Description) { + $Description = " " + } + @{Name="$Name";Description="$Description"} + } + + + $Space = " " + if (($Directive -band $ShellCompDirectiveNoSpace) -ne 0 ) { + # remove the space here + __%[1]s_debug "ShellCompDirectiveNoSpace is called" + $Space = "" + } + + if (($Directive -band $ShellCompDirectiveNoFileComp) -ne 0 ) { + __%[1]s_debug "ShellCompDirectiveNoFileComp is called" + + if ($Values.Length -eq 0) { + # Just print an empty string here so the + # shell does not start to complete paths. + # We cannot use CompletionResult here because + # it does not accept an empty string as argument. 
+ "" + return + } + } + + if ((($Directive -band $ShellCompDirectiveFilterFileExt) -ne 0 ) -or + (($Directive -band $ShellCompDirectiveFilterDirs) -ne 0 )) { + __%[1]s_debug "ShellCompDirectiveFilterFileExt ShellCompDirectiveFilterDirs are not supported" + + # return here to prevent the completion of the extensions + return + } - var subCommandCases bytes.Buffer - generatePowerShellSubcommandCases(&subCommandCases, c, "") - fmt.Fprintf(buf, powerShellCompletionTemplate, c.Name(), c.Name(), subCommandCases.String()) + $Values = $Values | Where-Object { + # filter the result + $_.Name -like "$WordToComplete*" + # Join the flag back if we have a equal sign flag + if ( $IsEqualFlag ) { + __%[1]s_debug "Join the equal sign flag back to the completion value" + $_.Name = $Flag + "=" + $_.Name + } + } + + # Get the current mode + $Mode = (Get-PSReadLineKeyHandler | Where-Object {$_.Key -eq "Tab" }).Function + __%[1]s_debug "Mode: $Mode" + + $Values | ForEach-Object { + + # store temporay because switch will overwrite $_ + $comp = $_ + + # PowerShell supports three different completion modes + # - TabCompleteNext (default windows style - on each key press the next option is displayed) + # - Complete (works like bash) + # - MenuComplete (works like zsh) + # You set the mode with Set-PSReadLineKeyHandler -Key Tab -Function + + # CompletionResult Arguments: + # 1) CompletionText text to be used as the auto completion result + # 2) ListItemText text to be displayed in the suggestion list + # 3) ResultType type of completion result + # 4) ToolTip text for the tooltip with details about the object + + switch ($Mode) { + + # bash like + "Complete" { + + if ($Values.Length -eq 1) { + __%[1]s_debug "Only one completion left" + + # insert space after value + [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + + } else { + # Add the proper number of spaces to align the descriptions + while($comp.Name.Length -lt $Longest) { + $comp.Name = $comp.Name + " " + } + + # Check for empty description and only add parentheses if needed + if ($($comp.Description) -eq " " ) { + $Description = "" + } else { + $Description = " ($($comp.Description))" + } + + [System.Management.Automation.CompletionResult]::new("$($comp.Name)$Description", "$($comp.Name)$Description", 'ParameterValue', "$($comp.Description)") + } + } + + # zsh like + "MenuComplete" { + # insert space after value + # MenuComplete will automatically show the ToolTip of + # the highlighted value at the bottom of the suggestions. + [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + } + + # TabCompleteNext and in case we get something unknown + Default { + # Like MenuComplete but we don't want to add a space here because + # the user need to press space anyway to get the completion. 
+ # Description will not be shown because thats not possible with TabCompleteNext + [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars), "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + } + } + + } +} +`, name, compCmd, + ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs)) +} + +func (c *Command) genPowerShellCompletion(w io.Writer, includeDesc bool) error { + buf := new(bytes.Buffer) + genPowerShellComp(buf, c.Name(), includeDesc) _, err := buf.WriteTo(w) return err } -// GenPowerShellCompletionFile generates PowerShell completion file. -func (c *Command) GenPowerShellCompletionFile(filename string) error { +func (c *Command) genPowerShellCompletionFile(filename string, includeDesc bool) error { outFile, err := os.Create(filename) if err != nil { return err } defer outFile.Close() - return c.GenPowerShellCompletion(outFile) + return c.genPowerShellCompletion(outFile, includeDesc) +} + +// GenPowerShellCompletionFile generates powershell completion file without descriptions. +func (c *Command) GenPowerShellCompletionFile(filename string) error { + return c.genPowerShellCompletionFile(filename, false) +} + +// GenPowerShellCompletion generates powershell completion file without descriptions +// and writes it to the passed writer. +func (c *Command) GenPowerShellCompletion(w io.Writer) error { + return c.genPowerShellCompletion(w, false) +} + +// GenPowerShellCompletionFileWithDesc generates powershell completion file with descriptions. +func (c *Command) GenPowerShellCompletionFileWithDesc(filename string) error { + return c.genPowerShellCompletionFile(filename, true) +} + +// GenPowerShellCompletionWithDesc generates powershell completion file with descriptions +// and writes it to the passed writer. +func (c *Command) GenPowerShellCompletionWithDesc(w io.Writer) error { + return c.genPowerShellCompletion(w, true) } diff --git a/vendor/github.com/spf13/cobra/powershell_completions.md b/vendor/github.com/spf13/cobra/powershell_completions.md index 55f154a68f..c449f1e5c0 100644 --- a/vendor/github.com/spf13/cobra/powershell_completions.md +++ b/vendor/github.com/spf13/cobra/powershell_completions.md @@ -1,16 +1,3 @@ # Generating PowerShell Completions For Your Own cobra.Command -Cobra can generate PowerShell completion scripts. Users need PowerShell version 5.0 or above, which comes with Windows 10 and can be downloaded separately for Windows 7 or 8.1. They can then write the completions to a file and source this file from their PowerShell profile, which is referenced by the `$Profile` environment variable. See `Get-Help about_Profiles` for more info about PowerShell profiles. - -*Note*: PowerShell completions have not (yet?) been aligned to Cobra's generic shell completion support. This implies the PowerShell completions are not as rich as for other shells (see [What's not yet supported](#whats-not-yet-supported)), and may behave slightly differently. They are still very useful for PowerShell users. - -# What's supported - -- Completion for subcommands using their `.Short` description -- Completion for non-hidden flags using their `.Name` and `.Shorthand` - -# What's not yet supported - -- Command aliases -- Required, filename or custom flags (they will work like normal flags) -- Custom completion scripts +Please refer to [Shell Completions](shell_completions.md#powershell-completions) for details. 
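For reference, here is a minimal sketch (not part of this patch) of how the description-aware PowerShell generators introduced above might be wired into a `completion` subcommand. The program name `myprog` and the command layout are placeholder assumptions; the generator functions themselves (`GenPowerShellCompletionWithDesc` and friends) and `cobra.CheckErr` are the ones added by this change:

```go
package main

import (
	"os"

	"github.com/spf13/cobra"
)

func main() {
	// "myprog" is a hypothetical root command used only for illustration.
	rootCmd := &cobra.Command{Use: "myprog"}

	completionCmd := &cobra.Command{
		Use:       "completion [powershell]",
		Short:     "Generate a PowerShell completion script",
		ValidArgs: []string{"powershell"},
		Args:      cobra.ExactValidArgs(1),
		Run: func(cmd *cobra.Command, args []string) {
			// The WithDesc variant annotates each suggestion with the
			// command or flag usage string; the plain variant does not.
			cobra.CheckErr(rootCmd.GenPowerShellCompletionWithDesc(os.Stdout))
		},
	}

	rootCmd.AddCommand(completionCmd)
	cobra.CheckErr(rootCmd.Execute())
}
```

As with the other shells, the script can instead be written to a file with `GenPowerShellCompletionFileWithDesc()` and sourced from the user's PowerShell profile.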
diff --git a/vendor/github.com/spf13/cobra/projects_using_cobra.md b/vendor/github.com/spf13/cobra/projects_using_cobra.md index 31c272036a..d98a71e36f 100644 --- a/vendor/github.com/spf13/cobra/projects_using_cobra.md +++ b/vendor/github.com/spf13/cobra/projects_using_cobra.md @@ -25,6 +25,8 @@ - [Moby (former Docker)](https://github.com/moby/moby) - [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) - [OpenShift](https://www.openshift.com/) +- [Ory Hydra](https://github.com/ory/hydra) +- [Ory Kratos](https://github.com/ory/kratos) - [Pouch](https://github.com/alibaba/pouch) - [ProjectAtomic (enterprise)](http://www.projectatomic.io/) - [Prototool](https://github.com/uber/prototool) @@ -32,4 +34,5 @@ - [Rclone](https://rclone.org/) - [Skaffold](https://skaffold.dev/) - [Tendermint](https://github.com/tendermint/tendermint) +- [Twitch CLI](https://github.com/twitchdev/twitch-cli) - [Werf](https://werf.io/) diff --git a/vendor/github.com/spf13/cobra/shell_completions.md b/vendor/github.com/spf13/cobra/shell_completions.md index d8416ab1dc..cd533ac3d4 100644 --- a/vendor/github.com/spf13/cobra/shell_completions.md +++ b/vendor/github.com/spf13/cobra/shell_completions.md @@ -4,10 +4,10 @@ Cobra can generate shell completions for multiple shells. The currently supported shells are: - Bash - Zsh -- Fish +- fish - PowerShell -If you are using the generator you can create a completion command by running +If you are using the generator, you can create a completion command by running ```bash cobra add completion @@ -17,38 +17,46 @@ and then modifying the generated `cmd/completion.go` file to look something like ```go var completionCmd = &cobra.Command{ - Use: "completion [bash|zsh|fish|powershell]", - Short: "Generate completion script", + Use: "completion [bash|zsh|fish|powershell]", + Short: "Generate completion script", Long: `To load completions: Bash: -$ source <(yourprogram completion bash) + $ source <(yourprogram completion bash) -# To load completions for each session, execute once: -Linux: + # To load completions for each session, execute once: + # Linux: $ yourprogram completion bash > /etc/bash_completion.d/yourprogram -MacOS: + # macOS: $ yourprogram completion bash > /usr/local/etc/bash_completion.d/yourprogram Zsh: -# If shell completion is not already enabled in your environment you will need -# to enable it. You can execute the following once: + # If shell completion is not already enabled in your environment, + # you will need to enable it. You can execute the following once: -$ echo "autoload -U compinit; compinit" >> ~/.zshrc + $ echo "autoload -U compinit; compinit" >> ~/.zshrc -# To load completions for each session, execute once: -$ yourprogram completion zsh > "${fpath[1]}/_yourprogram" + # To load completions for each session, execute once: + $ yourprogram completion zsh > "${fpath[1]}/_yourprogram" -# You will need to start a new shell for this setup to take effect. + # You will need to start a new shell for this setup to take effect. 
-Fish: +fish: -$ yourprogram completion fish | source + $ yourprogram completion fish | source -# To load completions for each session, execute once: -$ yourprogram completion fish > ~/.config/fish/completions/yourprogram.fish + # To load completions for each session, execute once: + $ yourprogram completion fish > ~/.config/fish/completions/yourprogram.fish + +PowerShell: + + PS> yourprogram completion powershell | Out-String | Invoke-Expression + + # To load completions for every new session, run: + PS> yourprogram completion powershell > yourprogram.ps1 + # and source this file from your PowerShell profile. `, DisableFlagsInUseLine: true, ValidArgs: []string{"bash", "zsh", "fish", "powershell"}, @@ -68,7 +76,7 @@ $ yourprogram completion fish > ~/.config/fish/completions/yourprogram.fish } ``` -**Note:** The cobra generator may include messages printed to stdout for example if the config file is loaded, this will break the auto complete script so must be removed. +**Note:** The cobra generator may include messages printed to stdout, for example, if the config file is loaded; this will break the auto-completion script so must be removed. # Customizing completions @@ -91,8 +99,7 @@ cmd := &cobra.Command{ Long: get_long, Example: get_example, Run: func(cmd *cobra.Command, args []string) { - err := RunGet(f, out, cmd, args) - util.CheckErr(err) + cobra.CheckErr(RunGet(f, out, cmd, args)) }, ValidArgs: validArgs, } @@ -124,7 +131,7 @@ the completion algorithm if entered manually, e.g. in: ```bash $ kubectl get rc [tab][tab] -backend frontend database +backend frontend database ``` Note that without declaring `rc` as an alias, the completion algorithm would not know to show the list of @@ -246,7 +253,7 @@ and you'll get something like ```bash $ kubectl exec [tab][tab] --c --container= -p --pod= +-c --container= -p --pod= ``` ### Specify dynamic flag completion @@ -316,7 +323,7 @@ cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, ``` ### Descriptions for completions -Both `zsh` and `fish` allow for descriptions to annotate completion choices. For commands and flags, Cobra will provide the descriptions automatically, based on usage information. For example, using zsh: +`zsh`, `fish` and `powershell` allow for descriptions to annotate completion choices. For commands and flags, Cobra will provide the descriptions automatically, based on usage information. For example, using zsh: ``` $ helm s[tab] search -- search for a keyword in charts @@ -361,12 +368,12 @@ completion firstcommand secondcommand ``` ### Bash legacy dynamic completions -For backwards-compatibility, Cobra still supports its bash legacy dynamic completion solution. +For backward compatibility, Cobra still supports its bash legacy dynamic completion solution. Please refer to [Bash Completions](bash_completions.md) for details. ## Zsh completions -Cobra supports native Zsh completion generated from the root `cobra.Command`. +Cobra supports native zsh completion generated from the root `cobra.Command`. The generated completion script should be put somewhere in your `$fpath` and be named `_`. You will need to start a new shell for the completions to become available. @@ -385,23 +392,23 @@ status -- displays the status of the named release $ helm s[tab] search show status ``` -*Note*: Because of backwards-compatibility requirements, we were forced to have a different API to disable completion descriptions between `Zsh` and `Fish`. 
+*Note*: Because of backward-compatibility requirements, we were forced to have a different API to disable completion descriptions between `zsh` and `fish`. ### Limitations * Custom completions implemented in Bash scripting (legacy) are not supported and will be ignored for `zsh` (including the use of the `BashCompCustom` flag annotation). - * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`). + * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`). * The function `MarkFlagCustom()` is not supported and will be ignored for `zsh`. * You should instead use `RegisterFlagCompletionFunc()`. ### Zsh completions standardization -Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backwards-compatible, some small changes in behavior were introduced. +Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backward-compatible, some small changes in behavior were introduced. Please refer to [Zsh Completions](zsh_completions.md) for details. -## Fish completions +## fish completions -Cobra supports native Fish completions generated from the root `cobra.Command`. You can use the `command.GenFishCompletion()` or `command.GenFishCompletionFile()` functions. You must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users. +Cobra supports native fish completions generated from the root `cobra.Command`. You can use the `command.GenFishCompletion()` or `command.GenFishCompletionFile()` functions. You must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users. ``` # With descriptions $ helm s[tab] @@ -411,12 +418,12 @@ search (search for a keyword in charts) show (show information of a chart) s $ helm s[tab] search show status ``` -*Note*: Because of backwards-compatibility requirements, we were forced to have a different API to disable completion descriptions between `Zsh` and `Fish`. +*Note*: Because of backward-compatibility requirements, we were forced to have a different API to disable completion descriptions between `zsh` and `fish`. ### Limitations -* Custom completions implemented in Bash scripting (legacy) are not supported and will be ignored for `fish` (including the use of the `BashCompCustom` flag annotation). - * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`). +* Custom completions implemented in bash scripting (legacy) are not supported and will be ignored for `fish` (including the use of the `BashCompCustom` flag annotation). + * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`). * The function `MarkFlagCustom()` is not supported and will be ignored for `fish`. * You should instead use `RegisterFlagCompletionFunc()`. 
* The following flag completion annotations are not supported and will be ignored for `fish`: @@ -431,4 +438,46 @@ search show status ## PowerShell completions -Please refer to [PowerShell Completions](powershell_completions.md) for details. +Cobra supports native PowerShell completions generated from the root `cobra.Command`. You can use the `command.GenPowerShellCompletion()` or `command.GenPowerShellCompletionFile()` functions. To include descriptions use `command.GenPowerShellCompletionWithDesc()` and `command.GenPowerShellCompletionFileWithDesc()`. Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users. + +The script is designed to support all three PowerShell completion modes: + +* TabCompleteNext (default windows style - on each key press the next option is displayed) +* Complete (works like bash) +* MenuComplete (works like zsh) + +You set the mode with `Set-PSReadLineKeyHandler -Key Tab -Function `. Descriptions are only displayed when using the `Complete` or `MenuComplete` mode. + +Users need PowerShell version 5.0 or above, which comes with Windows 10 and can be downloaded separately for Windows 7 or 8.1. They can then write the completions to a file and source this file from their PowerShell profile, which is referenced by the `$Profile` environment variable. See `Get-Help about_Profiles` for more info about PowerShell profiles. + +``` +# With descriptions and Mode 'Complete' +$ helm s[tab] +search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release) + +# With descriptions and Mode 'MenuComplete' The description of the current selected value will be displayed below the suggestions. +$ helm s[tab] +search show status + +search for a keyword in charts + +# Without descriptions +$ helm s[tab] +search show status +``` + +### Limitations + +* Custom completions implemented in bash scripting (legacy) are not supported and will be ignored for `powershell` (including the use of the `BashCompCustom` flag annotation). + * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`). +* The function `MarkFlagCustom()` is not supported and will be ignored for `powershell`. + * You should instead use `RegisterFlagCompletionFunc()`. 
+* The following flag completion annotations are not supported and will be ignored for `powershell`: + * `BashCompFilenameExt` (filtering by file extension) + * `BashCompSubdirsInDir` (filtering by directory) +* The functions corresponding to the above annotations are consequently not supported and will be ignored for `powershell`: + * `MarkFlagFilename()` and `MarkPersistentFlagFilename()` (filtering by file extension) + * `MarkFlagDirname()` and `MarkPersistentFlagDirname()` (filtering by directory) +* Similarly, the following completion directives are not supported and will be ignored for `powershell`: + * `ShellCompDirectiveFilterFileExt` (filtering by file extension) + * `ShellCompDirectiveFilterDirs` (filtering by directory) diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go index 92a70394a9..2e840285f3 100644 --- a/vendor/github.com/spf13/cobra/zsh_completions.go +++ b/vendor/github.com/spf13/cobra/zsh_completions.go @@ -70,12 +70,12 @@ func (c *Command) genZshCompletion(w io.Writer, includeDesc bool) error { return err } -func genZshComp(buf *bytes.Buffer, name string, includeDesc bool) { +func genZshComp(buf io.StringWriter, name string, includeDesc bool) { compCmd := ShellCompRequestCmd if !includeDesc { compCmd = ShellCompNoDescRequestCmd } - buf.WriteString(fmt.Sprintf(`#compdef _%[1]s %[1]s + WriteStringAndCheck(buf, fmt.Sprintf(`#compdef _%[1]s %[1]s # zsh completion for %-36[1]s -*- shell-script -*- diff --git a/vendor/github.com/spf13/viper/util.go b/vendor/github.com/spf13/viper/util.go index b788969637..cee6b24296 100644 --- a/vendor/github.com/spf13/viper/util.go +++ b/vendor/github.com/spf13/viper/util.go @@ -91,13 +91,22 @@ func insensitiviseMap(m map[string]interface{}) { func absPathify(inPath string) string { jww.INFO.Println("Trying to resolve absolute path to", inPath) - if strings.HasPrefix(inPath, "$HOME") { + if inPath == "$HOME" || strings.HasPrefix(inPath, "$HOME"+string(os.PathSeparator)) { inPath = userHomeDir() + inPath[5:] } if strings.HasPrefix(inPath, "$") { end := strings.Index(inPath, string(os.PathSeparator)) - inPath = os.Getenv(inPath[1:end]) + inPath[end:] + + var value, suffix string + if end == -1 { + value = os.Getenv(inPath[1:]) + } else { + value = os.Getenv(inPath[1:end]) + suffix = inPath[end:] + } + + inPath = value + suffix } if filepath.IsAbs(inPath) { diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go index f61f4ed755..405dc20fe3 100644 --- a/vendor/github.com/spf13/viper/viper.go +++ b/vendor/github.com/spf13/viper/viper.go @@ -896,13 +896,7 @@ func UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) e return v.UnmarshalKey(key, rawVal, opts...) } func (v *Viper) UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error { - err := decode(v.Get(key), defaultDecoderConfig(rawVal, opts...)) - - if err != nil { - return err - } - - return nil + return decode(v.Get(key), defaultDecoderConfig(rawVal, opts...)) } // Unmarshal unmarshals the config into a Struct. Make sure that the tags @@ -911,13 +905,7 @@ func Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error { return v.Unmarshal(rawVal, opts...) 
} func (v *Viper) Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error { - err := decode(v.AllSettings(), defaultDecoderConfig(rawVal, opts...)) - - if err != nil { - return err - } - - return nil + return decode(v.AllSettings(), defaultDecoderConfig(rawVal, opts...)) } // defaultDecoderConfig returns default mapsstructure.DecoderConfig with suppot @@ -956,13 +944,7 @@ func (v *Viper) UnmarshalExact(rawVal interface{}, opts ...DecoderConfigOption) config := defaultDecoderConfig(rawVal, opts...) config.ErrorUnused = true - err := decode(v.AllSettings(), config) - - if err != nil { - return err - } - - return nil + return decode(v.AllSettings(), config) } // BindPFlags binds a full flag set to the configuration, using each flag's long diff --git a/vendor/github.com/vmware/govmomi/.goreleaser.yml b/vendor/github.com/vmware/govmomi/.goreleaser.yml index 6a374cd9b4..9e7edeb3c7 100644 --- a/vendor/github.com/vmware/govmomi/.goreleaser.yml +++ b/vendor/github.com/vmware/govmomi/.goreleaser.yml @@ -11,6 +11,7 @@ builds: - amd64 - 386 - arm64 + - mips64le env: - CGO_ENABLED=0 main: ./govc/main.go @@ -27,6 +28,7 @@ builds: - amd64 - 386 - arm64 + - mips64le env: - CGO_ENABLED=0 main: ./vcsim/main.go diff --git a/vendor/github.com/vmware/govmomi/cns/client.go b/vendor/github.com/vmware/govmomi/cns/client.go index e43d0fac9f..575a6e57ad 100644 --- a/vendor/github.com/vmware/govmomi/cns/client.go +++ b/vendor/github.com/vmware/govmomi/cns/client.go @@ -36,6 +36,7 @@ const ( const ( ReleaseVSAN67u3 = "vSAN 6.7U3" ReleaseVSAN70 = "7.0" + ReleaseVSAN70u1 = "vSAN 7.0U1" ) var ( diff --git a/vendor/github.com/vmware/govmomi/cns/cns_util.go b/vendor/github.com/vmware/govmomi/cns/cns_util.go index 80441601a6..bffbe476b7 100644 --- a/vendor/github.com/vmware/govmomi/cns/cns_util.go +++ b/vendor/github.com/vmware/govmomi/cns/cns_util.go @@ -70,9 +70,10 @@ func GetTaskResultArray(ctx context.Context, taskInfo *vim25types.TaskInfo) ([]c // dropUnknownCreateSpecElements helps drop newly added elements in the CnsVolumeCreateSpec, which are not known to the prior vSphere releases func dropUnknownCreateSpecElements(c *Client, createSpecList []cnstypes.CnsVolumeCreateSpec) []cnstypes.CnsVolumeCreateSpec { - if c.serviceClient.Version == ReleaseVSAN67u3 { + updatedcreateSpecList := make([]cnstypes.CnsVolumeCreateSpec, 0, len(createSpecList)) + switch c.serviceClient.Version { + case ReleaseVSAN67u3: // Dropping optional fields not known to vSAN 6.7U3 - updatedcreateSpecList := make([]cnstypes.CnsVolumeCreateSpec, 0, len(createSpecList)) for _, createSpec := range createSpecList { createSpec.Metadata.ContainerCluster.ClusterFlavor = "" createSpec.Metadata.ContainerCluster.ClusterDistribution = "" @@ -92,10 +93,16 @@ func dropUnknownCreateSpecElements(c *Client, createSpecList []cnstypes.CnsVolum updatedcreateSpecList = append(updatedcreateSpecList, createSpec) } createSpecList = updatedcreateSpecList - } else if c.serviceClient.Version == ReleaseVSAN70 { - updatedcreateSpecList := make([]cnstypes.CnsVolumeCreateSpec, 0, len(createSpecList)) + case ReleaseVSAN70: + // Dropping optional fields not known to vSAN 7.0 for _, createSpec := range createSpecList { createSpec.Metadata.ContainerCluster.ClusterDistribution = "" + var updatedContainerClusterArray []cnstypes.CnsContainerCluster + for _, containerCluster := range createSpec.Metadata.ContainerClusterArray { + containerCluster.ClusterDistribution = "" + updatedContainerClusterArray = append(updatedContainerClusterArray, containerCluster) + } + 
createSpec.Metadata.ContainerClusterArray = updatedContainerClusterArray _, ok := createSpec.BackingObjectDetails.(*cnstypes.CnsBlockBackingDetails) if ok { createSpec.BackingObjectDetails.(*cnstypes.CnsBlockBackingDetails).BackingDiskUrlPath = "" @@ -103,6 +110,19 @@ func dropUnknownCreateSpecElements(c *Client, createSpecList []cnstypes.CnsVolum updatedcreateSpecList = append(updatedcreateSpecList, createSpec) } createSpecList = updatedcreateSpecList + case ReleaseVSAN70u1: + // Dropping optional fields not known to vSAN 7.0U1 + for _, createSpec := range createSpecList { + createSpec.Metadata.ContainerCluster.ClusterDistribution = "" + var updatedContainerClusterArray []cnstypes.CnsContainerCluster + for _, containerCluster := range createSpec.Metadata.ContainerClusterArray { + containerCluster.ClusterDistribution = "" + updatedContainerClusterArray = append(updatedContainerClusterArray, containerCluster) + } + createSpec.Metadata.ContainerClusterArray = updatedContainerClusterArray + updatedcreateSpecList = append(updatedcreateSpecList, createSpec) + } + createSpecList = updatedcreateSpecList } return createSpecList } @@ -127,7 +147,7 @@ func dropUnknownVolumeMetadataUpdateSpecElements(c *Client, updateSpecList []cns updatedUpdateSpecList = append(updatedUpdateSpecList, updateSpec) } updateSpecList = updatedUpdateSpecList - } else if c.serviceClient.Version == ReleaseVSAN70 { + } else if c.serviceClient.Version == ReleaseVSAN70 || c.serviceClient.Version == ReleaseVSAN70u1 { updatedUpdateSpecList := make([]cnstypes.CnsVolumeMetadataUpdateSpec, 0, len(updateSpecList)) for _, updateSpec := range updateSpecList { updateSpec.Metadata.ContainerCluster.ClusterDistribution = "" diff --git a/vendor/github.com/vmware/govmomi/object/virtual_machine.go b/vendor/github.com/vmware/govmomi/object/virtual_machine.go index 2f0022803e..1d85047fc1 100644 --- a/vendor/github.com/vmware/govmomi/object/virtual_machine.go +++ b/vendor/github.com/vmware/govmomi/object/virtual_machine.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2015-2017 VMware, Inc. All Rights Reserved. +Copyright (c) 2015-2021 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -40,6 +40,34 @@ type VirtualMachine struct { Common } +// extractDiskLayoutFiles is a helper function used to extract file keys for +// all disk files attached to the virtual machine at the current point of +// running. +func extractDiskLayoutFiles(diskLayoutList []types.VirtualMachineFileLayoutExDiskLayout) []int { + var result []int + + for _, layoutExDisk := range diskLayoutList { + for _, link := range layoutExDisk.Chain { + for i := range link.FileKey { // diskDescriptor, diskExtent pairs + result = append(result, int(link.FileKey[i])) + } + } + } + + return result +} + +// removeKey is a helper function for removing a specific file key from a list +// of keys associated with disks attached to a virtual machine. +func removeKey(l *[]int, key int) { + for i, k := range *l { + if k == key { + *l = append((*l)[:i], (*l)[i+1:]...) + break + } + } +} + func NewVirtualMachine(c *vim25.Client, ref types.ManagedObjectReference) *VirtualMachine { return &VirtualMachine{ Common: NewCommon(c, ref), @@ -628,6 +656,63 @@ func (m snapshotMap) add(parent string, tree []types.VirtualMachineSnapshotTree) } } +// SnapshotSize calculates the size of a given snapshot in bytes. 
If the +// snapshot is current, disk files not associated with any parent snapshot are +// included in size calculations. This allows for measuring and including the +// growth from the last fixed snapshot to the present state. +func SnapshotSize(info types.ManagedObjectReference, parent *types.ManagedObjectReference, vmlayout *types.VirtualMachineFileLayoutEx, isCurrent bool) int { + var fileKeyList []int + var parentFiles []int + var allSnapshotFiles []int + + diskFiles := extractDiskLayoutFiles(vmlayout.Disk) + + for _, layout := range vmlayout.Snapshot { + diskLayout := extractDiskLayoutFiles(layout.Disk) + allSnapshotFiles = append(allSnapshotFiles, diskLayout...) + + if layout.Key.Value == info.Value { + fileKeyList = append(fileKeyList, int(layout.DataKey)) // The .vmsn file + fileKeyList = append(fileKeyList, diskLayout...) // The .vmdk files + } else if parent != nil && layout.Key.Value == parent.Value { + parentFiles = append(parentFiles, diskLayout...) + } + } + + for _, parentFile := range parentFiles { + removeKey(&fileKeyList, parentFile) + } + + for _, file := range allSnapshotFiles { + removeKey(&diskFiles, file) + } + + fileKeyMap := make(map[int]types.VirtualMachineFileLayoutExFileInfo) + for _, file := range vmlayout.File { + fileKeyMap[int(file.Key)] = file + } + + size := 0 + + for _, fileKey := range fileKeyList { + file := fileKeyMap[fileKey] + if parent != nil || + (file.Type != string(types.VirtualMachineFileLayoutExFileTypeDiskDescriptor) && + file.Type != string(types.VirtualMachineFileLayoutExFileTypeDiskExtent)) { + size += int(file.Size) + } + } + + if isCurrent { + for _, diskFile := range diskFiles { + file := fileKeyMap[diskFile] + size += int(file.Size) + } + } + + return size +} + // FindSnapshot supports snapshot lookup by name, where name can be: // 1) snapshot ManagedObjectReference.Value (unique) // 2) snapshot name (may not be unique) diff --git a/vendor/github.com/vmware/govmomi/ovf/envelope.go b/vendor/github.com/vmware/govmomi/ovf/envelope.go index fa4690d88a..274adb9df1 100644 --- a/vendor/github.com/vmware/govmomi/ovf/envelope.go +++ b/vendor/github.com/vmware/govmomi/ovf/envelope.go @@ -148,6 +148,12 @@ type EulaSection struct { License string `xml:"License"` } +type Config struct { + Required *bool `xml:"required,attr"` + Key string `xml:"key,attr"` + Value string `xml:"value,attr"` +} + type VirtualHardwareSection struct { Section @@ -157,6 +163,8 @@ type VirtualHardwareSection struct { System *VirtualSystemSettingData `xml:"System"` Item []ResourceAllocationSettingData `xml:"Item"` StorageItem []StorageAllocationSettingData `xml:"StorageItem"` + Config []Config `xml:"Config"` + ExtraConfig []Config `xml:"ExtraConfig"` } type VirtualSystemSettingData struct { diff --git a/vendor/github.com/vmware/govmomi/session/manager.go b/vendor/github.com/vmware/govmomi/session/manager.go index 24c6a2dbe0..8689acd504 100644 --- a/vendor/github.com/vmware/govmomi/session/manager.go +++ b/vendor/github.com/vmware/govmomi/session/manager.go @@ -281,3 +281,14 @@ func (sm *Manager) CloneSession(ctx context.Context, ticket string) error { sm.userSession = &res.Returnval return nil } + +func (sm *Manager) UpdateServiceMessage(ctx context.Context, message string) error { + req := types.UpdateServiceMessage{ + This: sm.Reference(), + Message: message, + } + + _, err := methods.UpdateServiceMessage(ctx, sm.client, &req) + + return err +} diff --git a/vendor/github.com/vmware/govmomi/simulator/cluster_compute_resource.go 
b/vendor/github.com/vmware/govmomi/simulator/cluster_compute_resource.go index 906261d325..6930c9c09f 100644 --- a/vendor/github.com/vmware/govmomi/simulator/cluster_compute_resource.go +++ b/vendor/github.com/vmware/govmomi/simulator/cluster_compute_resource.go @@ -263,6 +263,52 @@ func (c *ClusterComputeResource) updateOverridesDRS(cfg *types.ClusterConfigInfo return nil } +func (c *ClusterComputeResource) updateOverridesVmOrchestration(cfg *types.ClusterConfigInfoEx, cspec *types.ClusterConfigSpecEx) types.BaseMethodFault { + for _, spec := range cspec.VmOrchestrationSpec { + var i int + var key types.ManagedObjectReference + exists := false + + if spec.Operation == types.ArrayUpdateOperationRemove { + key = spec.RemoveKey.(types.ManagedObjectReference) + } else { + key = spec.Info.Vm + } + + for i = range cfg.VmOrchestration { + if cfg.VmOrchestration[i].Vm == key { + exists = true + break + } + } + + switch spec.Operation { + case types.ArrayUpdateOperationAdd: + if exists { + return new(types.InvalidArgument) + } + cfg.VmOrchestration = append(cfg.VmOrchestration, *spec.Info) + case types.ArrayUpdateOperationEdit: + if !exists { + return new(types.InvalidArgument) + } + if spec.Info.VmReadiness.ReadyCondition != "" { + cfg.VmOrchestration[i].VmReadiness.ReadyCondition = spec.Info.VmReadiness.ReadyCondition + } + if spec.Info.VmReadiness.PostReadyDelay != 0 { + cfg.VmOrchestration[i].VmReadiness.PostReadyDelay = spec.Info.VmReadiness.PostReadyDelay + } + case types.ArrayUpdateOperationRemove: + if !exists { + return new(types.InvalidArgument) + } + cfg.VmOrchestration = append(cfg.VmOrchestration[:i], cfg.VmOrchestration[i+1:]...) + } + } + + return nil +} + func (c *ClusterComputeResource) ReconfigureComputeResourceTask(req *types.ReconfigureComputeResource_Task) soap.HasFault { task := CreateTask(c, "reconfigureCluster", func(*Task) (types.AnyType, types.BaseMethodFault) { spec, ok := req.Spec.(*types.ClusterConfigSpecEx) @@ -275,6 +321,7 @@ func (c *ClusterComputeResource) ReconfigureComputeResourceTask(req *types.Recon c.updateGroups, c.updateOverridesDAS, c.updateOverridesDRS, + c.updateOverridesVmOrchestration, } for _, update := range updates { diff --git a/vendor/github.com/vmware/govmomi/simulator/snapshot.go b/vendor/github.com/vmware/govmomi/simulator/snapshot.go index 0001e2894c..bec373c353 100644 --- a/vendor/github.com/vmware/govmomi/simulator/snapshot.go +++ b/vendor/github.com/vmware/govmomi/simulator/snapshot.go @@ -147,7 +147,7 @@ func (v *VirtualMachineSnapshot) RemoveSnapshotTask(ctx *Context, req *types.Rem } func (v *VirtualMachineSnapshot) RevertToSnapshotTask(req *types.RevertToSnapshot_Task) soap.HasFault { - task := CreateTask(v, "revertToSnapshot", func(t *Task) (types.AnyType, types.BaseMethodFault) { + task := CreateTask(v.Vm, "revertToSnapshot", func(t *Task) (types.AnyType, types.BaseMethodFault) { vm := Map.Get(v.Vm).(*VirtualMachine) Map.WithLock(vm, func() { diff --git a/vendor/github.com/vmware/govmomi/simulator/virtual_machine.go b/vendor/github.com/vmware/govmomi/simulator/virtual_machine.go index 7eeba7b70b..76a83cc2b9 100644 --- a/vendor/github.com/vmware/govmomi/simulator/virtual_machine.go +++ b/vendor/github.com/vmware/govmomi/simulator/virtual_machine.go @@ -80,6 +80,7 @@ func NewVirtualMachine(ctx *Context, parent types.ManagedObjectReference, spec * CpuAllocation: &rspec.CpuAllocation, LatencySensitivity: &types.LatencySensitivity{Level: types.LatencySensitivitySensitivityLevelNormal}, BootOptions: &types.VirtualMachineBootOptions{}, + 
CreateDate: types.NewTime(time.Now()), } vm.Layout = &types.VirtualMachineFileLayout{} vm.LayoutEx = &types.VirtualMachineFileLayoutEx{ diff --git a/vendor/github.com/vmware/govmomi/vim25/retry.go b/vendor/github.com/vmware/govmomi/vim25/retry.go index f10e2cb6c9..bf663a1012 100644 --- a/vendor/github.com/vmware/govmomi/vim25/retry.go +++ b/vendor/github.com/vmware/govmomi/vim25/retry.go @@ -25,33 +25,40 @@ import ( type RetryFunc func(err error) (retry bool, delay time.Duration) -// TemporaryNetworkError returns a RetryFunc that retries up to a maximum of n -// times, only if the error returned by the RoundTrip function is a temporary -// network error (for example: a connect timeout). +// TemporaryNetworkError is deprecated. Use Retry() with RetryTemporaryNetworkError and retryAttempts instead. func TemporaryNetworkError(n int) RetryFunc { - return func(err error) (retry bool, delay time.Duration) { - var ok bool - - t, ok := err.(interface { - // Temporary is implemented by url.Error and net.Error - Temporary() bool - }) - if !ok { - // Never retry if this is not a Temporary error. - return false, 0 - } - - if !t.Temporary() { - return false, 0 + return func(err error) (bool, time.Duration) { + if IsTemporaryNetworkError(err) { + // Don't retry if we're out of tries. + if n--; n <= 0 { + return false, 0 + } + return true, 0 } + return false, 0 + } +} - // Don't retry if we're out of tries. - if n--; n <= 0 { - return false, 0 - } +// RetryTemporaryNetworkError returns a RetryFunc that returns IsTemporaryNetworkError(err) +func RetryTemporaryNetworkError(err error) (bool, time.Duration) { + return IsTemporaryNetworkError(err), 0 +} - return true, 0 +// IsTemporaryNetworkError returns false unless the error implements +// a Temporary() bool method such as url.Error and net.Error. +// Otherwise, returns the value of the Temporary() method. +func IsTemporaryNetworkError(err error) bool { + t, ok := err.(interface { + // Temporary is implemented by url.Error and net.Error + Temporary() bool + }) + + if !ok { + // Not a Temporary error. + return false } + + return t.Temporary() } type retry struct { @@ -60,7 +67,8 @@ type retry struct { // fn is a custom function that is called when an error occurs. // It returns whether or not to retry, and if so, how long to // delay before retrying. - fn RetryFunc + fn RetryFunc + maxRetryAttempts int } // Retry wraps the specified soap.RoundTripper and invokes the @@ -68,10 +76,16 @@ type retry struct { // retry the call, and if so, how long to wait before retrying. If // the result of this function is to not retry, the original error // is returned from the RoundTrip function. -func Retry(roundTripper soap.RoundTripper, fn RetryFunc) soap.RoundTripper { +// The soap.RoundTripper will return the original error if retryAttempts is specified and reached. 
+func Retry(roundTripper soap.RoundTripper, fn RetryFunc, retryAttempts ...int) soap.RoundTripper { r := &retry{ - roundTripper: roundTripper, - fn: fn, + roundTripper: roundTripper, + fn: fn, + maxRetryAttempts: 1, + } + + if len(retryAttempts) == 1 { + r.maxRetryAttempts = retryAttempts[0] } return r @@ -80,7 +94,7 @@ func Retry(roundTripper soap.RoundTripper, fn RetryFunc) soap.RoundTripper { func (r *retry) RoundTrip(ctx context.Context, req, res soap.HasFault) error { var err error - for { + for attempt := 0; attempt < r.maxRetryAttempts; attempt++ { err = r.roundTripper.RoundTrip(ctx, req, res) if err == nil { break diff --git a/vendor/github.com/vmware/govmomi/vim25/soap/client.go b/vendor/github.com/vmware/govmomi/vim25/soap/client.go index 531e797e3b..637330462c 100644 --- a/vendor/github.com/vmware/govmomi/vim25/soap/client.go +++ b/vendor/github.com/vmware/govmomi/vim25/soap/client.go @@ -560,7 +560,7 @@ type statusError struct { } // Temporary returns true for HTTP response codes that can be retried -// See vim25.TemporaryNetworkError +// See vim25.IsTemporaryNetworkError func (e *statusError) Temporary() bool { switch e.res.StatusCode { case http.StatusBadGateway: diff --git a/vendor/github.com/vmware/govmomi/vim25/soap/error.go b/vendor/github.com/vmware/govmomi/vim25/soap/error.go index 46111556cb..1e1508733a 100644 --- a/vendor/github.com/vmware/govmomi/vim25/soap/error.go +++ b/vendor/github.com/vmware/govmomi/vim25/soap/error.go @@ -17,6 +17,7 @@ limitations under the License. package soap import ( + "encoding/json" "fmt" "reflect" @@ -49,6 +50,15 @@ func (s soapFaultError) Error() string { return fmt.Sprintf("%s: %s", s.fault.Code, msg) } +func (s soapFaultError) MarshalJSON() ([]byte, error) { + out := struct { + Fault *Fault + }{ + Fault: s.fault, + } + return json.Marshal(out) +} + type vimFaultError struct { fault types.BaseMethodFault } diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md index aeff90e4ea..fa817e6a10 100644 --- a/vendor/go.uber.org/zap/CHANGELOG.md +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -1,16 +1,37 @@ # Changelog +## 1.16.0 (1 Sep 2020) + +Bugfixes: +* [#828][]: Fix missing newline in IncreaseLevel error messages. +* [#835][]: Fix panic in JSON encoder when encoding times or durations + without specifying a time or duration encoder. +* [#843][]: Honor CallerSkip when taking stack traces. +* [#862][]: Fix the default file permissions to use `0666` and rely on the umask instead. +* [#854][]: Encode `` for nil `Stringer` instead of a panic error log. + +Enhancements: +* [#629][]: Added `zapcore.TimeEncoderOfLayout` to easily create time encoders + for custom layouts. +* [#697][]: Added support for a configurable delimiter in the console encoder. +* [#852][]: Optimize console encoder by pooling the underlying JSON encoder. +* [#844][]: Add ability to include the calling function as part of logs. +* [#843][]: Add `StackSkip` for including truncated stacks as a field. +* [#861][]: Add options to customize Fatal behaviour for better testability. + +Thanks to @SteelPhase, @tmshn, @lixingwang, @wyxloading, @moul, @segevfiner, @andy-retailnext and @jcorbin for their contributions to this release. + ## 1.15.0 (23 Apr 2020) Bugfixes: * [#804][]: Fix handling of `Time` values out of `UnixNano` range. -* [#812][]: Fix `IncreaseLevel` being reset after a call to `With`. +* [#812][]: Fix `IncreaseLevel` being reset after a call to `With`. Enhancements: * [#806][]: Add `WithCaller` option to supersede the `AddCaller` option. 
This allows disabling annotation of log entries with caller information if previously enabled with `AddCaller`. -* [#813][]: Deprecate `NewSampler` constructor in favor of +* [#813][]: Deprecate `NewSampler` constructor in favor of `NewSamplerWithOptions` which supports a `SamplerHook` option. This option adds support for monitoring sampling decisions through a hook. @@ -399,3 +420,13 @@ upgrade to the upcoming stable release. [#812]: https://github.com/uber-go/zap/pull/812 [#806]: https://github.com/uber-go/zap/pull/806 [#813]: https://github.com/uber-go/zap/pull/813 +[#629]: https://github.com/uber-go/zap/pull/629 +[#697]: https://github.com/uber-go/zap/pull/697 +[#828]: https://github.com/uber-go/zap/pull/828 +[#835]: https://github.com/uber-go/zap/pull/835 +[#843]: https://github.com/uber-go/zap/pull/843 +[#844]: https://github.com/uber-go/zap/pull/844 +[#852]: https://github.com/uber-go/zap/pull/852 +[#854]: https://github.com/uber-go/zap/pull/854 +[#861]: https://github.com/uber-go/zap/pull/861 +[#862]: https://github.com/uber-go/zap/pull/862 diff --git a/vendor/go.uber.org/zap/FAQ.md b/vendor/go.uber.org/zap/FAQ.md index 4256d35c76..5ec7288750 100644 --- a/vendor/go.uber.org/zap/FAQ.md +++ b/vendor/go.uber.org/zap/FAQ.md @@ -149,6 +149,7 @@ We're aware of the following extensions, but haven't used them ourselves: | `github.com/tchap/zapext` | Sentry, syslog | | `github.com/fgrosse/zaptest` | Ginkgo | | `github.com/blendle/zapdriver` | Stackdriver | +| `github.com/moul/zapgorm` | Gorm | [go-proverbs]: https://go-proverbs.github.io/ [import-path]: https://golang.org/cmd/go/#hdr-Remote_import_paths diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go index 192fd1a947..55637fb0b4 100644 --- a/vendor/go.uber.org/zap/config.go +++ b/vendor/go.uber.org/zap/config.go @@ -101,6 +101,7 @@ func NewProductionEncoderConfig() zapcore.EncoderConfig { LevelKey: "level", NameKey: "logger", CallerKey: "caller", + FunctionKey: zapcore.OmitKey, MessageKey: "msg", StacktraceKey: "stacktrace", LineEnding: zapcore.DefaultLineEnding, @@ -140,6 +141,7 @@ func NewDevelopmentEncoderConfig() zapcore.EncoderConfig { LevelKey: "L", NameKey: "N", CallerKey: "C", + FunctionKey: zapcore.OmitKey, MessageKey: "M", StacktraceKey: "S", LineEnding: zapcore.DefaultLineEnding, diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go index dd558fc231..3c0d7d9578 100644 --- a/vendor/go.uber.org/zap/field.go +++ b/vendor/go.uber.org/zap/field.go @@ -364,11 +364,17 @@ func Timep(key string, val *time.Time) Field { // expensive (relatively speaking); this function both makes an allocation and // takes about two microseconds. func Stack(key string) Field { + return StackSkip(key, 1) // skip Stack +} + +// StackSkip constructs a field similarly to Stack, but also skips the given +// number of frames from the top of the stacktrace. +func StackSkip(key string, skip int) Field { // Returning the stacktrace as a string costs an allocation, but saves us // from expanding the zapcore.Field union struct to include a byte slice. Since // taking a stacktrace is already so expensive (~10us), the extra allocation // is okay. - return String(key, takeStacktrace()) + return String(key, takeStacktrace(skip+1)) // skip StackSkip } // Duration constructs a field with the given key and value. 
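The new `StackSkip` field above is aimed at logging helpers that report on behalf of their caller. A short sketch under the assumption that the helper (`logAndWrap`, an invented name) should not itself appear in the captured trace:

```go
package main

import "go.uber.org/zap"

// logAndWrap logs on behalf of its caller; skipping one frame keeps the
// helper itself out of the captured stacktrace.
func logAndWrap(logger *zap.Logger, msg string) {
	logger.Info(msg, zap.StackSkip("stack", 1))
}

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()
	logAndWrap(logger, "something noteworthy")
}
```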
The encoder diff --git a/vendor/go.uber.org/zap/go.mod b/vendor/go.uber.org/zap/go.mod index 118abda151..6ef4db70ed 100644 --- a/vendor/go.uber.org/zap/go.mod +++ b/vendor/go.uber.org/zap/go.mod @@ -8,5 +8,6 @@ require ( go.uber.org/atomic v1.6.0 go.uber.org/multierr v1.5.0 golang.org/x/lint v0.0.0-20190930215403-16217165b5de + gopkg.in/yaml.v2 v2.2.2 honnef.co/go/tools v0.0.1-2019.2.3 ) diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go index cd6e19551a..ea484aed10 100644 --- a/vendor/go.uber.org/zap/logger.go +++ b/vendor/go.uber.org/zap/logger.go @@ -49,6 +49,7 @@ type Logger struct { addStack zapcore.LevelEnabler callerSkip int + onFatal zapcore.CheckWriteAction // default is WriteThenFatal } // New constructs a new Logger from the provided zapcore.Core and Options. If @@ -280,7 +281,13 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { case zapcore.PanicLevel: ce = ce.Should(ent, zapcore.WriteThenPanic) case zapcore.FatalLevel: - ce = ce.Should(ent, zapcore.WriteThenFatal) + onFatal := log.onFatal + // Noop is the default value for CheckWriteAction, and it leads to + // continued execution after a Fatal which is unexpected. + if onFatal == zapcore.WriteThenNoop { + onFatal = zapcore.WriteThenFatal + } + ce = ce.Should(ent, onFatal) case zapcore.DPanicLevel: if log.development { ce = ce.Should(ent, zapcore.WriteThenPanic) @@ -297,15 +304,41 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { // Thread the error output through to the CheckedEntry. ce.ErrorOutput = log.errorOutput if log.addCaller { - ce.Entry.Caller = zapcore.NewEntryCaller(runtime.Caller(log.callerSkip + callerSkipOffset)) - if !ce.Entry.Caller.Defined { + frame, defined := getCallerFrame(log.callerSkip + callerSkipOffset) + if !defined { fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", time.Now().UTC()) log.errorOutput.Sync() } + + ce.Entry.Caller = zapcore.EntryCaller{ + Defined: defined, + PC: frame.PC, + File: frame.File, + Line: frame.Line, + Function: frame.Function, + } } if log.addStack.Enabled(ce.Entry.Level) { - ce.Entry.Stack = Stack("").String + ce.Entry.Stack = StackSkip("", log.callerSkip+callerSkipOffset).String } return ce } + +// getCallerFrame gets caller frame. The argument skip is the number of stack +// frames to ascend, with 0 identifying the caller of getCallerFrame. The +// boolean ok is false if it was not possible to recover the information. +// +// Note: This implementation is similar to runtime.Caller, but it returns the whole frame. +func getCallerFrame(skip int) (frame runtime.Frame, ok bool) { + const skipOffset = 2 // skip getCallerFrame and Callers + + pc := make([]uintptr, 1) + numFrames := runtime.Callers(skip+skipOffset, pc[:]) + if numFrames < 1 { + return + } + + frame, _ = runtime.CallersFrames(pc).Next() + return frame, frame.PC != 0 +} diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go index 59f1b54a04..0135c20923 100644 --- a/vendor/go.uber.org/zap/options.go +++ b/vendor/go.uber.org/zap/options.go @@ -86,15 +86,15 @@ func Development() Option { }) } -// AddCaller configures the Logger to annotate each message with the filename -// and line number of zap's caller. See also WithCaller. +// AddCaller configures the Logger to annotate each message with the filename, +// line number, and function name of zap's caller. See also WithCaller. 
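The richer caller entry produced via `getCallerFrame` only surfaces the function name when an encoder's `FunctionKey` is set; both bundled configs default it to `zapcore.OmitKey`, as the config.go hunk earlier shows. A hedged opt-in sketch:

```go
package main

import "go.uber.org/zap"

func main() {
	cfg := zap.NewProductionConfig()
	// FunctionKey defaults to zapcore.OmitKey; setting it opts in to the
	// fully-qualified function name captured alongside file and line.
	cfg.EncoderConfig.FunctionKey = "func"

	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()
	logger.Info("caller demo") // output now carries "caller" and "func" keys
}
```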
func AddCaller() Option { return WithCaller(true) } -// WithCaller configures the Logger to annotate each message with the filename -// and line number of zap's caller, or not, depending on the value of enabled. -// This is a generalized form of AddCaller. +// WithCaller configures the Logger to annotate each message with the filename, +// line number, and function name of zap's caller, or not, depending on the +// value of enabled. This is a generalized form of AddCaller. func WithCaller(enabled bool) Option { return optionFunc(func(log *Logger) { log.addCaller = enabled @@ -125,9 +125,16 @@ func IncreaseLevel(lvl zapcore.LevelEnabler) Option { return optionFunc(func(log *Logger) { core, err := zapcore.NewIncreaseLevelCore(log.core, lvl) if err != nil { - fmt.Fprintf(log.errorOutput, "failed to IncreaseLevel: %v", err) + fmt.Fprintf(log.errorOutput, "failed to IncreaseLevel: %v\n", err) } else { log.core = core } }) } + +// OnFatal sets the action to take on fatal logs. +func OnFatal(action zapcore.CheckWriteAction) Option { + return optionFunc(func(log *Logger) { + log.onFatal = action + }) +} diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go index ff0becfe5d..df46fa87a7 100644 --- a/vendor/go.uber.org/zap/sink.go +++ b/vendor/go.uber.org/zap/sink.go @@ -136,7 +136,7 @@ func newFileSink(u *url.URL) (Sink, error) { case "stderr": return nopCloserSink{os.Stderr}, nil } - return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644) + return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666) } func normalizeScheme(s string) (string, error) { diff --git a/vendor/go.uber.org/zap/stacktrace.go b/vendor/go.uber.org/zap/stacktrace.go index 100fac2168..0cf8c1ddff 100644 --- a/vendor/go.uber.org/zap/stacktrace.go +++ b/vendor/go.uber.org/zap/stacktrace.go @@ -22,28 +22,20 @@ package zap import ( "runtime" - "strings" "sync" "go.uber.org/zap/internal/bufferpool" ) -const _zapPackage = "go.uber.org/zap" - var ( _stacktracePool = sync.Pool{ New: func() interface{} { return newProgramCounters(64) }, } - - // We add "." and "/" suffixes to the package name to ensure we only match - // the exact package and not any package with the same prefix. - _zapStacktracePrefixes = addPrefix(_zapPackage, ".", "/") - _zapStacktraceVendorContains = addPrefix("/vendor/", _zapStacktracePrefixes...) ) -func takeStacktrace() string { +func takeStacktrace(skip int) string { buffer := bufferpool.Get() defer buffer.Free() programCounters := _stacktracePool.Get().(*programCounters) @@ -51,9 +43,9 @@ func takeStacktrace() string { var numFrames int for { - // Skip the call to runtime.Counters and takeStacktrace so that the + // Skip the call to runtime.Callers and takeStacktrace so that the // program counters start at the caller of takeStacktrace. - numFrames = runtime.Callers(2, programCounters.pcs) + numFrames = runtime.Callers(skip+2, programCounters.pcs) if numFrames < len(programCounters.pcs) { break } @@ -63,19 +55,12 @@ func takeStacktrace() string { } i := 0 - skipZapFrames := true // skip all consecutive zap frames at the beginning. frames := runtime.CallersFrames(programCounters.pcs[:numFrames]) // Note: On the last iteration, frames.Next() returns false, with a valid // frame, but we ignore this frame. The last frame is a a runtime frame which // adds noise, since it's only either runtime.main or runtime.goexit. 
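The new `OnFatal` option pairs with the `WriteThenGoexit` action added in the zapcore/entry.go hunk further down; together they make `Fatal` testable by ending only the calling goroutine instead of the whole process. A minimal sketch:

```go
package main

import (
	"os"
	"sync"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stderr),
		zap.InfoLevel,
	)
	// WriteThenGoexit runs runtime.Goexit after the write, so Fatal ends
	// only the calling goroutine and deferred functions still run.
	logger := zap.New(core, zap.OnFatal(zapcore.WriteThenGoexit))

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done() // still runs, thanks to Goexit
		logger.Fatal("boom")
	}()
	wg.Wait()
}
```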
for frame, more := frames.Next(); more; frame, more = frames.Next() { - if skipZapFrames && isZapFrame(frame.Function) { - continue - } else { - skipZapFrames = false - } - if i != 0 { buffer.AppendByte('\n') } @@ -91,24 +76,6 @@ func takeStacktrace() string { return buffer.String() } -func isZapFrame(function string) bool { - for _, prefix := range _zapStacktracePrefixes { - if strings.HasPrefix(function, prefix) { - return true - } - } - - // We can't use a prefix match here since the location of the vendor - // directory affects the prefix. Instead we do a contains match. - for _, contains := range _zapStacktraceVendorContains { - if strings.Contains(function, contains) { - return true - } - } - - return false -} - type programCounters struct { pcs []uintptr } @@ -116,11 +83,3 @@ type programCounters struct { func newProgramCounters(size int) *programCounters { return &programCounters{make([]uintptr, size)} } - -func addPrefix(prefix string, ss ...string) []string { - withPrefix := make([]string, len(ss)) - for i, s := range ss { - withPrefix[i] = prefix + s - } - return withPrefix -} diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go index b7875966f4..3b68f8c0c5 100644 --- a/vendor/go.uber.org/zap/zapcore/console_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go @@ -56,6 +56,10 @@ type consoleEncoder struct { // encoder configuration, it will omit any element whose key is set to the empty // string. func NewConsoleEncoder(cfg EncoderConfig) Encoder { + if len(cfg.ConsoleSeparator) == 0 { + // Use a default delimiter of '\t' for backwards compatibility + cfg.ConsoleSeparator = "\t" + } return consoleEncoder{newJSONEncoder(cfg, true)} } @@ -89,12 +93,17 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, nameEncoder(ent.LoggerName, arr) } - if ent.Caller.Defined && c.CallerKey != "" && c.EncodeCaller != nil { - c.EncodeCaller(ent.Caller, arr) + if ent.Caller.Defined { + if c.CallerKey != "" && c.EncodeCaller != nil { + c.EncodeCaller(ent.Caller, arr) + } + if c.FunctionKey != "" { + arr.AppendString(ent.Caller.Function) + } } for i := range arr.elems { if i > 0 { - line.AppendByte('\t') + line.AppendString(c.ConsoleSeparator) } fmt.Fprint(line, arr.elems[i]) } @@ -102,7 +111,7 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, // Add the message itself. if c.MessageKey != "" { - c.addTabIfNecessary(line) + c.addSeparatorIfNecessary(line) line.AppendString(ent.Message) } @@ -126,7 +135,12 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, func (c consoleEncoder) writeContext(line *buffer.Buffer, extra []Field) { context := c.jsonEncoder.Clone().(*jsonEncoder) - defer context.buf.Free() + defer func() { + // putJSONEncoder assumes the buffer is still used, but we write out the buffer so + // we can free it. 
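A short sketch of the configurable console delimiter; an empty `ConsoleSeparator` falls back to the historical tab, as the `NewConsoleEncoder` guard above shows:

```go
package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	cfg := zap.NewDevelopmentEncoderConfig()
	cfg.ConsoleSeparator = " | " // leave empty to keep the tab default

	core := zapcore.NewCore(
		zapcore.NewConsoleEncoder(cfg),
		zapcore.Lock(os.Stdout),
		zap.DebugLevel,
	)
	zap.New(core).Info("request handled") // columns joined with " | "
}
```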
+ context.buf.Free() + putJSONEncoder(context) + }() addFields(context, extra) context.closeOpenNamespaces() @@ -134,14 +148,14 @@ func (c consoleEncoder) writeContext(line *buffer.Buffer, extra []Field) { return } - c.addTabIfNecessary(line) + c.addSeparatorIfNecessary(line) line.AppendByte('{') line.Write(context.buf.Bytes()) line.AppendByte('}') } -func (c consoleEncoder) addTabIfNecessary(line *buffer.Buffer) { +func (c consoleEncoder) addSeparatorIfNecessary(line *buffer.Buffer) { if line.Len() > 0 { - line.AppendByte('\t') + line.AppendString(c.ConsoleSeparator) } } diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go index 6c78f7e49a..6601ca166c 100644 --- a/vendor/go.uber.org/zap/zapcore/encoder.go +++ b/vendor/go.uber.org/zap/zapcore/encoder.go @@ -21,6 +21,7 @@ package zapcore import ( + "encoding/json" "time" "go.uber.org/zap/buffer" @@ -151,6 +152,14 @@ func RFC3339NanoTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { encodeTimeLayout(t, time.RFC3339Nano, enc) } +// TimeEncoderOfLayout returns TimeEncoder which serializes a time.Time using +// given layout. +func TimeEncoderOfLayout(layout string) TimeEncoder { + return func(t time.Time, enc PrimitiveArrayEncoder) { + encodeTimeLayout(t, layout, enc) + } +} + // UnmarshalText unmarshals text to a TimeEncoder. // "rfc3339nano" and "RFC3339Nano" are unmarshaled to RFC3339NanoTimeEncoder. // "rfc3339" and "RFC3339" are unmarshaled to RFC3339TimeEncoder. @@ -176,6 +185,35 @@ func (e *TimeEncoder) UnmarshalText(text []byte) error { return nil } +// UnmarshalYAML unmarshals YAML to a TimeEncoder. +// If value is an object with a "layout" field, it will be unmarshaled to TimeEncoder with given layout. +// timeEncoder: +// layout: 06/01/02 03:04pm +// If value is string, it uses UnmarshalText. +// timeEncoder: iso8601 +func (e *TimeEncoder) UnmarshalYAML(unmarshal func(interface{}) error) error { + var o struct { + Layout string `json:"layout" yaml:"layout"` + } + if err := unmarshal(&o); err == nil { + *e = TimeEncoderOfLayout(o.Layout) + return nil + } + + var s string + if err := unmarshal(&s); err != nil { + return err + } + return e.UnmarshalText([]byte(s)) +} + +// UnmarshalJSON unmarshals JSON to a TimeEncoder as same way UnmarshalYAML does. +func (e *TimeEncoder) UnmarshalJSON(data []byte) error { + return e.UnmarshalYAML(func(v interface{}) error { + return json.Unmarshal(data, v) + }) +} + // A DurationEncoder serializes a time.Duration to a primitive type. type DurationEncoder func(time.Duration, PrimitiveArrayEncoder) @@ -279,6 +317,7 @@ type EncoderConfig struct { TimeKey string `json:"timeKey" yaml:"timeKey"` NameKey string `json:"nameKey" yaml:"nameKey"` CallerKey string `json:"callerKey" yaml:"callerKey"` + FunctionKey string `json:"functionKey" yaml:"functionKey"` StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"` LineEnding string `json:"lineEnding" yaml:"lineEnding"` // Configure the primitive representations of common complex types. For @@ -291,6 +330,9 @@ type EncoderConfig struct { // Unlike the other primitive type encoders, EncodeName is optional. The // zero value falls back to FullNameEncoder. EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"` + // Configures the field separator used by the console encoder. Defaults + // to tab. 
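`TimeEncoderOfLayout` plus the new `UnmarshalYAML`/`UnmarshalJSON` hooks make custom timestamp layouts declarative. A sketch of the programmatic form; the layout string here is arbitrary:

```go
package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	cfg := zap.NewProductionEncoderConfig()
	// Programmatic equivalent of the YAML form:
	//   timeEncoder:
	//     layout: 2006-01-02 15:04:05.000
	cfg.EncodeTime = zapcore.TimeEncoderOfLayout("2006-01-02 15:04:05.000")

	core := zapcore.NewCore(zapcore.NewJSONEncoder(cfg), zapcore.Lock(os.Stdout), zap.InfoLevel)
	zap.New(core).Info("layout demo")
}
```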
+ ConsoleSeparator string `json:"consoleSeparator" yaml:"consoleSeparator"` } // ObjectEncoder is a strongly-typed, encoding-agnostic interface for adding a diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go index 8273abdf07..4aa8b4f90b 100644 --- a/vendor/go.uber.org/zap/zapcore/entry.go +++ b/vendor/go.uber.org/zap/zapcore/entry.go @@ -22,6 +22,7 @@ package zapcore import ( "fmt" + "runtime" "strings" "sync" "time" @@ -70,10 +71,11 @@ func NewEntryCaller(pc uintptr, file string, line int, ok bool) EntryCaller { // EntryCaller represents the caller of a logging function. type EntryCaller struct { - Defined bool - PC uintptr - File string - Line int + Defined bool + PC uintptr + File string + Line int + Function string } // String returns the full path and line number of the caller. @@ -158,6 +160,8 @@ const ( // WriteThenNoop indicates that nothing special needs to be done. It's the // default behavior. WriteThenNoop CheckWriteAction = iota + // WriteThenGoexit runs runtime.Goexit after Write. + WriteThenGoexit // WriteThenPanic causes a panic after Write. WriteThenPanic // WriteThenFatal causes a fatal os.Exit after Write. @@ -230,6 +234,8 @@ func (ce *CheckedEntry) Write(fields ...Field) { panic(msg) case WriteThenFatal: exit.Exit() + case WriteThenGoexit: + runtime.Goexit() } } diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go index 6e05f831ff..7e255d63e0 100644 --- a/vendor/go.uber.org/zap/zapcore/field.go +++ b/vendor/go.uber.org/zap/zapcore/field.go @@ -205,13 +205,23 @@ func addFields(enc ObjectEncoder, fields []Field) { } } -func encodeStringer(key string, stringer interface{}, enc ObjectEncoder) (err error) { +func encodeStringer(key string, stringer interface{}, enc ObjectEncoder) (retErr error) { + // Try to capture panics (from nil references or otherwise) when calling + // the String() method, similar to https://golang.org/src/fmt/print.go#L540 defer func() { - if v := recover(); v != nil { - err = fmt.Errorf("PANIC=%v", v) + if err := recover(); err != nil { + // If it's a nil pointer, just say "". The likeliest causes are a + // Stringer that fails to guard against nil or a nil pointer for a + // value receiver, and in either case, "" is a nice result. + if v := reflect.ValueOf(stringer); v.Kind() == reflect.Ptr && v.IsNil() { + enc.AddString(key, "") + return + } + + retErr = fmt.Errorf("PANIC=%v", err) } }() enc.AddString(key, stringer.(fmt.Stringer).String()) - return + return nil } diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go index 7facc1b36b..5cf7d917e9 100644 --- a/vendor/go.uber.org/zap/zapcore/json_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -236,7 +236,9 @@ func (enc *jsonEncoder) AppendComplex128(val complex128) { func (enc *jsonEncoder) AppendDuration(val time.Duration) { cur := enc.buf.Len() - enc.EncodeDuration(val, enc) + if e := enc.EncodeDuration; e != nil { + e(val, enc) + } if cur == enc.buf.Len() { // User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep // JSON valid. @@ -275,7 +277,9 @@ func (enc *jsonEncoder) AppendTimeLayout(time time.Time, layout string) { func (enc *jsonEncoder) AppendTime(val time.Time) { cur := enc.buf.Len() - enc.EncodeTime(val, enc) + if e := enc.EncodeTime; e != nil { + e(val, enc) + } if cur == enc.buf.Len() { // User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep // output JSON valid. 
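The `encodeStringer` guard above matters for typed-nil pointers whose `String` method is not nil-safe; per the 1.16 changelog entry for #854, such values are now encoded as the string `<nil>` rather than a `PANIC=...` error field. A small repro sketch (`user` is an invented type):

```go
package main

import "go.uber.org/zap"

type user struct{ name string }

// String has a pointer receiver and no nil guard, so calling it through a
// nil *user panics with a nil-pointer dereference.
func (u *user) String() string { return u.name }

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()

	var u *user
	// Previously the recovered panic surfaced as an error field; now the
	// nil pointer is encoded as the string "<nil>".
	logger.Info("current user", zap.Stringer("user", u))
}
```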
@@ -362,14 +366,20 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, final.AppendString(ent.LoggerName) } } - if ent.Caller.Defined && final.CallerKey != "" { - final.addKey(final.CallerKey) - cur := final.buf.Len() - final.EncodeCaller(ent.Caller, final) - if cur == final.buf.Len() { - // User-supplied EncodeCaller was a no-op. Fall back to strings to - // keep output JSON valid. - final.AppendString(ent.Caller.String()) + if ent.Caller.Defined { + if final.CallerKey != "" { + final.addKey(final.CallerKey) + cur := final.buf.Len() + final.EncodeCaller(ent.Caller, final) + if cur == final.buf.Len() { + // User-supplied EncodeCaller was a no-op. Fall back to strings to + // keep output JSON valid. + final.AppendString(ent.Caller.String()) + } + } + if final.FunctionKey != "" { + final.addKey(final.FunctionKey) + final.AppendString(ent.Caller.Function) } } if final.MessageKey != "" { diff --git a/vendor/go.uber.org/zap/zapcore/marshaler.go b/vendor/go.uber.org/zap/zapcore/marshaler.go index 2627a653df..c3c55ba0d9 100644 --- a/vendor/go.uber.org/zap/zapcore/marshaler.go +++ b/vendor/go.uber.org/zap/zapcore/marshaler.go @@ -23,6 +23,10 @@ package zapcore // ObjectMarshaler allows user-defined types to efficiently add themselves to the // logging context, and to selectively omit information which shouldn't be // included in logs (e.g., passwords). +// +// Note: ObjectMarshaler is only used when zap.Object is used or when +// passed directly to zap.Any. It is not used when reflection-based +// encoding is used. type ObjectMarshaler interface { MarshalLogObject(ObjectEncoder) error } @@ -39,6 +43,10 @@ func (f ObjectMarshalerFunc) MarshalLogObject(enc ObjectEncoder) error { // ArrayMarshaler allows user-defined types to efficiently add themselves to the // logging context, and to selectively omit information which shouldn't be // included in logs (e.g., passwords). +// +// Note: ArrayMarshaler is only used when zap.Array is used or when +// passed directly to zap.Any. It is not used when reflection-based +// encoding is used. type ArrayMarshaler interface { MarshalLogArray(ArrayEncoder) error } diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go index 2cd12fc816..f91466f7cd 100644 --- a/vendor/golang.org/x/net/html/parse.go +++ b/vendor/golang.org/x/net/html/parse.go @@ -728,7 +728,13 @@ func inHeadNoscriptIM(p *parser) bool { return inBodyIM(p) case a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Style: return inHeadIM(p) - case a.Head, a.Noscript: + case a.Head: + // Ignore the token. + return true + case a.Noscript: + // Don't let the tokenizer go into raw text mode even when a