Skip to content

Commit

Permalink
Use WCP FSS configmap in CSI driver for features dependent on WCP service (#2780)
Browse files Browse the repository at this point in the history

Optimize the number of IsFSSEnabled calls for WCP defined FSS
  • Loading branch information
shalini-b committed Feb 2, 2024
1 parent 5873766 commit 3345236
Show file tree
Hide file tree
Showing 10 changed files with 106 additions and 75 deletions.
1 change: 0 additions & 1 deletion manifests/supervisorcluster/1.25/cns-csi.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -467,7 +467,6 @@ data:
"list-volumes": "true"
"cnsmgr-suspend-create-volume": "true"
"listview-tasks": "true"
"podvm-on-stretched-supervisor": "false"
kind: ConfigMap
metadata:
name: csi-feature-states
Expand Down
1 change: 0 additions & 1 deletion manifests/supervisorcluster/1.26/cns-csi.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -467,7 +467,6 @@ data:
"list-volumes": "true"
"cnsmgr-suspend-create-volume": "true"
"listview-tasks": "true"
"podvm-on-stretched-supervisor": "false"
kind: ConfigMap
metadata:
name: csi-feature-states
Expand Down
1 change: 0 additions & 1 deletion manifests/supervisorcluster/1.27/cns-csi.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -467,7 +467,6 @@ data:
"list-volumes": "true"
"cnsmgr-suspend-create-volume": "true"
"listview-tasks": "true"
"podvm-on-stretched-supervisor": "false"
kind: ConfigMap
metadata:
name: csi-feature-states
Expand Down
1 change: 0 additions & 1 deletion manifests/supervisorcluster/1.28/cns-csi.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -467,7 +467,6 @@ data:
"list-volumes": "true"
"cnsmgr-suspend-create-volume": "true"
"listview-tasks": "true"
"podvm-on-stretched-supervisor": "false"
kind: ConfigMap
metadata:
name: csi-feature-states
Expand Down
33 changes: 31 additions & 2 deletions pkg/csi/service/common/commonco/k8sorchestrator/k8sorchestrator.go
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,8 @@ var (
operationMode string
svFssCRMutex = &sync.RWMutex{}
k8sOrchestratorInitMutex = &sync.RWMutex{}
// wcpCapabilityFssMap is the cache variable which stores the data of wcp-cluster-capabilities configmap.
wcpCapabilityFssMap map[string]string
)

// FSSConfigMapInfo contains details about the FSS configmap(s) present in
Expand Down Expand Up @@ -1076,6 +1078,33 @@ func (c *K8sOrchestrator) IsFSSEnabled(ctx context.Context, featureName string)
"Setting the feature state to false", featureName, c.internalFSS.configMapName)
return false
} else if c.clusterFlavor == cnstypes.CnsClusterFlavorWorkload {
// Check if it is WCP defined feature state.
if _, exists := common.WCPFeatureStates[featureName]; exists {
log.Infof("Feature %q is a WCP defined feature state. Reading the %q configmap in %q namespace.",
featureName, common.WCPCapabilityConfigMapName, common.KubeSystemNamespace)
// Check the `wcp-cluster-capabilities` configmap in supervisor for the FSS value.
if wcpCapabilityFssMap == nil {
wcpCapabilityConfigMap, err := c.k8sClient.CoreV1().ConfigMaps(common.KubeSystemNamespace).Get(ctx,
common.WCPCapabilityConfigMapName, metav1.GetOptions{})
if err != nil {
log.Errorf("failed to fetch WCP FSS configmap %q/%q. Setting the feature state "+
"to false. Error: %+v", common.KubeSystemNamespace, common.WCPCapabilityConfigMapName, err)
return false
}
wcpCapabilityFssMap = wcpCapabilityConfigMap.Data
}
if fssVal, exists := wcpCapabilityFssMap[featureName]; exists {
supervisorFeatureState, err = strconv.ParseBool(fssVal)
if err != nil {
log.Errorf("Error while converting %q feature state with value: %q in "+
"%q/%q configmap to boolean. Setting the feature state to false. Error: %+v", featureName,
fssVal, common.KubeSystemNamespace, common.WCPCapabilityConfigMapName, err)
return false
}
return supervisorFeatureState
}
}

// Check SV FSS map.
c.supervisorFSS.featureStatesLock.RLock()
if flag, ok := c.supervisorFSS.featureStates[featureName]; ok {
Expand All @@ -1097,7 +1126,7 @@ func (c *K8sOrchestrator) IsFSSEnabled(ctx context.Context, featureName string)
c.internalFSS.featureStatesLock.RLock()
if flag, ok := c.internalFSS.featureStates[featureName]; ok {
c.internalFSS.featureStatesLock.RUnlock()
internalFeatureState, err := strconv.ParseBool(flag)
internalFeatureState, err = strconv.ParseBool(flag)
if err != nil {
log.Errorf("Error while converting %v feature state value: %v to boolean. "+
"Setting the feature state to false", featureName, internalFeatureState)
Expand All @@ -1118,7 +1147,7 @@ func (c *K8sOrchestrator) IsFSSEnabled(ctx context.Context, featureName string)
c.supervisorFSS.featureStatesLock.RLock()
if flag, ok := c.supervisorFSS.featureStates[featureName]; ok {
c.supervisorFSS.featureStatesLock.RUnlock()
supervisorFeatureState, err := strconv.ParseBool(flag)
supervisorFeatureState, err = strconv.ParseBool(flag)
if err != nil {
log.Errorf("Error while converting %v feature state value: %v to boolean. "+
"Setting the feature state to false", featureName, supervisorFeatureState)
Expand Down
15 changes: 13 additions & 2 deletions pkg/csi/service/common/constants.go
Original file line number Diff line number Diff line change
Expand Up @@ -341,6 +341,12 @@ const (
// CreateCSINodeAnnotation is the annotation applied by spherelet
// to convey to CSI driver to create a CSINode instance for each node.
CreateCSINodeAnnotation = "vmware-system/csi-create-csinode-object"

// KubeSystemNamespace is the namespace for system resources.
KubeSystemNamespace = "kube-system"

// WCPCapabilityConfigMapName is the name of the configmap where WCP component's FSS values are stored.
WCPCapabilityConfigMapName = "wcp-cluster-capabilities"
)

// Supported container orchestrators.
Expand Down Expand Up @@ -408,6 +414,11 @@ const (
ListViewPerf = "listview-tasks"
// TopologyAwareFileVolume enables provisioning of file volumes in a topology enabled environment
TopologyAwareFileVolume = "topology-aware-file-volume"
// PodVMOnStretchedSupervisor enables Pod Vm Support on stretched supervisor cluster
PodVMOnStretchedSupervisor = "podvm-on-stretched-supervisor"
// PodVMOnStretchedSupervisor is the WCP FSS which determines if PodVM
// support is available on stretched supervisor cluster.
PodVMOnStretchedSupervisor = "PodVM_On_Stretched_Supervisor_Supported"
)

// WCPFeatureStates is the set of feature-switch names that are owned by the
// WCP service rather than by the CSI driver's own FSS configmaps. Only map
// membership matters (values are empty structs); IsFSSEnabled consults this
// set to decide whether to resolve a feature from the
// wcp-cluster-capabilities configmap instead of the supervisor FSS map.
var WCPFeatureStates = map[string]struct{}{
	PodVMOnStretchedSupervisor: {},
}
Original file line number Diff line number Diff line change
Expand Up @@ -40,12 +40,11 @@ import (
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
storagepolicyusagev1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/storagepolicy/v1alpha1"
"sigs.k8s.io/vsphere-csi-driver/v3/pkg/syncer"

clientConfig "sigs.k8s.io/controller-runtime/pkg/client/config"
apis "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator"
cnsregistervolumev1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsregistervolume/v1alpha1"
storagepolicyusagev1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/storagepolicy/v1alpha1"
volumes "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/cns-lib/volume"
cnsvsphere "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/cns-lib/vsphere"
commonconfig "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/config"
Expand All @@ -55,26 +54,26 @@ import (
"sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/logger"
"sigs.k8s.io/vsphere-csi-driver/v3/pkg/internalapis/cnsvolumeinfo"
k8s "sigs.k8s.io/vsphere-csi-driver/v3/pkg/kubernetes"
"sigs.k8s.io/vsphere-csi-driver/v3/pkg/syncer"
)

const (
	// defaultMaxWorkerThreadsForRegisterVolume is the default cap on
	// concurrent worker threads for the CnsRegisterVolume controller.
	defaultMaxWorkerThreadsForRegisterVolume = 40
	// staticPvNamePrefix is the name prefix used for PVs created through
	// static volume registration — presumably prepended to a volume ID;
	// confirm against the PV-creation call site.
	staticPvNamePrefix = "static-pv-"
)

// backOffDuration is a map of CnsRegisterVolume names to the time after which
// a request for this instance will be requeued.
// Initialized to 1 second for new instances and for instances whose latest
// reconcile operation succeeded.
// If the reconcile fails, backoff is incremented exponentially.
var (
// backOffDuration is a map of CnsRegisterVolume names to the time after which
// a request for this instance will be requeued.
// Initialized to 1 second for new instances and for instances whose latest
// reconcile operation succeeded.
// If the reconcile fails, backoff is incremented exponentially.
backOffDuration map[string]time.Duration
backOffDurationMapMutex = sync.Mutex{}
)

var topologyMgr commoncotypes.ControllerTopologyService
var isPodVMOnStretchedSupervisorEnabled bool
var clusterComputeResourceMoIds []string
topologyMgr commoncotypes.ControllerTopologyService
clusterComputeResourceMoIds []string
)

// Add creates a new CnsRegisterVolume Controller and adds it to the Manager,
// ConfigurationInfo and VirtualCenterTypes. The Manager will set fields on
Expand All @@ -95,9 +94,7 @@ func Add(mgr manager.Manager, clusterFlavor cnstypes.CnsClusterFlavor,
log.Errorf("failed to get clusterComputeResourceMoIds. err: %v", err)
return err
}
isPodVMOnStretchedSupervisorEnabled =
commonco.ContainerOrchestratorUtility.IsFSSEnabled(ctx, common.PodVMOnStretchedSupervisor)
if isPodVMOnStretchedSupervisorEnabled {
if syncer.IsPodVMOnStretchSupervisorFSSEnabled {
topologyMgr, err = commonco.ContainerOrchestratorUtility.InitTopologyServiceInController(ctx)
if err != nil {
log.Errorf("failed to init topology manager. err: %v", err)
Expand Down Expand Up @@ -299,7 +296,7 @@ func (r *ReconcileCnsRegisterVolume) Reconcile(ctx context.Context,
return reconcile.Result{RequeueAfter: timeout}, nil
}

if isPodVMOnStretchedSupervisorEnabled && len(clusterComputeResourceMoIds) > 1 {
if syncer.IsPodVMOnStretchSupervisorFSSEnabled && len(clusterComputeResourceMoIds) > 1 {
azClustersMap := topologyMgr.GetAZClustersMap(ctx)
isAccessible := isDatastoreAccessibleToAZClusters(ctx, vc, azClustersMap, volume.DatastoreUrl)
if !isAccessible {
Expand Down Expand Up @@ -339,7 +336,7 @@ func (r *ReconcileCnsRegisterVolume) Reconcile(ctx context.Context,
return reconcile.Result{RequeueAfter: timeout}, nil
}

if isPodVMOnStretchedSupervisorEnabled && len(clusterComputeResourceMoIds) > 1 {
if syncer.IsPodVMOnStretchSupervisorFSSEnabled && len(clusterComputeResourceMoIds) > 1 {
// Calculate accessible topology for the provisioned volume.
datastoreAccessibleTopology, err := topologyMgr.GetTopologyInfoFromNodes(ctx,
commoncotypes.WCPRetrieveTopologyInfoParams{
Expand Down Expand Up @@ -382,7 +379,7 @@ func (r *ReconcileCnsRegisterVolume) Reconcile(ctx context.Context,

// Get K8S storageclass name mapping the storagepolicy id with Immediate volume binding mode
storageClassName, err := getK8sStorageClassNameWithImmediateBindingModeForPolicy(ctx, k8sclient, r.client,
volume.StoragePolicyId, request.Namespace, isPodVMOnStretchedSupervisorEnabled)
volume.StoragePolicyId, request.Namespace, syncer.IsPodVMOnStretchSupervisorFSSEnabled)
if err != nil {
msg := fmt.Sprintf("Failed to find K8S Storageclass mapping storagepolicyId: %s and assigned to namespace: %s",
volume.StoragePolicyId, request.Namespace)
Expand Down Expand Up @@ -496,7 +493,7 @@ func (r *ReconcileCnsRegisterVolume) Reconcile(ctx context.Context,
isBound, err := isPVCBound(ctx, k8sclient, pvc, time.Duration(1*time.Minute))
if isBound {
log.Infof("PVC: %s is bound", instance.Spec.PvcName)
if commonco.ContainerOrchestratorUtility.IsFSSEnabled(ctx, common.PodVMOnStretchedSupervisor) {
if syncer.IsPodVMOnStretchSupervisorFSSEnabled {
// Create CNSVolumeInfo CR for static pv
capacityInBytes := capacityInMb * common.MbInBytes
capacity := resource.NewQuantity(capacityInBytes, resource.BinarySI)
Expand Down
68 changes: 33 additions & 35 deletions pkg/syncer/cnsoperator/manager/init.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,23 +31,23 @@ import (
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/manager/signals"

cnsoperatorconfig "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/config"
internalapiscnsoperatorconfig "sigs.k8s.io/vsphere-csi-driver/v3/pkg/internalapis/cnsoperator/config"
csinodetopologyconfig "sigs.k8s.io/vsphere-csi-driver/v3/pkg/internalapis/csinodetopology/config"

cnsoperatorv1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator"
cnsvolumemetadatav1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsvolumemetadata/v1alpha1"
cnsoperatorconfig "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/config"
volumes "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/cns-lib/volume"
cnsvsphere "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/cns-lib/vsphere"
commonconfig "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/config"
"sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/common"
"sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/common/commonco"
"sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/logger"
"sigs.k8s.io/vsphere-csi-driver/v3/pkg/internalapis"
internalapiscnsoperatorconfig "sigs.k8s.io/vsphere-csi-driver/v3/pkg/internalapis/cnsoperator/config"
triggercsifullsyncv1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/internalapis/cnsoperator/triggercsifullsync/v1alpha1"
"sigs.k8s.io/vsphere-csi-driver/v3/pkg/internalapis/csinodetopology"
csinodetopologyconfig "sigs.k8s.io/vsphere-csi-driver/v3/pkg/internalapis/csinodetopology/config"
csinodetopologyv1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/internalapis/csinodetopology/v1alpha1"
k8s "sigs.k8s.io/vsphere-csi-driver/v3/pkg/kubernetes"
"sigs.k8s.io/vsphere-csi-driver/v3/pkg/syncer"
"sigs.k8s.io/vsphere-csi-driver/v3/pkg/syncer/cnsoperator/controller"
)

Expand All @@ -57,7 +57,7 @@ var (
metricsPort int32 = 8383
)

type cnsOperator struct {
type cnsOperatorInfo struct {
configInfo *commonconfig.ConfigurationInfo
coCommonInterface commonco.COCommonInterface
}
Expand All @@ -67,7 +67,7 @@ func InitCnsOperator(ctx context.Context, clusterFlavor cnstypes.CnsClusterFlavo
configInfo *commonconfig.ConfigurationInfo, coInitParams *interface{}) error {
log := logger.GetLogger(ctx)
log.Infof("Initializing CNS Operator")
cnsOperator := &cnsOperator{}
cnsOperator := &cnsOperatorInfo{}
cnsOperator.configInfo = configInfo

var volumeManager volumes.Manager
Expand Down Expand Up @@ -119,6 +119,8 @@ func InitCnsOperator(ctx context.Context, clusterFlavor cnstypes.CnsClusterFlavo
// TODO: Verify leader election for CNS Operator in multi-master mode
// Create CRD's for WCP flavor.
if clusterFlavor == cnstypes.CnsClusterFlavorWorkload {
syncer.IsPodVMOnStretchSupervisorFSSEnabled = cnsOperator.coCommonInterface.IsFSSEnabled(ctx,
common.PodVMOnStretchedSupervisor)
// Create CnsNodeVmAttachment CRD
err = k8s.CreateCustomResourceDefinitionFromManifest(ctx, cnsoperatorconfig.EmbedCnsNodeVmAttachmentCRFile,
cnsoperatorconfig.EmbedCnsNodeVmAttachmentCRFileName)
Expand Down Expand Up @@ -152,8 +154,7 @@ func InitCnsOperator(ctx context.Context, clusterFlavor cnstypes.CnsClusterFlavo
if stretchedSupervisor {
log.Info("Observed stretchedSupervisor setup")
}
if !stretchedSupervisor ||
(stretchedSupervisor && commonco.ContainerOrchestratorUtility.IsFSSEnabled(ctx, common.PodVMOnStretchedSupervisor)) {
if !stretchedSupervisor || (stretchedSupervisor && syncer.IsPodVMOnStretchSupervisorFSSEnabled) {
// Create CnsRegisterVolume CRD from manifest.
log.Infof("Creating %q CRD", cnsoperatorv1alpha1.CnsRegisterVolumePlural)
err = k8s.CreateCustomResourceDefinitionFromManifest(ctx, cnsoperatorconfig.EmbedCnsRegisterVolumeCRFile,
Expand All @@ -163,6 +164,27 @@ func InitCnsOperator(ctx context.Context, clusterFlavor cnstypes.CnsClusterFlavo
return err
}
log.Infof("%q CRD is created successfully", cnsoperatorv1alpha1.CnsRegisterVolumePlural)

// Clean up routine to cleanup successful CnsRegisterVolume instances.
log.Info("Starting go routine to cleanup successful CnsRegisterVolume instances.")
err = watcher(ctx, cnsOperator)
if err != nil {
log.Error("Failed to watch on config file for changes to "+
"CnsRegisterVolumesCleanupIntervalInMin. Error: %+v", err)
return err
}
go func() {
for {
ctx, log = logger.GetNewContextWithLogger()
log.Infof("Triggering CnsRegisterVolume cleanup routine")
cleanUpCnsRegisterVolumeInstances(ctx, restConfig,
cnsOperator.configInfo.Cfg.Global.CnsRegisterVolumesCleanupIntervalInMin)
log.Infof("Completed CnsRegisterVolume cleanup")
for i := 1; i <= cnsOperator.configInfo.Cfg.Global.CnsRegisterVolumesCleanupIntervalInMin; i++ {
time.Sleep(time.Duration(1 * time.Minute))
}
}
}()
}

if !stretchedSupervisor {
Expand All @@ -186,30 +208,6 @@ func InitCnsOperator(ctx context.Context, clusterFlavor cnstypes.CnsClusterFlavo
}
}
}

if !stretchedSupervisor ||
(stretchedSupervisor && commonco.ContainerOrchestratorUtility.IsFSSEnabled(ctx, common.PodVMOnStretchedSupervisor)) {
// Clean up routine to cleanup successful CnsRegisterVolume instances.
log.Info("Starting go routine to cleanup successful CnsRegisterVolume instances.")
err = watcher(ctx, cnsOperator)
if err != nil {
log.Error("Failed to watch on config file for changes to CnsRegisterVolumesCleanupIntervalInMin. Error: %+v",
err)
return err
}
go func() {
for {
ctx, log = logger.GetNewContextWithLogger()
log.Infof("Triggering CnsRegisterVolume cleanup routine")
cleanUpCnsRegisterVolumeInstances(ctx, restConfig,
cnsOperator.configInfo.Cfg.Global.CnsRegisterVolumesCleanupIntervalInMin)
log.Infof("Completed CnsRegisterVolume cleanup")
for i := 1; i <= cnsOperator.configInfo.Cfg.Global.CnsRegisterVolumesCleanupIntervalInMin; i++ {
time.Sleep(time.Duration(1 * time.Minute))
}
}
}()
}
} else if clusterFlavor == cnstypes.CnsClusterFlavorVanilla {
// Create CSINodeTopology CRD.
err = k8s.CreateCustomResourceDefinitionFromManifest(ctx, csinodetopologyconfig.EmbedCSINodeTopologyFile,
Expand Down Expand Up @@ -351,7 +349,7 @@ func InitCommonModules(ctx context.Context, clusterFlavor cnstypes.CnsClusterFla

// watcher watches on the vsphere.conf file mounted as secret within the syncer
// container.
func watcher(ctx context.Context, cnsOperator *cnsOperator) error {
func watcher(ctx context.Context, cnsOperator *cnsOperatorInfo) error {
log := logger.GetLogger(ctx)
cfgPath := commonconfig.GetConfigPath(ctx)
watcher, err := fsnotify.NewWatcher()
Expand Down Expand Up @@ -395,9 +393,9 @@ func watcher(ctx context.Context, cnsOperator *cnsOperator) error {
return err
}

// reloadConfiguration reloads configuration from the secret, and cnsOperator
// reloadConfiguration reloads configuration from the secret, and cnsOperatorInfo
// with the latest configInfo.
func reloadConfiguration(ctx context.Context, cnsOperator *cnsOperator) error {
func reloadConfiguration(ctx context.Context, cnsOperator *cnsOperatorInfo) error {
log := logger.GetLogger(ctx)
cfg, err := commonconfig.GetConfig(ctx)
if err != nil {
Expand Down
9 changes: 5 additions & 4 deletions pkg/syncer/fullsync.go
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ func CsiFullSync(ctx context.Context, metadataSyncer *metadataSyncInformer, vc s
}
// Attempt to patch StoragePolicyUsage CRs
if metadataSyncer.clusterFlavor == cnstypes.CnsClusterFlavorWorkload {
if metadataSyncer.coCommonInterface.IsFSSEnabled(ctx, common.PodVMOnStretchedSupervisor) {
if IsPodVMOnStretchSupervisorFSSEnabled {
storagePolicyUsageCRSync(ctx, metadataSyncer)
}
}
Expand Down Expand Up @@ -290,7 +290,7 @@ func CsiFullSync(ctx context.Context, metadataSyncer *metadataSyncInformer, vc s
// Sync VolumeInfo CRs for the below conditions:
// Either it is a Vanilla k8s deployment with Multi-VC configuration or, it's a StretchSupervisor cluster
if isMultiVCenterFssEnabled && len(metadataSyncer.configInfo.Cfg.VirtualCenter) > 1 ||
(metadataSyncer.clusterFlavor == cnstypes.CnsClusterFlavorWorkload && isPodVMOnStretchSupervisorFSSEnabled) {
(metadataSyncer.clusterFlavor == cnstypes.CnsClusterFlavorWorkload && IsPodVMOnStretchSupervisorFSSEnabled) {
volumeInfoCRFullSync(ctx, metadataSyncer, vc)
cleanUpVolumeInfoCrDeletionMap(ctx, metadataSyncer, vc)
}
Expand Down Expand Up @@ -365,7 +365,7 @@ func volumeInfoCRFullSync(ctx context.Context, metadataSyncer *metadataSyncInfor

volumeIdTok8sPVMap := make(map[string]*v1.PersistentVolume)
scNameToPolicyIdMap := make(map[string]string)
if metadataSyncer.clusterFlavor == cnstypes.CnsClusterFlavorWorkload && isPodVMOnStretchSupervisorFSSEnabled {
if metadataSyncer.clusterFlavor == cnstypes.CnsClusterFlavorWorkload && IsPodVMOnStretchSupervisorFSSEnabled {
// Create volumeIdTok8sPVMap map for easy lookup of PVs
for _, pv := range currentK8sPV {
if pv.Spec.CSI != nil {
Expand Down Expand Up @@ -414,7 +414,8 @@ func volumeInfoCRFullSync(ctx context.Context, metadataSyncer *metadataSyncInfor
"Error: %+v", vc, volumeID, err)
continue
}
} else if metadataSyncer.clusterFlavor == cnstypes.CnsClusterFlavorWorkload && isPodVMOnStretchSupervisorFSSEnabled {
} else if metadataSyncer.clusterFlavor == cnstypes.CnsClusterFlavorWorkload &&
IsPodVMOnStretchSupervisorFSSEnabled {
pv := volumeIdTok8sPVMap[volumeID]
pvc, err := metadataSyncer.pvcLister.PersistentVolumeClaims(
pv.Spec.ClaimRef.Namespace).Get(pv.Spec.ClaimRef.Name)
Expand Down
Loading

0 comments on commit 3345236

Please sign in to comment.