diff --git a/assets/rbac/csi_driver_controller_role.yaml b/assets/rbac/csi_driver_controller_role.yaml
index 114205d14..b70434f46 100644
--- a/assets/rbac/csi_driver_controller_role.yaml
+++ b/assets/rbac/csi_driver_controller_role.yaml
@@ -40,7 +40,7 @@ rules:
     resources: ["triggercsifullsyncs"]
     verbs: ["create", "get", "update", "watch", "list"]
   - apiGroups: ["cns.vmware.com"]
-    resources: ["cnsvspherevolumemigrations"]
+    resources: ["cnsvolumeinfoes"]
     verbs: ["create", "get", "list", "watch", "update", "delete"]
   - apiGroups: ["cns.vmware.com"]
     resources: ["cnsvolumeoperationrequests"]
diff --git a/pkg/operator/storageclasscontroller/vmware.go b/pkg/operator/storageclasscontroller/vmware.go
index 9ad3c2fbe..f868e83f4 100644
--- a/pkg/operator/storageclasscontroller/vmware.go
+++ b/pkg/operator/storageclasscontroller/vmware.go
@@ -43,7 +43,7 @@ const (
 var associatedTypesRaw = []string{"StoragePod", "Datastore", "ResourcePool", "VirtualMachine", "Folder"}
 
 type vCenterInterface interface {
-	GetDefaultDatastore(ctx context.Context) (*mo.Datastore, error)
+	GetDefaultDatastore(ctx context.Context, infra *v1.Infrastructure) (*mo.Datastore, error)
 	createStoragePolicy(ctx context.Context) (string, error)
 	checkForExistingPolicy(ctx context.Context) (bool, error)
 	createOrUpdateTag(ctx context.Context, ds *mo.Datastore) error
@@ -78,12 +78,21 @@ func NewStoragePolicyAPI(ctx context.Context, connection []*vclib.VSphereConnect
 	return storagePolicyAPIClient
 }
 
-func (v *storagePolicyAPI) GetDefaultDatastore(ctx context.Context) (*mo.Datastore, error) {
+func (v *storagePolicyAPI) GetDefaultDatastore(ctx context.Context, infra *v1.Infrastructure) (*mo.Datastore, error) {
 	vmClient := v.vcenterApiConnection.Client
-	config := v.vcenterApiConnection.Config
+	//config := v.vcenterApiConnection.Config
 	finder := find.NewFinder(vmClient.Client, false)
-	dcName := config.Workspace.Datacenter
-	dsName := config.Workspace.DefaultDatastore
+
+	// Following the pattern of the older generated ini config, the default datastore comes from FD[0]; if no FDs
+	// are defined, we'll grab it from the deprecated fields.
+	var dcName, dsName string
+	if infra.Spec.PlatformSpec.VSphere != nil && len(infra.Spec.PlatformSpec.VSphere.FailureDomains) > 0 {
+		fd := infra.Spec.PlatformSpec.VSphere.FailureDomains[0]
+		dcName = fd.Topology.Datacenter
+		dsName = fd.Topology.Datastore
+	} else {
+		return nil, fmt.Errorf("unable to determine default datastore from current config")
+	}
 	dc, err := finder.Datacenter(ctx, dcName)
 	if err != nil {
 		return nil, fmt.Errorf("failed to access datacenter %s: %s", dcName, err)
@@ -197,7 +206,11 @@ func (v *storagePolicyAPI) createStoragePolicy(ctx context.Context) (string, err
 		return v.createZonalStoragePolicy(ctx)
 	}
 
-	dsName := v.vcenterApiConnection.Config.Workspace.DefaultDatastore
+	// Since we create a zonal storage policy when in multi vCenter or single vCenter w/ zones, the below is only
+	// for the case where the cluster was upgraded from a version that used a non-FailureDomain config. For now,
+	// let's just return an error.
+	return v.policyName, fmt.Errorf("current cluster config is not supported and needs to be migrated to zonal")
+	/*dsName := v.vcenterApiConnection.Config.Workspace.DefaultDatastore
 	dcName := v.vcenterApiConnection.Config.Workspace.Datacenter
 
 	err = v.attachTags(ctx, dcName, dsName)
@@ -212,7 +225,7 @@ func (v *storagePolicyAPI) createStoragePolicy(ctx context.Context) (string, err
 		}
 	}
 
-	return v.policyName, nil
+	return v.policyName, nil*/
 }
 
 func (v *storagePolicyAPI) checkForTagOnDatastore(ctx context.Context, dsMo *mo.Datastore) bool {
diff --git a/pkg/operator/utils/topology.go b/pkg/operator/utils/topology.go
index 96d2b2465..8e84c4aaf 100644
--- a/pkg/operator/utils/topology.go
+++ b/pkg/operator/utils/topology.go
@@ -8,7 +8,7 @@ import (
 	opv1 "github.com/openshift/api/operator/v1"
 	"github.com/sirupsen/logrus"
 	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/legacy-cloud-providers/vsphere"
+	vsphere "k8s.io/cloud-provider-vsphere/pkg/common/config"
 )
 
 const (
@@ -46,23 +46,23 @@ func GetTopologyCategories(clusterCSIDriver *opv1.ClusterCSIDriver, infra *cfgv1
 	return GetCSIDriverTopologyCategories(clusterCSIDriver)
 }
 
-func GetDatacenters(config *vsphere.VSphereConfig, multiVCenterEnabled bool) ([]string, error) {
-	datacenters := []string{config.Workspace.Datacenter}
-
-	virtualCenterIPs := sets.StringKeySet(config.VirtualCenter)
-
-	if len(virtualCenterIPs) > 1 && !multiVCenterEnabled {
+func GetVCenters(config *vsphere.Config, multiVCenterEnabled bool) ([]string, error) {
+	if len(config.VirtualCenter) > 1 && !multiVCenterEnabled {
 		return nil, fmt.Errorf("the multi vcenter cloud config must define a single VirtualCenter")
-	} else if len(virtualCenterIPs) == 0 {
+	} else if len(config.VirtualCenter) == 0 {
 		return nil, fmt.Errorf("cloud config must define at least a single VirtualCenter")
 	}
 
-	for _, virtualCenterIP := range virtualCenterIPs.List() {
-		logrus.Infof("Processing virtual center: %v", virtualCenterIP)
-		if virtualCenterConfig, ok := config.VirtualCenter[virtualCenterIP]; ok {
-			datacenters = strings.Split(virtualCenterConfig.Datacenters, ",")
-		}
+	var vCenters []string
+	for _, vcenter := range config.VirtualCenter {
+		vCenters = append(vCenters, vcenter.VCenterIP)
 	}
+
+	return vCenters, nil
+}
+
+func GetDatacenters(config *vsphere.Config, vcenter string, multiVCenterEnabled bool) ([]string, error) {
+	datacenters := strings.Split(config.VirtualCenter[vcenter].Datacenters, ",")
 	logrus.Infof("Gathered the following data centers: %v", datacenters)
 	return datacenters, nil
 }
diff --git a/pkg/operator/vclib/connection.go b/pkg/operator/vclib/connection.go
index 79a0a3b15..ac76e2842 100644
--- a/pkg/operator/vclib/connection.go
+++ b/pkg/operator/vclib/connection.go
@@ -6,7 +6,7 @@ import (
 	"github.com/openshift/vmware-vsphere-csi-driver-operator/pkg/version"
 	"github.com/vmware/govmomi/vapi/rest"
 	"github.com/vmware/govmomi/vim25/soap"
-	"k8s.io/legacy-cloud-providers/vsphere"
+	vsphere "k8s.io/cloud-provider-vsphere/pkg/common/config"
 	"net/url"
 	"sync"
 	"time"
@@ -24,7 +24,7 @@ type VSphereConnection struct {
 	Hostname string
 	Port     string
 	Insecure bool
-	Config   *vsphere.VSphereConfig
+	Config   *vsphere.Config
 }
 
 const apiTimeout = 10 * time.Minute
@@ -33,13 +33,13 @@ var (
 	clientLock sync.Mutex
 )
 
-func NewVSphereConnection(username, password string, cfg *vsphere.VSphereConfig) *VSphereConnection {
+func NewVSphereConnection(username, password, vcenter string, cfg *vsphere.Config) *VSphereConnection {
 	return &VSphereConnection{
 		Username: username,
 		Password: password,
 		Config:   cfg,
-		Hostname: cfg.Workspace.VCenterIP,
-		Insecure: cfg.Global.InsecureFlag,
+		Hostname: cfg.VirtualCenter[vcenter].VCenterIP,
+		Insecure: cfg.VirtualCenter[vcenter].InsecureFlag,
 	}
 }
diff --git a/pkg/operator/vspherecontroller/checks/check_nodes.go b/pkg/operator/vspherecontroller/checks/check_nodes.go
index 5f0f90867..fee2d350a 100644
--- a/pkg/operator/vspherecontroller/checks/check_nodes.go
+++ b/pkg/operator/vspherecontroller/checks/check_nodes.go
@@ -220,15 +220,22 @@ func (n *NodeChecker) checkOrMarkHostForProcessing(hostName string) bool {
 
 func (n *NodeChecker) getHost(ctx context.Context, checkOpts CheckArgs, hostRef *types.ManagedObjectReference) (mo.HostSystem, error) {
 	var o mo.HostSystem
+	var err error
 	hostName := hostRef.Value
+
+	// Since we need to iterate through the connections again, let's not throw an error prematurely.
 	for _, vConn := range checkOpts.vmConnection {
 		hostSystemObject := object.NewHostSystem(vConn.Client.Client, *hostRef)
-		err := hostSystemObject.Properties(ctx, hostSystemObject.Reference(), []string{"name", "config.product"}, &o)
-		if err != nil {
-			return o, fmt.Errorf("failed to load ESXi host %s: %v", hostName, err)
+		err = hostSystemObject.Properties(ctx, hostSystemObject.Reference(), []string{"name", "config.product"}, &o)
+		if err == nil {
+			// Object found
+			break
 		}
 	}
+	if err != nil {
+		return o, fmt.Errorf("failed to load ESXi host %s: %v", hostName, err)
+	}
 	if o.Config == nil {
 		return o, fmt.Errorf("error getting ESXi host version %s: host.config is nil", hostName)
 	}
@@ -241,7 +248,9 @@ func getVM(ctx context.Context, checkOpts CheckArgs, node *v1.Node) (*mo.Virtual
 	vmClient := client.Client.Client
 	vmConfig := client.Config
 
-	dataCenterNames, err := utils.GetDatacenters(vmConfig, checkOpts.multiVCenterEnabled)
+	// When checking VMs, they may be spread across vCenters, so we'll need to get the vCenters from the config
+	// and check each one for the VM.
+	dataCenterNames, err := utils.GetDatacenters(vmConfig, client.Hostname, checkOpts.multiVCenterEnabled)
 	if err != nil {
 		return nil, err
 	}
diff --git a/pkg/operator/vspherecontroller/vspherecontroller.go b/pkg/operator/vspherecontroller/vspherecontroller.go
index 1a209dd2d..95fad1adc 100644
--- a/pkg/operator/vspherecontroller/vspherecontroller.go
+++ b/pkg/operator/vspherecontroller/vspherecontroller.go
@@ -8,14 +8,12 @@ import (
 	"time"
 
 	"github.com/openshift/vmware-vsphere-csi-driver-operator/assets"
+	"github.com/openshift/vmware-vsphere-csi-driver-operator/pkg/operator/storageclasscontroller"
 	iniv1 "gopkg.in/ini.v1"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	vsphere "k8s.io/cloud-provider-vsphere/pkg/common/config"
-	legacy "k8s.io/legacy-cloud-providers/vsphere"
-
-	"github.com/openshift/vmware-vsphere-csi-driver-operator/pkg/operator/storageclasscontroller"
 
 	ocpv1 "github.com/openshift/api/config/v1"
 	operatorapi "github.com/openshift/api/operator/v1"
@@ -451,7 +449,7 @@ func (c *VSphereController) createVCenterConnection(ctx context.Context, infra *
 	// Eventually the cluster needs to migrate to the new version. For now, let's just use the infrastructure
 	// object as the source of truth.
 	klog.V(3).Infof("Creating vSphere connection")
-	/*cloudConfig := infra.Spec.CloudConfig
+	cloudConfig := infra.Spec.CloudConfig
 	cloudConfigMap, err := c.configMapLister.ConfigMaps(cloudConfigNamespace).Get(cloudConfig.Name)
 	if err != nil {
 		return fmt.Errorf("failed to get cloud config: %v", err)
 	}
@@ -460,15 +458,15 @@ func (c *VSphereController) createVCenterConnection(ctx context.Context, infra *
 	cfgString, ok := cloudConfigMap.Data[infra.Spec.CloudConfig.Key]
 	if !ok {
 		return fmt.Errorf("cloud config %s/%s does not contain key %q", cloudConfigNamespace, cloudConfig.Name, cloudConfig.Key)
-	}*/
+	}
 
 	//cfg := new(vsphere.VSphereConfig)
 	//err = gcfg.ReadStringInto(cfg, cfgString)
 	// If we use infra to iterate through vcenters, do we need to load config?
-	/*cfg, err := vsphere.ReadConfig([]byte(cfgString))
+	cfg, err := vsphere.ReadConfig([]byte(cfgString))
 	if err != nil {
 		return err
-	}*/
+	}
 
 	for _, vcenter := range infra.Spec.PlatformSpec.VSphere.VCenters {
 		secret, err := c.secretLister.Secrets(c.targetNamespace).Get(cloudCredSecretName)
@@ -487,13 +485,13 @@ func (c *VSphereController) createVCenterConnection(ctx context.Context, infra *
 		}
 
 		// just a hack for other function compatibility.
-		cfg := new(legacy.VSphereConfig)
+		/*cfg := new(legacy.VSphereConfig)
 		cfg.Workspace.VCenterIP = vcenter.Server
 		cfg.Workspace.Datacenter = vcenter.Datacenters[0]
 		cfg.Workspace.DefaultDatastore = infra.Spec.PlatformSpec.VSphere.FailureDomains[0].Topology.Datastore
-		cfg.Global.InsecureFlag = true
+		cfg.Global.InsecureFlag = true*/
 
-		vs := vclib.NewVSphereConnection(string(username), string(password), cfg)
+		vs := vclib.NewVSphereConnection(string(username), string(password), vcenter.Server, cfg)
 		c.vSphereConnections = append(c.vSphereConnections, vs)
 	}
 	return nil
@@ -650,10 +648,10 @@ func (c *VSphereController) createCSIConfigMap(
 		return err
 	}
 
-	// TODO: For multi vcenter, what is our approach here?
+	// TODO: NAG - For multi vcenter, what is our approach here?
 	storageApiClient := storageclasscontroller.NewStoragePolicyAPI(ctx, c.vSphereConnections, infra)
 
-	defaultDatastore, err := storageApiClient.GetDefaultDatastore(ctx)
+	defaultDatastore, err := storageApiClient.GetDefaultDatastore(ctx, infra)
 	if err != nil {
 		return fmt.Errorf("unable to fetch default datastore url: %v", err)
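
For reviewers, a minimal, hypothetical sketch (not part of this PR) of how the reworked helpers are expected to compose: parse the cloud config with vsphere.ReadConfig (the same entry point createVCenterConnection now uses), enumerate vCenters with utils.GetVCenters, then resolve each vCenter's datacenters with utils.GetDatacenters. The ini snippet, hostnames, and credentials are illustrative and assume the upstream cloud-provider-vsphere config format; it also assumes the VirtualCenter map keys match the VCenterIP values GetVCenters returns, which is how the connection code above indexes the map via vcenter.Server / client.Hostname.

package main

import (
	"fmt"

	"github.com/openshift/vmware-vsphere-csi-driver-operator/pkg/operator/utils"
	vsphere "k8s.io/cloud-provider-vsphere/pkg/common/config"
)

// exampleConfig is a made-up two-vCenter config, assumed to follow the
// upstream cloud-provider-vsphere ini format.
const exampleConfig = `
[Global]
insecure-flag = true
user = "administrator@vsphere.local"
password = "example"

[VirtualCenter "vc1.example.com"]
datacenters = "dc-a,dc-b"

[VirtualCenter "vc2.example.com"]
datacenters = "dc-c"
`

func main() {
	// Parse the cloud config the same way createVCenterConnection does.
	cfg, err := vsphere.ReadConfig([]byte(exampleConfig))
	if err != nil {
		panic(err)
	}

	// Enumerate vCenters first, then look up datacenters per vCenter,
	// mirroring the GetVCenters/GetDatacenters split introduced above.
	vCenters, err := utils.GetVCenters(cfg, true /* multiVCenterEnabled */)
	if err != nil {
		panic(err)
	}
	for _, vc := range vCenters {
		dcs, err := utils.GetDatacenters(cfg, vc, true)
		if err != nil {
			panic(err)
		}
		fmt.Printf("vCenter %s serves datacenters %v\n", vc, dcs)
	}
}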