Initial changes for multi vcenter support.
vr4manta committed Apr 24, 2024
1 parent cc0edd5 commit 8248abd
Showing 6 changed files with 62 additions and 42 deletions.
assets/rbac/csi_driver_controller_role.yaml: 1 addition & 1 deletion

@@ -40,7 +40,7 @@ rules:
resources: ["triggercsifullsyncs"]
verbs: ["create", "get", "update", "watch", "list"]
- apiGroups: ["cns.vmware.com"]
resources: ["cnsvspherevolumemigrations"]
resources: ["cnsvolumeinfoes"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["cns.vmware.com"]
resources: ["cnsvolumeoperationrequests"]
pkg/operator/storageclasscontroller/vmware.go: 20 additions & 7 deletions

@@ -43,7 +43,7 @@ const (
var associatedTypesRaw = []string{"StoragePod", "Datastore", "ResourcePool", "VirtualMachine", "Folder"}

type vCenterInterface interface {
- GetDefaultDatastore(ctx context.Context) (*mo.Datastore, error)
+ GetDefaultDatastore(ctx context.Context, infra *v1.Infrastructure) (*mo.Datastore, error)
createStoragePolicy(ctx context.Context) (string, error)
checkForExistingPolicy(ctx context.Context) (bool, error)
createOrUpdateTag(ctx context.Context, ds *mo.Datastore) error
@@ -78,12 +78,21 @@ func NewStoragePolicyAPI(ctx context.Context, connection []*vclib.VSphereConnect
return storagePolicyAPIClient
}

- func (v *storagePolicyAPI) GetDefaultDatastore(ctx context.Context) (*mo.Datastore, error) {
+ func (v *storagePolicyAPI) GetDefaultDatastore(ctx context.Context, infra *v1.Infrastructure) (*mo.Datastore, error) {
vmClient := v.vcenterApiConnection.Client
- config := v.vcenterApiConnection.Config
+ //config := v.vcenterApiConnection.Config
finder := find.NewFinder(vmClient.Client, false)
- dcName := config.Workspace.Datacenter
- dsName := config.Workspace.DefaultDatastore
+
+ // Following the pattern of the older generated ini config, the default datastore comes from FD[0]; if no FDs are
+ // defined, we'll grab from the deprecated fields
+ var dcName, dsName string
+ if infra.Spec.PlatformSpec.VSphere != nil && len(infra.Spec.PlatformSpec.VSphere.FailureDomains) > 0 {
+ fd := infra.Spec.PlatformSpec.VSphere.FailureDomains[0]
+ dcName = fd.Topology.Datacenter
+ dsName = fd.Topology.Datastore
+ } else {
+ return nil, fmt.Errorf("unable to determine default datastore from current config")
+ }
dc, err := finder.Datacenter(ctx, dcName)
if err != nil {
return nil, fmt.Errorf("failed to access datacenter %s: %s", dcName, err)
@@ -197,7 +206,11 @@ func (v *storagePolicyAPI) createStoragePolicy(ctx context.Context) (string, err
return v.createZonalStoragePolicy(ctx)
}

- dsName := v.vcenterApiConnection.Config.Workspace.DefaultDatastore
+ // Since we create a zonal storage policy when in multi vcenter or single vcenter w/ zones, the below is
+ // only for the case where the cluster was upgraded from a version that used a non FailureDomain config. For now, let's
+ // just return an error.
+ return v.policyName, fmt.Errorf("current cluster config is not supported and needs to be migrated to zonal.")
+ /*dsName := v.vcenterApiConnection.Config.Workspace.DefaultDatastore
dcName := v.vcenterApiConnection.Config.Workspace.Datacenter
err = v.attachTags(ctx, dcName, dsName)
@@ -212,7 +225,7 @@ func (v *storagePolicyAPI) createStoragePolicy(ctx context.Context) (string, err
}
}
- return v.policyName, nil
+ return v.policyName, nil*/
}

func (v *storagePolicyAPI) checkForTagOnDatastore(ctx context.Context, dsMo *mo.Datastore) bool {
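
For clarity, here is a minimal standalone sketch of the new selection logic: the default datacenter and datastore now come from the first failure domain on the Infrastructure object rather than the legacy Workspace ini section. The helper name `defaultDatastoreNames` is hypothetical and not part of the commit; the field paths are the ones the diff itself uses.

```go
package main

import (
	"fmt"

	v1 "github.com/openshift/api/config/v1"
)

// defaultDatastoreNames mirrors the FD[0] selection above: read the
// datacenter and datastore from the first failure domain, or fail when the
// platform spec defines none.
func defaultDatastoreNames(infra *v1.Infrastructure) (dcName, dsName string, err error) {
	vs := infra.Spec.PlatformSpec.VSphere
	if vs == nil || len(vs.FailureDomains) == 0 {
		return "", "", fmt.Errorf("unable to determine default datastore from current config")
	}
	fd := vs.FailureDomains[0]
	return fd.Topology.Datacenter, fd.Topology.Datastore, nil
}
```
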
pkg/operator/utils/topology.go: 13 additions & 13 deletions

@@ -8,7 +8,7 @@ import (
opv1 "github.com/openshift/api/operator/v1"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/legacy-cloud-providers/vsphere"
vsphere "k8s.io/cloud-provider-vsphere/pkg/common/config"
)

const (
@@ -46,23 +46,23 @@ func GetTopologyCategories(clusterCSIDriver *opv1.ClusterCSIDriver, infra *cfgv1
return GetCSIDriverTopologyCategories(clusterCSIDriver)
}

- func GetDatacenters(config *vsphere.VSphereConfig, multiVCenterEnabled bool) ([]string, error) {
- datacenters := []string{config.Workspace.Datacenter}
-
- virtualCenterIPs := sets.StringKeySet(config.VirtualCenter)
-
- if len(virtualCenterIPs) > 1 && !multiVCenterEnabled {
+ func GetVCenters(config *vsphere.Config, multiVCenterEnabled bool) ([]string, error) {
+ if len(config.VirtualCenter) > 1 && !multiVCenterEnabled {
return nil, fmt.Errorf("the multi vcenter cloud config must define a single VirtualCenter")
- } else if len(virtualCenterIPs) == 0 {
+ } else if len(config.VirtualCenter) == 0 {
return nil, fmt.Errorf("cloud config must define at lease a single VirtualCenter")
}

- for _, virtualCenterIP := range virtualCenterIPs.List() {
- logrus.Infof("Processing virtual center: %v", virtualCenterIP)
- if virtualCenterConfig, ok := config.VirtualCenter[virtualCenterIP]; ok {
- datacenters = strings.Split(virtualCenterConfig.Datacenters, ",")
- }
+ var vCenters []string
+ for _, vcenter := range config.VirtualCenter {
+ vCenters = append(vCenters, vcenter.VCenterIP)
}

+ return vCenters, nil
}

+ func GetDatacenters(config *vsphere.Config, vcenter string, multiVCenterEnabled bool) ([]string, error) {
+ datacenters := strings.Split(config.VirtualCenter[vcenter].Datacenters, ",")
logrus.Infof("Gathered the following data centers: %v", datacenters)
return datacenters, nil
}
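
A hypothetical caller would now pair the two helpers: enumerate vCenters first, then resolve each vCenter's datacenters from its own VirtualCenter config section. `listTopology` is illustrative only and assumes it sits in the same utils package as the helpers above.

```go
// listTopology illustrates the new two-step lookup: GetVCenters enumerates
// servers, GetDatacenters resolves the datacenter list per server.
func listTopology(config *vsphere.Config) error {
	vCenters, err := GetVCenters(config, true /* multiVCenterEnabled */)
	if err != nil {
		return err
	}
	for _, vc := range vCenters {
		datacenters, err := GetDatacenters(config, vc, true)
		if err != nil {
			return err
		}
		logrus.Infof("vCenter %s serves datacenters: %v", vc, datacenters)
	}
	return nil
}
```
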
pkg/operator/vclib/connection.go: 5 additions & 5 deletions

@@ -6,7 +6,7 @@ import (
"github.com/openshift/vmware-vsphere-csi-driver-operator/pkg/version"
"github.com/vmware/govmomi/vapi/rest"
"github.com/vmware/govmomi/vim25/soap"
"k8s.io/legacy-cloud-providers/vsphere"
vsphere "k8s.io/cloud-provider-vsphere/pkg/common/config"
"net/url"
"sync"
"time"
@@ -24,7 +24,7 @@ type VSphereConnection struct {
Hostname string
Port string
Insecure bool
- Config *vsphere.VSphereConfig
+ Config *vsphere.Config
}

const apiTimeout = 10 * time.Minute
@@ -33,13 +33,13 @@ var (
clientLock sync.Mutex
)

- func NewVSphereConnection(username, password string, cfg *vsphere.VSphereConfig) *VSphereConnection {
+ func NewVSphereConnection(username, password, vcenter string, cfg *vsphere.Config) *VSphereConnection {
return &VSphereConnection{
Username: username,
Password: password,
Config: cfg,
- Hostname: cfg.Workspace.VCenterIP,
- Insecure: cfg.Global.InsecureFlag,
+ Hostname: cfg.VirtualCenter[vcenter].VCenterIP,
+ Insecure: cfg.VirtualCenter[vcenter].InsecureFlag,
}
}

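
A sketch of how a caller might build one connection per configured vCenter under the new signature. `connectAll` is illustrative, not part of this commit, and assumes it lives in the same vclib package; credentials are placeholders.

```go
// connectAll builds one VSphereConnection per vCenter in the parsed cloud
// config. The map key doubles as the lookup key that NewVSphereConnection
// now uses to read VCenterIP and InsecureFlag.
func connectAll(cfg *vsphere.Config, username, password string) []*VSphereConnection {
	var conns []*VSphereConnection
	for vcenter := range cfg.VirtualCenter {
		conns = append(conns, NewVSphereConnection(username, password, vcenter, cfg))
	}
	return conns
}
```
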
pkg/operator/vspherecontroller/checks/check_nodes.go: 13 additions & 4 deletions

@@ -220,15 +220,22 @@ func (n *NodeChecker) checkOrMarkHostForProcessing(hostName string) bool {

func (n *NodeChecker) getHost(ctx context.Context, checkOpts CheckArgs, hostRef *types.ManagedObjectReference) (mo.HostSystem, error) {
var o mo.HostSystem
+ var err error
hostName := hostRef.Value

+ // Since we need to iterate through connections again, let's not throw an error prematurely.
+ for _, vConn := range checkOpts.vmConnection {
+ hostSystemObject := object.NewHostSystem(vConn.Client.Client, *hostRef)

- err := hostSystemObject.Properties(ctx, hostSystemObject.Reference(), []string{"name", "config.product"}, &o)
- if err != nil {
- return o, fmt.Errorf("failed to load ESXi host %s: %v", hostName, err)
+ err = hostSystemObject.Properties(ctx, hostSystemObject.Reference(), []string{"name", "config.product"}, &o)
+ if err == nil {
+ // Object found
+ break
+ }
+ }
+ if err != nil {
+ return o, fmt.Errorf("failed to load ESXi host %s: %v", hostName, err)
+ }
if o.Config == nil {
return o, fmt.Errorf("error getting ESXi host version %s: host.config is nil", hostName)
}
@@ -241,7 +248,9 @@ func getVM(ctx context.Context, checkOpts CheckArgs, node *v1.Node) (*mo.Virtual
vmClient := client.Client.Client
vmConfig := client.Config

- dataCenterNames, err := utils.GetDatacenters(vmConfig, checkOpts.multiVCenterEnabled)
+ // When checking VMs, the VMs may be spread across vCenters, so we'll need to get the vCenters from the config and
+ // check each one for the VM.
+ dataCenterNames, err := utils.GetDatacenters(vmConfig, client.Hostname, checkOpts.multiVCenterEnabled)
if err != nil {
return nil, err
}
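
The loop in getHost generalizes to a try-each-connection pattern: attempt the lookup against every vCenter connection and keep the first success, surfacing the last error only after all connections have been tried. A hypothetical generic helper distilling that pattern (not in the commit) could look like this.

```go
// firstSuccessful tries a lookup against each vCenter connection in turn and
// returns the first success; like getHost above, it only reports an error
// after every connection has failed.
func firstSuccessful[T any](conns []*vclib.VSphereConnection, lookup func(*vclib.VSphereConnection) (T, error)) (T, error) {
	var result T
	var err error
	for _, conn := range conns {
		result, err = lookup(conn)
		if err == nil {
			return result, nil // found on this vCenter
		}
	}
	// Note: with zero connections this returns the zero value and a nil
	// error, mirroring the behavior of the patched getHost loop.
	return result, err
}
```
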
pkg/operator/vspherecontroller/vspherecontroller.go: 10 additions & 12 deletions

@@ -8,14 +8,12 @@ import (
"time"

"github.com/openshift/vmware-vsphere-csi-driver-operator/assets"
"github.com/openshift/vmware-vsphere-csi-driver-operator/pkg/operator/storageclasscontroller"
iniv1 "gopkg.in/ini.v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
vsphere "k8s.io/cloud-provider-vsphere/pkg/common/config"
legacy "k8s.io/legacy-cloud-providers/vsphere"

"github.com/openshift/vmware-vsphere-csi-driver-operator/pkg/operator/storageclasscontroller"

ocpv1 "github.com/openshift/api/config/v1"
operatorapi "github.com/openshift/api/operator/v1"
@@ -451,7 +449,7 @@ func (c *VSphereController) createVCenterConnection(ctx context.Context, infra *
// Eventually the cluster needs to migrate to the new version. For now, let's just use the infrastructure object
// as the source of truth.
klog.V(3).Infof("Creating vSphere connection")
- /*cloudConfig := infra.Spec.CloudConfig
+ cloudConfig := infra.Spec.CloudConfig
cloudConfigMap, err := c.configMapLister.ConfigMaps(cloudConfigNamespace).Get(cloudConfig.Name)
if err != nil {
return fmt.Errorf("failed to get cloud config: %v", err)
Expand All @@ -460,15 +458,15 @@ func (c *VSphereController) createVCenterConnection(ctx context.Context, infra *
cfgString, ok := cloudConfigMap.Data[infra.Spec.CloudConfig.Key]
if !ok {
return fmt.Errorf("cloud config %s/%s does not contain key %q", cloudConfigNamespace, cloudConfig.Name, cloudConfig.Key)
- }*/
+ }
//cfg := new(vsphere.VSphereConfig)
//err = gcfg.ReadStringInto(cfg, cfgString)

// If we use infra to iterate through vcenters, do we need to load config?
- /*cfg, err := vsphere.ReadConfig([]byte(cfgString))
+ cfg, err := vsphere.ReadConfig([]byte(cfgString))
if err != nil {
return err
- }*/
+ }

for _, vcenter := range infra.Spec.PlatformSpec.VSphere.VCenters {
secret, err := c.secretLister.Secrets(c.targetNamespace).Get(cloudCredSecretName)
Expand All @@ -487,13 +485,13 @@ func (c *VSphereController) createVCenterConnection(ctx context.Context, infra *
}

// just a hack for other function compatibility.
- cfg := new(legacy.VSphereConfig)
+ /*cfg := new(legacy.VSphereConfig)
cfg.Workspace.VCenterIP = vcenter.Server
cfg.Workspace.Datacenter = vcenter.Datacenters[0]
cfg.Workspace.DefaultDatastore = infra.Spec.PlatformSpec.VSphere.FailureDomains[0].Topology.Datastore
- cfg.Global.InsecureFlag = true
+ cfg.Global.InsecureFlag = true*/

- vs := vclib.NewVSphereConnection(string(username), string(password), cfg)
+ vs := vclib.NewVSphereConnection(string(username), string(password), vcenter.Server, cfg)
c.vSphereConnections = append(c.vSphereConnections, vs)
}
return nil
@@ -650,10 +648,10 @@ func (c *VSphereController) createCSIConfigMap(
return err
}

- // TODO: For multi vcenter, what is our approach here?
+ // TODO: NAG - For multi vcenter, what is our approach here?
storageApiClient := storageclasscontroller.NewStoragePolicyAPI(ctx, c.vSphereConnections, infra)

- defaultDatastore, err := storageApiClient.GetDefaultDatastore(ctx)
+ defaultDatastore, err := storageApiClient.GetDefaultDatastore(ctx, infra)

if err != nil {
return fmt.Errorf("unable to fetch default datastore url: %v", err)
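
Condensed, the new connection flow parses the cloud config once and then dials each vCenter listed on the infrastructure object. `buildConnections` below is a hypothetical distillation of createVCenterConnection, with the secret lookup from the surrounding function elided; `vsphere.ReadConfig`, `vcenter.Server`, and `vclib.NewVSphereConnection` are the calls the diff itself uses.

```go
// buildConnections condenses createVCenterConnection: one parsed config,
// one connection per vCenter server named in the platform spec.
func buildConnections(infra *ocpv1.Infrastructure, cfgString, username, password string) ([]*vclib.VSphereConnection, error) {
	cfg, err := vsphere.ReadConfig([]byte(cfgString))
	if err != nil {
		return nil, err
	}
	var conns []*vclib.VSphereConnection
	for _, vcenter := range infra.Spec.PlatformSpec.VSphere.VCenters {
		conns = append(conns, vclib.NewVSphereConnection(username, password, vcenter.Server, cfg))
	}
	return conns, nil
}
```
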
