Support for Failure Domain for VSphereMachine
- Introduces support for failure domain during VSphereVM generation.

Signed-off-by: Sagar Muchhal <muchhals@vmware.com>
srm09 committed Jul 12, 2021
1 parent d85398f commit fff9f7a
Showing 7 changed files with 589 additions and 21 deletions.
92 changes: 87 additions & 5 deletions controllers/vspheremachine_controller.go
@@ -31,6 +31,8 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
apitypes "k8s.io/apimachinery/pkg/types"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/utils/integer"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
clusterutilv1 "sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/conditions"
@@ -46,7 +48,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"

kerrors "k8s.io/apimachinery/pkg/util/errors"
infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/api/v1alpha3"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/context"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/record"
@@ -334,6 +335,11 @@ func (r machineReconciler) reconcileNormal(ctx *context.MachineContext) (reconci
return reconcile.Result{}, nil
}

// Propagate the failure domain name from the CAPI Machine to the VSphereMachine object
if failureDomain := ctx.Machine.Spec.FailureDomain; failureDomain != nil {
ctx.VSphereMachine.Spec.FailureDomain = failureDomain
}

// TODO(akutz) Determine the version of vSphere.
vm, err := r.reconcileNormalPre7(ctx, vsphereVM)
if err != nil {
@@ -441,12 +447,18 @@ func (r machineReconciler) reconcileNormalPre7(ctx *context.MachineContext, vsph
// clone spec.
ctx.VSphereMachine.Spec.VirtualMachineCloneSpec.DeepCopyInto(&vm.Spec.VirtualMachineCloneSpec)

// If a failure domain is set on the CAPI Machine, use it to override the VM clone spec.
if overrideFunc, ok := r.generateOverrideFunc(ctx); ok {
overrideFunc(vm)
}

// Several of the VSphereVM's clone spec properties can be derived
// from multiple places. The order is:
//
// 1. From the VSphereMachine.Spec (the DeepCopyInto above)
// 2. From the VSphereCluster.Spec.CloudProviderConfiguration.Workspace
// 3. From the VSphereCluster.Spec
// 1. From the Machine.Spec.FailureDomain
// 2. From the VSphereMachine.Spec (the DeepCopyInto above)
// 3. From the VSphereCluster.Spec.CloudProviderConfiguration.Workspace
// 4. From the VSphereCluster.Spec
vsphereCloudConfig := ctx.VSphereCluster.Spec.CloudProviderConfiguration.Workspace
if vm.Spec.Server == "" {
if vm.Spec.Server = vsphereCloudConfig.Server; vm.Spec.Server == "" {
@@ -486,10 +498,80 @@ func (r machineReconciler) reconcileNormalPre7(ctx *context.MachineContext, vsph
return vm, nil
}

// generateOverrideFunc returns a function which can override the values in the VSphereVM Spec
// with the values from the FailureDomain (if any) set on the owner CAPI machine.
func (r machineReconciler) generateOverrideFunc(ctx *context.MachineContext) (func(vm *infrav1.VSphereVM), bool) {
var overrideWithFailureDomainFunc func(vm *infrav1.VSphereVM)
if failureDomainName := ctx.Machine.Spec.FailureDomain; failureDomainName != nil {
var vsphereDeploymentZoneList infrav1.VSphereDeploymentZoneList
if err := r.Client.List(ctx, &vsphereDeploymentZoneList); err != nil {
r.Logger.Error(err, "unable to fetch list of deployment zones")
return overrideWithFailureDomainFunc, false
}

var vsphereFailureDomain infrav1.VSphereFailureDomain
if err := r.Client.Get(ctx, client.ObjectKey{Name: *failureDomainName}, &vsphereFailureDomain); err != nil {
r.Logger.Error(err, "unable to fetch failure domain", "name", *failureDomainName)
return overrideWithFailureDomainFunc, false
}

for index := range vsphereDeploymentZoneList.Items {
zone := vsphereDeploymentZoneList.Items[index]
if zone.Spec.FailureDomain == *ctx.Machine.Spec.FailureDomain {
overrideWithFailureDomainFunc = func(vm *infrav1.VSphereVM) {
vm.Spec.Server = zone.Spec.Server
vm.Spec.Datacenter = vsphereFailureDomain.Spec.Topology.Datacenter
if zone.Spec.PlacementConstraint.Folder != "" {
vm.Spec.Folder = zone.Spec.PlacementConstraint.Folder
}
if zone.Spec.PlacementConstraint.ResourcePool != "" {
vm.Spec.ResourcePool = zone.Spec.PlacementConstraint.ResourcePool
}
if vsphereFailureDomain.Spec.Topology.Datastore != "" {
vm.Spec.Datastore = vsphereFailureDomain.Spec.Topology.Datastore
}
if len(vsphereFailureDomain.Spec.Topology.Networks) > 0 {
vm.Spec.Network.Devices = overrideNetworkDeviceSpecs(vm.Spec.Network.Devices, vsphereFailureDomain.Spec.Topology.Networks)
}
}
return overrideWithFailureDomainFunc, true
}
}
}
return overrideWithFailureDomainFunc, false
}
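For reference, here is a hedged sketch of the pair of objects this lookup matches on. These are hypothetical fixtures, not part of this commit; the struct literal names (VSphereDeploymentZoneSpec, PlacementConstraint, VSphereFailureDomainSpec, Topology) are assumptions inferred from the fields accessed above.

zone := infrav1.VSphereDeploymentZone{
    Spec: infrav1.VSphereDeploymentZoneSpec{
        Server:        "vcenter01.example.com", // copied into vm.Spec.Server
        FailureDomain: "fd-1",                  // compared against *Machine.Spec.FailureDomain
        PlacementConstraint: infrav1.PlacementConstraint{
            ResourcePool: "rp-zone-1",     // overrides vm.Spec.ResourcePool when non-empty
            Folder:       "folder-zone-1", // overrides vm.Spec.Folder when non-empty
        },
    },
}
failureDomain := infrav1.VSphereFailureDomain{
    ObjectMeta: metav1.ObjectMeta{Name: "fd-1"}, // fetched by the name on the CAPI Machine
    Spec: infrav1.VSphereFailureDomainSpec{
        Topology: infrav1.Topology{
            Datacenter: "dc-1",                 // always overrides vm.Spec.Datacenter
            Datastore:  "ds-1",                 // overrides vm.Spec.Datastore when non-empty
            Networks:   []string{"zone-net-1"}, // rewrites vm.Spec.Network.Devices (see below)
        },
    },
}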

// overrideNetworkDeviceSpecs updates the network devices with the network definitions from the failure domain topology.
// The substitution is done in the order in which the network devices are defined.
//
// If there are more network definitions than network devices, the extra definitions are appended as new devices.
func overrideNetworkDeviceSpecs(deviceSpecs []infrav1.NetworkDeviceSpec, networks []string) []infrav1.NetworkDeviceSpec {
index, length := 0, len(networks)

devices := make([]infrav1.NetworkDeviceSpec, 0, integer.IntMax(length, len(deviceSpecs)))
// override the networks on the VM spec with the failure domain network definitions
for i := range deviceSpecs {
vmNetworkDeviceSpec := deviceSpecs[i]
if i < length {
index++
vmNetworkDeviceSpec.NetworkName = networks[i]
}
devices = append(devices, vmNetworkDeviceSpec)
}
// append the remaining network definitions to the VM spec
for ; index < length; index++ {
devices = append(devices, infrav1.NetworkDeviceSpec{
NetworkName: networks[index],
})
}

return devices
}
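A minimal usage sketch of the substitution behavior, with hypothetical device and network names (not part of this commit):

devices := []infrav1.NetworkDeviceSpec{
    {NetworkName: "vm-network-1", DHCP4: true},
    {NetworkName: "vm-network-2", DHCP4: true},
}
// Three failure domain networks against two devices: the first two device
// names are replaced in order (other settings such as DHCP4 are preserved),
// and the third network is appended as a new device.
result := overrideNetworkDeviceSpecs(devices, []string{"net-1", "net-2", "net-3"})
// result: [{net-1, DHCP4: true}, {net-2, DHCP4: true}, {net-3}]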

func (r machineReconciler) reconcileNetwork(ctx *context.MachineContext, vm *unstructured.Unstructured) (bool, error) {
var errs []error
if networkStatusListOfIfaces, ok, _ := unstructured.NestedSlice(vm.Object, "status", "network"); ok {
networkStatusList := []infrav1.NetworkStatus{}
var networkStatusList []infrav1.NetworkStatus
for i, networkStatusListMemberIface := range networkStatusListOfIfaces {
if buf, err := json.Marshal(networkStatusListMemberIface); err != nil {
ctx.Logger.Error(err,
207 changes: 207 additions & 0 deletions controllers/vspheremachine_controller_test.go
@@ -0,0 +1,207 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controllers

import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"

corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/controller-runtime/pkg/client"

infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/api/v1alpha3"
)

var _ = Describe("VsphereMachineReconciler", func() {

var (
capiCluster *clusterv1.Cluster
capiMachine *clusterv1.Machine

infraCluster *infrav1.VSphereCluster
infraMachine *infrav1.VSphereMachine

testNs *corev1.Namespace
key client.ObjectKey
)

isPresentAndFalseWithReason := func(getter conditions.Getter, condition clusterv1.ConditionType, reason string) bool {
ExpectWithOffset(1, testEnv.Get(ctx, key, getter)).To(Succeed())
if !conditions.Has(getter, condition) {
return false
}
objectCondition := conditions.Get(getter, condition)
return objectCondition.Status == corev1.ConditionFalse &&
objectCondition.Reason == reason
}

BeforeEach(func() {
var err error
testNs, err = testEnv.CreateNamespace(ctx, "vsphere-machine-reconciler")
Expect(err).NotTo(HaveOccurred())

capiCluster = &clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "test1-",
Namespace: testNs.Name,
},
Spec: clusterv1.ClusterSpec{
InfrastructureRef: &corev1.ObjectReference{
APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha3",
Kind: "VSphereCluster",
Name: "vsphere-test1",
},
},
}
Expect(testEnv.Create(ctx, capiCluster)).To(Succeed())

infraCluster = &infrav1.VSphereCluster{
ObjectMeta: metav1.ObjectMeta{
Name: "vsphere-test1",
Namespace: testNs.Name,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "cluster.x-k8s.io/v1alpha3",
Kind: "Cluster",
Name: capiCluster.Name,
UID: "blah",
},
},
},
Spec: infrav1.VSphereClusterSpec{},
}
Expect(testEnv.Create(ctx, infraCluster)).To(Succeed())

capiMachine = &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "machine-created-",
Namespace: testNs.Name,
Finalizers: []string{clusterv1.MachineFinalizer},
Labels: map[string]string{
clusterv1.ClusterLabelName: capiCluster.Name,
},
},
Spec: clusterv1.MachineSpec{
ClusterName: capiCluster.Name,
InfrastructureRef: corev1.ObjectReference{
APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha3",
Kind: "VSphereMachine",
Name: "vsphere-machine-1",
},
},
}
Expect(testEnv.Create(ctx, capiMachine)).To(Succeed())

infraMachine = &infrav1.VSphereMachine{
ObjectMeta: metav1.ObjectMeta{
Name: "vsphere-machine-1",
Namespace: testNs.Name,
Labels: map[string]string{
clusterv1.ClusterLabelName: capiCluster.Name,
clusterv1.MachineControlPlaneLabelName: "",
},
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: clusterv1.GroupVersion.String(),
Kind: "Machine",
Name: capiMachine.Name,
UID: "blah",
},
},
},
Spec: infrav1.VSphereMachineSpec{
VirtualMachineCloneSpec: infrav1.VirtualMachineCloneSpec{
Template: "ubuntu-k9s-1.19",
Network: infrav1.NetworkSpec{
Devices: []infrav1.NetworkDeviceSpec{
{NetworkName: "network-1", DHCP4: true},
},
},
},
},
}
Expect(testEnv.Create(ctx, infraMachine)).To(Succeed())

key = client.ObjectKey{Namespace: testNs.Name, Name: infraMachine.Name}
})

AfterEach(func() {
Expect(testEnv.Cleanup(ctx, testNs, capiCluster, infraCluster, capiMachine, infraMachine)).To(Succeed())
})

It("waits for cluster status to be ready", func() {
Eventually(func() bool {
// ensure the VSphereMachine has been created before proceeding to check
// for the presence of conditions on it.
if err := testEnv.Get(ctx, key, infraMachine); err != nil {
return false
}
return isPresentAndFalseWithReason(infraMachine, infrav1.VMProvisionedCondition, infrav1.WaitingForClusterInfrastructureReason)
}, timeout).Should(BeTrue())

By("setting the cluster infrastructure to be ready")
Eventually(func() error {
ph, err := patch.NewHelper(capiCluster, testEnv)
Expect(err).ShouldNot(HaveOccurred())
capiCluster.Status.InfrastructureReady = true
return ph.Patch(ctx, capiCluster, patch.WithStatusObservedGeneration{})
}, timeout).Should(BeNil())

Eventually(func() bool {
return isPresentAndFalseWithReason(infraMachine, infrav1.VMProvisionedCondition, infrav1.WaitingForClusterInfrastructureReason)
}, timeout).Should(BeFalse())
})

Context("With Cluster Infrastructure status ready", func() {
BeforeEach(func() {
ph, err := patch.NewHelper(capiCluster, testEnv)
Expect(err).ShouldNot(HaveOccurred())
capiCluster.Status.InfrastructureReady = true
Expect(ph.Patch(ctx, capiCluster, patch.WithStatusObservedGeneration{})).To(Succeed())
})

It("moves to VSphere VM creation", func() {
Eventually(func() bool {
vms := infrav1.VSphereVMList{}
Expect(testEnv.List(ctx, &vms)).To(Succeed())
return isPresentAndFalseWithReason(infraMachine, infrav1.VMProvisionedCondition, infrav1.WaitingForBootstrapDataReason) &&
len(vms.Items) == 0
}, timeout).Should(BeTrue())

By("setting the bootstrap data")
Eventually(func() error {
ph, err := patch.NewHelper(capiMachine, testEnv)
Expect(err).ShouldNot(HaveOccurred())
capiMachine.Spec.Bootstrap = clusterv1.Bootstrap{
DataSecretName: pointer.StringPtr("some-secret"),
}
return ph.Patch(ctx, capiMachine, patch.WithStatusObservedGeneration{})
}, timeout).Should(BeNil())

Eventually(func() int {
vms := infrav1.VSphereVMList{}
Expect(testEnv.List(ctx, &vms)).To(Succeed())
return len(vms.Items)
}, timeout).Should(BeNumerically(">", 0))
})
})
})
