diff --git a/Makefile b/Makefile index 2531a2e4162..109afa9a2e3 100644 --- a/Makefile +++ b/Makefile @@ -587,5 +587,5 @@ verify-modules: modules .PHONY: verify-gen verify-gen: generate @if !(git diff --quiet HEAD); then \ - echo "generated files are out of date, run make generate"; exit 1; \ + git diff; echo "generated files are out of date, run make generate"; exit 1; \ fi diff --git a/api/v1alpha4/conditions_consts.go b/api/v1alpha4/conditions_consts.go index 08c0799791e..1458eb64952 100644 --- a/api/v1alpha4/conditions_consts.go +++ b/api/v1alpha4/conditions_consts.go @@ -62,4 +62,16 @@ const ( ScaleSetDeletingReason = "ScaleSetDeleting" // ScaleSetProvisionFailedReason used for failures during scale set provisioning. ScaleSetProvisionFailedReason = "ScaleSetProvisionFailed" + + // ScaleSetDesiredReplicasCondition reports on the scaling state of the machine pool + ScaleSetDesiredReplicasCondition clusterv1.ConditionType = "ScaleSetDesiredReplicas" + // ScaleSetScaleUpReason describes the machine pool scaling up + ScaleSetScaleUpReason = "ScaleSetScalingUp" + // ScaleSetScaleDownReason describes the machine pool scaling down + ScaleSetScaleDownReason = "ScaleSetScalingDown" + + // ScaleSetModelUpdatedCondition reports on the model state of the pool + ScaleSetModelUpdatedCondition clusterv1.ConditionType = "ScaleSetModelUpdated" + // ScaleSetModelOutOfDateReason describes the machine pool model being out of date + ScaleSetModelOutOfDateReason = "ScaleSetModelOutOfDate" ) diff --git a/api/v1alpha4/types.go b/api/v1alpha4/types.go index adb47f08f38..4ce3962bccf 100644 --- a/api/v1alpha4/types.go +++ b/api/v1alpha4/types.go @@ -587,3 +587,8 @@ type AzureBastion struct { // +optional PublicIP PublicIPSpec `json:"publicIP,omitempty"` } + +// IsTerminalProvisioningState returns true if the ProvisioningState is a terminal state for an Azure resource +func IsTerminalProvisioningState(state ProvisioningState) bool { + return state == Failed || state == Succeeded +} diff --git a/azure/converters/vmss.go b/azure/converters/vmss.go index 0780b6da957..38fdab00770 100644 --- a/azure/converters/vmss.go +++ b/azure/converters/vmss.go @@ -19,14 +19,14 @@ package converters import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-30/compute" "github.com/Azure/go-autorest/autorest/to" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha4" - infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha4" + "sigs.k8s.io/cluster-api-provider-azure/azure" ) // SDKToVMSS converts an Azure SDK VirtualMachineScaleSet to the AzureMachinePool type.
-func SDKToVMSS(sdkvmss compute.VirtualMachineScaleSet, sdkinstances []compute.VirtualMachineScaleSetVM) *infrav1exp.VMSS { - vmss := &infrav1exp.VMSS{ +func SDKToVMSS(sdkvmss compute.VirtualMachineScaleSet, sdkinstances []compute.VirtualMachineScaleSetVM) *azure.VMSS { + vmss := &azure.VMSS{ ID: to.String(sdkvmss.ID), Name: to.String(sdkvmss.Name), State: infrav1.ProvisioningState(to.String(sdkvmss.ProvisioningState)), @@ -46,25 +45,66 @@ func SDKToVMSS(sdkvmss compute.VirtualMachineScaleSet, sdkinstances []compute.Vi } if len(sdkinstances) > 0 { - vmss.Instances = make([]infrav1exp.VMSSVM, len(sdkinstances)) + vmss.Instances = make([]azure.VMSSVM, len(sdkinstances)) for i, vm := range sdkinstances { - instance := infrav1exp.VMSSVM{ - ID: to.String(vm.ID), - InstanceID: to.String(vm.InstanceID), - Name: to.String(vm.OsProfile.ComputerName), - State: infrav1.ProvisioningState(to.String(vm.ProvisioningState)), - } - - if vm.LatestModelApplied != nil { - instance.LatestModelApplied = *vm.LatestModelApplied - } - - if vm.Zones != nil && len(*vm.Zones) > 0 { - instance.AvailabilityZone = to.StringSlice(vm.Zones)[0] - } - vmss.Instances[i] = instance + vmss.Instances[i] = *SDKToVMSSVM(vm) } } + if sdkvmss.VirtualMachineProfile != nil && + sdkvmss.VirtualMachineProfile.StorageProfile != nil && + sdkvmss.VirtualMachineProfile.StorageProfile.ImageReference != nil { + + imageRef := sdkvmss.VirtualMachineProfile.StorageProfile.ImageReference + vmss.Image = SDKImageToImage(imageRef, sdkvmss.Plan != nil) + } + return vmss } + +// SDKToVMSSVM converts an Azure SDK VirtualMachineScaleSetVM into an infrav1exp.VMSSVM +func SDKToVMSSVM(sdkInstance compute.VirtualMachineScaleSetVM) *azure.VMSSVM { + instance := azure.VMSSVM{ + ID: to.String(sdkInstance.ID), + InstanceID: to.String(sdkInstance.InstanceID), + } + + if sdkInstance.VirtualMachineScaleSetVMProperties == nil { + return &instance + } + + instance.State = infrav1.Creating + if sdkInstance.ProvisioningState != nil { + instance.State = infrav1.ProvisioningState(to.String(sdkInstance.ProvisioningState)) + } + + if sdkInstance.OsProfile != nil && sdkInstance.OsProfile.ComputerName != nil { + instance.Name = *sdkInstance.OsProfile.ComputerName + } + + if sdkInstance.StorageProfile != nil && sdkInstance.StorageProfile.ImageReference != nil { + imageRef := sdkInstance.StorageProfile.ImageReference + instance.Image = SDKImageToImage(imageRef, sdkInstance.Plan != nil) + } + + if sdkInstance.Zones != nil && len(*sdkInstance.Zones) > 0 { + // an instance should only have 1 zone, so we select the first item of the slice + instance.AvailabilityZone = to.StringSlice(sdkInstance.Zones)[0] + } + + return &instance +} + +// SDKImageToImage converts a SDK image reference to infrav1.Image +func SDKImageToImage(sdkImageRef *compute.ImageReference, isThirdPartyImage bool) infrav1.Image { + return infrav1.Image{ + ID: sdkImageRef.ID, + Marketplace: &infrav1.AzureMarketplaceImage{ + Publisher: to.String(sdkImageRef.Publisher), + Offer: to.String(sdkImageRef.Offer), + SKU: to.String(sdkImageRef.Sku), + Version: to.String(sdkImageRef.Version), + ThirdPartyImage: isThirdPartyImage, + }, + } +} diff --git a/azure/converters/vmss_test.go b/azure/converters/vmss_test.go index 9317f5c3c6a..d11b3b7cad1 100644 --- a/azure/converters/vmss_test.go +++ b/azure/converters/vmss_test.go @@ -23,16 +23,15 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-30/compute" "github.com/Azure/go-autorest/autorest/to" "github.com/onsi/gomega" - + 
"sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/converters" - infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha4" ) func Test_SDKToVMSS(t *testing.T) { cases := []struct { Name string SubjectFactory func(*gomega.GomegaWithT) (compute.VirtualMachineScaleSet, []compute.VirtualMachineScaleSetVM) - Expect func(*gomega.GomegaWithT, *infrav1exp.VMSS) + Expect func(*gomega.GomegaWithT, *azure.VMSS) }{ { Name: "ShouldPopulateWithData", @@ -83,8 +82,8 @@ func Test_SDKToVMSS(t *testing.T) { }, } }, - Expect: func(g *gomega.GomegaWithT, actual *infrav1exp.VMSS) { - expected := infrav1exp.VMSS{ + Expect: func(g *gomega.GomegaWithT, actual *azure.VMSS) { + expected := azure.VMSS{ ID: "vmssID", Name: "vmssName", Sku: "skuName", @@ -94,11 +93,11 @@ func Test_SDKToVMSS(t *testing.T) { Tags: map[string]string{ "foo": "bazz", }, - Instances: make([]infrav1exp.VMSSVM, 2), + Instances: make([]azure.VMSSVM, 2), } for i := 0; i < 2; i++ { - expected.Instances[i] = infrav1exp.VMSSVM{ + expected.Instances[i] = azure.VMSSVM{ ID: fmt.Sprintf("vm/%d", i), InstanceID: fmt.Sprintf("%d", i), Name: fmt.Sprintf("instance-00000%d", i), diff --git a/azure/errors.go b/azure/errors.go index 777acec7558..d305b23a1db 100644 --- a/azure/errors.go +++ b/azure/errors.go @@ -109,6 +109,11 @@ func (t ReconcileError) IsTerminal() bool { return t.errorType == TerminalErrorType } +// Is returns true if the target is a ReconcileError +func (t ReconcileError) Is(target error) bool { + return errors.As(target, &ReconcileError{}) +} + // RequeueAfter returns requestAfter value func (t ReconcileError) RequeueAfter() time.Duration { return t.requestAfter diff --git a/azure/scope/machine.go b/azure/scope/machine.go index ee4d4a562fb..964ee9f34dd 100644 --- a/azure/scope/machine.go +++ b/azure/scope/machine.go @@ -297,7 +297,7 @@ func (m *MachineScope) Role() string { return infrav1.Node } -// GetVMID returns the AzureMachine instance id by parsing Spec.ProviderID. +// GetVMID returns the AzureMachine instance id by parsing Spec.FakeProviderID. 
func (m *MachineScope) GetVMID() string { parsed, err := noderefutil.NewProviderID(m.ProviderID()) if err != nil { diff --git a/azure/scope/machinepool.go b/azure/scope/machinepool.go index 56ff130f4eb..bc0ed3beb50 100644 --- a/azure/scope/machinepool.go +++ b/azure/scope/machinepool.go @@ -19,31 +19,30 @@ package scope import ( "context" "encoding/base64" + "strings" "time" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" - "sigs.k8s.io/cluster-api/util/conditions" - "github.com/Azure/go-autorest/autorest/to" "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2/klogr" "k8s.io/utils/pointer" - "sigs.k8s.io/cluster-api/controllers/noderefutil" - capierrors "sigs.k8s.io/cluster-api/errors" - capiv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha4" - utilkubeconfig "sigs.k8s.io/cluster-api/util/kubeconfig" - "sigs.k8s.io/cluster-api/util/patch" - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha4" "sigs.k8s.io/cluster-api-provider-azure/azure" + machinepool "sigs.k8s.io/cluster-api-provider-azure/azure/scope/strategies/machinepool_deployments" infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha4" "sigs.k8s.io/cluster-api-provider-azure/util/tele" - + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" + "sigs.k8s.io/cluster-api/controllers/noderefutil" + capierrors "sigs.k8s.io/cluster-api/errors" + capiv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha4" + "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" ) type ( @@ -58,12 +57,13 @@ type ( // MachinePoolScope defines a scope defined around a machine pool and its cluster. MachinePoolScope struct { + azure.ClusterScoper logr.Logger + AzureMachinePool *infrav1exp.AzureMachinePool + MachinePool *capiv1exp.MachinePool client client.Client patchHelper *patch.Helper - MachinePool *capiv1exp.MachinePool - AzureMachinePool *infrav1exp.AzureMachinePool - azure.ClusterScoper + vmssState *azure.VMSS } // NodeStatus represents the status of a Kubernetes node @@ -79,9 +79,11 @@ func NewMachinePoolScope(params MachinePoolScopeParams) (*MachinePoolScope, erro if params.Client == nil { return nil, errors.New("client is required when creating a MachinePoolScope") } + if params.MachinePool == nil { return nil, errors.New("machine pool is required when creating a MachinePoolScope") } + if params.AzureMachinePool == nil { return nil, errors.New("azure machine pool is required when creating a MachinePoolScope") } @@ -94,6 +96,7 @@ func NewMachinePoolScope(params MachinePoolScopeParams) (*MachinePoolScope, erro if err != nil { return nil, errors.Wrap(err, "failed to init patch helper") } + return &MachinePoolScope{ client: params.Client, MachinePool: params.MachinePool, @@ -136,7 +139,7 @@ func (m *MachinePoolScope) Name() string { return m.AzureMachinePool.Name } -// ProviderID returns the AzureMachinePool ID by parsing Spec.ProviderID. +// ProviderID returns the AzureMachinePool ID by parsing Spec.ProviderID.
func (m *MachinePoolScope) ProviderID() string { parsed, err := noderefutil.NewProviderID(m.AzureMachinePool.Spec.ProviderID) if err != nil { @@ -158,61 +161,213 @@ func (m *MachinePoolScope) ProvisioningState() infrav1.ProvisioningState { return "" } -// NeedsK8sVersionUpdate compares the MachinePool spec and the AzureMachinePool status to determine if the -// VMSS model needs to be updated -func (m *MachinePoolScope) NeedsK8sVersionUpdate() bool { - return m.AzureMachinePool.Status.Version != *m.MachinePool.Spec.Template.Spec.Version +// SetVMSSState updates the machine pool scope with the current state of the VMSS. +func (m *MachinePoolScope) SetVMSSState(vmssState *azure.VMSS) { + m.vmssState = vmssState } -// UpdateInstanceStatuses ties the Azure VMSS instance data and the Node status data together to build and update -// the AzureMachinePool. This calculates the number of ready replicas, the current version the kubelet -// is running on the node, the provider IDs for the instances and the providerIDList for the AzureMachinePool spec. -func (m *MachinePoolScope) UpdateInstanceStatuses(ctx context.Context, instances []infrav1exp.VMSSVM) error { - ctx, span := tele.Tracer().Start(ctx, "scope.MachinePoolScope.UpdateInstanceStatuses") - defer span.End() +// NeedsRequeue return true if any machines are not on the latest model or the VMSS is not in a terminal provisioning +// state. +func (m *MachinePoolScope) NeedsRequeue() bool { + state := m.AzureMachinePool.Status.ProvisioningState + if m.vmssState == nil { + return state != nil && infrav1.IsTerminalProvisioningState(*state) + } + + if !m.vmssState.HasLatestModelAppliedToAll() { + return true + } + + desiredMatchesActual := len(m.vmssState.Instances) == int(m.DesiredReplicas()) + return !(state != nil && infrav1.IsTerminalProvisioningState(*state) && desiredMatchesActual) +} + +// DesiredReplicas returns the replica count on machine pool or 0 if machine pool replicas is nil. +func (m MachinePoolScope) DesiredReplicas() int32 { + return to.Int32(m.MachinePool.Spec.Replicas) +} + +// MaxSurge returns the number of machines to surge, or 0 if the deployment strategy does not support surge. +func (m MachinePoolScope) MaxSurge() (int, error) { + if surger, ok := m.getDeploymentStrategy().(machinepool.Surger); ok { + surgeCount, err := surger.Surge(int(m.DesiredReplicas())) + if err != nil { + return 0, errors.Wrap(err, "failed to calculate surge for the machine pool") + } - providerIDs := make([]string, len(instances)) - for i, instance := range instances { - providerIDs[i] = azure.ProviderIDPrefix + instance.ID + return surgeCount, nil } - nodeStatusByProviderID, err := m.getNodeStatusByProviderID(ctx, providerIDs) + return 0, nil +} + +// updateReplicasAndProviderIDs ties the Azure VMSS instance data and the Node status data together to build and update +// the AzureMachinePool replica count and providerIDList. 
+func (m *MachinePoolScope) updateReplicasAndProviderIDs(ctx context.Context) error { + ctx, span := tele.Tracer().Start(ctx, "scope.MachinePoolScope.UpdateInstanceStatuses") + defer span.End() + + machines, err := m.getMachinePoolMachines(ctx) if err != nil { - return errors.Wrap(err, "failed to get node status by provider id") + return errors.Wrap(err, "failed to get machine pool machines") } var readyReplicas int32 - instanceStatuses := make([]*infrav1exp.AzureMachinePoolInstanceStatus, len(instances)) - for i, instance := range instances { - instanceStatuses[i] = &infrav1exp.AzureMachinePoolInstanceStatus{ - ProviderID: azure.ProviderIDPrefix + instance.ID, - InstanceID: instance.InstanceID, - InstanceName: instance.Name, - ProvisioningState: &instance.State, + providerIDs := make([]string, len(machines)) + for i, machine := range machines { + if machine.Status.Ready { + readyReplicas++ } + providerIDs[i] = machine.Spec.ProviderID + } + + m.AzureMachinePool.Status.Replicas = readyReplicas + m.AzureMachinePool.Spec.ProviderIDList = providerIDs + return nil +} + +func (m *MachinePoolScope) getMachinePoolMachines(ctx context.Context) ([]infrav1exp.AzureMachinePoolMachine, error) { + ctx, span := tele.Tracer().Start(ctx, "scope.MachinePoolScope.getMachinePoolMachines") + defer span.End() + + labels := map[string]string{ + clusterv1.ClusterLabelName: m.ClusterName(), + infrav1exp.MachinePoolNameLabel: m.AzureMachinePool.Name, + } + ampml := &infrav1exp.AzureMachinePoolMachineList{} + if err := m.client.List(ctx, ampml, client.InNamespace(m.AzureMachinePool.Namespace), client.MatchingLabels(labels)); err != nil { + return nil, errors.Wrap(err, "failed to list AzureMachinePoolMachines") + } + + return ampml.Items, nil +} + +func (m *MachinePoolScope) applyAzureMachinePoolMachines(ctx context.Context) error { + ctx, span := tele.Tracer().Start(ctx, "scope.MachinePoolScope.applyAzureMachinePoolMachines") + defer span.End() + + if m.vmssState == nil { + m.Info("vmssState is nil") + return nil + } + + labels := map[string]string{ + clusterv1.ClusterLabelName: m.ClusterName(), + infrav1exp.MachinePoolNameLabel: m.AzureMachinePool.Name, + } + ampml := &infrav1exp.AzureMachinePoolMachineList{} + if err := m.client.List(ctx, ampml, client.InNamespace(m.AzureMachinePool.Namespace), client.MatchingLabels(labels)); err != nil { + return errors.Wrap(err, "failed to list AzureMachinePoolMachines") + } - instanceStatus := instanceStatuses[i] - if nodeStatus, ok := nodeStatusByProviderID[instanceStatus.ProviderID]; ok { - instanceStatus.Version = nodeStatus.Version - if m.MachinePool.Spec.Template.Spec.Version != nil { - instanceStatus.LatestModelApplied = instanceStatus.Version == *m.MachinePool.Spec.Template.Spec.Version + existingMachinesByProviderID := make(map[string]infrav1exp.AzureMachinePoolMachine, len(ampml.Items)) + for _, machine := range ampml.Items { + existingMachinesByProviderID[machine.Spec.ProviderID] = machine + } + + // determine which machines need to be created to reflect the current state in Azure + azureMachinesByProviderID := m.vmssState.InstancesByProviderID() + for key, val := range azureMachinesByProviderID { + if _, ok := existingMachinesByProviderID[key]; !ok { + m.V(4).Info("creating AzureMachinePoolMachine", "providerID", key) + if err := m.createMachine(ctx, val); err != nil { + return errors.Wrap(err, "failed creating AzureMachinePoolMachine") } + continue + } + } - if nodeStatus.Ready { - readyReplicas++ + deleted := false + // delete machines that no longer exist in Azure 
+ for key, machine := range existingMachinesByProviderID { + machine := machine + if _, ok := azureMachinesByProviderID[key]; !ok { + deleted = true + m.V(4).Info("deleting AzureMachinePoolMachine because it no longer exists in the VMSS", "providerID", key) + delete(existingMachinesByProviderID, key) + if err := m.client.Delete(ctx, &machine); err != nil { + return errors.Wrap(err, "failed deleting AzureMachinePoolMachine to reduce replica count") } } } - m.AzureMachinePool.Status.Replicas = readyReplicas - m.AzureMachinePool.Spec.ProviderIDList = providerIDs - m.AzureMachinePool.Status.Instances = instanceStatuses + if deleted { + m.V(4).Info("exiting early due to finding AzureMachinePoolMachine(s) that were deleted because they no longer exist in the VMSS") + // exit early to be less greedy about delete + return nil + } + + if m.GetLongRunningOperationState() != nil { + m.V(4).Info("exiting early due to an in-progress long running operation on the ScaleSet") + // exit early to be less greedy about delete + return nil + } + + deleteSelector := m.getDeploymentStrategy() + if deleteSelector == nil { + m.V(4).Info("can not select AzureMachinePoolMachines to delete because no deployment strategy is specified") + return nil + } + + // select machines to delete to lower the replica count + toDelete, err := deleteSelector.SelectMachinesToDelete(ctx, m.DesiredReplicas(), existingMachinesByProviderID) + if err != nil { + return errors.Wrap(err, "failed selecting AzureMachinePoolMachine(s) to delete") + } + + for _, machine := range toDelete { + machine := machine + m.Info("deleting selected AzureMachinePoolMachine", "providerID", machine.Spec.ProviderID) + if err := m.client.Delete(ctx, &machine); err != nil { + return errors.Wrap(err, "failed deleting AzureMachinePoolMachine to reduce replica count") + } + } + + m.V(4).Info("done reconciling AzureMachinePoolMachine(s)") return nil } -// SaveK8sVersion stores the MachinePool spec K8s version to the AzureMachinePool status -func (m *MachinePoolScope) SaveK8sVersion() { - m.AzureMachinePool.Status.Version = *m.MachinePool.Spec.Template.Spec.Version +func (m *MachinePoolScope) createMachine(ctx context.Context, machine azure.VMSSVM) error { + if machine.InstanceID == "" { + return errors.New("machine.InstanceID must not be empty") + } + + if machine.Name == "" { + return errors.New("machine.Name must not be empty") + } + + ampm := infrav1exp.AzureMachinePoolMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: strings.Join([]string{m.AzureMachinePool.Name, machine.InstanceID}, "-"), + Namespace: m.AzureMachinePool.Namespace, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: infrav1exp.GroupVersion.String(), + Kind: "AzureMachinePool", + Name: m.AzureMachinePool.Name, + BlockOwnerDeletion: to.BoolPtr(true), + UID: m.AzureMachinePool.UID, + }, + }, + Labels: map[string]string{ + m.ClusterName(): string(infrav1.ResourceLifecycleOwned), + clusterv1.ClusterLabelName: m.ClusterName(), + infrav1exp.MachinePoolNameLabel: m.AzureMachinePool.Name, + }, + }, + Spec: infrav1exp.AzureMachinePoolMachineSpec{ + ProviderID: machine.ProviderID(), + InstanceID: machine.InstanceID, + }, + } + + controllerutil.AddFinalizer(&ampm, infrav1exp.AzureMachinePoolMachineFinalizer) + conditions.MarkFalse(&ampm, infrav1.VMRunningCondition, string(infrav1.Creating), clusterv1.ConditionSeverityInfo, "") + if err := m.client.Create(ctx, &ampm); err != nil { + return errors.Wrapf(err, "failed creating AzureMachinePoolMachine %s in AzureMachinePool %s", machine.ID,
m.AzureMachinePool.Name) + } + + return nil } // SetLongRunningOperationState will set the future on the AzureMachinePool status to allow the resource to continue @@ -227,18 +382,38 @@ func (m *MachinePoolScope) GetLongRunningOperationState() *infrav1.Future { return m.AzureMachinePool.Status.LongRunningOperationState } -// SetProvisioningState sets the AzureMachinePool provisioning state. -func (m *MachinePoolScope) SetProvisioningState(v infrav1.ProvisioningState) { +// setProvisioningStateAndConditions sets the AzureMachinePool provisioning state and conditions. +func (m *MachinePoolScope) setProvisioningStateAndConditions(v infrav1.ProvisioningState) { + m.AzureMachinePool.Status.ProvisioningState = &v switch { case v == infrav1.Succeeded && *m.MachinePool.Spec.Replicas == m.AzureMachinePool.Status.Replicas: // vmss is provisioned with enough ready replicas - m.AzureMachinePool.Status.ProvisioningState = &v + conditions.MarkTrue(m.AzureMachinePool, infrav1.ScaleSetRunningCondition) + conditions.MarkTrue(m.AzureMachinePool, infrav1.ScaleSetModelUpdatedCondition) + conditions.MarkTrue(m.AzureMachinePool, infrav1.ScaleSetDesiredReplicasCondition) + m.SetReady() case v == infrav1.Succeeded && *m.MachinePool.Spec.Replicas != m.AzureMachinePool.Status.Replicas: // not enough ready or too many ready replicas we must still be scaling up or down updatingState := infrav1.Updating m.AzureMachinePool.Status.ProvisioningState = &updatingState + if *m.MachinePool.Spec.Replicas > m.AzureMachinePool.Status.Replicas { + conditions.MarkFalse(m.AzureMachinePool, infrav1.ScaleSetDesiredReplicasCondition, infrav1.ScaleSetScaleUpReason, clusterv1.ConditionSeverityInfo, "") + } else { + conditions.MarkFalse(m.AzureMachinePool, infrav1.ScaleSetDesiredReplicasCondition, infrav1.ScaleSetScaleDownReason, clusterv1.ConditionSeverityInfo, "") + } + m.SetNotReady() + case v == infrav1.Updating: + conditions.MarkFalse(m.AzureMachinePool, infrav1.ScaleSetModelUpdatedCondition, infrav1.ScaleSetModelOutOfDateReason, clusterv1.ConditionSeverityInfo, "") + m.SetNotReady() + case v == infrav1.Creating: + conditions.MarkFalse(m.AzureMachinePool, infrav1.ScaleSetRunningCondition, infrav1.ScaleSetCreatingReason, clusterv1.ConditionSeverityInfo, "") + m.SetNotReady() + case v == infrav1.Deleting: + conditions.MarkFalse(m.AzureMachinePool, infrav1.ScaleSetRunningCondition, infrav1.ScaleSetDeletingReason, clusterv1.ConditionSeverityInfo, "") + m.SetNotReady() default: - m.AzureMachinePool.Status.ProvisioningState = &v + conditions.MarkFalse(m.AzureMachinePool, infrav1.ScaleSetRunningCondition, string(v), clusterv1.ConditionSeverityInfo, "") + m.SetNotReady() } } @@ -317,6 +492,18 @@ func (m *MachinePoolScope) Close(ctx context.Context) error { ctx, span := tele.Tracer().Start(ctx, "scope.MachinePoolScope.Close") defer span.End() + if m.vmssState != nil { + if err := m.applyAzureMachinePoolMachines(ctx); err != nil { + m.Error(err, "failed to apply changes to the AzureMachinePoolMachines") + return errors.Wrap(err, "failed to apply changes to AzureMachinePoolMachines") + } + + m.setProvisioningStateAndConditions(m.vmssState.State) + if err := m.updateReplicasAndProviderIDs(ctx); err != nil { + return errors.Wrap(err, "failed to update replicas and providerIDs") + } + } + return m.patchHelper.Patch(ctx, m.AzureMachinePool) } @@ -349,13 +536,27 @@ func (m *MachinePoolScope) GetVMImage() (*infrav1.Image, error) { return m.AzureMachinePool.Spec.Template.Image, nil } + var ( + err error + defaultImage *infrav1.Image + ) if 
m.AzureMachinePool.Spec.Template.OSDisk.OSType == azure.WindowsOS { - m.Info("No image specified for machine, using default Windows Image", "machine", m.MachinePool.GetName()) - return azure.GetDefaultWindowsImage(to.String(m.MachinePool.Spec.Template.Spec.Version)) + m.V(4).Info("No image specified for machine, using default Windows Image", "machine", m.MachinePool.GetName()) + defaultImage, err = azure.GetDefaultWindowsImage(to.String(m.MachinePool.Spec.Template.Spec.Version)) + } else { + defaultImage, err = azure.GetDefaultUbuntuImage(to.String(m.MachinePool.Spec.Template.Spec.Version)) + } + + if err != nil { + return defaultImage, errors.Wrap(err, "failed to get default OS image") } - m.Info("No image specified for machine, using default", "machine", m.MachinePool.GetName()) - return azure.GetDefaultUbuntuImage(to.String(m.MachinePool.Spec.Template.Spec.Version)) + return defaultImage, nil +} + +// SaveVMImageToStatus persists the AzureMachinePool image to the status +func (m *MachinePoolScope) SaveVMImageToStatus(image *infrav1.Image) { + m.AzureMachinePool.Status.Image = image } // RoleAssignmentSpecs returns the role assignment specs. @@ -391,72 +592,10 @@ func (m *MachinePoolScope) VMSSExtensionSpecs() []azure.VMSSExtensionSpec { return []azure.VMSSExtensionSpec{} } -func (m *MachinePoolScope) getNodeStatusByProviderID(ctx context.Context, providerIDList []string) (map[string]*NodeStatus, error) { - ctx, span := tele.Tracer().Start(ctx, "scope.MachinePoolScope.getNodeStatusByProviderID") - defer span.End() - - nodeStatusMap := map[string]*NodeStatus{} - for _, id := range providerIDList { - nodeStatusMap[id] = &NodeStatus{} - } - - workloadClient, err := m.getWorkloadClient(ctx) - if err != nil { - return nil, errors.Wrap(err, "failed to create the workload cluster client") - } - - nodeList := corev1.NodeList{} - for { - if err := workloadClient.List(ctx, &nodeList, client.Continue(nodeList.Continue)); err != nil { - return nil, errors.Wrap(err, "failed to List nodes") - } - - for _, node := range nodeList.Items { - if status, ok := nodeStatusMap[node.Spec.ProviderID]; ok { - status.Ready = nodeIsReady(node) - status.Version = node.Status.NodeInfo.KubeletVersion - } - } - - if nodeList.Continue == "" { - break - } - } - - return nodeStatusMap, nil -} - -func (m *MachinePoolScope) getWorkloadClient(ctx context.Context) (client.Client, error) { - ctx, span := tele.Tracer().Start(ctx, "scope.MachinePoolScope.getWorkloadClient") - defer span.End() - - obj := client.ObjectKey{ - Namespace: m.MachinePool.Namespace, - Name: m.ClusterName(), - } - dataBytes, err := utilkubeconfig.FromSecret(ctx, m.client, obj) - if err != nil { - return nil, errors.Wrapf(err, "\"%s-kubeconfig\" not found in namespace %q", obj.Name, obj.Namespace) - } - - config, err := clientcmd.Load(dataBytes) - if err != nil { - return nil, errors.Wrapf(err, "failed to load \"%s-kubeconfig\" in namespace %q", obj.Name, obj.Namespace) - } - - restConfig, err := clientcmd.NewDefaultClientConfig(*config, &clientcmd.ConfigOverrides{}).ClientConfig() - if err != nil { - return nil, errors.Wrapf(err, "failed transform config \"%s-kubeconfig\" in namespace %q", obj.Name, obj.Namespace) +func (m *MachinePoolScope) getDeploymentStrategy() machinepool.TypedDeleteSelector { + if m.AzureMachinePool == nil { + return nil } - return client.New(restConfig, client.Options{}) -} - -func nodeIsReady(node corev1.Node) bool { - for _, n := range node.Status.Conditions { - if n.Type == corev1.NodeReady { - return n.Status == 
corev1.ConditionTrue - } - } - return false + return machinepool.NewMachinePoolDeploymentStrategy(m.AzureMachinePool.Spec.Strategy) } diff --git a/azure/scope/machinepool_test.go b/azure/scope/machinepool_test.go index 7a0cd5b6f9e..abf2fc3a310 100644 --- a/azure/scope/machinepool_test.go +++ b/azure/scope/machinepool_test.go @@ -17,12 +17,25 @@ limitations under the License. package scope import ( + "context" + "fmt" "testing" + "github.com/Azure/go-autorest/autorest/to" + "github.com/golang/mock/gomock" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/klog/v2/klogr" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha4" - + "sigs.k8s.io/cluster-api-provider-azure/azure" infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha4" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" + clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha4" + "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/controller-runtime/pkg/client/fake" ) func TestMachinePoolScope_Name(t *testing.T) { @@ -66,6 +79,7 @@ func TestMachinePoolScope_Name(t *testing.T) { want: "win-23456", }, } + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got := tt.machinePoolScope.Name() @@ -79,3 +93,564 @@ func TestMachinePoolScope_Name(t *testing.T) { }) } } + +func TestMachinePoolScope_SetBootstrapConditions(t *testing.T) { + cases := []struct { + Name string + Setup func() (provisioningState string, extensionName string) + Verify func(g *WithT, amp *infrav1exp.AzureMachinePool, err error) + }{ + { + Name: "should set bootstrap succeeded condition if provisioning state succeeded", + Setup: func() (provisioningState string, extensionName string) { + return string(infrav1.Succeeded), "foo" + }, + Verify: func(g *WithT, amp *infrav1exp.AzureMachinePool, err error) { + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(conditions.IsTrue(amp, infrav1.BootstrapSucceededCondition)) + }, + }, + { + Name: "should set bootstrap succeeded false condition with reason if provisioning state creating", + Setup: func() (provisioningState string, extensionName string) { + return string(infrav1.Creating), "bazz" + }, + Verify: func(g *WithT, amp *infrav1exp.AzureMachinePool, err error) { + g.Expect(err).To(MatchError("reconcile error occurred that can be recovered. Object will be requeued after 30s The actual error is: extension still provisioning")) + g.Expect(conditions.IsFalse(amp, infrav1.BootstrapSucceededCondition)) + g.Expect(conditions.GetReason(amp, infrav1.BootstrapSucceededCondition)).To(Equal(infrav1.BootstrapInProgressReason)) + severity := conditions.GetSeverity(amp, infrav1.BootstrapSucceededCondition) + g.Expect(severity).ToNot(BeNil()) + g.Expect(*severity).To(Equal(clusterv1.ConditionSeverityInfo)) + }, + }, + { + Name: "should set bootstrap succeeded false condition with reason if provisioning state failed", + Setup: func() (provisioningState string, extensionName string) { + return string(infrav1.Failed), "buzz" + }, + Verify: func(g *WithT, amp *infrav1exp.AzureMachinePool, err error) { + g.Expect(err).To(MatchError("reconcile error occurred that cannot be recovered. Object will not be requeued. 
The actual error is: extension state failed")) + g.Expect(conditions.IsFalse(amp, infrav1.BootstrapSucceededCondition)) + g.Expect(conditions.GetReason(amp, infrav1.BootstrapSucceededCondition)).To(Equal(infrav1.BootstrapFailedReason)) + severity := conditions.GetSeverity(amp, infrav1.BootstrapSucceededCondition) + g.Expect(severity).ToNot(BeNil()) + g.Expect(*severity).To(Equal(clusterv1.ConditionSeverityError)) + }, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + var ( + g = NewWithT(t) + mockCtrl = gomock.NewController(t) + ) + defer mockCtrl.Finish() + + state, name := c.Setup() + s := &MachinePoolScope{ + AzureMachinePool: &infrav1exp.AzureMachinePool{}, + Logger: klogr.New(), + } + err := s.SetBootstrapConditions(state, name) + c.Verify(g, s.AzureMachinePool, err) + }) + } +} + +func TestMachinePoolScope_MaxSurge(t *testing.T) { + cases := []struct { + Name string + Setup func(mp *clusterv1exp.MachinePool, amp *infrav1exp.AzureMachinePool) + Verify func(g *WithT, surge int, err error) + }{ + { + Name: "default surge should be 1 if no deployment strategy is set", + Verify: func(g *WithT, surge int, err error) { + g.Expect(surge).To(Equal(1)) + g.Expect(err).ToNot(HaveOccurred()) + }, + }, + { + Name: "default surge should be 1 regardless of replica count with no surger", + Setup: func(mp *clusterv1exp.MachinePool, amp *infrav1exp.AzureMachinePool) { + mp.Spec.Replicas = to.Int32Ptr(3) + }, + Verify: func(g *WithT, surge int, err error) { + g.Expect(surge).To(Equal(1)) + g.Expect(err).ToNot(HaveOccurred()) + }, + }, + { + Name: "default surge should be 2 as specified by the surger", + Setup: func(mp *clusterv1exp.MachinePool, amp *infrav1exp.AzureMachinePool) { + mp.Spec.Replicas = to.Int32Ptr(3) + two := intstr.FromInt(2) + amp.Spec.Strategy = infrav1exp.AzureMachinePoolDeploymentStrategy{ + Type: infrav1exp.RollingUpdateAzureMachinePoolDeploymentStrategyType, + RollingUpdate: &infrav1exp.MachineRollingUpdateDeployment{ + MaxSurge: &two, + }, + } + }, + Verify: func(g *WithT, surge int, err error) { + g.Expect(surge).To(Equal(2)) + g.Expect(err).ToNot(HaveOccurred()) + }, + }, + { + Name: "default surge should be 2 (50%) of the desired replicas", + Setup: func(mp *clusterv1exp.MachinePool, amp *infrav1exp.AzureMachinePool) { + mp.Spec.Replicas = to.Int32Ptr(4) + fiftyPercent := intstr.FromString("50%") + amp.Spec.Strategy = infrav1exp.AzureMachinePoolDeploymentStrategy{ + Type: infrav1exp.RollingUpdateAzureMachinePoolDeploymentStrategyType, + RollingUpdate: &infrav1exp.MachineRollingUpdateDeployment{ + MaxSurge: &fiftyPercent, + }, + } + }, + Verify: func(g *WithT, surge int, err error) { + g.Expect(surge).To(Equal(2)) + g.Expect(err).ToNot(HaveOccurred()) + }, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + var ( + g = NewWithT(t) + mockCtrl = gomock.NewController(t) + amp = &infrav1exp.AzureMachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "amp1", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + Name: "mp1", + Kind: "MachinePool", + APIVersion: clusterv1exp.GroupVersion.String(), + }, + }, + }, + } + mp = &clusterv1exp.MachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mp1", + Namespace: "default", + }, + } + ) + defer mockCtrl.Finish() + + if c.Setup != nil { + c.Setup(mp, amp) + } + + s := &MachinePoolScope{ + MachinePool: mp, + AzureMachinePool: amp, + Logger: klogr.New(), + } + surge, err := s.MaxSurge() + c.Verify(g, surge, err) + }) + } +} + +func 
TestMachinePoolScope_SaveVMImageToStatus(t *testing.T) { + var ( + g = NewWithT(t) + mockCtrl = gomock.NewController(t) + amp = &infrav1exp.AzureMachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "amp1", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + Name: "mp1", + Kind: "MachinePool", + APIVersion: clusterv1exp.GroupVersion.String(), + }, + }, + }, + } + s = &MachinePoolScope{ + AzureMachinePool: amp, + Logger: klogr.New(), + } + image = &infrav1.Image{ + Marketplace: &infrav1.AzureMarketplaceImage{ + Publisher: "cncf-upstream", + Offer: "capi", + SKU: "k8s-1dot19dot11-ubuntu-1804", + Version: "latest", + ThirdPartyImage: false, + }, + } + ) + defer mockCtrl.Finish() + + s.SaveVMImageToStatus(image) + g.Expect(s.AzureMachinePool.Status.Image).To(Equal(image)) +} + +func TestMachinePoolScope_GetVMImage(t *testing.T) { + cases := []struct { + Name string + Setup func(mp *clusterv1exp.MachinePool, amp *infrav1exp.AzureMachinePool) + Verify func(g *WithT, amp *infrav1exp.AzureMachinePool, vmImage *infrav1.Image, err error) + }{ + { + Name: "should set and default the image if no image is specified for the AzureMachinePool", + Setup: func(mp *clusterv1exp.MachinePool, amp *infrav1exp.AzureMachinePool) { + mp.Spec.Template.Spec.Version = to.StringPtr("v1.19.11") + }, + Verify: func(g *WithT, amp *infrav1exp.AzureMachinePool, vmImage *infrav1.Image, err error) { + g.Expect(err).ToNot(HaveOccurred()) + image := &infrav1.Image{ + Marketplace: &infrav1.AzureMarketplaceImage{ + Publisher: "cncf-upstream", + Offer: "capi", + SKU: "k8s-1dot19dot11-ubuntu-1804", + Version: "latest", + ThirdPartyImage: false, + }, + } + g.Expect(vmImage).To(Equal(image)) + g.Expect(amp.Spec.Template.Image).To(BeNil()) + }, + }, + { + Name: "should not default or set the image on the AzureMachinePool if it already exists", + Setup: func(mp *clusterv1exp.MachinePool, amp *infrav1exp.AzureMachinePool) { + mp.Spec.Template.Spec.Version = to.StringPtr("v1.19.11") + amp.Spec.Template.Image = &infrav1.Image{ + Marketplace: &infrav1.AzureMarketplaceImage{ + Publisher: "cncf-upstream", + Offer: "capi", + SKU: "k8s-1dot19dot19-ubuntu-1804", + Version: "latest", + ThirdPartyImage: false, + }, + } + }, + Verify: func(g *WithT, amp *infrav1exp.AzureMachinePool, vmImage *infrav1.Image, err error) { + g.Expect(err).ToNot(HaveOccurred()) + image := &infrav1.Image{ + Marketplace: &infrav1.AzureMarketplaceImage{ + Publisher: "cncf-upstream", + Offer: "capi", + SKU: "k8s-1dot19dot19-ubuntu-1804", + Version: "latest", + ThirdPartyImage: false, + }, + } + g.Expect(vmImage).To(Equal(image)) + g.Expect(amp.Spec.Template.Image).To(Equal(image)) + }, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + var ( + g = NewWithT(t) + mockCtrl = gomock.NewController(t) + amp = &infrav1exp.AzureMachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "amp1", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + Name: "mp1", + Kind: "MachinePool", + APIVersion: clusterv1exp.GroupVersion.String(), + }, + }, + }, + } + mp = &clusterv1exp.MachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mp1", + Namespace: "default", + }, + } + ) + defer mockCtrl.Finish() + + if c.Setup != nil { + c.Setup(mp, amp) + } + + s := &MachinePoolScope{ + MachinePool: mp, + AzureMachinePool: amp, + Logger: klogr.New(), + } + image, err := s.GetVMImage() + c.Verify(g, amp, image, err) + }) + } +} + +func TestMachinePoolScope_NeedsRequeue(t *testing.T) { + cases := []struct { + Name string + Setup func(mp 
*clusterv1exp.MachinePool, amp *infrav1exp.AzureMachinePool, vmss *azure.VMSS) + Verify func(g *WithT, requeue bool) + }{ + { + Name: "should requeue if the machine is not in succeeded state", + Setup: func(mp *clusterv1exp.MachinePool, amp *infrav1exp.AzureMachinePool, vmss *azure.VMSS) { + creating := infrav1.Creating + mp.Spec.Replicas = to.Int32Ptr(0) + amp.Status.ProvisioningState = &creating + }, + Verify: func(g *WithT, requeue bool) { + g.Expect(requeue).To(BeTrue()) + }, + }, + { + Name: "should not requeue if the machine is in succeeded state", + Setup: func(mp *clusterv1exp.MachinePool, amp *infrav1exp.AzureMachinePool, vmss *azure.VMSS) { + succeeded := infrav1.Succeeded + mp.Spec.Replicas = to.Int32Ptr(0) + amp.Status.ProvisioningState = &succeeded + }, + Verify: func(g *WithT, requeue bool) { + g.Expect(requeue).To(BeFalse()) + }, + }, + { + Name: "should requeue if the machine is in succeeded state but desired replica count does not match", + Setup: func(mp *clusterv1exp.MachinePool, amp *infrav1exp.AzureMachinePool, vmss *azure.VMSS) { + succeeded := infrav1.Succeeded + mp.Spec.Replicas = to.Int32Ptr(1) + amp.Status.ProvisioningState = &succeeded + }, + Verify: func(g *WithT, requeue bool) { + g.Expect(requeue).To(BeTrue()) + }, + }, + { + Name: "should not requeue if the machine is in succeeded state but desired replica count does match", + Setup: func(mp *clusterv1exp.MachinePool, amp *infrav1exp.AzureMachinePool, vmss *azure.VMSS) { + succeeded := infrav1.Succeeded + mp.Spec.Replicas = to.Int32Ptr(1) + amp.Status.ProvisioningState = &succeeded + vmss.Instances = []azure.VMSSVM{ + { + Name: "instance1", + }, + } + }, + Verify: func(g *WithT, requeue bool) { + g.Expect(requeue).To(BeFalse()) + }, + }, + { + Name: "should requeue if an instance VM image does not match the VM image of the VMSS", + Setup: func(mp *clusterv1exp.MachinePool, amp *infrav1exp.AzureMachinePool, vmss *azure.VMSS) { + succeeded := infrav1.Succeeded + mp.Spec.Replicas = to.Int32Ptr(1) + amp.Status.ProvisioningState = &succeeded + vmss.Instances = []azure.VMSSVM{ + { + Name: "instance1", + Image: infrav1.Image{ + Marketplace: &infrav1.AzureMarketplaceImage{ + Version: "foo1", + }, + }, + }, + } + }, + Verify: func(g *WithT, requeue bool) { + g.Expect(requeue).To(BeTrue()) + }, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + var ( + g = NewWithT(t) + mockCtrl = gomock.NewController(t) + amp = &infrav1exp.AzureMachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "amp1", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + Name: "mp1", + Kind: "MachinePool", + APIVersion: clusterv1exp.GroupVersion.String(), + }, + }, + }, + } + mp = &clusterv1exp.MachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mp1", + Namespace: "default", + }, + } + vmssState = &azure.VMSS{} + ) + defer mockCtrl.Finish() + + if c.Setup != nil { + c.Setup(mp, amp, vmssState) + } + + s := &MachinePoolScope{ + vmssState: vmssState, + MachinePool: mp, + AzureMachinePool: amp, + Logger: klogr.New(), + } + c.Verify(g, s.NeedsRequeue()) + }) + } +} + +func TestMachinePoolScope_updateReplicasAndProviderIDs(t *testing.T) { + scheme := runtime.NewScheme() + _ = clusterv1.AddToScheme(scheme) + _ = infrav1exp.AddToScheme(scheme) + + cases := []struct { + Name string + Setup func(cb *fake.ClientBuilder) + Verify func(g *WithT, amp *infrav1exp.AzureMachinePool, err error) + }{ + { + Name: "if there are three ready machines with matching labels, then should count them", + Setup: func(cb 
*fake.ClientBuilder) { + for _, machine := range getReadyAzureMachinePoolMachines(3) { + obj := machine + cb.WithObjects(&obj) + } + }, + Verify: func(g *WithT, amp *infrav1exp.AzureMachinePool, err error) { + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(amp.Status.Replicas).To(BeEquivalentTo(3)) + g.Expect(amp.Spec.ProviderIDList).To(ConsistOf("/foo/ampm0", "/foo/ampm1", "/foo/ampm2")) + }, + }, + { + Name: "should only count machines with matching machine pool label", + Setup: func(cb *fake.ClientBuilder) { + machines := getReadyAzureMachinePoolMachines(3) + machines[0].Labels[infrav1exp.MachinePoolNameLabel] = "not_correct" + for _, machine := range machines { + obj := machine + cb.WithObjects(&obj) + } + }, + Verify: func(g *WithT, amp *infrav1exp.AzureMachinePool, err error) { + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(amp.Status.Replicas).To(BeEquivalentTo(2)) + }, + }, + { + Name: "should only count machines with matching cluster name label", + Setup: func(cb *fake.ClientBuilder) { + machines := getReadyAzureMachinePoolMachines(3) + machines[0].Labels[clusterv1.ClusterLabelName] = "not_correct" + for _, machine := range machines { + obj := machine + cb.WithObjects(&obj) + } + }, + Verify: func(g *WithT, amp *infrav1exp.AzureMachinePool, err error) { + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(amp.Status.Replicas).To(BeEquivalentTo(2)) + }, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + var ( + g = NewWithT(t) + mockCtrl = gomock.NewController(t) + cb = fake.NewClientBuilder().WithScheme(scheme) + cluster = &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + Namespace: "default", + }, + Spec: clusterv1.ClusterSpec{ + InfrastructureRef: &corev1.ObjectReference{ + Name: "azCluster1", + }, + }, + Status: clusterv1.ClusterStatus{ + InfrastructureReady: true, + }, + } + amp = &infrav1exp.AzureMachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "amp1", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + Name: "mp1", + Kind: "MachinePool", + APIVersion: clusterv1exp.GroupVersion.String(), + }, + }, + }, + } + ) + defer mockCtrl.Finish() + + c.Setup(cb.WithObjects(amp, cluster)) + s := &MachinePoolScope{ + client: cb.Build(), + ClusterScoper: &ClusterScope{ + Cluster: cluster, + }, + AzureMachinePool: amp, + Logger: klogr.New(), + } + err := s.updateReplicasAndProviderIDs(context.TODO()) + c.Verify(g, s.AzureMachinePool, err) + }) + } +} + +func getReadyAzureMachinePoolMachines(count int32) []infrav1exp.AzureMachinePoolMachine { + machines := make([]infrav1exp.AzureMachinePoolMachine, count) + for i := 0; i < int(count); i++ { + machines[i] = infrav1exp.AzureMachinePoolMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("ampm%d", i), + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + Name: "amp", + Kind: "AzureMachinePool", + APIVersion: infrav1exp.GroupVersion.String(), + }, + }, + Labels: map[string]string{ + clusterv1.ClusterLabelName: "cluster1", + infrav1exp.MachinePoolNameLabel: "amp1", + }, + }, + Spec: infrav1exp.AzureMachinePoolMachineSpec{ + ProviderID: fmt.Sprintf("/foo/ampm%d", i), + }, + Status: infrav1exp.AzureMachinePoolMachineStatus{ + Ready: true, + }, + } + } + + return machines +} diff --git a/azure/scope/machinepoolmachine.go b/azure/scope/machinepoolmachine.go new file mode 100644 index 00000000000..f6b0d8ae46c --- /dev/null +++ b/azure/scope/machinepoolmachine.go @@ -0,0 +1,367 @@ +/* +Copyright 2021 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scope + +import ( + "context" + "reflect" + + "github.com/go-logr/logr" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/klog/v2/klogr" + "k8s.io/utils/pointer" + "sigs.k8s.io/cluster-api/controllers/noderefutil" + capierrors "sigs.k8s.io/cluster-api/errors" + capiv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha4" + utilkubeconfig "sigs.k8s.io/cluster-api/util/kubeconfig" + "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/controller-runtime/pkg/client" + + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha4" + "sigs.k8s.io/cluster-api-provider-azure/azure" + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha4" + "sigs.k8s.io/cluster-api-provider-azure/util/tele" +) + +type ( + nodeGetter interface { + GetNodeByProviderID(ctx context.Context, providerID string) (*corev1.Node, error) + GetNodeByObjectReference(ctx context.Context, nodeRef corev1.ObjectReference) (*corev1.Node, error) + } + + workloadClusterProxy struct { + Client client.Client + Cluster client.ObjectKey + } + + // MachinePoolMachineScopeParams defines the input parameters used to create a new MachinePoolScope. + MachinePoolMachineScopeParams struct { + AzureMachinePool *infrav1exp.AzureMachinePool + AzureMachinePoolMachine *infrav1exp.AzureMachinePoolMachine + Client client.Client + ClusterScope azure.ClusterScoper + Logger logr.Logger + MachinePool *capiv1exp.MachinePool + + // workloadNodeGetter is only used for testing purposes and provides a way for mocking requests to the workload cluster + workloadNodeGetter nodeGetter + } + + // MachinePoolMachineScope defines a scope defined around a machine pool machine. + MachinePoolMachineScope struct { + azure.ClusterScoper + logr.Logger + AzureMachinePoolMachine *infrav1exp.AzureMachinePoolMachine + AzureMachinePool *infrav1exp.AzureMachinePool + MachinePool *capiv1exp.MachinePool + MachinePoolScope *MachinePoolScope + client client.Client + patchHelper *patch.Helper + instance *azure.VMSSVM + + // workloadNodeGetter is only used for testing purposes and provides a way for mocking requests to the workload cluster + workloadNodeGetter nodeGetter + } +) + +// NewMachinePoolMachineScope creates a new MachinePoolMachineScope from the supplied parameters. +// This is meant to be called for each reconcile iteration. 
+func NewMachinePoolMachineScope(params MachinePoolMachineScopeParams) (*MachinePoolMachineScope, error) { + if params.Client == nil { + return nil, errors.New("client is required when creating a MachinePoolScope") + } + + if params.ClusterScope == nil { + return nil, errors.New("cluster scope is required when creating a MachinePoolScope") + } + + if params.MachinePool == nil { + return nil, errors.New("machine pool is required when creating a MachinePoolScope") + } + + if params.AzureMachinePool == nil { + return nil, errors.New("azure machine pool is required when creating a MachinePoolScope") + } + + if params.AzureMachinePoolMachine == nil { + return nil, errors.New("azure machine pool machine is required when creating a MachinePoolScope") + } + + if params.workloadNodeGetter == nil { + params.workloadNodeGetter = newWorkloadClusterProxy( + params.Client, + client.ObjectKey{ + Namespace: params.MachinePool.Namespace, + Name: params.ClusterScope.ClusterName(), + }, + ) + } + + if params.Logger == nil { + params.Logger = klogr.New() + } + + mpScope, err := NewMachinePoolScope(MachinePoolScopeParams{ + Client: params.Client, + Logger: params.Logger, + MachinePool: params.MachinePool, + AzureMachinePool: params.AzureMachinePool, + ClusterScope: params.ClusterScope, + }) + if err != nil { + return nil, errors.Wrap(err, "failed to build machine pool scope") + } + + helper, err := patch.NewHelper(params.AzureMachinePoolMachine, params.Client) + if err != nil { + return nil, errors.Wrap(err, "failed to init patch helper") + } + + return &MachinePoolMachineScope{ + AzureMachinePool: params.AzureMachinePool, + AzureMachinePoolMachine: params.AzureMachinePoolMachine, + ClusterScoper: params.ClusterScope, + Logger: params.Logger, + MachinePool: params.MachinePool, + MachinePoolScope: mpScope, + client: params.Client, + patchHelper: helper, + workloadNodeGetter: params.workloadNodeGetter, + }, nil +} + +// Name is the name of the Machine Pool Machine +func (s *MachinePoolMachineScope) Name() string { + return s.AzureMachinePoolMachine.Name +} + +// InstanceID is the unique ID of the machine within the Machine Pool +func (s *MachinePoolMachineScope) InstanceID() string { + return s.AzureMachinePoolMachine.Spec.InstanceID +} + +// ScaleSetName is the name of the VMSS +func (s *MachinePoolMachineScope) ScaleSetName() string { + return s.MachinePoolScope.Name() +} + +// GetLongRunningOperationState gets a future representing the current state of a long running operation if one exists +func (s *MachinePoolMachineScope) GetLongRunningOperationState() *infrav1.Future { + return s.AzureMachinePoolMachine.Status.LongRunningOperationState +} + +// SetLongRunningOperationState sets a future representing the current state of a long running operation +func (s *MachinePoolMachineScope) SetLongRunningOperationState(future *infrav1.Future) { + s.AzureMachinePoolMachine.Status.LongRunningOperationState = future +} + +// SetVMSSVM update the scope with the current state of the VMSS VM +func (s *MachinePoolMachineScope) SetVMSSVM(instance *azure.VMSSVM) { + s.instance = instance +} + +// ProvisioningState returns the AzureMachinePoolMachine provisioning state. 
+func (s *MachinePoolMachineScope) ProvisioningState() infrav1.ProvisioningState { + if s.AzureMachinePoolMachine.Status.ProvisioningState != nil { + return *s.AzureMachinePoolMachine.Status.ProvisioningState + } + return "" +} + +// IsReady indicates the machine has successfully provisioned and has a node ref associated +func (s *MachinePoolMachineScope) IsReady() bool { + state := s.AzureMachinePoolMachine.Status.ProvisioningState + return s.AzureMachinePoolMachine.Status.Ready && state != nil && *state == infrav1.Succeeded +} + +// SetFailureMessage sets the AzureMachinePoolMachine status failure message. +func (s *MachinePoolMachineScope) SetFailureMessage(v error) { + s.AzureMachinePool.Status.FailureMessage = pointer.StringPtr(v.Error()) +} + +// SetFailureReason sets the AzureMachinePoolMachine status failure reason. +func (s *MachinePoolMachineScope) SetFailureReason(v capierrors.MachineStatusError) { + s.AzureMachinePool.Status.FailureReason = &v +} + +// ProviderID returns the AzureMachinePoolMachine providerID from Spec.ProviderID. +func (s *MachinePoolMachineScope) ProviderID() string { + return s.AzureMachinePoolMachine.Spec.ProviderID +} + +// Close updates the state of MachinePoolMachine +func (s *MachinePoolMachineScope) Close(ctx context.Context) error { + ctx, span := tele.Tracer().Start(ctx, "scope.MachinePoolMachineScope.Close") + defer span.End() + + return s.patchHelper.Patch(ctx, s.AzureMachinePoolMachine) +} + +// UpdateStatus updates the node reference for the machine and other status fields. This func should be called at the +// end of a reconcile request and after updating the scope with the most recent Azure data. +func (s *MachinePoolMachineScope) UpdateStatus(ctx context.Context) error { + ctx, span := tele.Tracer().Start(ctx, "scope.MachinePoolMachineScope.UpdateStatus") + defer span.End() + + var ( + nodeRef = s.AzureMachinePoolMachine.Status.NodeRef + node *corev1.Node + err error + ) + if nodeRef == nil || nodeRef.Name == "" { + node, err = s.workloadNodeGetter.GetNodeByProviderID(ctx, s.ProviderID()) + } else { + node, err = s.workloadNodeGetter.GetNodeByObjectReference(ctx, *nodeRef) + } + + if err != nil && !apierrors.IsNotFound(err) { + return errors.Wrap(err, "failed to get node by providerID or object reference") + } + + if node != nil { + s.AzureMachinePoolMachine.Status.NodeRef = &corev1.ObjectReference{ + Kind: node.Kind, + Namespace: node.Namespace, + Name: node.Name, + UID: node.UID, + APIVersion: node.APIVersion, + } + + s.AzureMachinePoolMachine.Status.Ready = noderefutil.IsNodeReady(node) + s.AzureMachinePoolMachine.Status.Version = node.Status.NodeInfo.KubeletVersion + } + + if s.instance != nil { + hasLatestModel, err := s.hasLatestModelApplied() + if err != nil { + return errors.Wrap(err, "failed to determine if the VMSS instance has the latest model") + } + + s.AzureMachinePoolMachine.Status.LatestModelApplied = hasLatestModel + s.AzureMachinePoolMachine.Status.ProvisioningState = &s.instance.State + } + + return nil +} + +func (s *MachinePoolMachineScope) hasLatestModelApplied() (bool, error) { + if s.instance == nil { + return false, errors.New("instance must not be nil") + } + + image, err := s.MachinePoolScope.GetVMImage() + if err != nil { + return false, errors.Wrap(err, "unable to build vm image information from MachinePoolScope") + } + + // this should never happen as GetVMImage should only return nil when err != nil. Just in case.
+ if image == nil { + return false, errors.New("machinepoolscope image must not be nil") + } + + // if the images match, then the VM is of the same model + return reflect.DeepEqual(s.instance.Image, *image), nil +} + +func newWorkloadClusterProxy(c client.Client, cluster client.ObjectKey) *workloadClusterProxy { + return &workloadClusterProxy{ + Client: c, + Cluster: cluster, + } +} + +// GetNodeByObjectReference will fetch a *corev1.Node via a node object reference +func (np *workloadClusterProxy) GetNodeByObjectReference(ctx context.Context, nodeRef corev1.ObjectReference) (*corev1.Node, error) { + workloadClient, err := getWorkloadClient(ctx, np.Client, np.Cluster) + if err != nil { + return nil, errors.Wrap(err, "failed to create the workload cluster client") + } + + var node corev1.Node + err = workloadClient.Get(ctx, client.ObjectKey{ + Namespace: nodeRef.Namespace, + Name: nodeRef.Name, + }, &node) + + return &node, err +} + +// GetNodeByProviderID will fetch a node from the workload cluster by its providerID +func (np *workloadClusterProxy) GetNodeByProviderID(ctx context.Context, providerID string) (*corev1.Node, error) { + ctx, span := tele.Tracer().Start(ctx, "scope.MachinePoolMachineScope.getNode") + defer span.End() + + workloadClient, err := getWorkloadClient(ctx, np.Client, np.Cluster) + if err != nil { + return nil, errors.Wrap(err, "failed to create the workload cluster client") + } + + return getNodeByProviderID(ctx, workloadClient, providerID) +} + +func getNodeByProviderID(ctx context.Context, workloadClient client.Client, providerID string) (*corev1.Node, error) { + ctx, span := tele.Tracer().Start(ctx, "scope.MachinePoolMachineScope.getNodeRefForProviderID") + defer span.End() + + nodeList := corev1.NodeList{} + for { + if err := workloadClient.List(ctx, &nodeList, client.Continue(nodeList.Continue)); err != nil { + return nil, errors.Wrap(err, "failed to List nodes") + } + + for _, node := range nodeList.Items { + if node.Spec.ProviderID == providerID { + return &node, nil + } + } + + if nodeList.Continue == "" { + break + } + } + + return nil, nil +} + +func getWorkloadClient(ctx context.Context, c client.Client, cluster client.ObjectKey) (client.Client, error) { + ctx, span := tele.Tracer().Start(ctx, "scope.MachinePoolMachineScope.getWorkloadClient") + defer span.End() + + obj := client.ObjectKey{ + Namespace: cluster.Namespace, + Name: cluster.Name, + } + dataBytes, err := utilkubeconfig.FromSecret(ctx, c, obj) + if err != nil { + return nil, errors.Wrapf(err, "\"%s-kubeconfig\" not found in namespace %q", obj.Name, obj.Namespace) + } + + config, err := clientcmd.Load(dataBytes) + if err != nil { + return nil, errors.Wrapf(err, "failed to load \"%s-kubeconfig\" in namespace %q", obj.Name, obj.Namespace) + } + + restConfig, err := clientcmd.NewDefaultClientConfig(*config, &clientcmd.ConfigOverrides{}).ClientConfig() + if err != nil { + return nil, errors.Wrapf(err, "failed to transform config \"%s-kubeconfig\" in namespace %q", obj.Name, obj.Namespace) + } + + return client.New(restConfig, client.Options{}) +} diff --git a/azure/scope/machinepoolmachine_test.go b/azure/scope/machinepoolmachine_test.go new file mode 100644 index 00000000000..3a1cce4f224 --- /dev/null +++ b/azure/scope/machinepoolmachine_test.go @@ -0,0 +1,355 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scope + +import ( + "context" + "testing" + + "github.com/Azure/go-autorest/autorest/to" + "github.com/golang/mock/gomock" + . "github.com/onsi/gomega" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha4" + "sigs.k8s.io/cluster-api-provider-azure/azure" + mock_scope "sigs.k8s.io/cluster-api-provider-azure/azure/scope/mocks" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha4" + gomock2 "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" + capiv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha4" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +const ( + FakeProviderID = "/foo/bin/bazz" +) + +func TestNewMachinePoolMachineScope(t *testing.T) { + scheme := runtime.NewScheme() + _ = capiv1exp.AddToScheme(scheme) + _ = infrav1.AddToScheme(scheme) + + cases := []struct { + Name string + Input MachinePoolMachineScopeParams + Err string + }{ + { + Name: "successfully create machine scope", + Input: MachinePoolMachineScopeParams{ + Client: fake.NewClientBuilder().WithScheme(scheme).Build(), + ClusterScope: &ClusterScope{ + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "clusterName", + }, + }, + }, + MachinePool: new(capiv1exp.MachinePool), + AzureMachinePool: new(infrav1.AzureMachinePool), + AzureMachinePoolMachine: new(infrav1.AzureMachinePoolMachine), + }, + }, + { + Name: "no client", + Input: MachinePoolMachineScopeParams{ + ClusterScope: new(ClusterScope), + MachinePool: new(capiv1exp.MachinePool), + AzureMachinePool: new(infrav1.AzureMachinePool), + AzureMachinePoolMachine: new(infrav1.AzureMachinePoolMachine), + }, + Err: "client is required when creating a MachinePoolScope", + }, + { + Name: "no ClusterScope", + Input: MachinePoolMachineScopeParams{ + Client: fake.NewClientBuilder().WithScheme(scheme).Build(), + MachinePool: new(capiv1exp.MachinePool), + AzureMachinePool: new(infrav1.AzureMachinePool), + AzureMachinePoolMachine: new(infrav1.AzureMachinePoolMachine), + }, + Err: "cluster scope is required when creating a MachinePoolScope", + }, + { + Name: "no MachinePool", + Input: MachinePoolMachineScopeParams{ + Client: fake.NewClientBuilder().WithScheme(scheme).Build(), + ClusterScope: new(ClusterScope), + AzureMachinePool: new(infrav1.AzureMachinePool), + AzureMachinePoolMachine: new(infrav1.AzureMachinePoolMachine), + }, + Err: "machine pool is required when creating a MachinePoolScope", + }, + { + Name: "no AzureMachinePool", + Input: MachinePoolMachineScopeParams{ + Client: fake.NewClientBuilder().WithScheme(scheme).Build(), + ClusterScope: new(ClusterScope), + MachinePool: new(capiv1exp.MachinePool), + AzureMachinePoolMachine: new(infrav1.AzureMachinePoolMachine), + }, + Err: "azure machine pool is required when creating a MachinePoolScope", + }, + { + Name: "no AzureMachinePoolMachine", + Input: MachinePoolMachineScopeParams{ + Client: fake.NewClientBuilder().WithScheme(scheme).Build(), + ClusterScope: new(ClusterScope), + 
MachinePool:      new(capiv1exp.MachinePool),
+				AzureMachinePool: new(infrav1.AzureMachinePool),
+			},
+			Err: "azure machine pool machine is required when creating a MachinePoolScope",
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.Name, func(t *testing.T) {
+			g := NewWithT(t)
+			s, err := NewMachinePoolMachineScope(c.Input)
+			if c.Err != "" {
+				g.Expect(err).To(MatchError(c.Err))
+			} else {
+				g.Expect(err).ToNot(HaveOccurred())
+				g.Expect(s).ToNot(BeNil())
+			}
+		})
+	}
+}
+
+func TestMachineScope_UpdateStatus(t *testing.T) {
+	scheme := runtime.NewScheme()
+	_ = capiv1exp.AddToScheme(scheme)
+	_ = infrav1.AddToScheme(scheme)
+
+	var (
+		clusterScope = ClusterScope{
+			Cluster: &clusterv1.Cluster{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "cluster-foo",
+				},
+			},
+		}
+	)
+
+	cases := []struct {
+		Name   string
+		Setup  func(mockNodeGetter *mock_scope.MocknodeGetter, ampm *infrav1.AzureMachinePoolMachine) (*azure.VMSSVM, *infrav1.AzureMachinePoolMachine)
+		Verify func(g *WithT, scope *MachinePoolMachineScope)
+		Err    string
+	}{
+		{
+			Name: "should set kubernetes version, ready, and node reference upon finding the node",
+			Setup: func(mockNodeGetter *mock_scope.MocknodeGetter, ampm *infrav1.AzureMachinePoolMachine) (*azure.VMSSVM, *infrav1.AzureMachinePoolMachine) {
+				mockNodeGetter.EXPECT().GetNodeByProviderID(gomock2.AContext(), FakeProviderID).Return(getReadyNode(), nil)
+				return nil, ampm
+			},
+			Verify: func(g *WithT, scope *MachinePoolMachineScope) {
+				g.Expect(scope.AzureMachinePoolMachine.Status).To(Equal(infrav1.AzureMachinePoolMachineStatus{
+					Ready:   true,
+					Version: "1.2.3",
+					NodeRef: &corev1.ObjectReference{
+						Name: "node1",
+					},
+				}))
+			},
+		},
+		{
+			Name: "should not mark AMPM ready if node is not ready",
+			Setup: func(mockNodeGetter *mock_scope.MocknodeGetter, ampm *infrav1.AzureMachinePoolMachine) (*azure.VMSSVM, *infrav1.AzureMachinePoolMachine) {
+				mockNodeGetter.EXPECT().GetNodeByProviderID(gomock2.AContext(), FakeProviderID).Return(getNotReadyNode(), nil)
+				return nil, ampm
+			},
+			Verify: func(g *WithT, scope *MachinePoolMachineScope) {
+				g.Expect(scope.AzureMachinePoolMachine.Status).To(Equal(infrav1.AzureMachinePoolMachineStatus{
+					Ready:   false,
+					Version: "1.2.3",
+					NodeRef: &corev1.ObjectReference{
+						Name: "node1",
+					},
+				}))
+			},
+		},
+		{
+			Name: "fails fetching the node",
+			Setup: func(mockNodeGetter *mock_scope.MocknodeGetter, ampm *infrav1.AzureMachinePoolMachine) (*azure.VMSSVM, *infrav1.AzureMachinePoolMachine) {
+				mockNodeGetter.EXPECT().GetNodeByProviderID(gomock2.AContext(), FakeProviderID).Return(nil, errors.New("boom"))
+				return nil, ampm
+			},
+			Err: "failed to get node by providerID or object reference: boom",
+		},
+		{
+			Name: "node is not found",
+			Setup: func(mockNodeGetter *mock_scope.MocknodeGetter, ampm *infrav1.AzureMachinePoolMachine) (*azure.VMSSVM, *infrav1.AzureMachinePoolMachine) {
+				mockNodeGetter.EXPECT().GetNodeByProviderID(gomock2.AContext(),
FakeProviderID).Return(nil, nil) + return nil, ampm + }, + Verify: func(g *WithT, scope *MachinePoolMachineScope) { + g.Expect(scope.AzureMachinePoolMachine.Status).To(Equal(infrav1.AzureMachinePoolMachineStatus{})) + }, + }, + { + Name: "node is found by ObjectReference", + Setup: func(mockNodeGetter *mock_scope.MocknodeGetter, ampm *infrav1.AzureMachinePoolMachine) (*azure.VMSSVM, *infrav1.AzureMachinePoolMachine) { + nodeRef := corev1.ObjectReference{ + Name: "node1", + } + ampm.Status.NodeRef = &nodeRef + mockNodeGetter.EXPECT().GetNodeByObjectReference(gomock2.AContext(), nodeRef).Return(getReadyNode(), nil) + return nil, ampm + }, + Verify: func(g *WithT, scope *MachinePoolMachineScope) { + g.Expect(scope.AzureMachinePoolMachine.Status).To(Equal(infrav1.AzureMachinePoolMachineStatus{ + NodeRef: &corev1.ObjectReference{ + Name: "node1", + }, + Version: "1.2.3", + Ready: true, + })) + }, + }, + { + Name: "instance information with latest model populates the AMPM status", + Setup: func(mockNodeGetter *mock_scope.MocknodeGetter, ampm *infrav1.AzureMachinePoolMachine) (*azure.VMSSVM, *infrav1.AzureMachinePoolMachine) { + mockNodeGetter.EXPECT().GetNodeByProviderID(gomock2.AContext(), FakeProviderID).Return(nil, nil) + return &azure.VMSSVM{ + State: v1alpha4.Succeeded, + Image: v1alpha4.Image{ + Marketplace: &v1alpha4.AzureMarketplaceImage{ + Publisher: "cncf-upstream", + Offer: "capi", + SKU: "k8s-1dot19dot11-ubuntu-1804", + Version: "latest", + }, + }, + }, ampm + }, + Verify: func(g *WithT, scope *MachinePoolMachineScope) { + succeeded := v1alpha4.Succeeded + g.Expect(scope.AzureMachinePoolMachine.Status).To(Equal(infrav1.AzureMachinePoolMachineStatus{ + ProvisioningState: &succeeded, + LatestModelApplied: true, + })) + }, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + var ( + controller = gomock.NewController(t) + mockClient = mock_scope.NewMocknodeGetter(controller) + g = NewWithT(t) + params = MachinePoolMachineScopeParams{ + Client: fake.NewClientBuilder().WithScheme(scheme).Build(), + ClusterScope: &clusterScope, + MachinePool: &capiv1exp.MachinePool{ + Spec: capiv1exp.MachinePoolSpec{ + Template: clusterv1.MachineTemplateSpec{ + Spec: clusterv1.MachineSpec{ + Version: to.StringPtr("v1.19.11"), + }, + }, + }, + }, + AzureMachinePool: new(infrav1.AzureMachinePool), + } + ) + + defer controller.Finish() + + instance, ampm := c.Setup(mockClient, &infrav1.AzureMachinePoolMachine{ + Spec: infrav1.AzureMachinePoolMachineSpec{ + ProviderID: FakeProviderID, + }, + }) + params.AzureMachinePoolMachine = ampm + s, err := NewMachinePoolMachineScope(params) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(s).ToNot(BeNil()) + s.instance = instance + s.workloadNodeGetter = mockClient + + err = s.UpdateStatus(context.TODO()) + if c.Err == "" { + g.Expect(err).To(Succeed()) + } else { + g.Expect(err).To(MatchError(c.Err)) + } + + if c.Verify != nil { + c.Verify(g, s) + } + }) + } +} + +func getReadyNode() *corev1.Node { + return &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node1", + }, + Status: corev1.NodeStatus{ + NodeInfo: corev1.NodeSystemInfo{ + KubeletVersion: "1.2.3", + }, + Conditions: []corev1.NodeCondition{ + { + Type: corev1.NodeReady, + Status: corev1.ConditionTrue, + }, + }, + }, + } +} + +func getNotReadyNode() *corev1.Node { + return &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node1", + }, + Status: corev1.NodeStatus{ + NodeInfo: corev1.NodeSystemInfo{ + KubeletVersion: "1.2.3", + }, + Conditions: []corev1.NodeCondition{ + { + 
Type: corev1.NodeReady, + Status: corev1.ConditionFalse, + }, + }, + }, + } +} diff --git a/azure/scope/mocks/doc.go b/azure/scope/mocks/doc.go new file mode 100644 index 00000000000..18992b72ef1 --- /dev/null +++ b/azure/scope/mocks/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Run go generate to regenerate this mock. +//go:generate ../../../hack/tools/bin/mockgen -destination node_getter_mock.go -package mock_scope -source ../machinepoolmachine.go nodeGetter +//go:generate /usr/bin/env bash -c "cat ../../../hack/boilerplate/boilerplate.generatego.txt node_getter_mock.go > _node_getter_mock.go && mv _node_getter_mock.go node_getter_mock.go" +package mock_scope //nolint diff --git a/azure/scope/mocks/node_getter_mock.go b/azure/scope/mocks/node_getter_mock.go new file mode 100644 index 00000000000..b2f9a35a2ef --- /dev/null +++ b/azure/scope/mocks/node_getter_mock.go @@ -0,0 +1,82 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by MockGen. DO NOT EDIT. +// Source: ../machinepoolmachine.go + +// Package mock_scope is a generated GoMock package. +package mock_scope + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + v1 "k8s.io/api/core/v1" +) + +// MocknodeGetter is a mock of nodeGetter interface. +type MocknodeGetter struct { + ctrl *gomock.Controller + recorder *MocknodeGetterMockRecorder +} + +// MocknodeGetterMockRecorder is the mock recorder for MocknodeGetter. +type MocknodeGetterMockRecorder struct { + mock *MocknodeGetter +} + +// NewMocknodeGetter creates a new mock instance. +func NewMocknodeGetter(ctrl *gomock.Controller) *MocknodeGetter { + mock := &MocknodeGetter{ctrl: ctrl} + mock.recorder = &MocknodeGetterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MocknodeGetter) EXPECT() *MocknodeGetterMockRecorder { + return m.recorder +} + +// GetNodeByObjectReference mocks base method. +func (m *MocknodeGetter) GetNodeByObjectReference(ctx context.Context, nodeRef v1.ObjectReference) (*v1.Node, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNodeByObjectReference", ctx, nodeRef) + ret0, _ := ret[0].(*v1.Node) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNodeByObjectReference indicates an expected call of GetNodeByObjectReference. 
+func (mr *MocknodeGetterMockRecorder) GetNodeByObjectReference(ctx, nodeRef interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeByObjectReference", reflect.TypeOf((*MocknodeGetter)(nil).GetNodeByObjectReference), ctx, nodeRef) +} + +// GetNodeByProviderID mocks base method. +func (m *MocknodeGetter) GetNodeByProviderID(ctx context.Context, providerID string) (*v1.Node, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNodeByProviderID", ctx, providerID) + ret0, _ := ret[0].(*v1.Node) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNodeByProviderID indicates an expected call of GetNodeByProviderID. +func (mr *MocknodeGetterMockRecorder) GetNodeByProviderID(ctx, providerID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeByProviderID", reflect.TypeOf((*MocknodeGetter)(nil).GetNodeByProviderID), ctx, providerID) +} diff --git a/azure/scope/strategies/machinepool_deployments/machinepool_deployment_strategy.go b/azure/scope/strategies/machinepool_deployments/machinepool_deployment_strategy.go new file mode 100644 index 00000000000..2257f019e17 --- /dev/null +++ b/azure/scope/strategies/machinepool_deployments/machinepool_deployment_strategy.go @@ -0,0 +1,297 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package machinepool + +import ( + "context" + "math/rand" + "sort" + "time" + + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/util/intstr" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha4" + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha4" + "sigs.k8s.io/cluster-api-provider-azure/util/tele" + ctrl "sigs.k8s.io/controller-runtime" +) + +type ( + // Surger is the ability to surge a number of replica. + Surger interface { + Surge(desiredReplicaCount int) (int, error) + } + + // DeleteSelector is the ability to select nodes to be delete with respect to a desired number of replicas + DeleteSelector interface { + SelectMachinesToDelete(ctx context.Context, desiredReplicas int32, machinesByProviderID map[string]infrav1exp.AzureMachinePoolMachine) ([]infrav1exp.AzureMachinePoolMachine, error) + } + + // TypedDeleteSelector is the ability to select nodes to be deleted with respect to a desired number of nodes, and + // the ability to describe the underlying type of the deployment strategy. + TypedDeleteSelector interface { + DeleteSelector + Type() infrav1exp.AzureMachinePoolDeploymentStrategyType + } + + rollingUpdateStrategy struct { + infrav1exp.MachineRollingUpdateDeployment + } +) + +// NewMachinePoolDeploymentStrategy constructs a strategy implementation described in the AzureMachinePoolDeploymentStrategy +// specification. 
+func NewMachinePoolDeploymentStrategy(strategy infrav1exp.AzureMachinePoolDeploymentStrategy) TypedDeleteSelector { + switch strategy.Type { + case infrav1exp.RollingUpdateAzureMachinePoolDeploymentStrategyType: + rollingUpdate := strategy.RollingUpdate + if rollingUpdate == nil { + rollingUpdate = &infrav1exp.MachineRollingUpdateDeployment{} + } + + return &rollingUpdateStrategy{ + MachineRollingUpdateDeployment: *rollingUpdate, + } + default: + // default to a rolling update strategy if unknown type + return &rollingUpdateStrategy{ + MachineRollingUpdateDeployment: infrav1exp.MachineRollingUpdateDeployment{}, + } + } +} + +// Type is the AzureMachinePoolDeploymentStrategyType for the strategy +func (rollingUpdateStrategy *rollingUpdateStrategy) Type() infrav1exp.AzureMachinePoolDeploymentStrategyType { + return infrav1exp.RollingUpdateAzureMachinePoolDeploymentStrategyType +} + +// Surge calculates the number of replicas that can be added during an upgrade operation +func (rollingUpdateStrategy *rollingUpdateStrategy) Surge(desiredReplicaCount int) (int, error) { + if rollingUpdateStrategy.MaxSurge == nil { + return 1, nil + } + + return intstr.GetScaledValueFromIntOrPercent(rollingUpdateStrategy.MaxSurge, desiredReplicaCount, true) +} + +// maxUnavailable calculates the maximum number of replicas which can be unavailable at any time. +func (rollingUpdateStrategy *rollingUpdateStrategy) maxUnavailable(desiredReplicaCount int) (int, error) { + if rollingUpdateStrategy.MaxUnavailable != nil { + val, err := intstr.GetScaledValueFromIntOrPercent(rollingUpdateStrategy.MaxUnavailable, desiredReplicaCount, false) + if err != nil { + return 0, errors.Wrap(err, "failed to get scaled value or int from maxUnavailable") + } + + return val, nil + } + + return 0, nil +} + +// SelectMachinesToDelete selects the machines to delete based on the machine state, desired replica count, and +// the DeletePolicy. 
+func (rollingUpdateStrategy rollingUpdateStrategy) SelectMachinesToDelete(ctx context.Context, desiredReplicaCount int32, machinesByProviderID map[string]infrav1exp.AzureMachinePoolMachine) ([]infrav1exp.AzureMachinePoolMachine, error) { + ctx, span := tele.Tracer().Start(ctx, "strategies.rollingUpdateStrategy.SelectMachinesToDelete") + defer span.End() + + maxUnavailable, err := rollingUpdateStrategy.maxUnavailable(int(desiredReplicaCount)) + if err != nil { + return nil, err + } + + var ( + order = func() func(machines []infrav1exp.AzureMachinePoolMachine) []infrav1exp.AzureMachinePoolMachine { + switch rollingUpdateStrategy.DeletePolicy { + case infrav1exp.OldestDeletePolicyType: + return orderByOldest + case infrav1exp.NewestDeletePolicyType: + return orderByNewest + default: + return orderRandom + } + }() + log = ctrl.LoggerFrom(ctx).V(4) + failedMachines = order(getFailedMachines(machinesByProviderID)) + deletingMachines = order(getDeletingMachines(machinesByProviderID)) + readyMachines = order(getReadyMachines(machinesByProviderID)) + machinesWithoutLatestModel = order(getMachinesWithoutLatestModel(machinesByProviderID)) + overProvisionCount = len(readyMachines) - int(desiredReplicaCount) + disruptionBudget = func() int { + if maxUnavailable > int(desiredReplicaCount) { + return int(desiredReplicaCount) + } + + return len(readyMachines) - int(desiredReplicaCount) + maxUnavailable + }() + ) + + log.Info("selecting machines to delete", + "readyMachines", len(readyMachines), + "desiredReplicaCount", desiredReplicaCount, + "maxUnavailable", maxUnavailable, + "disruptionBudget", disruptionBudget, + "machinesWithoutTheLatestModel", len(machinesWithoutLatestModel), + "failedMachines", len(failedMachines), + ) + + // if we have failed or deleting machines, remove them + if len(failedMachines) > 0 || len(deletingMachines) > 0 { + log.Info("failed or deleting machines", "desiredReplicaCount", desiredReplicaCount, "maxUnavailable", maxUnavailable, "failedMachines", getProviderIDs(failedMachines), "deletingMachines", getProviderIDs(deletingMachines)) + return append(failedMachines, deletingMachines...), nil + } + + // if we have deleting machines, remove them + if len(failedMachines) > 0 { + log.Info("failed machines", "desiredReplicaCount", desiredReplicaCount, "maxUnavailable", maxUnavailable, "failedMachines", getProviderIDs(failedMachines)) + return failedMachines, nil + } + + // if we have not yet reached our desired count, don't try to delete anything but failed machines + if len(readyMachines) < int(desiredReplicaCount) { + log.Info("not enough ready machines", "desiredReplicaCount", desiredReplicaCount, "readyMachinesCount", len(readyMachines), "machinesByProviderID", len(machinesByProviderID)) + return []infrav1exp.AzureMachinePoolMachine{}, nil + } + + // we have too many machines, let's choose the oldest to remove + if overProvisionCount > 0 { + var toDelete []infrav1exp.AzureMachinePoolMachine + log.Info("over-provisioned", "desiredReplicaCount", desiredReplicaCount, "overProvisionCount", overProvisionCount, "machinesWithoutLatestModel", getProviderIDs(machinesWithoutLatestModel)) + // we are over-provisioned try to remove old models + for _, v := range machinesWithoutLatestModel { + if len(toDelete) >= overProvisionCount { + return toDelete, nil + } + + toDelete = append(toDelete, v) + } + + log.Info("over-provisioned ready", "desiredReplicaCount", desiredReplicaCount, "overProvisionCount", overProvisionCount, "readyMachines", getProviderIDs(readyMachines)) + // remove ready 
machines
+		for _, v := range readyMachines {
+			if len(toDelete) >= overProvisionCount {
+				return toDelete, nil
+			}
+
+			toDelete = append(toDelete, v)
+		}
+
+		return toDelete, nil
+	}
+
+	if len(machinesWithoutLatestModel) <= 0 {
+		log.Info("nothing more to do since all the AzureMachinePoolMachine(s) are the latest model and not over-provisioned")
+		return []infrav1exp.AzureMachinePoolMachine{}, nil
+	}
+
+	if disruptionBudget <= 0 {
+		log.Info("exit early since disruption budget is less than or equal to zero", "disruptionBudget", disruptionBudget, "desiredReplicaCount", desiredReplicaCount, "maxUnavailable", maxUnavailable, "readyMachines", getProviderIDs(readyMachines), "readyMachinesCount", len(readyMachines))
+		return []infrav1exp.AzureMachinePoolMachine{}, nil
+	}
+
+	var toDelete []infrav1exp.AzureMachinePoolMachine
+	log.Info("removing ready machines within disruption budget", "desiredReplicaCount", desiredReplicaCount, "maxUnavailable", maxUnavailable, "readyMachines", getProviderIDs(readyMachines), "readyMachinesCount", len(readyMachines))
+	for _, v := range readyMachines {
+		if len(toDelete) >= disruptionBudget {
+			return toDelete, nil
+		}
+
+		if !v.Status.LatestModelApplied {
+			toDelete = append(toDelete, v)
+		}
+	}
+
+	log.Info("completed without filling toDelete", "toDelete", getProviderIDs(toDelete), "numToDelete", len(toDelete))
+	return toDelete, nil
+}
+
+func getFailedMachines(machinesByProviderID map[string]infrav1exp.AzureMachinePoolMachine) []infrav1exp.AzureMachinePoolMachine {
+	var machines []infrav1exp.AzureMachinePoolMachine
+	for _, v := range machinesByProviderID {
+		// machines with a Failed provisioning state
+		if v.Status.ProvisioningState != nil && *v.Status.ProvisioningState == infrav1.Failed {
+			machines = append(machines, v)
+		}
+	}
+
+	return machines
+}
+
+func getDeletingMachines(machinesByProviderID map[string]infrav1exp.AzureMachinePoolMachine) []infrav1exp.AzureMachinePoolMachine {
+	var machines []infrav1exp.AzureMachinePoolMachine
+	for _, v := range machinesByProviderID {
+		// machines with a Deleting provisioning state
+		if v.Status.ProvisioningState != nil && *v.Status.ProvisioningState == infrav1.Deleting {
+			machines = append(machines, v)
+		}
+	}
+
+	return machines
+}
+
+func getReadyMachines(machinesByProviderID map[string]infrav1exp.AzureMachinePoolMachine) []infrav1exp.AzureMachinePoolMachine {
+	var readyMachines []infrav1exp.AzureMachinePoolMachine
+	for _, v := range machinesByProviderID {
+		// ready status and provisioning state Succeeded
+		if v.Status.Ready && v.Status.ProvisioningState != nil && *v.Status.ProvisioningState == infrav1.Succeeded {
+			readyMachines = append(readyMachines, v)
+		}
+	}
+
+	return readyMachines
+}
+
+func getMachinesWithoutLatestModel(machinesByProviderID map[string]infrav1exp.AzureMachinePoolMachine) []infrav1exp.AzureMachinePoolMachine {
+	var machinesWithoutLatestModel []infrav1exp.AzureMachinePoolMachine
+	for _, v := range machinesByProviderID {
+		if !v.Status.LatestModelApplied {
+			machinesWithoutLatestModel = append(machinesWithoutLatestModel, v)
+		}
+	}
+
+	return machinesWithoutLatestModel
+}
+
+func orderByNewest(machines []infrav1exp.AzureMachinePoolMachine) []infrav1exp.AzureMachinePoolMachine {
+	sort.Slice(machines, func(i, j int) bool {
+		return machines[i].ObjectMeta.CreationTimestamp.After(machines[j].ObjectMeta.CreationTimestamp.Time)
+	})
+
+	return machines
+}
+
+func orderByOldest(machines
[]infrav1exp.AzureMachinePoolMachine) []infrav1exp.AzureMachinePoolMachine { + sort.Slice(machines, func(i, j int) bool { + return machines[j].ObjectMeta.CreationTimestamp.After(machines[i].ObjectMeta.CreationTimestamp.Time) + }) + + return machines +} + +func orderRandom(machines []infrav1exp.AzureMachinePoolMachine) []infrav1exp.AzureMachinePoolMachine { + rand.Seed(time.Now().UnixNano()) + rand.Shuffle(len(machines), func(i, j int) { machines[i], machines[j] = machines[j], machines[i] }) + return machines +} + +func getProviderIDs(machines []infrav1exp.AzureMachinePoolMachine) []string { + ids := make([]string, len(machines)) + for i, machine := range machines { + ids[i] = machine.Spec.ProviderID + } + + return ids +} diff --git a/azure/scope/strategies/machinepool_deployments/machinepool_deployment_strategy_test.go b/azure/scope/strategies/machinepool_deployments/machinepool_deployment_strategy_test.go new file mode 100644 index 00000000000..adf20789919 --- /dev/null +++ b/azure/scope/strategies/machinepool_deployments/machinepool_deployment_strategy_test.go @@ -0,0 +1,356 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package machinepool + +import ( + "context" + "testing" + "time" + + . "github.com/onsi/gomega" + "github.com/onsi/gomega/types" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha4" + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha4" + "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomega" +) + +func TestMachinePoolRollingUpdateStrategy_Type(t *testing.T) { + g := NewWithT(t) + strategy := NewMachinePoolDeploymentStrategy(infrav1exp.AzureMachinePoolDeploymentStrategy{ + Type: infrav1exp.RollingUpdateAzureMachinePoolDeploymentStrategyType, + }) + g.Expect(strategy.Type()).To(Equal(infrav1exp.RollingUpdateAzureMachinePoolDeploymentStrategyType)) +} + +func TestMachinePoolRollingUpdateStrategy_Surge(t *testing.T) { + var ( + two = intstr.FromInt(2) + twentyPercent = intstr.FromString("20%") + ) + + tests := []struct { + name string + strategy Surger + desiredReplicas int + want int + errStr string + }{ + { + name: "Strategy is empty", + strategy: &rollingUpdateStrategy{}, + want: 1, + }, + { + name: "MaxSurge is set to 2", + strategy: &rollingUpdateStrategy{ + MachineRollingUpdateDeployment: infrav1exp.MachineRollingUpdateDeployment{ + MaxSurge: &two, + }, + }, + want: 2, + }, + { + name: "MaxSurge is set to 20% and desiredReplicas is 20", + strategy: &rollingUpdateStrategy{ + MachineRollingUpdateDeployment: infrav1exp.MachineRollingUpdateDeployment{ + MaxSurge: &twentyPercent, + }, + }, + desiredReplicas: 20, + want: 4, + }, + { + name: "MaxSurge is set to 20% and desiredReplicas is 21; rounds up", + strategy: &rollingUpdateStrategy{ + MachineRollingUpdateDeployment: infrav1exp.MachineRollingUpdateDeployment{ + MaxSurge: &twentyPercent, + }, + }, + desiredReplicas: 21, + want: 5, + }, + } + + for _, tt := range 
tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + got, err := tt.strategy.Surge(tt.desiredReplicas) + if tt.errStr == "" { + g.Expect(err).To(Succeed()) + g.Expect(got).To(Equal(tt.want)) + } else { + g.Expect(err).To(MatchError(tt.errStr)) + } + }) + } +} + +func TestMachinePoolScope_maxUnavailable(t *testing.T) { + var ( + two = intstr.FromInt(2) + twentyPercent = intstr.FromString("20%") + ) + + tests := []struct { + name string + strategy *rollingUpdateStrategy + desiredReplicas int + want int + errStr string + }{ + { + name: "Strategy is empty", + strategy: &rollingUpdateStrategy{}, + }, + { + name: "MaxUnavailable is nil", + strategy: &rollingUpdateStrategy{ + MachineRollingUpdateDeployment: infrav1exp.MachineRollingUpdateDeployment{}, + }, + want: 0, + }, + { + name: "MaxUnavailable is set to 2", + strategy: &rollingUpdateStrategy{ + MachineRollingUpdateDeployment: infrav1exp.MachineRollingUpdateDeployment{ + MaxUnavailable: &two, + }, + }, + want: 2, + }, + { + name: "MaxUnavailable is set to 20%", + strategy: &rollingUpdateStrategy{ + MachineRollingUpdateDeployment: infrav1exp.MachineRollingUpdateDeployment{ + MaxUnavailable: &twentyPercent, + }, + }, + desiredReplicas: 20, + want: 4, + }, + { + name: "MaxUnavailable is set to 20% and it rounds down", + strategy: &rollingUpdateStrategy{ + MachineRollingUpdateDeployment: infrav1exp.MachineRollingUpdateDeployment{ + MaxUnavailable: &twentyPercent, + }, + }, + desiredReplicas: 21, + want: 4, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + got, err := tt.strategy.maxUnavailable(tt.desiredReplicas) + if tt.errStr == "" { + g.Expect(err).To(Succeed()) + g.Expect(got).To(Equal(tt.want)) + } else { + g.Expect(err).To(MatchError(tt.errStr)) + } + }) + } +} + +func TestMachinePoolRollingUpdateStrategy_SelectMachinesToDelete(t *testing.T) { + var ( + one = intstr.FromInt(1) + two = intstr.FromInt(2) + fortyFivePercent = intstr.FromString("45%") + thirtyPercent = intstr.FromString("30%") + succeeded = infrav1.Succeeded + baseTime = time.Now().Add(-24 * time.Hour).Truncate(time.Microsecond) + ) + + tests := []struct { + name string + strategy DeleteSelector + input map[string]infrav1exp.AzureMachinePoolMachine + desiredReplicas int32 + want types.GomegaMatcher + errStr string + }{ + { + name: "should not select machines to delete if less than desired replica count", + strategy: makeRollingUpdateStrategy(infrav1exp.MachineRollingUpdateDeployment{}), + desiredReplicas: 1, + input: map[string]infrav1exp.AzureMachinePoolMachine{ + "foo": makeAMPM(ampmOptions{Ready: true, LatestModel: true, ProvisioningState: succeeded}), + }, + want: Equal([]infrav1exp.AzureMachinePoolMachine{}), + }, + { + name: "if over-provisioned, select a machine with an out-of-date model", + strategy: makeRollingUpdateStrategy(infrav1exp.MachineRollingUpdateDeployment{}), + desiredReplicas: 2, + input: map[string]infrav1exp.AzureMachinePoolMachine{ + "foo": makeAMPM(ampmOptions{Ready: true, LatestModel: true, ProvisioningState: succeeded}), + "bin": makeAMPM(ampmOptions{Ready: true, LatestModel: true, ProvisioningState: succeeded}), + "baz": makeAMPM(ampmOptions{Ready: true, LatestModel: false, ProvisioningState: succeeded}), + }, + want: Equal([]infrav1exp.AzureMachinePoolMachine{ + makeAMPM(ampmOptions{Ready: true, LatestModel: false, ProvisioningState: succeeded}), + }), + }, + { + name: "if over-provisioned, select the oldest machine", + strategy: 
makeRollingUpdateStrategy(infrav1exp.MachineRollingUpdateDeployment{DeletePolicy: infrav1exp.OldestDeletePolicyType}), + desiredReplicas: 2, + input: map[string]infrav1exp.AzureMachinePoolMachine{ + "foo": makeAMPM(ampmOptions{Ready: true, LatestModel: true, ProvisioningState: succeeded, CreationTime: metav1.NewTime(baseTime.Add(1 * time.Hour))}), + "bin": makeAMPM(ampmOptions{Ready: true, LatestModel: true, ProvisioningState: succeeded, CreationTime: metav1.NewTime(baseTime.Add(2 * time.Hour))}), + "baz": makeAMPM(ampmOptions{Ready: true, LatestModel: true, ProvisioningState: succeeded, CreationTime: metav1.NewTime(baseTime.Add(3 * time.Hour))}), + }, + want: gomega.DiffEq([]infrav1exp.AzureMachinePoolMachine{ + makeAMPM(ampmOptions{Ready: true, LatestModel: true, ProvisioningState: succeeded, CreationTime: metav1.NewTime(baseTime.Add(1 * time.Hour))}), + }), + }, + { + name: "if over-provisioned, select machines ordered by creation date", + strategy: makeRollingUpdateStrategy(infrav1exp.MachineRollingUpdateDeployment{DeletePolicy: infrav1exp.OldestDeletePolicyType}), + desiredReplicas: 2, + input: map[string]infrav1exp.AzureMachinePoolMachine{ + "foo": makeAMPM(ampmOptions{Ready: true, LatestModel: true, ProvisioningState: succeeded, CreationTime: metav1.NewTime(baseTime.Add(4 * time.Hour))}), + "bin": makeAMPM(ampmOptions{Ready: true, LatestModel: true, ProvisioningState: succeeded, CreationTime: metav1.NewTime(baseTime.Add(3 * time.Hour))}), + "baz": makeAMPM(ampmOptions{Ready: true, LatestModel: true, ProvisioningState: succeeded, CreationTime: metav1.NewTime(baseTime.Add(2 * time.Hour))}), + "bar": makeAMPM(ampmOptions{Ready: true, LatestModel: true, ProvisioningState: succeeded, CreationTime: metav1.NewTime(baseTime.Add(1 * time.Hour))}), + }, + want: gomega.DiffEq([]infrav1exp.AzureMachinePoolMachine{ + makeAMPM(ampmOptions{Ready: true, LatestModel: true, ProvisioningState: succeeded, CreationTime: metav1.NewTime(baseTime.Add(1 * time.Hour))}), + makeAMPM(ampmOptions{Ready: true, LatestModel: true, ProvisioningState: succeeded, CreationTime: metav1.NewTime(baseTime.Add(2 * time.Hour))}), + }), + }, + { + name: "if over-provisioned, select machines ordered by newest first", + strategy: makeRollingUpdateStrategy(infrav1exp.MachineRollingUpdateDeployment{DeletePolicy: infrav1exp.NewestDeletePolicyType}), + desiredReplicas: 2, + input: map[string]infrav1exp.AzureMachinePoolMachine{ + "foo": makeAMPM(ampmOptions{Ready: true, LatestModel: true, ProvisioningState: succeeded, CreationTime: metav1.NewTime(baseTime.Add(4 * time.Hour))}), + "bin": makeAMPM(ampmOptions{Ready: true, LatestModel: true, ProvisioningState: succeeded, CreationTime: metav1.NewTime(baseTime.Add(3 * time.Hour))}), + "baz": makeAMPM(ampmOptions{Ready: true, LatestModel: true, ProvisioningState: succeeded, CreationTime: metav1.NewTime(baseTime.Add(2 * time.Hour))}), + "bar": makeAMPM(ampmOptions{Ready: true, LatestModel: true, ProvisioningState: succeeded, CreationTime: metav1.NewTime(baseTime.Add(1 * time.Hour))}), + }, + want: gomega.DiffEq([]infrav1exp.AzureMachinePoolMachine{ + makeAMPM(ampmOptions{Ready: true, LatestModel: true, ProvisioningState: succeeded, CreationTime: metav1.NewTime(baseTime.Add(4 * time.Hour))}), + makeAMPM(ampmOptions{Ready: true, LatestModel: true, ProvisioningState: succeeded, CreationTime: metav1.NewTime(baseTime.Add(3 * time.Hour))}), + }), + }, + { + name: "if maxUnavailable is 1, and 1 is not the latest model, delete it.", + strategy: 
makeRollingUpdateStrategy(infrav1exp.MachineRollingUpdateDeployment{MaxUnavailable: &one}), + desiredReplicas: 3, + input: map[string]infrav1exp.AzureMachinePoolMachine{ + "foo": makeAMPM(ampmOptions{Ready: true, LatestModel: true, ProvisioningState: succeeded}), + "bin": makeAMPM(ampmOptions{Ready: true, LatestModel: true, ProvisioningState: succeeded}), + "baz": makeAMPM(ampmOptions{Ready: true, LatestModel: false, ProvisioningState: succeeded}), + }, + want: Equal([]infrav1exp.AzureMachinePoolMachine{ + makeAMPM(ampmOptions{Ready: true, LatestModel: false, ProvisioningState: succeeded}), + }), + }, + { + name: "if maxUnavailable is 1, and all are the latest model, delete nothing.", + strategy: makeRollingUpdateStrategy(infrav1exp.MachineRollingUpdateDeployment{MaxUnavailable: &one}), + desiredReplicas: 3, + input: map[string]infrav1exp.AzureMachinePoolMachine{ + "foo": makeAMPM(ampmOptions{Ready: true, LatestModel: true, ProvisioningState: succeeded}), + "bin": makeAMPM(ampmOptions{Ready: true, LatestModel: true, ProvisioningState: succeeded}), + "baz": makeAMPM(ampmOptions{Ready: true, LatestModel: true, ProvisioningState: succeeded}), + }, + want: HaveLen(0), + }, + { + name: "if maxUnavailable is 2, and there are 2 with the latest model == false, delete 2.", + strategy: makeRollingUpdateStrategy(infrav1exp.MachineRollingUpdateDeployment{MaxUnavailable: &two}), + desiredReplicas: 3, + input: map[string]infrav1exp.AzureMachinePoolMachine{ + "foo": makeAMPM(ampmOptions{Ready: true, LatestModel: true, ProvisioningState: succeeded}), + "bin": makeAMPM(ampmOptions{Ready: true, LatestModel: false, ProvisioningState: succeeded}), + "baz": makeAMPM(ampmOptions{Ready: true, LatestModel: false, ProvisioningState: succeeded}), + }, + want: Equal([]infrav1exp.AzureMachinePoolMachine{ + makeAMPM(ampmOptions{Ready: true, LatestModel: false, ProvisioningState: succeeded}), + makeAMPM(ampmOptions{Ready: true, LatestModel: false, ProvisioningState: succeeded}), + }), + }, + { + name: "if maxUnavailable is 45%, and there are 2 with the latest model == false, delete 1.", + strategy: makeRollingUpdateStrategy(infrav1exp.MachineRollingUpdateDeployment{MaxUnavailable: &fortyFivePercent}), + desiredReplicas: 3, + input: map[string]infrav1exp.AzureMachinePoolMachine{ + "foo": makeAMPM(ampmOptions{Ready: true, LatestModel: true, ProvisioningState: succeeded}), + "bin": makeAMPM(ampmOptions{Ready: true, LatestModel: false, ProvisioningState: succeeded}), + "baz": makeAMPM(ampmOptions{Ready: true, LatestModel: false, ProvisioningState: succeeded}), + }, + want: HaveLen(1), + }, + { + name: "if maxUnavailable is 30%, and there are 2 with the latest model == false, delete 0.", + strategy: makeRollingUpdateStrategy(infrav1exp.MachineRollingUpdateDeployment{MaxUnavailable: &thirtyPercent}), + desiredReplicas: 3, + input: map[string]infrav1exp.AzureMachinePoolMachine{ + "foo": makeAMPM(ampmOptions{Ready: true, LatestModel: true, ProvisioningState: succeeded}), + "bin": makeAMPM(ampmOptions{Ready: true, LatestModel: false, ProvisioningState: succeeded}), + "baz": makeAMPM(ampmOptions{Ready: true, LatestModel: false, ProvisioningState: succeeded}), + }, + want: HaveLen(0), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + got, err := tt.strategy.SelectMachinesToDelete(context.Background(), tt.desiredReplicas, tt.input) + if tt.errStr == "" { + g.Expect(err).To(Succeed()) + g.Expect(got).To(tt.want) + } else { + g.Expect(err).To(MatchError(tt.errStr)) + } + }) + } +} + 
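[Editor's note] The rounding behavior asserted by the Surge and maxUnavailable tests above comes straight from the intstr helpers in k8s.io/apimachinery, which the rolling-update strategy calls with roundUp=true for MaxSurge and roundUp=false for MaxUnavailable. A minimal standalone sketch (not part of this change set) showing how a "20%" setting scales against 21 desired replicas:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	twentyPercent := intstr.FromString("20%")

	// MaxSurge rounds up: 20% of 21 desired replicas -> 5 extra instances.
	surge, _ := intstr.GetScaledValueFromIntOrPercent(&twentyPercent, 21, true)

	// MaxUnavailable rounds down: 20% of 21 desired replicas -> 4 instances.
	unavailable, _ := intstr.GetScaledValueFromIntOrPercent(&twentyPercent, 21, false)

	fmt.Println(surge, unavailable) // prints: 5 4
}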
+func makeRollingUpdateStrategy(rolling infrav1exp.MachineRollingUpdateDeployment) *rollingUpdateStrategy { + return &rollingUpdateStrategy{ + MachineRollingUpdateDeployment: rolling, + } +} + +type ampmOptions struct { + Ready bool + LatestModel bool + ProvisioningState infrav1.ProvisioningState + CreationTime metav1.Time +} + +func makeAMPM(opts ampmOptions) infrav1exp.AzureMachinePoolMachine { + return infrav1exp.AzureMachinePoolMachine{ + ObjectMeta: metav1.ObjectMeta{ + CreationTimestamp: opts.CreationTime, + }, + Status: infrav1exp.AzureMachinePoolMachineStatus{ + Ready: opts.Ready, + LatestModelApplied: opts.LatestModel, + ProvisioningState: &opts.ProvisioningState, + }, + } +} diff --git a/azure/services/resourceskus/cache.go b/azure/services/resourceskus/cache.go index f3fa19935e3..f43a4efade6 100644 --- a/azure/services/resourceskus/cache.go +++ b/azure/services/resourceskus/cache.go @@ -51,7 +51,7 @@ type Cache struct { // Cacher describes the ability to get and to add items to cache type Cacher interface { Get(key interface{}) (value interface{}, ok bool) - Add(key interface{}, value interface{}) + Add(key interface{}, value interface{}) bool } // NewCacheFunc allows for mocking out the underlying client @@ -89,7 +89,7 @@ func GetCache(auth azure.Authorizer, location string) (*Cache, error) { } c = newCache(auth, location) - clientCache.Add(key, c) + _ = clientCache.Add(key, c) return c.(*Cache), nil } diff --git a/azure/services/scalesets/mock_scalesets/scalesets_mock.go b/azure/services/scalesets/mock_scalesets/scalesets_mock.go index 96453b5a984..0e2af5edfbd 100644 --- a/azure/services/scalesets/mock_scalesets/scalesets_mock.go +++ b/azure/services/scalesets/mock_scalesets/scalesets_mock.go @@ -29,7 +29,6 @@ import ( gomock "github.com/golang/mock/gomock" v1alpha4 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha4" azure "sigs.k8s.io/cluster-api-provider-azure/azure" - v1alpha40 "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha4" ) // MockScaleSetScope is a mock of ScaleSetScope interface. @@ -301,18 +300,19 @@ func (mr *MockScaleSetScopeMockRecorder) Location() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Location", reflect.TypeOf((*MockScaleSetScope)(nil).Location)) } -// NeedsK8sVersionUpdate mocks base method. -func (m *MockScaleSetScope) NeedsK8sVersionUpdate() bool { +// MaxSurge mocks base method. +func (m *MockScaleSetScope) MaxSurge() (int, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NeedsK8sVersionUpdate") - ret0, _ := ret[0].(bool) - return ret0 + ret := m.ctrl.Call(m, "MaxSurge") + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(error) + return ret0, ret1 } -// NeedsK8sVersionUpdate indicates an expected call of NeedsK8sVersionUpdate. -func (mr *MockScaleSetScopeMockRecorder) NeedsK8sVersionUpdate() *gomock.Call { +// MaxSurge indicates an expected call of MaxSurge. +func (mr *MockScaleSetScopeMockRecorder) MaxSurge() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NeedsK8sVersionUpdate", reflect.TypeOf((*MockScaleSetScope)(nil).NeedsK8sVersionUpdate)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MaxSurge", reflect.TypeOf((*MockScaleSetScope)(nil).MaxSurge)) } // ResourceGroup mocks base method. @@ -329,16 +329,16 @@ func (mr *MockScaleSetScopeMockRecorder) ResourceGroup() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResourceGroup", reflect.TypeOf((*MockScaleSetScope)(nil).ResourceGroup)) } -// SaveK8sVersion mocks base method. 
-func (m *MockScaleSetScope) SaveK8sVersion() { +// SaveVMImageToStatus mocks base method. +func (m *MockScaleSetScope) SaveVMImageToStatus(arg0 *v1alpha4.Image) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SaveK8sVersion") + m.ctrl.Call(m, "SaveVMImageToStatus", arg0) } -// SaveK8sVersion indicates an expected call of SaveK8sVersion. -func (mr *MockScaleSetScopeMockRecorder) SaveK8sVersion() *gomock.Call { +// SaveVMImageToStatus indicates an expected call of SaveVMImageToStatus. +func (mr *MockScaleSetScopeMockRecorder) SaveVMImageToStatus(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveK8sVersion", reflect.TypeOf((*MockScaleSetScope)(nil).SaveK8sVersion)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveVMImageToStatus", reflect.TypeOf((*MockScaleSetScope)(nil).SaveVMImageToStatus), arg0) } // ScaleSetSpec mocks base method. @@ -391,16 +391,16 @@ func (mr *MockScaleSetScopeMockRecorder) SetProviderID(arg0 interface{}) *gomock return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetProviderID", reflect.TypeOf((*MockScaleSetScope)(nil).SetProviderID), arg0) } -// SetProvisioningState mocks base method. -func (m *MockScaleSetScope) SetProvisioningState(arg0 v1alpha4.ProvisioningState) { +// SetVMSSState mocks base method. +func (m *MockScaleSetScope) SetVMSSState(arg0 *azure.VMSS) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SetProvisioningState", arg0) + m.ctrl.Call(m, "SetVMSSState", arg0) } -// SetProvisioningState indicates an expected call of SetProvisioningState. -func (mr *MockScaleSetScopeMockRecorder) SetProvisioningState(arg0 interface{}) *gomock.Call { +// SetVMSSState indicates an expected call of SetVMSSState. +func (mr *MockScaleSetScopeMockRecorder) SetVMSSState(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetProvisioningState", reflect.TypeOf((*MockScaleSetScope)(nil).SetProvisioningState), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetVMSSState", reflect.TypeOf((*MockScaleSetScope)(nil).SetVMSSState), arg0) } // SubscriptionID mocks base method. @@ -431,20 +431,6 @@ func (mr *MockScaleSetScopeMockRecorder) TenantID() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TenantID", reflect.TypeOf((*MockScaleSetScope)(nil).TenantID)) } -// UpdateInstanceStatuses mocks base method. -func (m *MockScaleSetScope) UpdateInstanceStatuses(arg0 context.Context, arg1 []v1alpha40.VMSSVM) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateInstanceStatuses", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// UpdateInstanceStatuses indicates an expected call of UpdateInstanceStatuses. -func (mr *MockScaleSetScopeMockRecorder) UpdateInstanceStatuses(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateInstanceStatuses", reflect.TypeOf((*MockScaleSetScope)(nil).UpdateInstanceStatuses), arg0, arg1) -} - // V mocks base method. 
func (m *MockScaleSetScope) V(level int) logr.Logger { m.ctrl.T.Helper() diff --git a/azure/services/scalesets/scalesets.go b/azure/services/scalesets/scalesets.go index 3def1bd1327..2660badb863 100644 --- a/azure/services/scalesets/scalesets.go +++ b/azure/services/scalesets/scalesets.go @@ -18,7 +18,6 @@ package scalesets import ( "context" - "crypto/sha256" "encoding/base64" "fmt" "time" @@ -35,40 +34,34 @@ import ( "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/converters" "sigs.k8s.io/cluster-api-provider-azure/azure/services/resourceskus" - infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha4" "sigs.k8s.io/cluster-api-provider-azure/util/tele" ) -// ScaleSetScope defines the scope interface for a scale sets service. -type ScaleSetScope interface { - logr.Logger - azure.ClusterDescriber - ScaleSetSpec() azure.ScaleSetSpec - VMSSExtensionSpecs() []azure.VMSSExtensionSpec - GetBootstrapData(ctx context.Context) (string, error) - GetVMImage() (*infrav1.Image, error) - SetAnnotation(string, string) - SetProviderID(string) - UpdateInstanceStatuses(context.Context, []infrav1exp.VMSSVM) error - NeedsK8sVersionUpdate() bool - SaveK8sVersion() - SetProvisioningState(infrav1.ProvisioningState) - SetLongRunningOperationState(*infrav1.Future) - GetLongRunningOperationState() *infrav1.Future -} - -type vmssBuildResult struct { - VMSSWithoutHash compute.VirtualMachineScaleSet - Tags infrav1.Tags - Hash string -} - -// Service provides operations on azure resources -type Service struct { - Scope ScaleSetScope - Client - resourceSKUCache *resourceskus.Cache -} +type ( + // ScaleSetScope defines the scope interface for a scale sets service. + ScaleSetScope interface { + logr.Logger + azure.ClusterDescriber + GetBootstrapData(ctx context.Context) (string, error) + GetLongRunningOperationState() *infrav1.Future + GetVMImage() (*infrav1.Image, error) + SaveVMImageToStatus(*infrav1.Image) + MaxSurge() (int, error) + ScaleSetSpec() azure.ScaleSetSpec + VMSSExtensionSpecs() []azure.VMSSExtensionSpec + SetAnnotation(string, string) + SetLongRunningOperationState(*infrav1.Future) + SetProviderID(string) + SetVMSSState(*azure.VMSS) + } + + // Service provides operations on azure resources + Service struct { + Scope ScaleSetScope + Client + resourceSKUCache *resourceskus.Cache + } +) // NewService creates a new service. func NewService(scope ScaleSetScope, skuCache *resourceskus.Cache) *Service { @@ -80,7 +73,7 @@ func NewService(scope ScaleSetScope, skuCache *resourceskus.Cache) *Service { } // Reconcile idempotently gets, creates, and updates a scale set. 
-func (s *Service) Reconcile(ctx context.Context) error { +func (s *Service) Reconcile(ctx context.Context) (retErr error) { ctx, span := tele.Tracer().Start(ctx, "scalesets.Service.Reconcile") defer span.End() @@ -90,9 +83,27 @@ func (s *Service) Reconcile(ctx context.Context) error { } // check if there is an ongoing long running operation - future := s.Scope.GetLongRunningOperationState() - var fetchedVMSS *infrav1exp.VMSS - var err error + var ( + future = s.Scope.GetLongRunningOperationState() + fetchedVMSS *azure.VMSS + err error + ) + + defer func() { + // save the updated state of the VMSS for the MachinePoolScope to use for updating K8s state + if fetchedVMSS == nil { + fetchedVMSS, err = s.getVirtualMachineScaleSet(ctx) + if err != nil && !azure.ResourceNotFound(err) { + s.Scope.Error(err, "failed to get vmss in deferred update") + } + } + + if fetchedVMSS != nil { + s.Scope.SetProviderID(azure.ProviderIDPrefix + fetchedVMSS.ID) + s.Scope.SetVMSSState(fetchedVMSS) + } + }() + if future == nil { fetchedVMSS, err = s.getVirtualMachineScaleSet(ctx) } else { @@ -101,7 +112,7 @@ func (s *Service) Reconcile(ctx context.Context) error { switch { case err != nil && !azure.ResourceNotFound(err): - // There was an error and it was not an HTTP 404 not found. This is either a transient error in Azure or a bug. + // There was an error and it was not an HTTP 404 not found. This is either a transient error, like long running operation not done, or a Azure service error. return errors.Wrapf(err, "failed to get VMSS %s", s.Scope.ScaleSetSpec().Name) case err != nil && azure.ResourceNotFound(err): // HTTP(404) resource was not found, so we need to create it with a PUT @@ -117,9 +128,6 @@ func (s *Service) Reconcile(ctx context.Context) error { if err != nil { return errors.Wrap(err, "failed to start updating VMSS") } - default: - // just in case, set the provider ID if the instance exists - s.Scope.SetProviderID(azure.ProviderIDPrefix + fetchedVMSS.ID) } // Try to get the VMSS to update status if we have created a long running operation. 
If the VMSS is still in a long @@ -133,16 +141,6 @@ func (s *Service) Reconcile(ctx context.Context) error { // if we get to hear, we have completed any long running VMSS operations (creates / updates) s.Scope.SetLongRunningOperationState(nil) - - defer func() { - // make sure we always set the provisioning state at the end of reconcile - s.Scope.SetProvisioningState(fetchedVMSS.State) - }() - - if err := s.reconcileInstances(ctx, fetchedVMSS); err != nil { - return errors.Wrap(err, "failed to reconcile instances") - } - return nil } @@ -152,9 +150,36 @@ func (s *Service) Delete(ctx context.Context) error { ctx, span := tele.Tracer().Start(ctx, "scalesets.Service.Delete") defer span.End() + defer func() { + // save the updated state of the VMSS for the MachinePoolScope to use for updating K8s state + fetchedVMSS, err := s.getVirtualMachineScaleSet(ctx) + if err != nil && !azure.ResourceNotFound(err) { + s.Scope.Error(err, "failed to get vmss in deferred update") + } + + if fetchedVMSS != nil { + s.Scope.SetVMSSState(fetchedVMSS) + } + }() + + // check if there is an ongoing long running operation + future := s.Scope.GetLongRunningOperationState() + if future != nil { + // if the operation is not complete this will return an error + _, err := s.GetResultIfDone(ctx, future) + if err != nil { + return errors.Wrap(err, "failed to get result from future") + } + + // ScaleSet has been deleted + s.Scope.SetLongRunningOperationState(nil) + return nil + } + + // no long running delete operation is active, so delete the ScaleSet vmssSpec := s.Scope.ScaleSetSpec() s.Scope.V(2).Info("deleting VMSS", "scale set", vmssSpec.Name) - err := s.Client.Delete(ctx, s.Scope.ResourceGroup(), vmssSpec.Name) + future, err := s.Client.DeleteAsync(ctx, s.Scope.ResourceGroup(), vmssSpec.Name) if err != nil { if azure.ResourceNotFound(err) { // already deleted @@ -163,7 +188,16 @@ func (s *Service) Delete(ctx context.Context) error { return errors.Wrapf(err, "failed to delete VMSS %s in resource group %s", vmssSpec.Name, s.Scope.ResourceGroup()) } - s.Scope.V(2).Info("successfully deleted VMSS", "scale set", vmssSpec.Name) + s.Scope.SetLongRunningOperationState(future) + if future != nil { + // if future exists, check state of the future + if _, err = s.GetResultIfDone(ctx, future); err != nil { + return errors.Wrap(err, "not done with long running operation, or failed to get result") + } + } + + // future is either nil, or the result of the future is complete + s.Scope.SetLongRunningOperationState(nil) return nil } @@ -172,14 +206,11 @@ func (s *Service) createVMSS(ctx context.Context) (*infrav1.Future, error) { defer span.End() spec := s.Scope.ScaleSetSpec() - result, err := s.buildVMSSFromSpec(ctx, spec) + vmss, err := s.buildVMSSFromSpec(ctx, spec) if err != nil { return nil, errors.Wrap(err, "failed building VMSS from spec") } - vmss := result.VMSSWithoutHash - vmss.Tags = converters.TagsToMap(result.Tags.AddSpecVersionHashTag(result.Hash)) - s.Scope.SetProvisioningState(infrav1.Creating) future, err := s.Client.CreateOrUpdateAsync(ctx, s.Scope.ResourceGroup(), spec.Name, vmss) if err != nil { return future, errors.Wrap(err, "cannot create VMSS") @@ -187,45 +218,45 @@ func (s *Service) createVMSS(ctx context.Context) (*infrav1.Future, error) { s.Scope.V(2).Info("starting to create VMSS", "scale set", spec.Name) s.Scope.SetLongRunningOperationState(future) - s.Scope.SaveK8sVersion() return future, err } -func (s *Service) patchVMSSIfNeeded(ctx context.Context, infraVMSS *infrav1exp.VMSS) (*infrav1.Future, error) { 
+func (s *Service) patchVMSSIfNeeded(ctx context.Context, infraVMSS *azure.VMSS) (*infrav1.Future, error) { ctx, span := tele.Tracer().Start(ctx, "scalesets.Service.patchVMSSIfNeeded") defer span.End() - s.Scope.SetProviderID(azure.ProviderIDPrefix + infraVMSS.ID) - spec := s.Scope.ScaleSetSpec() - result, err := s.buildVMSSFromSpec(ctx, spec) + vmss, err := s.buildVMSSFromSpec(ctx, spec) if err != nil { return nil, errors.Wrapf(err, "failed to generate scale set update parameters for %s", spec.Name) } - if infraVMSS.Tags.HasMatchingSpecVersionHash(result.Hash) { - // The VMSS built from the AzureMachinePool spec matches the hash in the tag of the existing VMSS. This means - // the VMSS does not need to be patched since it has not changed. - // - // hash(AzureMachinePool.Spec) - // - // Note: if a user were to mutate the VMSS in Azure rather than through CAPZ, this hash match may match, but not - // reflect the state of the specification in K8s. - s.Scope.V(2).Info("found matching spec hash no need to PATCH") - return nil, nil - } - - s.Scope.V(2).Info("hashes don't match PATCHING VMSS", "oldHash", infraVMSS.Tags[infrav1.SpecVersionHashTagKey()], "newHash", result.Hash) - s.Scope.V(4).Info("diff", "oldVMSS", infraVMSS, "newVMSS", result) - vmss := result.VMSSWithoutHash - vmss.Tags = converters.TagsToMap(result.Tags.AddSpecVersionHashTag(result.Hash)) patch, err := getVMSSUpdateFromVMSS(vmss) if err != nil { return nil, errors.Wrapf(err, "failed to generate vmss patch for %s", spec.Name) } - // wipe out network profile, so updates won't conflict with Cloud Provider updates - patch.VirtualMachineProfile.NetworkProfile = nil + maxSurge, err := s.Scope.MaxSurge() + if err != nil { + return nil, errors.Wrap(err, "failed to calculate maxSurge") + } + + hasModelChanges := hasModelModifyingDifferences(infraVMSS, vmss) + if maxSurge > 0 && (hasModelChanges || !infraVMSS.HasEnoughLatestModelOrNotMixedModel()) { + // surge capacity with the intention of lowering during instance reconciliation + surge := spec.Capacity + int64(maxSurge) + s.Scope.V(4).Info("surging...", "surge", surge) + patch.Sku.Capacity = to.Int64Ptr(surge) + } + + // If there are no model changes and no increase in the replica count, do not update the VMSS. 
+	// Decreases in replica count are handled by deleting AzureMachinePoolMachine instances in the MachinePoolScope
+	if *patch.Sku.Capacity <= infraVMSS.Capacity && !hasModelChanges {
+		s.Scope.V(4).Info("nothing to update on vmss", "scale set", spec.Name, "newReplicas", *patch.Sku.Capacity, "oldReplicas", infraVMSS.Capacity, "hasChanges", hasModelChanges)
+		return nil, nil
+	}
+
+	s.Scope.V(4).Info("patching vmss", "scale set", spec.Name, "patch", patch)
 	future, err := s.UpdateAsync(ctx, s.Scope.ResourceGroup(), spec.Name, patch)
 	if err != nil {
 		if azure.ResourceConflict(err) {
@@ -234,43 +265,14 @@ func (s *Service) patchVMSSIfNeeded(ctx context.Context, infraVMSS *infrav1exp.V
 		return future, errors.Wrap(err, "failed updating VMSS")
 	}
 
-	s.Scope.SetProvisioningState(infrav1.Updating)
 	s.Scope.SetLongRunningOperationState(future)
 	s.Scope.V(2).Info("successfully started to update vmss", "scale set", spec.Name)
 	return future, err
 }
 
-func (s *Service) reconcileInstances(ctx context.Context, vmss *infrav1exp.VMSS) error {
-	ctx, span := tele.Tracer().Start(ctx, "scalesets.Service.reconcileInstances")
-	defer span.End()
-
-	// check to see if we are running the most K8s version specified in the MachinePool spec
-	// if not, then update the instances that are not running that model
-	if s.Scope.NeedsK8sVersionUpdate() {
-		instanceIDs := make([]string, len(vmss.Instances))
-		for i, vm := range vmss.Instances {
-			instanceIDs[i] = vm.InstanceID
-		}
-
-		if err := s.Client.UpdateInstances(ctx, s.Scope.ResourceGroup(), vmss.Name, instanceIDs); err != nil {
-			return errors.Wrapf(err, "failed to update VMSS %s instances", vmss.Name)
-		}
-
-		s.Scope.SaveK8sVersion()
-		// get the VMSS to update status
-		var err error
-		vmss, err = s.getVirtualMachineScaleSet(ctx)
-		if err != nil {
-			return errors.Wrap(err, "failed to get VMSS after updating an instance")
-		}
-	}
-
-	// update the status.
-	if err := s.Scope.UpdateInstanceStatuses(ctx, vmss.Instances); err != nil {
-		return errors.Wrap(err, "unable to update instance status")
-	}
-
-	return nil
+func hasModelModifyingDifferences(infraVMSS *azure.VMSS, vmss compute.VirtualMachineScaleSet) bool {
+	other := converters.SDKToVMSS(vmss, []compute.VirtualMachineScaleSetVM{})
+	return infraVMSS.HasModelChanges(*other)
 }
 
 func (s *Service) validateSpec(ctx context.Context) error {
@@ -327,15 +329,13 @@ func (s *Service) validateSpec(ctx context.Context) error {
 	return nil
 }
 
-func (s *Service) buildVMSSFromSpec(ctx context.Context, vmssSpec azure.ScaleSetSpec) (vmssBuildResult, error) {
+func (s *Service) buildVMSSFromSpec(ctx context.Context, vmssSpec azure.ScaleSetSpec) (compute.VirtualMachineScaleSet, error) {
 	ctx, span := tele.Tracer().Start(ctx, "scalesets.Service.buildVMSSFromSpec")
 	defer span.End()
 
-	var result vmssBuildResult
-
 	sku, err := s.resourceSKUCache.Get(ctx, vmssSpec.Size, resourceskus.VirtualMachines)
 	if err != nil {
-		return result, errors.Wrapf(err, "failed to get SKU %s in compute api", vmssSpec.Size)
+		return compute.VirtualMachineScaleSet{}, errors.Wrapf(err, "failed to find SKU %s in compute api", vmssSpec.Size)
 	}
 
 	if vmssSpec.AcceleratedNetworking == nil {
@@ -348,17 +348,17 @@ func (s *Service) buildVMSSFromSpec(ctx context.Context, vmssSpec azure.ScaleSet
 	storageProfile, err := s.generateStorageProfile(vmssSpec, sku)
 	if err != nil {
-		return result, err
+		return compute.VirtualMachineScaleSet{}, err
 	}
 
 	securityProfile, err := getSecurityProfile(vmssSpec, sku)
 	if err != nil {
-		return result, err
+		return compute.VirtualMachineScaleSet{}, err
 	}
 
 	priority, evictionPolicy, billingProfile, err := converters.GetSpotVMOptions(vmssSpec.SpotVMOptions)
 	if err != nil {
-		return result, errors.Wrap(err, "failed to get Spot VM options")
+		return compute.VirtualMachineScaleSet{}, errors.Wrapf(err, "failed to get Spot VM options")
 	}
 
 	// Get the node outbound LB backend pool ID
@@ -374,7 +374,7 @@ func (s *Service) buildVMSSFromSpec(ctx context.Context, vmssSpec azure.ScaleSet
 	osProfile, err := s.generateOSProfile(ctx, vmssSpec)
 	if err != nil {
-		return result, err
+		return compute.VirtualMachineScaleSet{}, err
 	}
 
 	vmss := compute.VirtualMachineScaleSet{
@@ -389,7 +389,7 @@ func (s *Service) buildVMSSFromSpec(ctx context.Context, vmssSpec azure.ScaleSet
 			UpgradePolicy: &compute.UpgradePolicy{
 				Mode: compute.UpgradeModeManual,
 			},
-			DoNotRunExtensionsOnOverprovisionedVMs: to.BoolPtr(true),
+			Overprovision: to.BoolPtr(false),
 			VirtualMachineProfile: &compute.VirtualMachineScaleSetVMProfile{
 				OsProfile:      osProfile,
 				StorageProfile: storageProfile,
@@ -434,23 +434,6 @@ func (s *Service) buildVMSSFromSpec(ctx context.Context, vmssSpec azure.ScaleSet
 		},
 	}
 
-	if vmssSpec.TerminateNotificationTimeout != nil {
-		vmss.VirtualMachineProfile.ScheduledEventsProfile = &compute.ScheduledEventsProfile{
-			TerminateNotificationProfile: &compute.TerminateNotificationProfile{
-				Enable:           to.BoolPtr(true),
-				NotBeforeTimeout: to.StringPtr(fmt.Sprintf("PT%dM", *vmssSpec.TerminateNotificationTimeout)),
-			},
-		}
-		// Once we have scheduled events termination notification we can switch upgrade policy to be rolling
-		vmss.VirtualMachineScaleSetProperties.UpgradePolicy = &compute.UpgradePolicy{
-			// Prefer rolling upgrade compared to Automatic (which updates all instances at same time)
-			Mode: compute.UpgradeModeRolling,
-			// We need to set the rolling upgrade policy based on user defined values
-			// for now lets stick to defaults, future PR will include the
configurability - // RollingUpgradePolicy: &compute.RollingUpgradePolicy{}, - } - } - // Assign Identity to VMSS if vmssSpec.Identity == infrav1.VMIdentitySystemAssigned { vmss.Identity = &compute.VirtualMachineScaleSetIdentity{ @@ -459,7 +442,7 @@ func (s *Service) buildVMSSFromSpec(ctx context.Context, vmssSpec azure.ScaleSet } else if vmssSpec.Identity == infrav1.VMIdentityUserAssigned { userIdentitiesMap, err := converters.UserAssignedIdentitiesToVMSSSDK(vmssSpec.UserAssignedIdentities) if err != nil { - return result, errors.Wrapf(err, "failed to assign identity %q", vmssSpec.Name) + return vmss, errors.Wrapf(err, "failed to assign identity %q", vmssSpec.Name) } vmss.Identity = &compute.VirtualMachineScaleSetIdentity{ Type: compute.ResourceIdentityTypeUserAssigned, @@ -467,7 +450,7 @@ func (s *Service) buildVMSSFromSpec(ctx context.Context, vmssSpec azure.ScaleSet } } - tagsWithoutHash := infrav1.Build(infrav1.BuildParams{ + tags := infrav1.Build(infrav1.BuildParams{ ClusterName: s.Scope.ClusterName(), Lifecycle: infrav1.ResourceLifecycleOwned, Name: to.StringPtr(vmssSpec.Name), @@ -475,21 +458,12 @@ func (s *Service) buildVMSSFromSpec(ctx context.Context, vmssSpec azure.ScaleSet Additional: s.Scope.AdditionalTags(), }) - vmss.Tags = converters.TagsToMap(tagsWithoutHash) - hash, err := base64EncodedHash(vmss) - if err != nil { - return result, errors.Wrap(err, "failed to generate hash in vmss create") - } - - return vmssBuildResult{ - VMSSWithoutHash: vmss, - Tags: tagsWithoutHash, - Hash: hash, - }, nil + vmss.Tags = converters.TagsToMap(tags) + return vmss, nil } // getVirtualMachineScaleSet provides information about a Virtual Machine Scale Set and its instances -func (s *Service) getVirtualMachineScaleSet(ctx context.Context) (*infrav1exp.VMSS, error) { +func (s *Service) getVirtualMachineScaleSet(ctx context.Context) (*azure.VMSS, error) { ctx, span := tele.Tracer().Start(ctx, "scalesets.Service.getVirtualMachineScaleSet") defer span.End() @@ -508,7 +482,7 @@ func (s *Service) getVirtualMachineScaleSet(ctx context.Context) (*infrav1exp.VM } // getVirtualMachineScaleSetIfDone gets a Virtual Machine Scale Set and its instances from Azure if the future is completed -func (s *Service) getVirtualMachineScaleSetIfDone(ctx context.Context, future *infrav1.Future) (*infrav1exp.VMSS, error) { +func (s *Service) getVirtualMachineScaleSetIfDone(ctx context.Context, future *infrav1.Future) (*azure.VMSS, error) { ctx, span := tele.Tracer().Start(ctx, "scalesets.Service.getVirtualMachineScaleSetIfDone") defer span.End() @@ -599,6 +573,8 @@ func (s *Service) generateStorageProfile(vmssSpec azure.ScaleSetSpec, sku resour return nil, errors.Wrap(err, "failed to get VM image") } + s.Scope.SaveVMImageToStatus(image) + imageRef, err := converters.ImageToSDK(image) if err != nil { return nil, err @@ -660,9 +636,15 @@ func getVMSSUpdateFromVMSS(vmss compute.VirtualMachineScaleSet) (compute.Virtual if err != nil { return compute.VirtualMachineScaleSetUpdate{}, err } + var update compute.VirtualMachineScaleSetUpdate - err = update.UnmarshalJSON(jsonData) - return update, err + if err := update.UnmarshalJSON(jsonData); err != nil { + return update, err + } + + // wipe out network profile, so updates won't conflict with Cloud Provider updates + update.VirtualMachineProfile.NetworkProfile = nil + return update, nil } func getSecurityProfile(vmssSpec azure.ScaleSetSpec, sku resourceskus.SKU) (*compute.SecurityProfile, error) { @@ -678,27 +660,3 @@ func getSecurityProfile(vmssSpec azure.ScaleSetSpec, sku 
resourceskus.SKU) (*com EncryptionAtHost: to.BoolPtr(*vmssSpec.SecurityProfile.EncryptionAtHost), }, nil } - -// base64EncodedHash transforms a VMSS into json and then creates a sha256 hash of the data encoded as a base64 encoded string -func base64EncodedHash(vmss compute.VirtualMachineScaleSet) (string, error) { - // Setting Admin Password is not supported but an initial password is required for Windows - // Don't include it in the hash since it is generated and won't be the same each the spec is created (#1182) - tmpPass := vmss.VirtualMachineProfile.OsProfile.AdminPassword - // Don't include customData in the hash since it will change due to the kubeadm bootstrap token being regenerated. - tmpCustomData := vmss.VirtualMachineProfile.OsProfile.CustomData - vmss.VirtualMachineProfile.OsProfile.AdminPassword = nil - vmss.VirtualMachineProfile.OsProfile.CustomData = nil - defer func() { - vmss.VirtualMachineProfile.OsProfile.AdminPassword = tmpPass - vmss.VirtualMachineProfile.OsProfile.CustomData = tmpCustomData - }() - - jsonData, err := vmss.MarshalJSON() - if err != nil { - return "", errors.Wrap(err, "failed marshaling vmss") - } - - hasher := sha256.New() - _, _ = hasher.Write(jsonData) - return base64.URLEncoding.EncodeToString(hasher.Sum(nil)), nil -} diff --git a/azure/services/scalesets/scalesets_test.go b/azure/services/scalesets/scalesets_test.go index 57345fca006..52b2222c372 100644 --- a/azure/services/scalesets/scalesets_test.go +++ b/azure/services/scalesets/scalesets_test.go @@ -37,7 +37,6 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha4" "sigs.k8s.io/cluster-api-provider-azure/azure" - "sigs.k8s.io/cluster-api-provider-azure/azure/converters" "sigs.k8s.io/cluster-api-provider-azure/azure/scope" "sigs.k8s.io/cluster-api-provider-azure/azure/services/resourceskus" "sigs.k8s.io/cluster-api-provider-azure/azure/services/scalesets/mock_scalesets" @@ -101,14 +100,14 @@ func TestGetExistingVMSS(t *testing.T) { testcases := []struct { name string vmssName string - result *infrav1exp.VMSS + result *azure.VMSS expectedError string expect func(s *mock_scalesets.MockScaleSetScopeMockRecorder, m *mock_scalesets.MockClientMockRecorder) }{ { name: "scale set not found", vmssName: "my-vmss", - result: &infrav1exp.VMSS{}, + result: &azure.VMSS{}, expectedError: "failed to get existing vmss: #: Not found: StatusCode=404", expect: func(s *mock_scalesets.MockScaleSetScopeMockRecorder, m *mock_scalesets.MockClientMockRecorder) { s.ScaleSetSpec().Return(newDefaultVMSSSpec()) @@ -120,7 +119,7 @@ func TestGetExistingVMSS(t *testing.T) { { name: "get existing vmss", vmssName: "my-vmss", - result: &infrav1exp.VMSS{ + result: &azure.VMSS{ ID: "my-id", Name: "my-vmss", State: "Succeeded", @@ -129,7 +128,7 @@ func TestGetExistingVMSS(t *testing.T) { Tags: nil, Capacity: int64(1), Zones: []string{"1", "3"}, - Instances: []infrav1exp.VMSSVM{ + Instances: []azure.VMSSVM{ { ID: "my-vm-id", InstanceID: "my-vm-1", @@ -173,7 +172,7 @@ func TestGetExistingVMSS(t *testing.T) { { name: "list instances fails", vmssName: "my-vmss", - result: &infrav1exp.VMSS{}, + result: &azure.VMSS{}, expectedError: "failed to list instances: #: Not found: StatusCode=404", expect: func(s *mock_scalesets.MockScaleSetScopeMockRecorder, m *mock_scalesets.MockClientMockRecorder) { s.ScaleSetSpec().Return(newDefaultVMSSSpec()) @@ -249,10 +248,10 @@ func TestReconcileVMSS(t *testing.T) { defaultSpec := newDefaultVMSSSpec() s.ScaleSetSpec().Return(defaultSpec).AnyTimes() 
setupDefaultVMSSStartCreatingExpectations(s, m) - vmss := setHashOnVMSS(g, newDefaultVMSS()) + vmss := newDefaultVMSS() m.CreateOrUpdateAsync(gomockinternal.AContext(), defaultResourceGroup, defaultVMSSName, gomockinternal.DiffEq(vmss)). Return(putFuture, nil) - setupCreatingSucceededExpectations(s, m, putFuture) + setupCreatingSucceededExpectations(s, m, newDefaultExistingVMSS(), putFuture) }, }, { @@ -263,44 +262,8 @@ func TestReconcileVMSS(t *testing.T) { s.ScaleSetSpec().Return(defaultSpec).AnyTimes() createdVMSS := newDefaultVMSS() instances := newDefaultInstances() - createdVMSS = setupDefaultVMSSInProgressOperationDoneExpectations(g, s, m, createdVMSS, instances) - s.SetProviderID(azure.ProviderIDPrefix + *createdVMSS.ID) + _ = setupDefaultVMSSInProgressOperationDoneExpectations(s, m, createdVMSS, instances) s.SetLongRunningOperationState(nil) - s.SetProvisioningState(infrav1.Succeeded) - s.NeedsK8sVersionUpdate().Return(false) - infraVMSS := converters.SDKToVMSS(createdVMSS, instances) - s.UpdateInstanceStatuses(gomockinternal.AContext(), infraVMSS.Instances).Return(nil) - }, - }, - { - name: "should try to update VMSS if the hash does not match", - expectedError: "failed to get VMSS my-vmss after create or update: failed to get result from future: operation type PATCH on Azure resource my-rg/my-vmss is not done", - expect: func(g *WithT, s *mock_scalesets.MockScaleSetScopeMockRecorder, m *mock_scalesets.MockClientMockRecorder) { - // create a spec which will be different than the default VMSS - defaultSpec := newDefaultVMSSSpec() - defaultSpec.Capacity = 3 - s.ScaleSetSpec().Return(defaultSpec).AnyTimes() - - // expect Azure already has a default VMSS created with an operation that is done - vmss := newDefaultVMSS() - instances := newDefaultInstances() - vmss = setupDefaultVMSSInProgressOperationDoneExpectations(g, s, m, vmss, instances) - s.SetProviderID(azure.ProviderIDPrefix + *vmss.ID) - s.SetProvisioningState(infrav1.Updating) - - // create a VMSS patch with an updated hash to match the spec - updatedVMSS := newDefaultVMSS() - updatedVMSS.ID = vmss.ID - updatedVMSS.Sku.Capacity = to.Int64Ptr(3) - updatedVMSS = setHashOnVMSS(g, updatedVMSS) - patch, err := getVMSSUpdateFromVMSS(updatedVMSS) - g.Expect(err).ToNot(HaveOccurred()) - patch.VirtualMachineProfile.NetworkProfile = nil - m.UpdateAsync(gomockinternal.AContext(), defaultResourceGroup, defaultVMSSName, gomockinternal.DiffEq(patch)). 
- Return(patchFuture, nil) - s.SetLongRunningOperationState(patchFuture) - m.GetResultIfDone(gomockinternal.AContext(), patchFuture).Return(compute.VirtualMachineScaleSet{}, - azure.NewOperationNotDoneError(patchFuture)) }, }, { @@ -311,13 +274,8 @@ func TestReconcileVMSS(t *testing.T) { s.ScaleSetSpec().Return(defaultSpec).AnyTimes() createdVMSS := newDefaultWindowsVMSS() instances := newDefaultInstances() - createdVMSS = setupDefaultVMSSInProgressOperationDoneExpectations(g, s, m, createdVMSS, instances) - s.SetProviderID(azure.ProviderIDPrefix + *createdVMSS.ID) + _ = setupDefaultVMSSInProgressOperationDoneExpectations(s, m, createdVMSS, instances) s.SetLongRunningOperationState(nil) - s.SetProvisioningState(infrav1.Succeeded) - s.NeedsK8sVersionUpdate().Return(false) - infraVMSS := converters.SDKToVMSS(createdVMSS, instances) - s.UpdateInstanceStatuses(gomockinternal.AContext(), infraVMSS.Instances).Return(nil) }, }, { @@ -332,10 +290,9 @@ func TestReconcileVMSS(t *testing.T) { netConfigs := vmss.VirtualMachineScaleSetProperties.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations (*netConfigs)[0].EnableAcceleratedNetworking = to.BoolPtr(true) vmss.Sku.Name = to.StringPtr(spec.Size) - vmss = setHashOnVMSS(g, vmss) m.CreateOrUpdateAsync(gomockinternal.AContext(), defaultResourceGroup, defaultVMSSName, gomockinternal.DiffEq(vmss)). Return(putFuture, nil) - setupCreatingSucceededExpectations(s, m, putFuture) + setupCreatingSucceededExpectations(s, m, newDefaultExistingVMSS(), putFuture) }, }, { @@ -349,10 +306,9 @@ func TestReconcileVMSS(t *testing.T) { vmss := newDefaultVMSS() vmss.VirtualMachineScaleSetProperties.VirtualMachineProfile.Priority = compute.Spot vmss.VirtualMachineScaleSetProperties.VirtualMachineProfile.EvictionPolicy = compute.Deallocate - vmss = setHashOnVMSS(g, vmss) m.CreateOrUpdateAsync(gomockinternal.AContext(), defaultResourceGroup, defaultVMSSName, gomockinternal.DiffEq(vmss)). Return(putFuture, nil) - setupCreatingSucceededExpectations(s, m, putFuture) + setupCreatingSucceededExpectations(s, m, newDefaultExistingVMSS(), putFuture) }, }, { @@ -372,10 +328,9 @@ func TestReconcileVMSS(t *testing.T) { MaxPrice: to.Float64Ptr(0.001), } vmss.VirtualMachineScaleSetProperties.VirtualMachineProfile.EvictionPolicy = compute.Deallocate - vmss = setHashOnVMSS(g, vmss) m.CreateOrUpdateAsync(gomockinternal.AContext(), defaultResourceGroup, defaultVMSSName, gomockinternal.DiffEq(vmss)). Return(putFuture, nil) - setupCreatingSucceededExpectations(s, m, putFuture) + setupCreatingSucceededExpectations(s, m, newDefaultExistingVMSS(), putFuture) }, }, { @@ -396,10 +351,9 @@ func TestReconcileVMSS(t *testing.T) { ID: to.StringPtr("my-diskencryptionset-id"), }, } - vmss = setHashOnVMSS(g, vmss) m.CreateOrUpdateAsync(gomockinternal.AContext(), defaultResourceGroup, defaultVMSSName, gomockinternal.DiffEq(vmss)). Return(putFuture, nil) - setupCreatingSucceededExpectations(s, m, putFuture) + setupCreatingSucceededExpectations(s, m, newDefaultExistingVMSS(), putFuture) }, }, { @@ -422,10 +376,9 @@ func TestReconcileVMSS(t *testing.T) { "/subscriptions/123/resourcegroups/456/providers/Microsoft.ManagedIdentity/userAssignedIdentities/id1": {}, }, } - vmss = setHashOnVMSS(g, vmss) m.CreateOrUpdateAsync(gomockinternal.AContext(), defaultResourceGroup, defaultVMSSName, gomockinternal.DiffEq(vmss)). 
Return(putFuture, nil) - setupCreatingSucceededExpectations(s, m, putFuture) + setupCreatingSucceededExpectations(s, m, newDefaultExistingVMSS(), putFuture) }, }, { @@ -442,10 +395,9 @@ func TestReconcileVMSS(t *testing.T) { EncryptionAtHost: to.BoolPtr(true), } vmss.Sku.Name = to.StringPtr(spec.Size) - vmss = setHashOnVMSS(g, vmss) m.CreateOrUpdateAsync(gomockinternal.AContext(), defaultResourceGroup, defaultVMSSName, gomockinternal.DiffEq(vmss)). Return(putFuture, nil) - setupCreatingSucceededExpectations(s, m, putFuture) + setupCreatingSucceededExpectations(s, m, newDefaultExistingVMSS(), putFuture) }, }, { @@ -471,22 +423,23 @@ func TestReconcileVMSS(t *testing.T) { setupDefaultVMSSUpdateExpectations(s) existingVMSS := newDefaultExistingVMSS() - existingVMSS.Sku.Capacity = to.Int64Ptr(1) - existingVMSS = setHashOnVMSS(g, existingVMSS) + existingVMSS.Sku.Capacity = to.Int64Ptr(2) instances := newDefaultInstances() m.Get(gomockinternal.AContext(), defaultResourceGroup, defaultVMSSName).Return(existingVMSS, nil) m.ListInstances(gomockinternal.AContext(), defaultResourceGroup, defaultVMSSName).Return(instances, nil) clone := newDefaultExistingVMSS() - clone.Sku.Capacity = to.Int64Ptr(2) + clone.Sku.Capacity = to.Int64Ptr(3) patchVMSS, err := getVMSSUpdateFromVMSS(clone) - patchVMSS = setHashOnVMSSUpdate(g, clone, patchVMSS) - patchVMSS.VirtualMachineProfile.NetworkProfile = nil g.Expect(err).NotTo(HaveOccurred()) + patchVMSS.VirtualMachineProfile.StorageProfile.ImageReference.Version = to.StringPtr("2.0") + patchVMSS.VirtualMachineProfile.NetworkProfile = nil m.UpdateAsync(gomockinternal.AContext(), defaultResourceGroup, defaultVMSSName, gomockinternal.DiffEq(patchVMSS)). Return(patchFuture, nil) s.SetLongRunningOperationState(patchFuture) m.GetResultIfDone(gomockinternal.AContext(), patchFuture).Return(compute.VirtualMachineScaleSet{}, azure.NewOperationNotDoneError(patchFuture)) + m.Get(gomockinternal.AContext(), defaultResourceGroup, defaultVMSSName).Return(clone, nil) + m.ListInstances(gomockinternal.AContext(), defaultResourceGroup, defaultVMSSName).Return(instances, nil) }, }, { @@ -534,6 +487,8 @@ func TestReconcileVMSS(t *testing.T) { setupDefaultVMSSStartCreatingExpectations(s, m) m.CreateOrUpdateAsync(gomockinternal.AContext(), defaultResourceGroup, defaultVMSSName, gomock.AssignableToTypeOf(compute.VirtualMachineScaleSet{})). Return(nil, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 500}, "Internal error")) + m.Get(gomockinternal.AContext(), defaultResourceGroup, defaultVMSSName). + Return(compute.VirtualMachineScaleSet{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found")) }, }, } @@ -587,11 +542,15 @@ func TestDeleteVMSS(t *testing.T) { Name: "my-existing-vmss", Size: "VM_SIZE", Capacity: 3, - }) + }).AnyTimes() s.ResourceGroup().AnyTimes().Return("my-existing-rg") s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) - m.Delete(gomockinternal.AContext(), "my-existing-rg", "my-existing-vmss"). - Return(nil) + future := &infrav1.Future{} + s.GetLongRunningOperationState().Return(future) + m.GetResultIfDone(gomockinternal.AContext(), future).Return(compute.VirtualMachineScaleSet{}, nil) + m.Get(gomockinternal.AContext(), "my-existing-rg", "my-existing-vmss"). 
+ Return(compute.VirtualMachineScaleSet{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found")) + s.SetLongRunningOperationState(nil) }, }, { @@ -602,11 +561,14 @@ func TestDeleteVMSS(t *testing.T) { Name: name, Size: "VM_SIZE", Capacity: 3, - }) + }).AnyTimes() s.ResourceGroup().AnyTimes().Return(resourceGroup) s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) - m.Delete(gomockinternal.AContext(), resourceGroup, name). - Return(autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found")) + s.GetLongRunningOperationState().Return(nil) + m.DeleteAsync(gomockinternal.AContext(), resourceGroup, name). + Return(nil, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found")) + m.Get(gomockinternal.AContext(), resourceGroup, name). + Return(compute.VirtualMachineScaleSet{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found")) }, }, { @@ -617,11 +579,16 @@ func TestDeleteVMSS(t *testing.T) { Name: name, Size: "VM_SIZE", Capacity: 3, - }) + }).AnyTimes() s.ResourceGroup().AnyTimes().Return(resourceGroup) s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) - m.Delete(gomockinternal.AContext(), resourceGroup, name). - Return(autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 500}, "Internal Server Error")) + s.GetLongRunningOperationState().Return(nil) + m.DeleteAsync(gomockinternal.AContext(), resourceGroup, name). + Return(nil, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 500}, "Internal Server Error")) + m.Get(gomockinternal.AContext(), resourceGroup, name). + Return(newDefaultVMSS(), nil) + m.ListInstances(gomockinternal.AContext(), defaultResourceGroup, defaultVMSSName).Return(newDefaultInstances(), nil).AnyTimes() + s.SetVMSSState(gomock.AssignableToTypeOf(&azure.VMSS{})) }, }, } @@ -887,9 +854,9 @@ func newDefaultVMSS() compute.VirtualMachineScaleSet { Zones: &[]string{"1", "3"}, VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{ UpgradePolicy: &compute.UpgradePolicy{ - Mode: compute.UpgradeModeRolling, + Mode: compute.UpgradeModeManual, }, - DoNotRunExtensionsOnOverprovisionedVMs: to.BoolPtr(true), + Overprovision: to.BoolPtr(false), VirtualMachineProfile: &compute.VirtualMachineScaleSetVMProfile{ OsProfile: &compute.VirtualMachineScaleSetOSProfile{ ComputerNamePrefix: to.StringPtr(defaultVMSSName), @@ -997,12 +964,6 @@ func newDefaultVMSS() compute.VirtualMachineScaleSet { }, }, }, - ScheduledEventsProfile: &compute.ScheduledEventsProfile{ - TerminateNotificationProfile: &compute.TerminateNotificationProfile{ - Enable: to.BoolPtr(true), - NotBeforeTimeout: to.StringPtr("PT7M"), - }, - }, }, }, } @@ -1019,27 +980,39 @@ func newDefaultInstances() []compute.VirtualMachineScaleSetVM { OsProfile: &compute.OSProfile{ ComputerName: to.StringPtr("instance-000001"), }, + StorageProfile: &compute.StorageProfile{ + ImageReference: &compute.ImageReference{ + Publisher: to.StringPtr("fake-publisher"), + Offer: to.StringPtr("my-offer"), + Sku: to.StringPtr("sku-id"), + Version: to.StringPtr("1.0"), + }, + }, + }, + }, + { + ID: to.StringPtr("my-vm-id"), + InstanceID: to.StringPtr("my-vm-2"), + Name: to.StringPtr("my-vm"), + VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{ + ProvisioningState: to.StringPtr("Succeeded"), + OsProfile: &compute.OSProfile{ + ComputerName: to.StringPtr("instance-000002"), + }, + StorageProfile: &compute.StorageProfile{ + ImageReference: 
&compute.ImageReference{ + Publisher: to.StringPtr("fake-publisher"), + Offer: to.StringPtr("my-offer"), + Sku: to.StringPtr("sku-id"), + Version: to.StringPtr("1.0"), + }, + }, }, }, } } -func setHashOnVMSS(g *WithT, vmss compute.VirtualMachineScaleSet) compute.VirtualMachineScaleSet { - hash, err := base64EncodedHash(vmss) - g.Expect(err).To(BeNil()) - vmss.Tags["sigs.k8s.io_cluster-api-provider-azure_spec-version-hash"] = &hash - return vmss -} - -func setHashOnVMSSUpdate(g *WithT, vmss compute.VirtualMachineScaleSet, update compute.VirtualMachineScaleSetUpdate) compute.VirtualMachineScaleSetUpdate { - hash, err := base64EncodedHash(vmss) - g.Expect(err).To(BeNil()) - update.Tags["sigs.k8s.io_cluster-api-provider-azure_spec-version-hash"] = &hash - return update -} - -func setupDefaultVMSSInProgressOperationDoneExpectations(g *WithT, s *mock_scalesets.MockScaleSetScopeMockRecorder, m *mock_scalesets.MockClientMockRecorder, createdVMSS compute.VirtualMachineScaleSet, instances []compute.VirtualMachineScaleSetVM) compute.VirtualMachineScaleSet { - setHashOnVMSS(g, createdVMSS) +func setupDefaultVMSSInProgressOperationDoneExpectations(s *mock_scalesets.MockScaleSetScopeMockRecorder, m *mock_scalesets.MockClientMockRecorder, createdVMSS compute.VirtualMachineScaleSet, instances []compute.VirtualMachineScaleSetVM) compute.VirtualMachineScaleSet { createdVMSS.ID = to.StringPtr("vmss-id") createdVMSS.ProvisioningState = to.StringPtr(string(infrav1.Succeeded)) setupDefaultVMSSExpectations(s) @@ -1052,6 +1025,9 @@ func setupDefaultVMSSInProgressOperationDoneExpectations(g *WithT, s *mock_scale s.GetLongRunningOperationState().Return(future) m.GetResultIfDone(gomockinternal.AContext(), future).Return(createdVMSS, nil).AnyTimes() m.ListInstances(gomockinternal.AContext(), defaultResourceGroup, defaultVMSSName).Return(instances, nil).AnyTimes() + s.MaxSurge().Return(1, nil) + s.SetVMSSState(gomock.Any()) + s.SetProviderID(azure.ProviderIDPrefix + *createdVMSS.ID) return createdVMSS } @@ -1060,16 +1036,46 @@ func setupDefaultVMSSStartCreatingExpectations(s *mock_scalesets.MockScaleSetSco s.GetLongRunningOperationState().Return(nil) m.Get(gomockinternal.AContext(), defaultResourceGroup, defaultVMSSName). 
Return(compute.VirtualMachineScaleSet{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found")) - s.SetProvisioningState(infrav1.Creating) } -func setupCreatingSucceededExpectations(s *mock_scalesets.MockScaleSetScopeMockRecorder, m *mock_scalesets.MockClientMockRecorder, future *infrav1.Future) { +func setupCreatingSucceededExpectations(s *mock_scalesets.MockScaleSetScopeMockRecorder, m *mock_scalesets.MockClientMockRecorder, vmss compute.VirtualMachineScaleSet, future *infrav1.Future) { s.SetLongRunningOperationState(future) - s.SaveK8sVersion() m.GetResultIfDone(gomockinternal.AContext(), future).Return(compute.VirtualMachineScaleSet{}, azure.NewOperationNotDoneError(future)) + m.Get(gomockinternal.AContext(), defaultResourceGroup, defaultVMSSName).Return(vmss, nil) + m.ListInstances(gomockinternal.AContext(), defaultResourceGroup, defaultVMSSName).Return(newDefaultInstances(), nil).AnyTimes() + s.SetVMSSState(gomock.Any()) + s.SetProviderID(azure.ProviderIDPrefix + *vmss.ID) } func setupDefaultVMSSExpectations(s *mock_scalesets.MockScaleSetScopeMockRecorder) { + setupVMSSExpectationsWithoutVMImage(s) + image := &infrav1.Image{ + Marketplace: &infrav1.AzureMarketplaceImage{ + Publisher: "fake-publisher", + Offer: "my-offer", + SKU: "sku-id", + Version: "1.0", + }, + } + s.GetVMImage().Return(image, nil) + s.SaveVMImageToStatus(image) +} + +func setupUpdateVMSSExpectations(s *mock_scalesets.MockScaleSetScopeMockRecorder) { + setupVMSSExpectationsWithoutVMImage(s) + image := &infrav1.Image{ + Marketplace: &infrav1.AzureMarketplaceImage{ + Publisher: "fake-publisher", + Offer: "my-offer", + SKU: "sku-id", + Version: "2.0", + }, + } + s.GetVMImage().Return(image, nil) + s.SaveVMImageToStatus(image) +} + +func setupVMSSExpectationsWithoutVMImage(s *mock_scalesets.MockScaleSetScopeMockRecorder) { s.SubscriptionID().AnyTimes().Return(defaultSubscriptionID) s.ResourceGroup().AnyTimes().Return(defaultResourceGroup) s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New()) @@ -1077,14 +1083,6 @@ func setupDefaultVMSSExpectations(s *mock_scalesets.MockScaleSetScopeMockRecorde s.Location().AnyTimes().Return("test-location") s.ClusterName().Return("my-cluster") s.GetBootstrapData(gomockinternal.AContext()).Return("fake-bootstrap-data", nil) - s.GetVMImage().Return(&infrav1.Image{ - Marketplace: &infrav1.AzureMarketplaceImage{ - Publisher: "fake-publisher", - Offer: "my-offer", - SKU: "sku-id", - Version: "1.0", - }, - }, nil) s.VMSSExtensionSpecs().Return([]azure.VMSSExtensionSpec{ { Name: "someExtension", @@ -1099,8 +1097,9 @@ func setupDefaultVMSSExpectations(s *mock_scalesets.MockScaleSetScopeMockRecorde } func setupDefaultVMSSUpdateExpectations(s *mock_scalesets.MockScaleSetScopeMockRecorder) { - setupDefaultVMSSExpectations(s) + setupUpdateVMSSExpectations(s) s.SetProviderID(azure.ProviderIDPrefix + "vmss-id") - s.SetProvisioningState(infrav1.Updating) s.GetLongRunningOperationState().Return(nil) + s.MaxSurge().Return(1, nil) + s.SetVMSSState(gomock.Any()) } diff --git a/azure/services/scalesetvms/client.go b/azure/services/scalesetvms/client.go new file mode 100644 index 00000000000..07b95db042f --- /dev/null +++ b/azure/services/scalesetvms/client.go @@ -0,0 +1,165 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scalesetvms + +import ( + "context" + "encoding/base64" + "encoding/json" + "time" + + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-30/compute" + "github.com/Azure/go-autorest/autorest" + "github.com/pkg/errors" + + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha4" + "sigs.k8s.io/cluster-api-provider-azure/azure" + "sigs.k8s.io/cluster-api-provider-azure/util/tele" +) + +// client wraps go-sdk +type client interface { + Get(context.Context, string, string, string) (compute.VirtualMachineScaleSetVM, error) + GetResultIfDone(ctx context.Context, future *infrav1.Future) (compute.VirtualMachineScaleSetVM, error) + DeleteAsync(context.Context, string, string, string) (*infrav1.Future, error) +} + +type ( + // azureClient contains the Azure go-sdk Client + azureClient struct { + scalesetvms compute.VirtualMachineScaleSetVMsClient + } + + genericScaleSetVMFuture interface { + DoneWithContext(ctx context.Context, sender autorest.Sender) (done bool, err error) + Result(client compute.VirtualMachineScaleSetVMsClient) (vmss compute.VirtualMachineScaleSetVM, err error) + } + + deleteFutureAdapter struct { + compute.VirtualMachineScaleSetVMsDeleteFuture + } +) + +const ( + // DeleteFuture is a future that was derived from a DELETE request to VMSS + DeleteFuture string = "DELETE" +) + +var _ client = &azureClient{} + +// newClient creates a new VMSS client from subscription ID. +func newClient(auth azure.Authorizer) *azureClient { + return &azureClient{ + scalesetvms: newVirtualMachineScaleSetVMsClient(auth.SubscriptionID(), auth.BaseURI(), auth.Authorizer()), + } +} + +// newVirtualMachineScaleSetVMsClient creates a new vmss VM client from subscription ID. 
+func newVirtualMachineScaleSetVMsClient(subscriptionID string, baseURI string, authorizer autorest.Authorizer) compute.VirtualMachineScaleSetVMsClient {
+	c := compute.NewVirtualMachineScaleSetVMsClientWithBaseURI(baseURI, subscriptionID)
+	c.Authorizer = authorizer
+	c.RetryAttempts = 1
+	_ = c.AddToUserAgent(azure.UserAgent()) // intentionally ignore error as it doesn't matter
+	return c
+}
+
+// Get retrieves the Virtual Machine Scale Set Virtual Machine
+func (ac *azureClient) Get(ctx context.Context, resourceGroupName, vmssName, instanceID string) (compute.VirtualMachineScaleSetVM, error) {
+	ctx, span := tele.Tracer().Start(ctx, "scalesetvms.azureClient.Get")
+	defer span.End()
+
+	return ac.scalesetvms.Get(ctx, resourceGroupName, vmssName, instanceID, "")
+}
+
+// GetResultIfDone fetches the result of a long running operation future if it is done
+func (ac *azureClient) GetResultIfDone(ctx context.Context, future *infrav1.Future) (compute.VirtualMachineScaleSetVM, error) {
+	ctx, span := tele.Tracer().Start(ctx, "scalesetvms.azureClient.GetResultIfDone")
+	defer span.End()
+
+	var genericFuture genericScaleSetVMFuture
+	futureData, err := base64.URLEncoding.DecodeString(future.FutureData)
+	if err != nil {
+		return compute.VirtualMachineScaleSetVM{}, errors.Wrapf(err, "failed to base64 decode future data")
+	}
+
+	switch future.Type {
+	case DeleteFuture:
+		var future compute.VirtualMachineScaleSetVMsDeleteFuture
+		if err := json.Unmarshal(futureData, &future); err != nil {
+			return compute.VirtualMachineScaleSetVM{}, errors.Wrap(err, "failed to unmarshal future data")
+		}
+
+		genericFuture = &deleteFutureAdapter{
+			VirtualMachineScaleSetVMsDeleteFuture: future,
+		}
+	default:
+		return compute.VirtualMachineScaleSetVM{}, errors.Errorf("unknown future type %q", future.Type)
+	}
+
+	done, err := genericFuture.DoneWithContext(ctx, ac.scalesetvms)
+	if err != nil {
+		return compute.VirtualMachineScaleSetVM{}, errors.Wrapf(err, "failed checking if the operation was complete")
+	}
+
+	if !done {
+		return compute.VirtualMachineScaleSetVM{}, azure.WithTransientError(azure.NewOperationNotDoneError(future), 15*time.Second)
+	}
+
+	vm, err := genericFuture.Result(ac.scalesetvms)
+	if err != nil {
+		return vm, errors.Wrapf(err, "failed fetching the result of operation for vmss")
+	}
+
+	return vm, nil
+}
+
+// DeleteAsync is the operation to delete a virtual machine scale set instance asynchronously. DeleteAsync sends a DELETE
+// request to Azure and if accepted without error, the func will return a Future which can be used to track the ongoing
+// progress of the operation.
+//
+// Parameters:
+//   resourceGroupName - the name of the resource group.
+//   vmssName - the name of the VM scale set.
+//   instanceID - the ID of the VM scale set VM.
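+//
+// The returned Future carries the SDK future JSON-marshalled and base64 URL-encoded in FutureData, so a later
+// reconcile can resume tracking the delete via GetResultIfDone.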
+func (ac *azureClient) DeleteAsync(ctx context.Context, resourceGroupName, vmssName, instanceID string) (*infrav1.Future, error) { + ctx, span := tele.Tracer().Start(ctx, "scalesetvms.azureClient.DeleteAsync") + defer span.End() + + future, err := ac.scalesetvms.Delete(ctx, resourceGroupName, vmssName, instanceID) + if err != nil { + return nil, errors.Wrapf(err, "failed deleting vmss named %q", vmssName) + } + + jsonData, err := future.MarshalJSON() + if err != nil { + return nil, errors.Wrapf(err, "failed to marshal async future") + } + + return &infrav1.Future{ + Type: DeleteFuture, + ResourceGroup: resourceGroupName, + Name: vmssName, + FutureData: base64.URLEncoding.EncodeToString(jsonData), + }, nil +} + +// Result wraps the delete result so that we can treat it generically. The only thing we care about is if the delete +// was successful. If it wasn't, an error will be returned. +func (da *deleteFutureAdapter) Result(client compute.VirtualMachineScaleSetVMsClient) (compute.VirtualMachineScaleSetVM, error) { + _, err := da.VirtualMachineScaleSetVMsDeleteFuture.Result(client) + return compute.VirtualMachineScaleSetVM{}, err +} diff --git a/azure/services/scalesetvms/mock_scalesetvms/client_mock.go b/azure/services/scalesetvms/mock_scalesetvms/client_mock.go new file mode 100644 index 00000000000..89c6a54c894 --- /dev/null +++ b/azure/services/scalesetvms/mock_scalesetvms/client_mock.go @@ -0,0 +1,152 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by MockGen. DO NOT EDIT. +// Source: ../client.go + +// Package mock_scalesetvms is a generated GoMock package. +package mock_scalesetvms + +import ( + context "context" + reflect "reflect" + + compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-30/compute" + autorest "github.com/Azure/go-autorest/autorest" + gomock "github.com/golang/mock/gomock" + v1alpha4 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha4" +) + +// Mockclient is a mock of client interface. +type Mockclient struct { + ctrl *gomock.Controller + recorder *MockclientMockRecorder +} + +// MockclientMockRecorder is the mock recorder for Mockclient. +type MockclientMockRecorder struct { + mock *Mockclient +} + +// NewMockclient creates a new mock instance. +func NewMockclient(ctrl *gomock.Controller) *Mockclient { + mock := &Mockclient{ctrl: ctrl} + mock.recorder = &MockclientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *Mockclient) EXPECT() *MockclientMockRecorder { + return m.recorder +} + +// DeleteAsync mocks base method. +func (m *Mockclient) DeleteAsync(arg0 context.Context, arg1, arg2, arg3 string) (*v1alpha4.Future, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteAsync", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*v1alpha4.Future) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteAsync indicates an expected call of DeleteAsync. 
+func (mr *MockclientMockRecorder) DeleteAsync(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAsync", reflect.TypeOf((*Mockclient)(nil).DeleteAsync), arg0, arg1, arg2, arg3) +} + +// Get mocks base method. +func (m *Mockclient) Get(arg0 context.Context, arg1, arg2, arg3 string) (compute.VirtualMachineScaleSetVM, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(compute.VirtualMachineScaleSetVM) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockclientMockRecorder) Get(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*Mockclient)(nil).Get), arg0, arg1, arg2, arg3) +} + +// GetResultIfDone mocks base method. +func (m *Mockclient) GetResultIfDone(ctx context.Context, future *v1alpha4.Future) (compute.VirtualMachineScaleSetVM, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetResultIfDone", ctx, future) + ret0, _ := ret[0].(compute.VirtualMachineScaleSetVM) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetResultIfDone indicates an expected call of GetResultIfDone. +func (mr *MockclientMockRecorder) GetResultIfDone(ctx, future interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetResultIfDone", reflect.TypeOf((*Mockclient)(nil).GetResultIfDone), ctx, future) +} + +// MockgenericScaleSetVMFuture is a mock of genericScaleSetVMFuture interface. +type MockgenericScaleSetVMFuture struct { + ctrl *gomock.Controller + recorder *MockgenericScaleSetVMFutureMockRecorder +} + +// MockgenericScaleSetVMFutureMockRecorder is the mock recorder for MockgenericScaleSetVMFuture. +type MockgenericScaleSetVMFutureMockRecorder struct { + mock *MockgenericScaleSetVMFuture +} + +// NewMockgenericScaleSetVMFuture creates a new mock instance. +func NewMockgenericScaleSetVMFuture(ctrl *gomock.Controller) *MockgenericScaleSetVMFuture { + mock := &MockgenericScaleSetVMFuture{ctrl: ctrl} + mock.recorder = &MockgenericScaleSetVMFutureMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockgenericScaleSetVMFuture) EXPECT() *MockgenericScaleSetVMFutureMockRecorder { + return m.recorder +} + +// DoneWithContext mocks base method. +func (m *MockgenericScaleSetVMFuture) DoneWithContext(ctx context.Context, sender autorest.Sender) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DoneWithContext", ctx, sender) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DoneWithContext indicates an expected call of DoneWithContext. +func (mr *MockgenericScaleSetVMFutureMockRecorder) DoneWithContext(ctx, sender interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DoneWithContext", reflect.TypeOf((*MockgenericScaleSetVMFuture)(nil).DoneWithContext), ctx, sender) +} + +// Result mocks base method. +func (m *MockgenericScaleSetVMFuture) Result(client compute.VirtualMachineScaleSetVMsClient) (compute.VirtualMachineScaleSetVM, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Result", client) + ret0, _ := ret[0].(compute.VirtualMachineScaleSetVM) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Result indicates an expected call of Result. 
+func (mr *MockgenericScaleSetVMFutureMockRecorder) Result(client interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Result", reflect.TypeOf((*MockgenericScaleSetVMFuture)(nil).Result), client) +} diff --git a/azure/services/scalesetvms/mock_scalesetvms/doc.go b/azure/services/scalesetvms/mock_scalesetvms/doc.go new file mode 100644 index 00000000000..36fa78d2523 --- /dev/null +++ b/azure/services/scalesetvms/mock_scalesetvms/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Run go generate to regenerate this mock. +//go:generate ../../../../hack/tools/bin/mockgen -destination client_mock.go -package mock_scalesetvms -source ../client.go client +//go:generate ../../../../hack/tools/bin/mockgen -destination scalesetvms_mock.go -package mock_scalesetvms -source ../scalesetvms.go ScaleSetVMScope +//go:generate /usr/bin/env bash -c "cat ../../../../hack/boilerplate/boilerplate.generatego.txt client_mock.go > _client_mock.go && mv _client_mock.go client_mock.go" +//go:generate /usr/bin/env bash -c "cat ../../../../hack/boilerplate/boilerplate.generatego.txt scalesetvms_mock.go > _scalesetvms_mock.go && mv _scalesetvms_mock.go scalesetvms_mock.go" +package mock_scalesetvms //nolint diff --git a/azure/services/scalesetvms/mock_scalesetvms/scalesetvms_mock.go b/azure/services/scalesetvms/mock_scalesetvms/scalesetvms_mock.go new file mode 100644 index 00000000000..593a95eccf7 --- /dev/null +++ b/azure/services/scalesetvms/mock_scalesetvms/scalesetvms_mock.go @@ -0,0 +1,410 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by MockGen. DO NOT EDIT. +// Source: ../scalesetvms.go + +// Package mock_scalesetvms is a generated GoMock package. +package mock_scalesetvms + +import ( + reflect "reflect" + + autorest "github.com/Azure/go-autorest/autorest" + logr "github.com/go-logr/logr" + gomock "github.com/golang/mock/gomock" + v1alpha4 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha4" + azure "sigs.k8s.io/cluster-api-provider-azure/azure" +) + +// MockScaleSetVMScope is a mock of ScaleSetVMScope interface. +type MockScaleSetVMScope struct { + ctrl *gomock.Controller + recorder *MockScaleSetVMScopeMockRecorder +} + +// MockScaleSetVMScopeMockRecorder is the mock recorder for MockScaleSetVMScope. +type MockScaleSetVMScopeMockRecorder struct { + mock *MockScaleSetVMScope +} + +// NewMockScaleSetVMScope creates a new mock instance. 
+func NewMockScaleSetVMScope(ctrl *gomock.Controller) *MockScaleSetVMScope { + mock := &MockScaleSetVMScope{ctrl: ctrl} + mock.recorder = &MockScaleSetVMScopeMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockScaleSetVMScope) EXPECT() *MockScaleSetVMScopeMockRecorder { + return m.recorder +} + +// AdditionalTags mocks base method. +func (m *MockScaleSetVMScope) AdditionalTags() v1alpha4.Tags { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AdditionalTags") + ret0, _ := ret[0].(v1alpha4.Tags) + return ret0 +} + +// AdditionalTags indicates an expected call of AdditionalTags. +func (mr *MockScaleSetVMScopeMockRecorder) AdditionalTags() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AdditionalTags", reflect.TypeOf((*MockScaleSetVMScope)(nil).AdditionalTags)) +} + +// Authorizer mocks base method. +func (m *MockScaleSetVMScope) Authorizer() autorest.Authorizer { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Authorizer") + ret0, _ := ret[0].(autorest.Authorizer) + return ret0 +} + +// Authorizer indicates an expected call of Authorizer. +func (mr *MockScaleSetVMScopeMockRecorder) Authorizer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Authorizer", reflect.TypeOf((*MockScaleSetVMScope)(nil).Authorizer)) +} + +// AvailabilitySetEnabled mocks base method. +func (m *MockScaleSetVMScope) AvailabilitySetEnabled() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AvailabilitySetEnabled") + ret0, _ := ret[0].(bool) + return ret0 +} + +// AvailabilitySetEnabled indicates an expected call of AvailabilitySetEnabled. +func (mr *MockScaleSetVMScopeMockRecorder) AvailabilitySetEnabled() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AvailabilitySetEnabled", reflect.TypeOf((*MockScaleSetVMScope)(nil).AvailabilitySetEnabled)) +} + +// BaseURI mocks base method. +func (m *MockScaleSetVMScope) BaseURI() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BaseURI") + ret0, _ := ret[0].(string) + return ret0 +} + +// BaseURI indicates an expected call of BaseURI. +func (mr *MockScaleSetVMScopeMockRecorder) BaseURI() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BaseURI", reflect.TypeOf((*MockScaleSetVMScope)(nil).BaseURI)) +} + +// ClientID mocks base method. +func (m *MockScaleSetVMScope) ClientID() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientID") + ret0, _ := ret[0].(string) + return ret0 +} + +// ClientID indicates an expected call of ClientID. +func (mr *MockScaleSetVMScopeMockRecorder) ClientID() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientID", reflect.TypeOf((*MockScaleSetVMScope)(nil).ClientID)) +} + +// ClientSecret mocks base method. +func (m *MockScaleSetVMScope) ClientSecret() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientSecret") + ret0, _ := ret[0].(string) + return ret0 +} + +// ClientSecret indicates an expected call of ClientSecret. +func (mr *MockScaleSetVMScopeMockRecorder) ClientSecret() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientSecret", reflect.TypeOf((*MockScaleSetVMScope)(nil).ClientSecret)) +} + +// CloudEnvironment mocks base method. 
+func (m *MockScaleSetVMScope) CloudEnvironment() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloudEnvironment") + ret0, _ := ret[0].(string) + return ret0 +} + +// CloudEnvironment indicates an expected call of CloudEnvironment. +func (mr *MockScaleSetVMScopeMockRecorder) CloudEnvironment() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloudEnvironment", reflect.TypeOf((*MockScaleSetVMScope)(nil).CloudEnvironment)) +} + +// CloudProviderConfigOverrides mocks base method. +func (m *MockScaleSetVMScope) CloudProviderConfigOverrides() *v1alpha4.CloudProviderConfigOverrides { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloudProviderConfigOverrides") + ret0, _ := ret[0].(*v1alpha4.CloudProviderConfigOverrides) + return ret0 +} + +// CloudProviderConfigOverrides indicates an expected call of CloudProviderConfigOverrides. +func (mr *MockScaleSetVMScopeMockRecorder) CloudProviderConfigOverrides() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloudProviderConfigOverrides", reflect.TypeOf((*MockScaleSetVMScope)(nil).CloudProviderConfigOverrides)) +} + +// ClusterName mocks base method. +func (m *MockScaleSetVMScope) ClusterName() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClusterName") + ret0, _ := ret[0].(string) + return ret0 +} + +// ClusterName indicates an expected call of ClusterName. +func (mr *MockScaleSetVMScopeMockRecorder) ClusterName() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterName", reflect.TypeOf((*MockScaleSetVMScope)(nil).ClusterName)) +} + +// Enabled mocks base method. +func (m *MockScaleSetVMScope) Enabled() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Enabled") + ret0, _ := ret[0].(bool) + return ret0 +} + +// Enabled indicates an expected call of Enabled. +func (mr *MockScaleSetVMScopeMockRecorder) Enabled() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enabled", reflect.TypeOf((*MockScaleSetVMScope)(nil).Enabled)) +} + +// Error mocks base method. +func (m *MockScaleSetVMScope) Error(err error, msg string, keysAndValues ...interface{}) { + m.ctrl.T.Helper() + varargs := []interface{}{err, msg} + for _, a := range keysAndValues { + varargs = append(varargs, a) + } + m.ctrl.Call(m, "Error", varargs...) +} + +// Error indicates an expected call of Error. +func (mr *MockScaleSetVMScopeMockRecorder) Error(err, msg interface{}, keysAndValues ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{err, msg}, keysAndValues...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockScaleSetVMScope)(nil).Error), varargs...) +} + +// GetLongRunningOperationState mocks base method. +func (m *MockScaleSetVMScope) GetLongRunningOperationState() *v1alpha4.Future { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetLongRunningOperationState") + ret0, _ := ret[0].(*v1alpha4.Future) + return ret0 +} + +// GetLongRunningOperationState indicates an expected call of GetLongRunningOperationState. +func (mr *MockScaleSetVMScopeMockRecorder) GetLongRunningOperationState() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLongRunningOperationState", reflect.TypeOf((*MockScaleSetVMScope)(nil).GetLongRunningOperationState)) +} + +// HashKey mocks base method. 
+func (m *MockScaleSetVMScope) HashKey() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HashKey") + ret0, _ := ret[0].(string) + return ret0 +} + +// HashKey indicates an expected call of HashKey. +func (mr *MockScaleSetVMScopeMockRecorder) HashKey() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HashKey", reflect.TypeOf((*MockScaleSetVMScope)(nil).HashKey)) +} + +// Info mocks base method. +func (m *MockScaleSetVMScope) Info(msg string, keysAndValues ...interface{}) { + m.ctrl.T.Helper() + varargs := []interface{}{msg} + for _, a := range keysAndValues { + varargs = append(varargs, a) + } + m.ctrl.Call(m, "Info", varargs...) +} + +// Info indicates an expected call of Info. +func (mr *MockScaleSetVMScopeMockRecorder) Info(msg interface{}, keysAndValues ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{msg}, keysAndValues...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockScaleSetVMScope)(nil).Info), varargs...) +} + +// InstanceID mocks base method. +func (m *MockScaleSetVMScope) InstanceID() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "InstanceID") + ret0, _ := ret[0].(string) + return ret0 +} + +// InstanceID indicates an expected call of InstanceID. +func (mr *MockScaleSetVMScopeMockRecorder) InstanceID() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstanceID", reflect.TypeOf((*MockScaleSetVMScope)(nil).InstanceID)) +} + +// Location mocks base method. +func (m *MockScaleSetVMScope) Location() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Location") + ret0, _ := ret[0].(string) + return ret0 +} + +// Location indicates an expected call of Location. +func (mr *MockScaleSetVMScopeMockRecorder) Location() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Location", reflect.TypeOf((*MockScaleSetVMScope)(nil).Location)) +} + +// ResourceGroup mocks base method. +func (m *MockScaleSetVMScope) ResourceGroup() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResourceGroup") + ret0, _ := ret[0].(string) + return ret0 +} + +// ResourceGroup indicates an expected call of ResourceGroup. +func (mr *MockScaleSetVMScopeMockRecorder) ResourceGroup() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResourceGroup", reflect.TypeOf((*MockScaleSetVMScope)(nil).ResourceGroup)) +} + +// ScaleSetName mocks base method. +func (m *MockScaleSetVMScope) ScaleSetName() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ScaleSetName") + ret0, _ := ret[0].(string) + return ret0 +} + +// ScaleSetName indicates an expected call of ScaleSetName. +func (mr *MockScaleSetVMScopeMockRecorder) ScaleSetName() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ScaleSetName", reflect.TypeOf((*MockScaleSetVMScope)(nil).ScaleSetName)) +} + +// SetLongRunningOperationState mocks base method. +func (m *MockScaleSetVMScope) SetLongRunningOperationState(future *v1alpha4.Future) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetLongRunningOperationState", future) +} + +// SetLongRunningOperationState indicates an expected call of SetLongRunningOperationState. 
+func (mr *MockScaleSetVMScopeMockRecorder) SetLongRunningOperationState(future interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLongRunningOperationState", reflect.TypeOf((*MockScaleSetVMScope)(nil).SetLongRunningOperationState), future) +} + +// SetVMSSVM mocks base method. +func (m *MockScaleSetVMScope) SetVMSSVM(vmssvm *azure.VMSSVM) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetVMSSVM", vmssvm) +} + +// SetVMSSVM indicates an expected call of SetVMSSVM. +func (mr *MockScaleSetVMScopeMockRecorder) SetVMSSVM(vmssvm interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetVMSSVM", reflect.TypeOf((*MockScaleSetVMScope)(nil).SetVMSSVM), vmssvm) +} + +// SubscriptionID mocks base method. +func (m *MockScaleSetVMScope) SubscriptionID() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubscriptionID") + ret0, _ := ret[0].(string) + return ret0 +} + +// SubscriptionID indicates an expected call of SubscriptionID. +func (mr *MockScaleSetVMScopeMockRecorder) SubscriptionID() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscriptionID", reflect.TypeOf((*MockScaleSetVMScope)(nil).SubscriptionID)) +} + +// TenantID mocks base method. +func (m *MockScaleSetVMScope) TenantID() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TenantID") + ret0, _ := ret[0].(string) + return ret0 +} + +// TenantID indicates an expected call of TenantID. +func (mr *MockScaleSetVMScopeMockRecorder) TenantID() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TenantID", reflect.TypeOf((*MockScaleSetVMScope)(nil).TenantID)) +} + +// V mocks base method. +func (m *MockScaleSetVMScope) V(level int) logr.Logger { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "V", level) + ret0, _ := ret[0].(logr.Logger) + return ret0 +} + +// V indicates an expected call of V. +func (mr *MockScaleSetVMScopeMockRecorder) V(level interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "V", reflect.TypeOf((*MockScaleSetVMScope)(nil).V), level) +} + +// WithName mocks base method. +func (m *MockScaleSetVMScope) WithName(name string) logr.Logger { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WithName", name) + ret0, _ := ret[0].(logr.Logger) + return ret0 +} + +// WithName indicates an expected call of WithName. +func (mr *MockScaleSetVMScopeMockRecorder) WithName(name interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithName", reflect.TypeOf((*MockScaleSetVMScope)(nil).WithName), name) +} + +// WithValues mocks base method. +func (m *MockScaleSetVMScope) WithValues(keysAndValues ...interface{}) logr.Logger { + m.ctrl.T.Helper() + varargs := []interface{}{} + for _, a := range keysAndValues { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "WithValues", varargs...) + ret0, _ := ret[0].(logr.Logger) + return ret0 +} + +// WithValues indicates an expected call of WithValues. +func (mr *MockScaleSetVMScopeMockRecorder) WithValues(keysAndValues ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithValues", reflect.TypeOf((*MockScaleSetVMScope)(nil).WithValues), keysAndValues...) 
+} diff --git a/azure/services/scalesetvms/scalesetvms.go b/azure/services/scalesetvms/scalesetvms.go new file mode 100644 index 00000000000..b388100a409 --- /dev/null +++ b/azure/services/scalesetvms/scalesetvms.go @@ -0,0 +1,142 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scalesetvms + +import ( + "context" + "time" + + "github.com/go-logr/logr" + "github.com/pkg/errors" + + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha4" + "sigs.k8s.io/cluster-api-provider-azure/azure" + "sigs.k8s.io/cluster-api-provider-azure/azure/converters" + "sigs.k8s.io/cluster-api-provider-azure/util/tele" +) + +type ( + // ScaleSetVMScope defines the scope interface for a scale sets service. + ScaleSetVMScope interface { + logr.Logger + azure.ClusterDescriber + InstanceID() string + ScaleSetName() string + SetVMSSVM(vmssvm *azure.VMSSVM) + GetLongRunningOperationState() *infrav1.Future + SetLongRunningOperationState(future *infrav1.Future) + } + + // Service provides operations on azure resources + Service struct { + Client client + Scope ScaleSetVMScope + } +) + +// NewService creates a new service. +func NewService(scope ScaleSetVMScope) *Service { + return &Service{ + Client: newClient(scope), + Scope: scope, + } +} + +// Reconcile idempotently gets, creates, and updates a scale set. +func (s *Service) Reconcile(ctx context.Context) error { + ctx, span := tele.Tracer().Start(ctx, "scalesetvms.Service.Reconcile") + defer span.End() + + var ( + resourceGroup = s.Scope.ResourceGroup() + vmssName = s.Scope.ScaleSetName() + instanceID = s.Scope.InstanceID() + ) + + // fetch the latest data about the instance -- model mutations are handled by the AzureMachinePoolReconciler + instance, err := s.Client.Get(ctx, resourceGroup, vmssName, instanceID) + if err != nil { + if azure.ResourceNotFound(err) { + return azure.WithTransientError(errors.New("instance does not exist yet"), 30*time.Second) + } + return errors.Wrap(err, "failed getting instance") + } + + s.Scope.SetVMSSVM(converters.SDKToVMSSVM(instance)) + return nil +} + +// Delete deletes a scaleset instance asynchronously returning a future which encapsulates the long running operation. 
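Before the Delete implementation that follows, a hedged usage sketch: how a reconciler might drive this service's Reconcile/Delete pair, on the assumption that callers simply requeue while the delete future stored on the scope is still in flight. The function name reconcileDelete and the fixed 15-second requeue below are illustrative, not part of this change.

```go
package example

import (
	"context"
	"time"

	ctrl "sigs.k8s.io/controller-runtime"

	"sigs.k8s.io/cluster-api-provider-azure/azure/services/scalesetvms"
)

// reconcileDelete sketches how a controller might consume the asynchronous delete:
// it calls Delete on every pass and requeues while the long-running operation is
// still in progress. A real controller would distinguish transient errors (wrapped
// via azure.WithTransientError) from terminal ones instead of requeueing blindly.
func reconcileDelete(ctx context.Context, scope scalesetvms.ScaleSetVMScope) (ctrl.Result, error) {
	svc := scalesetvms.NewService(scope)
	if err := svc.Delete(ctx); err != nil {
		// The future is either newly started or not done yet; try again shortly.
		return ctrl.Result{RequeueAfter: 15 * time.Second}, nil
	}
	// Delete returned nil: the future completed (or the instance was already gone)
	// and the long-running operation state was cleared from the scope.
	return ctrl.Result{}, nil
}
```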
+func (s *Service) Delete(ctx context.Context) error { + ctx, span := tele.Tracer().Start(ctx, "scalesetvms.Service.Delete") + defer span.End() + + var ( + resourceGroup = s.Scope.ResourceGroup() + vmssName = s.Scope.ScaleSetName() + instanceID = s.Scope.InstanceID() + ) + + log := s.Scope.WithValues("resourceGroup", resourceGroup, "scaleset", vmssName, "instanceID", instanceID) + + defer func() { + if instance, err := s.Client.Get(ctx, resourceGroup, vmssName, instanceID); err == nil && instance.VirtualMachineScaleSetVMProperties != nil { + log.V(4).Info("updating vmss vm state", "state", instance.ProvisioningState) + s.Scope.SetVMSSVM(converters.SDKToVMSSVM(instance)) + } + }() + + log.V(4).Info("entering delete") + future := s.Scope.GetLongRunningOperationState() + if future != nil { + if future.Type != DeleteFuture { + return azure.WithTransientError(errors.New("attempting to delete, non-delete operation in progress"), 30*time.Second) + } + + log.V(4).Info("checking if the instance is done deleting") + if _, err := s.Client.GetResultIfDone(ctx, future); err != nil { + // fetch instance to update status + return errors.Wrap(err, "failed to get result of long running operation") + } + + // there was no error in fetching the result, the future has been completed + log.V(4).Info("successfully deleted the instance") + s.Scope.SetLongRunningOperationState(nil) + return nil + } + + // since the future was nil, there is no ongoing activity; start deleting the instance + future, err := s.Client.DeleteAsync(ctx, resourceGroup, vmssName, instanceID) + if err != nil { + if azure.ResourceNotFound(err) { + // already deleted + return nil + } + return errors.Wrapf(err, "failed to delete instance %s/%s", vmssName, instanceID) + } + + s.Scope.SetLongRunningOperationState(future) + + log.V(4).Info("checking if the instance is done deleting") + if _, err := s.Client.GetResultIfDone(ctx, future); err != nil { + // fetch instance to update status + return errors.Wrap(err, "failed to get result of long running operation") + } + + s.Scope.SetLongRunningOperationState(nil) + return nil +} diff --git a/azure/services/scalesetvms/scalesetvms_test.go b/azure/services/scalesetvms/scalesetvms_test.go new file mode 100644 index 00000000000..ab0b74cc366 --- /dev/null +++ b/azure/services/scalesetvms/scalesetvms_test.go @@ -0,0 +1,280 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scalesetvms + +import ( + "context" + "net/http" + "testing" + "time" + + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-30/compute" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/to" + "github.com/golang/mock/gomock" + . 
"github.com/onsi/gomega" + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha4" + "sigs.k8s.io/cluster-api-provider-azure/azure" + "sigs.k8s.io/cluster-api-provider-azure/azure/converters" + "sigs.k8s.io/cluster-api-provider-azure/azure/scope" + "sigs.k8s.io/cluster-api-provider-azure/azure/services/scalesetvms/mock_scalesetvms" + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha4" + gomock2 "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" + clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha4" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +var ( + autorest404 = autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not Found") +) + +func TestNewService(t *testing.T) { + g := NewGomegaWithT(t) + scheme := runtime.NewScheme() + _ = clusterv1.AddToScheme(scheme) + _ = infrav1.AddToScheme(scheme) + _ = infrav1exp.AddToScheme(scheme) + + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}, + } + client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(cluster).Build() + s, err := scope.NewClusterScope(context.Background(), scope.ClusterScopeParams{ + AzureClients: scope.AzureClients{ + Authorizer: autorest.NullAuthorizer{}, + }, + Client: client, + Cluster: cluster, + AzureCluster: &infrav1.AzureCluster{ + Spec: infrav1.AzureClusterSpec{ + Location: "test-location", + ResourceGroup: "my-rg", + SubscriptionID: "123", + NetworkSpec: infrav1.NetworkSpec{ + Vnet: infrav1.VnetSpec{Name: "my-vnet", ResourceGroup: "my-rg"}, + }, + }, + }, + }) + g.Expect(err).ToNot(HaveOccurred()) + + mpms, err := scope.NewMachinePoolMachineScope(scope.MachinePoolMachineScopeParams{ + Client: client, + Logger: s.Logger, + MachinePool: new(clusterv1exp.MachinePool), + AzureMachinePool: new(infrav1exp.AzureMachinePool), + AzureMachinePoolMachine: new(infrav1exp.AzureMachinePoolMachine), + ClusterScope: s, + }) + g.Expect(err).ToNot(HaveOccurred()) + actual := NewService(mpms) + g.Expect(actual).ToNot(BeNil()) +} + +func TestService_Reconcile(t *testing.T) { + cases := []struct { + Name string + Setup func(s *mock_scalesetvms.MockScaleSetVMScopeMockRecorder, m *mock_scalesetvms.MockclientMockRecorder) + Err error + CheckIsErr bool + }{ + { + Name: "should reconcile successfully", + Setup: func(s *mock_scalesetvms.MockScaleSetVMScopeMockRecorder, m *mock_scalesetvms.MockclientMockRecorder) { + s.ResourceGroup().Return("rg") + s.InstanceID().Return("0") + s.ScaleSetName().Return("scaleset") + vm := compute.VirtualMachineScaleSetVM{ + InstanceID: to.StringPtr("0"), + } + m.Get(gomock2.AContext(), "rg", "scaleset", "0").Return(vm, nil) + s.SetVMSSVM(converters.SDKToVMSSVM(vm)) + }, + }, + { + Name: "if 404, then should respond with transient error", + Setup: func(s *mock_scalesetvms.MockScaleSetVMScopeMockRecorder, m *mock_scalesetvms.MockclientMockRecorder) { + s.ResourceGroup().Return("rg") + s.InstanceID().Return("0") + s.ScaleSetName().Return("scaleset") + m.Get(gomock2.AContext(), "rg", "scaleset", "0").Return(compute.VirtualMachineScaleSetVM{}, autorest404) + }, + Err: azure.WithTransientError(errors.New("instance does not exist yet"), 30*time.Second), + CheckIsErr: true, + }, + { + Name: "if other error, then should respond with error", + Setup: func(s *mock_scalesetvms.MockScaleSetVMScopeMockRecorder, m 
*mock_scalesetvms.MockclientMockRecorder) { + s.ResourceGroup().Return("rg") + s.InstanceID().Return("0") + s.ScaleSetName().Return("scaleset") + m.Get(gomock2.AContext(), "rg", "scaleset", "0").Return(compute.VirtualMachineScaleSetVM{}, errors.New("boom")) + }, + Err: errors.Wrap(errors.New("boom"), "failed getting instance"), + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + var ( + g = NewWithT(t) + mockCtrl = gomock.NewController(t) + scopeMock = mock_scalesetvms.NewMockScaleSetVMScope(mockCtrl) + clientMock = mock_scalesetvms.NewMockclient(mockCtrl) + ) + defer mockCtrl.Finish() + + scopeMock.EXPECT().SubscriptionID().Return("subID") + scopeMock.EXPECT().BaseURI().Return("https://localhost/") + scopeMock.EXPECT().Authorizer().Return(nil) + + service := NewService(scopeMock) + service.Client = clientMock + c.Setup(scopeMock.EXPECT(), clientMock.EXPECT()) + + if err := service.Reconcile(context.TODO()); c.Err == nil { + g.Expect(err).To(Succeed()) + } else { + g.Expect(err).To(HaveOccurred()) + g.Expect(err).To(MatchError(c.Err.Error())) + if c.CheckIsErr { + g.Expect(errors.Is(err, c.Err)).To(BeTrue()) + } + } + }) + } +} + +func TestService_Delete(t *testing.T) { + cases := []struct { + Name string + Setup func(s *mock_scalesetvms.MockScaleSetVMScopeMockRecorder, m *mock_scalesetvms.MockclientMockRecorder) + Err error + CheckIsErr bool + }{ + { + Name: "should start deleting successfully if no long running operation is active", + Setup: func(s *mock_scalesetvms.MockScaleSetVMScopeMockRecorder, m *mock_scalesetvms.MockclientMockRecorder) { + s.ResourceGroup().Return("rg") + s.InstanceID().Return("0") + s.ScaleSetName().Return("scaleset") + s.GetLongRunningOperationState().Return(nil) + future := &infrav1.Future{ + Type: DeleteFuture, + } + m.DeleteAsync(gomock2.AContext(), "rg", "scaleset", "0").Return(future, nil) + s.SetLongRunningOperationState(future) + m.GetResultIfDone(gomock2.AContext(), future).Return(compute.VirtualMachineScaleSetVM{}, azure.WithTransientError(azure.NewOperationNotDoneError(future), 15*time.Second)) + m.Get(gomock2.AContext(), "rg", "scaleset", "0").Return(compute.VirtualMachineScaleSetVM{}, nil) + }, + CheckIsErr: true, + Err: errors.Wrap(azure.WithTransientError(azure.NewOperationNotDoneError(&infrav1.Future{ + Type: DeleteFuture, + }), 15*time.Second), "failed to get result of long running operation"), + }, + { + Name: "should finish deleting successfully when there's a long running operation that has completed", + Setup: func(s *mock_scalesetvms.MockScaleSetVMScopeMockRecorder, m *mock_scalesetvms.MockclientMockRecorder) { + s.ResourceGroup().Return("rg") + s.InstanceID().Return("0") + s.ScaleSetName().Return("scaleset") + future := &infrav1.Future{ + Type: DeleteFuture, + } + s.GetLongRunningOperationState().Return(future) + m.GetResultIfDone(gomock2.AContext(), future).Return(compute.VirtualMachineScaleSetVM{}, nil) + s.SetLongRunningOperationState(nil) + m.Get(gomock2.AContext(), "rg", "scaleset", "0").Return(compute.VirtualMachineScaleSetVM{}, nil) + }, + }, + { + Name: "should not error when deleting, but resource is 404", + Setup: func(s *mock_scalesetvms.MockScaleSetVMScopeMockRecorder, m *mock_scalesetvms.MockclientMockRecorder) { + s.ResourceGroup().Return("rg") + s.InstanceID().Return("0") + s.ScaleSetName().Return("scaleset") + s.GetLongRunningOperationState().Return(nil) + m.DeleteAsync(gomock2.AContext(), "rg", "scaleset", "0").Return(nil, autorest404) + m.Get(gomock2.AContext(), "rg", "scaleset", 
"0").Return(compute.VirtualMachineScaleSetVM{}, nil) + }, + }, + { + Name: "should error when deleting, but a non-404 error is returned from DELETE call", + Setup: func(s *mock_scalesetvms.MockScaleSetVMScopeMockRecorder, m *mock_scalesetvms.MockclientMockRecorder) { + s.ResourceGroup().Return("rg") + s.InstanceID().Return("0") + s.ScaleSetName().Return("scaleset") + s.GetLongRunningOperationState().Return(nil) + m.DeleteAsync(gomock2.AContext(), "rg", "scaleset", "0").Return(nil, errors.New("boom")) + m.Get(gomock2.AContext(), "rg", "scaleset", "0").Return(compute.VirtualMachineScaleSetVM{}, nil) + }, + Err: errors.Wrap(errors.New("boom"), "failed to delete instance scaleset/0"), + }, + { + Name: "should return error when a long running operation is active and getting the result returns an error", + Setup: func(s *mock_scalesetvms.MockScaleSetVMScopeMockRecorder, m *mock_scalesetvms.MockclientMockRecorder) { + s.ResourceGroup().Return("rg") + s.InstanceID().Return("0") + s.ScaleSetName().Return("scaleset") + future := &infrav1.Future{ + Type: DeleteFuture, + } + s.GetLongRunningOperationState().Return(future) + m.GetResultIfDone(gomock2.AContext(), future).Return(compute.VirtualMachineScaleSetVM{}, errors.New("boom")) + m.Get(gomock2.AContext(), "rg", "scaleset", "0").Return(compute.VirtualMachineScaleSetVM{}, nil) + }, + Err: errors.Wrap(errors.New("boom"), "failed to get result of long running operation"), + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + var ( + g = NewWithT(t) + mockCtrl = gomock.NewController(t) + scopeMock = mock_scalesetvms.NewMockScaleSetVMScope(mockCtrl) + clientMock = mock_scalesetvms.NewMockclient(mockCtrl) + ) + defer mockCtrl.Finish() + + scopeMock.EXPECT().SubscriptionID().Return("subID") + scopeMock.EXPECT().BaseURI().Return("https://localhost/") + scopeMock.EXPECT().Authorizer().Return(nil) + scopeMock.EXPECT().WithValues(gomock.Any()).Return(scopeMock) + scopeMock.EXPECT().V(gomock.Any()).Return(scopeMock).AnyTimes() + scopeMock.EXPECT().Info(gomock.Any(), gomock.Any()).AnyTimes() + + service := NewService(scopeMock) + service.Client = clientMock + c.Setup(scopeMock.EXPECT(), clientMock.EXPECT()) + + if err := service.Delete(context.TODO()); c.Err == nil { + g.Expect(err).To(Succeed()) + } else { + g.Expect(err).To(HaveOccurred()) + g.Expect(err).To(MatchError(c.Err.Error())) + if c.CheckIsErr { + g.Expect(errors.Is(err, c.Err)).To(BeTrue()) + } + } + }) + } +} diff --git a/azure/types.go b/azure/types.go index 81573a31864..1e87b40f193 100644 --- a/azure/types.go +++ b/azure/types.go @@ -17,6 +17,9 @@ limitations under the License. package azure import ( + "reflect" + + "github.com/google/go-cmp/cmp" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha4" ) @@ -211,3 +214,87 @@ type VMSSExtensionSpec struct { Version string ProtectedSettings map[string]string } + +type ( + // VMSSVM defines a VM in a virtual machine scale set. + VMSSVM struct { + ID string `json:"id,omitempty"` + InstanceID string `json:"instanceID,omitempty"` + Image infrav1.Image `json:"image,omitempty"` + Name string `json:"name,omitempty"` + AvailabilityZone string `json:"availabilityZone,omitempty"` + State infrav1.ProvisioningState `json:"vmState,omitempty"` + } + + // VMSS defines a virtual machine scale set. 
+ VMSS struct { + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Sku string `json:"sku,omitempty"` + Capacity int64 `json:"capacity,omitempty"` + Zones []string `json:"zones,omitempty"` + Image infrav1.Image `json:"image,omitempty"` + State infrav1.ProvisioningState `json:"vmState,omitempty"` + Identity infrav1.VMIdentity `json:"identity,omitempty"` + Tags infrav1.Tags `json:"tags,omitempty"` + Instances []VMSSVM `json:"instances,omitempty"` + } +) + +// HasModelChanges returns true if the spec fields which will mutate the Azure VMSS model are different. +func (vmss VMSS) HasModelChanges(other VMSS) bool { + equal := cmp.Equal(vmss.Image, other.Image) && + cmp.Equal(vmss.Identity, other.Identity) && + cmp.Equal(vmss.Zones, other.Zones) && + cmp.Equal(vmss.Tags, other.Tags) && + cmp.Equal(vmss.Sku, other.Sku) + return !equal +} + +// InstancesByProviderID returns VMSSVMs by ID +func (vmss VMSS) InstancesByProviderID() map[string]VMSSVM { + instancesByProviderID := make(map[string]VMSSVM, len(vmss.Instances)) + for _, instance := range vmss.Instances { + instancesByProviderID[instance.ProviderID()] = instance + } + + return instancesByProviderID +} + +// ProviderID returns the K8s provider ID for the VMSS instance +func (vm VMSSVM) ProviderID() string { + return ProviderIDPrefix + vm.ID +} + +// HasLatestModelAppliedToAll returns true if all VMSS instance have the latest model applied +func (vmss VMSS) HasLatestModelAppliedToAll() bool { + for _, instance := range vmss.Instances { + if !vmss.HasLatestModelApplied(instance) { + return false + } + } + + return true +} + +// HasEnoughLatestModelOrNotMixedModel returns true if VMSS instance have the latest model applied to all or equal to the capacity +func (vmss VMSS) HasEnoughLatestModelOrNotMixedModel() bool { + if vmss.HasLatestModelAppliedToAll() { + return true + } + + counter := int64(0) + for _, instance := range vmss.Instances { + if vmss.HasLatestModelApplied(instance) { + counter++ + } + } + + return counter == vmss.Capacity +} + +// HasLatestModelApplied returns true if the VMSS instance matches the VMSS image reference +func (vmss VMSS) HasLatestModelApplied(vm VMSSVM) bool { + // if the images match, then the VM is of the same model + return reflect.DeepEqual(vm.Image, vmss.Image) +} diff --git a/azure/types_test.go b/azure/types_test.go new file mode 100644 index 00000000000..13c1f1ff64b --- /dev/null +++ b/azure/types_test.go @@ -0,0 +1,165 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure + +import ( + "testing" + + "github.com/Azure/go-autorest/autorest/to" + . 
"github.com/onsi/gomega" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha4" +) + +func TestVMSS_HasModelChanges(t *testing.T) { + cases := []struct { + Name string + Factory func() (VMSS, VMSS) + HasModelChanges bool + }{ + { + Name: "two empty VMSS", + Factory: func() (VMSS, VMSS) { + return VMSS{}, VMSS{} + }, + HasModelChanges: false, + }, + { + Name: "one empty and other with image changes", + Factory: func() (VMSS, VMSS) { + return VMSS{}, VMSS{ + Image: infrav1.Image{ + Marketplace: &infrav1.AzureMarketplaceImage{ + Version: "foo", + }, + }, + } + }, + HasModelChanges: true, + }, + { + Name: "one empty and other with image changes", + Factory: func() (VMSS, VMSS) { + return VMSS{}, VMSS{ + Image: infrav1.Image{ + Marketplace: &infrav1.AzureMarketplaceImage{ + Version: "foo", + }, + }, + } + }, + HasModelChanges: true, + }, + { + Name: "same default VMSS", + Factory: func() (VMSS, VMSS) { + l := getDefaultVMSSForModelTesting() + r := getDefaultVMSSForModelTesting() + return r, l + }, + HasModelChanges: false, + }, + { + Name: "with different identity", + Factory: func() (VMSS, VMSS) { + l := getDefaultVMSSForModelTesting() + l.Identity = infrav1.VMIdentityNone + r := getDefaultVMSSForModelTesting() + return r, l + }, + HasModelChanges: true, + }, + { + Name: "with different Zones", + Factory: func() (VMSS, VMSS) { + l := getDefaultVMSSForModelTesting() + l.Zones = []string{"0"} + r := getDefaultVMSSForModelTesting() + return r, l + }, + HasModelChanges: true, + }, + { + Name: "with empty image", + Factory: func() (VMSS, VMSS) { + l := getDefaultVMSSForModelTesting() + l.Image = infrav1.Image{} + r := getDefaultVMSSForModelTesting() + return r, l + }, + HasModelChanges: true, + }, + { + Name: "with different image reference ID", + Factory: func() (VMSS, VMSS) { + l := getDefaultVMSSForModelTesting() + l.Image = infrav1.Image{ + ID: to.StringPtr("foo"), + } + r := getDefaultVMSSForModelTesting() + return r, l + }, + HasModelChanges: true, + }, + { + Name: "with different SKU", + Factory: func() (VMSS, VMSS) { + l := getDefaultVMSSForModelTesting() + l.Sku = "reallySmallVM" + r := getDefaultVMSSForModelTesting() + return r, l + }, + HasModelChanges: true, + }, + { + Name: "with different Tags", + Factory: func() (VMSS, VMSS) { + l := getDefaultVMSSForModelTesting() + l.Tags = infrav1.Tags{ + "bin": "baz", + } + r := getDefaultVMSSForModelTesting() + return r, l + }, + HasModelChanges: true, + }, + } + + for _, c := range cases { + c := c + t.Run(c.Name, func(t *testing.T) { + l, r := c.Factory() + g := NewWithT(t) + g.Expect(l.HasModelChanges(r)).To(Equal(c.HasModelChanges)) + }) + } +} + +func getDefaultVMSSForModelTesting() VMSS { + return VMSS{ + Zones: []string{"0", "1"}, + Image: infrav1.Image{ + Marketplace: &infrav1.AzureMarketplaceImage{ + Version: "foo", + }, + }, + Sku: "reallyBigVM", + Identity: infrav1.VMIdentitySystemAssigned, + Tags: infrav1.Tags{ + "foo": "baz", + }, + } +} diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachinepoolmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachinepoolmachines.yaml new file mode 100644 index 00000000000..2d68232b0e9 --- /dev/null +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachinepoolmachines.yaml @@ -0,0 +1,181 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.0 + creationTimestamp: null + name: azuremachinepoolmachines.infrastructure.cluster.x-k8s.io +spec: + 
group: infrastructure.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: AzureMachinePoolMachine + listKind: AzureMachinePoolMachineList + plural: azuremachinepoolmachines + shortNames: + - ampm + singular: azuremachinepoolmachine + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Kubernetes version + jsonPath: .status.version + name: Version + type: string + - description: Flag indicating infrastructure is successfully provisioned + jsonPath: .status.ready + name: Ready + type: string + - description: Azure VMSS VM provisioning state + jsonPath: .status.provisioningState + name: State + type: string + - description: Cluster to which this AzureMachinePoolMachine belongs + jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name + name: Cluster + priority: 1 + type: string + - description: Azure VMSS VM ID + jsonPath: .spec.providerID + name: VMSS VM ID + priority: 1 + type: string + name: v1alpha4 + schema: + openAPIV3Schema: + description: AzureMachinePoolMachine is the Schema for the azuremachinepoolmachines API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AzureMachinePoolMachineSpec defines the desired state of AzureMachinePoolMachine + properties: + instanceID: + description: InstanceID is the identification of the Machine Instance within the VMSS + type: string + providerID: + description: ProviderID is the identification ID of the Virtual Machine Scale Set + type: string + required: + - instanceID + - providerID + type: object + status: + description: AzureMachinePoolMachineStatus defines the observed state of AzureMachinePoolMachine + properties: + conditions: + description: Conditions defines current service state of the AzureMachinePool. + items: + description: Condition defines an observation of a Cluster API resource operational state. + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: A human readable message indicating details about the transition. This field may be empty. + type: string + reason: + description: The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty. + type: string + severity: + description: Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False. + type: string + status: + description: Status of the condition, one of True, False, Unknown. 
+ type: string + type: + description: Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. + type: string + required: + - status + - type + type: object + type: array + failureMessage: + description: "FailureMessage will be set in the event that there is a terminal problem reconciling the MachinePool and will contain a more verbose string suitable for logging and human consumption. \n Any transient errors that occur during the reconciliation of MachinePools can be added as events to the MachinePool object and/or logged in the controller's output." + type: string + failureReason: + description: "FailureReason will be set in the event that there is a terminal problem reconciling the MachinePool machine and will contain a succinct value suitable for machine interpretation. \n Any transient errors that occur during the reconciliation of MachinePools can be added as events to the MachinePool object and/or logged in the controller's output." + type: string + instanceName: + description: InstanceName is the name of the Machine Instance within the VMSS + type: string + latestModelApplied: + description: LatestModelApplied indicates the instance is running the most up-to-date VMSS model. A VMSS model describes the image version the VM is running. If the instance is not running the latest model, it means the instance may not be running the version of Kubernetes the Machine Pool has specified and needs to be updated. + type: boolean + longRunningOperationState: + description: LongRunningOperationState saves the state for an Azure long running operations so it can be continued on the next reconciliation loop. + properties: + futureData: + description: FutureData is the base64 url encoded json Azure AutoRest Future + type: string + name: + description: Name is the name of the Azure resource + type: string + resourceGroup: + description: ResourceGroup is the Azure resource group for the resource + type: string + type: + description: Type describes the type of future, update, create, delete, etc + type: string + required: + - type + type: object + nodeRef: + description: NodeRef will point to the corresponding Node if it exists. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + provisioningState: + description: ProvisioningState is the provisioning state of the Azure virtual machine instance. + type: string + ready: + description: Ready is true when the provider resource is ready. + type: boolean + version: + description: Version defines the Kubernetes version for the VM Instance + type: string + required: + - latestModelApplied + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachinepools.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachinepools.yaml index b5caa4a7d76..e7441590475 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachinepools.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachinepools.yaml @@ -473,6 +473,48 @@ spec: roleAssignmentName: description: RoleAssignmentName is the name of the role assignment to create for a system assigned identity. It can be any valid GUID. If not specified, a random GUID will be generated. type: string + strategy: + default: + rollingUpdate: + deletePolicy: Oldest + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + description: The deployment strategy to use to replace existing AzureMachinePoolMachines with new ones. + properties: + rollingUpdate: + description: Rolling update config params. Present only if MachineDeploymentStrategyType = RollingUpdate. + properties: + deletePolicy: + default: Oldest + description: DeletePolicy defines the policy used by the MachineDeployment to identify nodes to delete when downscaling. Valid values are "Random, "Newest", "Oldest" When no value is supplied, the default is Oldest + enum: + - Random + - Newest + - Oldest + type: string + maxSurge: + anyOf: + - type: integer + - type: string + default: 1 + description: 'The maximum number of machines that can be scheduled above the desired number of machines. Value can be an absolute number (ex: 5) or a percentage of desired machines (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 1. Example: when this is set to 30%, the new MachineSet can be scaled up immediately when the rolling update starts, such that the total number of old and new machines do not exceed 130% of desired machines. Once old machines have been killed, new MachineSet can be scaled up further, ensuring that total number of machines running at any time during the update is at most 130% of desired machines.' + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + default: 0 + description: 'The maximum number of machines that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired machines (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 0. 
Example: when this is set to 30%, the old MachineSet can be scaled down to 70% of desired machines immediately when the rolling update starts. Once new machines are ready, old MachineSet can be scaled down further, followed by scaling up the new MachineSet, ensuring that the total number of machines available at all times during the update is at least 70% of desired machines.' + x-kubernetes-int-or-string: true + type: object + type: + default: RollingUpdate + description: Type of deployment. Currently the only supported strategy is RollingUpdate + enum: + - RollingUpdate + type: string + type: object template: description: Template contains the details used to build a replica virtual machine within the Machine Pool properties: @@ -521,7 +563,7 @@ spec: type: object type: array image: - description: Image is used to provide details of an image to use during Virtual Machine creation. If image details are omitted the image will default the Azure Marketplace "capi" offer, which is based on Ubuntu. + description: Image is used to provide details of an image to use during VM creation. If image details are omitted the image will default the Azure Marketplace "capi" offer, which is based on Ubuntu. properties: id: description: ID specifies an image to use by ID @@ -715,6 +757,72 @@ spec: failureReason: description: "FailureReason will be set in the event that there is a terminal problem reconciling the MachinePool and will contain a succinct value suitable for machine interpretation. \n This field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the MachinePool's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured. \n Any transient errors that occur during the reconciliation of MachinePools can be added as events to the MachinePool object and/or logged in the controller's output." type: string + image: + description: Image is the current image used in the AzureMachinePool. When the spec image is nil, this image is populated with the details of the defaulted Azure Marketplace "capi" offer. + properties: + id: + description: ID specifies an image to use by ID + type: string + marketplace: + description: Marketplace specifies an image to use from the Azure Marketplace + properties: + offer: + description: Offer specifies the name of a group of related images created by the publisher. For example, UbuntuServer, WindowsServer + minLength: 1 + type: string + publisher: + description: Publisher is the name of the organization that created the image + minLength: 1 + type: string + sku: + description: SKU specifies an instance of an offer, such as a major release of a distribution. For example, 18.04-LTS, 2019-Datacenter + minLength: 1 + type: string + thirdPartyImage: + default: false + description: ThirdPartyImage indicates the image is published by a third party publisher and a Plan will be generated for it. + type: boolean + version: + description: Version specifies the version of an image sku. The allowed formats are Major.Minor.Build or 'latest'. Major, Minor, and Build are decimal numbers. Specify 'latest' to use the latest version of an image available at deploy time. 
Even if you use 'latest', the VM image will not automatically update after deploy time even if a new version becomes available. + minLength: 1 + type: string + required: + - offer + - publisher + - sku + - version + type: object + sharedGallery: + description: SharedGallery specifies an image to use from an Azure Shared Image Gallery + properties: + gallery: + description: Gallery specifies the name of the shared image gallery that contains the image + minLength: 1 + type: string + name: + description: Name is the name of the image + minLength: 1 + type: string + resourceGroup: + description: ResourceGroup specifies the resource group containing the shared image gallery + minLength: 1 + type: string + subscriptionID: + description: SubscriptionID is the identifier of the subscription that contains the shared image gallery + minLength: 1 + type: string + version: + description: Version specifies the version of the marketplace image. The allowed formats are Major.Minor.Build or 'latest'. Major, Minor, and Build are decimal numbers. Specify 'latest' to use the latest version of an image available at deploy time. Even if you use 'latest', the VM image will not automatically update after deploy time even if a new version becomes available. + minLength: 1 + type: string + required: + - gallery + - name + - resourceGroup + - subscriptionID + - version + type: object + type: object instances: description: Instances is the VM instance status for each VM in the VMSS items: diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 6138968a08d..459afec53fd 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -13,6 +13,7 @@ resources: - bases/infrastructure.cluster.x-k8s.io_azuremanagedmachinepools.yaml - bases/infrastructure.cluster.x-k8s.io_azuremanagedclusters.yaml - bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml + - bases/infrastructure.cluster.x-k8s.io_azuremachinepoolmachines.yaml # +kubebuilder:scaffold:crdkustomizeresource @@ -23,6 +24,7 @@ patchesStrategicMerge: - patches/webhook_in_azureclusters.yaml - patches/webhook_in_azuremachinetemplates.yaml - patches/webhook_in_azuremachinepools.yaml + - patches/webhook_in_azuremachinepoolmachines.yaml # - patches/webhook_in_azuremanagedmachinepools.yaml # - patches/webhook_in_azuremanagedclusters.yaml # - patches/webhook_in_azuremanagedcontrolplanes.yaml @@ -34,6 +36,7 @@ patchesStrategicMerge: - patches/cainjection_in_azureclusters.yaml - patches/cainjection_in_azuremachinetemplates.yaml - patches/cainjection_in_azuremachinepools.yaml + - patches/cainjection_in_azuremachinepoolmachines.yaml # - patches/cainjection_in_azuremanagedmachinepools.yaml # - patches/cainjection_in_azuremanagedclusters.yaml # - patches/cainjection_in_azuremanagedcontrolplanes.yaml diff --git a/config/crd/patches/cainjection_in_azuremachinepoolmachines.yaml b/config/crd/patches/cainjection_in_azuremachinepoolmachines.yaml new file mode 100644 index 00000000000..cb25d81da73 --- /dev/null +++ b/config/crd/patches/cainjection_in_azuremachinepoolmachines.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: azuremachinepoolmachines.infrastructure.cluster.x-k8s.io diff --git a/config/crd/patches/webhook_in_azuremachinepoolmachines.yaml b/config/crd/patches/webhook_in_azuremachinepoolmachines.yaml new file mode 100644 index 00000000000..86906dd573d --- /dev/null +++ b/config/crd/patches/webhook_in_azuremachinepoolmachines.yaml @@ -0,0 +1,19 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: azuremachinepoolmachines.infrastructure.cluster.x-k8s.io +spec: + conversion: + strategy: Webhook + webhook: + conversionReviewVersions: ["v1", "v1beta1"] + clientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index e3d9522d4ee..c92785260aa 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -116,6 +116,26 @@ rules: - get - patch - update +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - azuremachinepoolmachines + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - azuremachinepoolmachines/status + verbs: + - get + - patch + - update - apiGroups: - infrastructure.cluster.x-k8s.io resources: diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index 6247463e374..836c7070bfc 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -178,6 +178,26 @@ webhooks: resources: - azuremachinepools sideEffects: None +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-infrastructure-cluster-x-k8s-io-v1alpha4-azuremachinepoolmachine + failurePolicy: Fail + name: azuremachinepoolmachine.kb.io + rules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + apiVersions: + - v1alpha4 + operations: + - CREATE + - UPDATE + resources: + - azuremachinepoolmachines + sideEffects: None - admissionReviewVersions: - v1beta1 clientConfig: diff --git a/controllers/helpers.go b/controllers/helpers.go index e5405e8cfb1..b8860a009c4 100644 --- a/controllers/helpers.go +++ b/controllers/helpers.go @@ -21,10 +21,6 @@ import ( "encoding/json" "fmt" - "sigs.k8s.io/cluster-api-provider-azure/azure/scope" - "sigs.k8s.io/cluster-api-provider-azure/azure/services/groups" - "sigs.k8s.io/cluster-api-provider-azure/util/tele" - "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" @@ -35,17 +31,30 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha4" + "sigs.k8s.io/cluster-api-provider-azure/azure" + "sigs.k8s.io/cluster-api-provider-azure/azure/scope" + "sigs.k8s.io/cluster-api-provider-azure/azure/services/groups" + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha4" + "sigs.k8s.io/cluster-api-provider-azure/pkg/coalescing" + "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" + 
"sigs.k8s.io/cluster-api-provider-azure/util/tele" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" capiv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha4" "sigs.k8s.io/cluster-api/util" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" +) - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha4" - "sigs.k8s.io/cluster-api-provider-azure/azure" - "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" +type ( + // Options are controller options extended + Options struct { + controller.Options + Cache *coalescing.ReconcileCache + } ) // AzureClusterToAzureMachinesMapper creates a mapping handler to transform AzureClusters into AzureMachines. The transform @@ -401,7 +410,7 @@ func reconcileAzureSecret(ctx context.Context, log logr.Logger, kubeclient clien tag, exists := old.Labels[clusterName] if exists && tag != string(infrav1.ResourceLifecycleOwned) { - log.Info("returning early from json reconcile, user provided secret already exists") + log.V(2).Info("returning early from json reconcile, user provided secret already exists") return nil } @@ -417,7 +426,7 @@ func reconcileAzureSecret(ctx context.Context, log logr.Logger, kubeclient clien hasData := equality.Semantic.DeepEqual(old.Data, new.Data) if hasData && hasOwner { // no update required - log.Info("returning early from json reconcile, no update needed") + log.V(2).Info("returning early from json reconcile, no update needed") return nil } @@ -429,12 +438,12 @@ func reconcileAzureSecret(ctx context.Context, log logr.Logger, kubeclient clien old.Data = new.Data } - log.Info("updating azure json") + log.V(2).Info("updating azure json") if err := kubeclient.Update(ctx, old); err != nil { return errors.Wrap(err, "failed to update cluster azure json when diff was required") } - log.Info("done updating azure json") + log.V(2).Info("done updating azure json") return nil } @@ -460,7 +469,29 @@ func GetOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.Object return nil, nil } -// GetMachinePoolByName finds and return a Machine object using the specified params. +// GetOwnerAzureMachinePool returns the AzureMachinePool object owning the current resource. +func GetOwnerAzureMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*infrav1exp.AzureMachinePool, error) { + ctx, span := tele.Tracer().Start(ctx, "controllers.GetOwnerAzureMachinePool") + defer span.End() + + for _, ref := range obj.OwnerReferences { + if ref.Kind != "AzureMachinePool" { + continue + } + + gv, err := schema.ParseGroupVersion(ref.APIVersion) + if err != nil { + return nil, errors.WithStack(err) + } + + if gv.Group == infrav1exp.GroupVersion.Group { + return GetAzureMachinePoolByName(ctx, c, obj.Namespace, ref.Name) + } + } + return nil, nil +} + +// GetMachinePoolByName finds and return a MachinePool object using the specified params. func GetMachinePoolByName(ctx context.Context, c client.Client, namespace, name string) (*capiv1exp.MachinePool, error) { ctx, span := tele.Tracer().Start(ctx, "controllers.GetMachinePoolByName") defer span.End() @@ -473,6 +504,19 @@ func GetMachinePoolByName(ctx context.Context, c client.Client, namespace, name return m, nil } +// GetAzureMachinePoolByName finds and return an AzureMachinePool object using the specified params. 
+func GetAzureMachinePoolByName(ctx context.Context, c client.Client, namespace, name string) (*infrav1exp.AzureMachinePool, error) { + ctx, span := tele.Tracer().Start(ctx, "controllers.GetAzureMachinePoolByName") + defer span.End() + + m := &infrav1exp.AzureMachinePool{} + key := client.ObjectKey{Name: name, Namespace: namespace} + if err := c.Get(ctx, key, m); err != nil { + return nil, err + } + return m, nil +} + // ShouldDeleteIndividualResources returns false if the resource group is managed and the whole cluster is being deleted // meaning that we can rely on a single resource group delete operation as opposed to deleting every individual VM resource. func ShouldDeleteIndividualResources(ctx context.Context, clusterScope *scope.ClusterScope) bool { diff --git a/exp/api/v1alpha3/azuremachinepool_conversion.go b/exp/api/v1alpha3/azuremachinepool_conversion.go index 7b5de553ff8..2f02d8db4b8 100644 --- a/exp/api/v1alpha3/azuremachinepool_conversion.go +++ b/exp/api/v1alpha3/azuremachinepool_conversion.go @@ -17,6 +17,7 @@ limitations under the License. package v1alpha3 import ( + convert "k8s.io/apimachinery/pkg/conversion" infrav1alpha4 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha4" expv1alpha4 "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha4" utilconversion "sigs.k8s.io/cluster-api/util/conversion" @@ -45,6 +46,24 @@ func (src *AzureMachinePool) ConvertTo(dstRaw conversion.Hub) error { // nolint } } + dst.Spec.Strategy.Type = restored.Spec.Strategy.Type + if restored.Spec.Strategy.RollingUpdate != nil { + + if dst.Spec.Strategy.RollingUpdate == nil { + dst.Spec.Strategy.RollingUpdate = &expv1alpha4.MachineRollingUpdateDeployment{} + } + + dst.Spec.Strategy.RollingUpdate.DeletePolicy = restored.Spec.Strategy.RollingUpdate.DeletePolicy + } + + if restored.Status.Image != nil { + dst.Status.Image = restored.Status.Image + } + + if len(dst.Annotations) == 0 { + dst.Annotations = nil + } + return nil } @@ -63,3 +82,11 @@ func (dst *AzureMachinePool) ConvertFrom(srcRaw conversion.Hub) error { // nolin return nil } + +func Convert_v1alpha4_AzureMachinePoolSpec_To_v1alpha3_AzureMachinePoolSpec(in *expv1alpha4.AzureMachinePoolSpec, out *AzureMachinePoolSpec, s convert.Scope) error { + return autoConvert_v1alpha4_AzureMachinePoolSpec_To_v1alpha3_AzureMachinePoolSpec(in, out, s) +} + +func Convert_v1alpha4_AzureMachinePoolStatus_To_v1alpha3_AzureMachinePoolStatus(in *expv1alpha4.AzureMachinePoolStatus, out *AzureMachinePoolStatus, s convert.Scope) error { + return autoConvert_v1alpha4_AzureMachinePoolStatus_To_v1alpha3_AzureMachinePoolStatus(in, out, s) +} diff --git a/exp/api/v1alpha3/conversion_test.go b/exp/api/v1alpha3/conversion_test.go index 29719a1e1a9..a28438acfb8 100644 --- a/exp/api/v1alpha3/conversion_test.go +++ b/exp/api/v1alpha3/conversion_test.go @@ -20,10 +20,10 @@ import ( "testing" . "github.com/onsi/gomega" - "k8s.io/apimachinery/pkg/runtime" - v1alpha4 "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha4" utilconversion "sigs.k8s.io/cluster-api/util/conversion" + + "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha4" ) func TestFuzzyConversion(t *testing.T) { diff --git a/exp/api/v1alpha3/types.go b/exp/api/v1alpha3/types.go deleted file mode 100644 index 06a746b9fb7..00000000000 --- a/exp/api/v1alpha3/types.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha3 - -import ( - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3" -) - -type ( - // VMSSVM defines a VM in a virtual machine scale set. - VMSSVM struct { - ID string `json:"id,omitempty"` - InstanceID string `json:"instanceID,omitempty"` - Name string `json:"name,omitempty"` - AvailabilityZone string `json:"availabilityZone,omitempty"` - State infrav1.VMState `json:"vmState,omitempty"` - LatestModelApplied bool `json:"latestModelApplied,omitempty"` - } - - // VMSS defines a virtual machine scale set. - VMSS struct { - ID string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - Sku string `json:"sku,omitempty"` - Capacity int64 `json:"capacity,omitempty"` - Zones []string `json:"zones,omitempty"` - Image infrav1.Image `json:"image,omitempty"` - State infrav1.VMState `json:"vmState,omitempty"` - Identity infrav1.VMIdentity `json:"identity,omitempty"` - Tags infrav1.Tags `json:"tags,omitempty"` - Instances []VMSSVM `json:"instances,omitempty"` - } -) diff --git a/exp/api/v1alpha3/zz_generated.conversion.go b/exp/api/v1alpha3/zz_generated.conversion.go index 22968e98173..a3fbf2d58a8 100644 --- a/exp/api/v1alpha3/zz_generated.conversion.go +++ b/exp/api/v1alpha3/zz_generated.conversion.go @@ -85,21 +85,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha4.AzureMachinePoolSpec)(nil), (*AzureMachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_AzureMachinePoolSpec_To_v1alpha3_AzureMachinePoolSpec(a.(*v1alpha4.AzureMachinePoolSpec), b.(*AzureMachinePoolSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*AzureMachinePoolStatus)(nil), (*v1alpha4.AzureMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha3_AzureMachinePoolStatus_To_v1alpha4_AzureMachinePoolStatus(a.(*AzureMachinePoolStatus), b.(*v1alpha4.AzureMachinePoolStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha4.AzureMachinePoolStatus)(nil), (*AzureMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_AzureMachinePoolStatus_To_v1alpha3_AzureMachinePoolStatus(a.(*v1alpha4.AzureMachinePoolStatus), b.(*AzureMachinePoolStatus), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*AzureManagedCluster)(nil), (*v1alpha4.AzureManagedCluster)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha3_AzureManagedCluster_To_v1alpha4_AzureManagedCluster(a.(*AzureManagedCluster), b.(*v1alpha4.AzureManagedCluster), scope) }); err != nil { @@ -240,26 +230,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*VMSS)(nil), (*v1alpha4.VMSS)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_VMSS_To_v1alpha4_VMSS(a.(*VMSS), b.(*v1alpha4.VMSS), scope) - }); err != nil { - return err - } - if err := 
s.AddGeneratedConversionFunc((*v1alpha4.VMSS)(nil), (*VMSS)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_VMSS_To_v1alpha3_VMSS(a.(*v1alpha4.VMSS), b.(*VMSS), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*VMSSVM)(nil), (*v1alpha4.VMSSVM)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_VMSSVM_To_v1alpha4_VMSSVM(a.(*VMSSVM), b.(*v1alpha4.VMSSVM), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.VMSSVM)(nil), (*VMSSVM)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_VMSSVM_To_v1alpha3_VMSSVM(a.(*v1alpha4.VMSSVM), b.(*VMSSVM), scope) - }); err != nil { - return err - } if err := s.AddConversionFunc((*apiv1alpha3.APIEndpoint)(nil), (*apiv1alpha4.APIEndpoint)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha3_APIEndpoint_To_v1alpha4_APIEndpoint(a.(*apiv1alpha3.APIEndpoint), b.(*apiv1alpha4.APIEndpoint), scope) }); err != nil { @@ -280,6 +250,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1alpha4.AzureMachinePoolSpec)(nil), (*AzureMachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_AzureMachinePoolSpec_To_v1alpha3_AzureMachinePoolSpec(a.(*v1alpha4.AzureMachinePoolSpec), b.(*AzureMachinePoolSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1alpha4.AzureMachinePoolStatus)(nil), (*AzureMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_AzureMachinePoolStatus_To_v1alpha3_AzureMachinePoolStatus(a.(*v1alpha4.AzureMachinePoolStatus), b.(*AzureMachinePoolStatus), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*clusterapiproviderazureapiv1alpha4.Image)(nil), (*clusterapiproviderazureapiv1alpha3.Image)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha4_Image_To_v1alpha3_Image(a.(*clusterapiproviderazureapiv1alpha4.Image), b.(*clusterapiproviderazureapiv1alpha3.Image), scope) }); err != nil { @@ -483,14 +463,10 @@ func autoConvert_v1alpha4_AzureMachinePoolSpec_To_v1alpha3_AzureMachinePoolSpec( out.Identity = clusterapiproviderazureapiv1alpha3.VMIdentity(in.Identity) out.UserAssignedIdentities = *(*[]clusterapiproviderazureapiv1alpha3.UserAssignedIdentity)(unsafe.Pointer(&in.UserAssignedIdentities)) out.RoleAssignmentName = in.RoleAssignmentName + // WARNING: in.Strategy requires manual conversion: does not exist in peer-type return nil } -// Convert_v1alpha4_AzureMachinePoolSpec_To_v1alpha3_AzureMachinePoolSpec is an autogenerated conversion function. 
-func Convert_v1alpha4_AzureMachinePoolSpec_To_v1alpha3_AzureMachinePoolSpec(in *v1alpha4.AzureMachinePoolSpec, out *AzureMachinePoolSpec, s conversion.Scope) error { - return autoConvert_v1alpha4_AzureMachinePoolSpec_To_v1alpha3_AzureMachinePoolSpec(in, out, s) -} - func autoConvert_v1alpha3_AzureMachinePoolStatus_To_v1alpha4_AzureMachinePoolStatus(in *AzureMachinePoolStatus, out *v1alpha4.AzureMachinePoolStatus, s conversion.Scope) error { out.Ready = in.Ready out.Replicas = in.Replicas @@ -513,6 +489,7 @@ func autoConvert_v1alpha4_AzureMachinePoolStatus_To_v1alpha3_AzureMachinePoolSta out.Ready = in.Ready out.Replicas = in.Replicas out.Instances = *(*[]*AzureMachinePoolInstanceStatus)(unsafe.Pointer(&in.Instances)) + // WARNING: in.Image requires manual conversion: does not exist in peer-type out.Version = in.Version out.ProvisioningState = (*clusterapiproviderazureapiv1alpha3.VMState)(unsafe.Pointer(in.ProvisioningState)) out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason)) @@ -522,11 +499,6 @@ func autoConvert_v1alpha4_AzureMachinePoolStatus_To_v1alpha3_AzureMachinePoolSta return nil } -// Convert_v1alpha4_AzureMachinePoolStatus_To_v1alpha3_AzureMachinePoolStatus is an autogenerated conversion function. -func Convert_v1alpha4_AzureMachinePoolStatus_To_v1alpha3_AzureMachinePoolStatus(in *v1alpha4.AzureMachinePoolStatus, out *AzureMachinePoolStatus, s conversion.Scope) error { - return autoConvert_v1alpha4_AzureMachinePoolStatus_To_v1alpha3_AzureMachinePoolStatus(in, out, s) -} - func autoConvert_v1alpha3_AzureManagedCluster_To_v1alpha4_AzureManagedCluster(in *AzureManagedCluster, out *v1alpha4.AzureManagedCluster, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1alpha3_AzureManagedClusterSpec_To_v1alpha4_AzureManagedClusterSpec(&in.Spec, &out.Spec, s); err != nil { @@ -948,75 +920,3 @@ func autoConvert_v1alpha4_ManagedControlPlaneVirtualNetwork_To_v1alpha3_ManagedC func Convert_v1alpha4_ManagedControlPlaneVirtualNetwork_To_v1alpha3_ManagedControlPlaneVirtualNetwork(in *v1alpha4.ManagedControlPlaneVirtualNetwork, out *ManagedControlPlaneVirtualNetwork, s conversion.Scope) error { return autoConvert_v1alpha4_ManagedControlPlaneVirtualNetwork_To_v1alpha3_ManagedControlPlaneVirtualNetwork(in, out, s) } - -func autoConvert_v1alpha3_VMSS_To_v1alpha4_VMSS(in *VMSS, out *v1alpha4.VMSS, s conversion.Scope) error { - out.ID = in.ID - out.Name = in.Name - out.Sku = in.Sku - out.Capacity = in.Capacity - out.Zones = *(*[]string)(unsafe.Pointer(&in.Zones)) - if err := Convert_v1alpha3_Image_To_v1alpha4_Image(&in.Image, &out.Image, s); err != nil { - return err - } - out.State = clusterapiproviderazureapiv1alpha4.ProvisioningState(in.State) - out.Identity = clusterapiproviderazureapiv1alpha4.VMIdentity(in.Identity) - out.Tags = *(*clusterapiproviderazureapiv1alpha4.Tags)(unsafe.Pointer(&in.Tags)) - out.Instances = *(*[]v1alpha4.VMSSVM)(unsafe.Pointer(&in.Instances)) - return nil -} - -// Convert_v1alpha3_VMSS_To_v1alpha4_VMSS is an autogenerated conversion function. 
-func Convert_v1alpha3_VMSS_To_v1alpha4_VMSS(in *VMSS, out *v1alpha4.VMSS, s conversion.Scope) error { - return autoConvert_v1alpha3_VMSS_To_v1alpha4_VMSS(in, out, s) -} - -func autoConvert_v1alpha4_VMSS_To_v1alpha3_VMSS(in *v1alpha4.VMSS, out *VMSS, s conversion.Scope) error { - out.ID = in.ID - out.Name = in.Name - out.Sku = in.Sku - out.Capacity = in.Capacity - out.Zones = *(*[]string)(unsafe.Pointer(&in.Zones)) - if err := Convert_v1alpha4_Image_To_v1alpha3_Image(&in.Image, &out.Image, s); err != nil { - return err - } - out.State = clusterapiproviderazureapiv1alpha3.VMState(in.State) - out.Identity = clusterapiproviderazureapiv1alpha3.VMIdentity(in.Identity) - out.Tags = *(*clusterapiproviderazureapiv1alpha3.Tags)(unsafe.Pointer(&in.Tags)) - out.Instances = *(*[]VMSSVM)(unsafe.Pointer(&in.Instances)) - return nil -} - -// Convert_v1alpha4_VMSS_To_v1alpha3_VMSS is an autogenerated conversion function. -func Convert_v1alpha4_VMSS_To_v1alpha3_VMSS(in *v1alpha4.VMSS, out *VMSS, s conversion.Scope) error { - return autoConvert_v1alpha4_VMSS_To_v1alpha3_VMSS(in, out, s) -} - -func autoConvert_v1alpha3_VMSSVM_To_v1alpha4_VMSSVM(in *VMSSVM, out *v1alpha4.VMSSVM, s conversion.Scope) error { - out.ID = in.ID - out.InstanceID = in.InstanceID - out.Name = in.Name - out.AvailabilityZone = in.AvailabilityZone - out.State = clusterapiproviderazureapiv1alpha4.ProvisioningState(in.State) - out.LatestModelApplied = in.LatestModelApplied - return nil -} - -// Convert_v1alpha3_VMSSVM_To_v1alpha4_VMSSVM is an autogenerated conversion function. -func Convert_v1alpha3_VMSSVM_To_v1alpha4_VMSSVM(in *VMSSVM, out *v1alpha4.VMSSVM, s conversion.Scope) error { - return autoConvert_v1alpha3_VMSSVM_To_v1alpha4_VMSSVM(in, out, s) -} - -func autoConvert_v1alpha4_VMSSVM_To_v1alpha3_VMSSVM(in *v1alpha4.VMSSVM, out *VMSSVM, s conversion.Scope) error { - out.ID = in.ID - out.InstanceID = in.InstanceID - out.Name = in.Name - out.AvailabilityZone = in.AvailabilityZone - out.State = clusterapiproviderazureapiv1alpha3.VMState(in.State) - out.LatestModelApplied = in.LatestModelApplied - return nil -} - -// Convert_v1alpha4_VMSSVM_To_v1alpha3_VMSSVM is an autogenerated conversion function. -func Convert_v1alpha4_VMSSVM_To_v1alpha3_VMSSVM(in *v1alpha4.VMSSVM, out *VMSSVM, s conversion.Scope) error { - return autoConvert_v1alpha4_VMSSVM_To_v1alpha3_VMSSVM(in, out, s) -} diff --git a/exp/api/v1alpha3/zz_generated.deepcopy.go b/exp/api/v1alpha3/zz_generated.deepcopy.go index eab9528b9b6..acf6a030db1 100644 --- a/exp/api/v1alpha3/zz_generated.deepcopy.go +++ b/exp/api/v1alpha3/zz_generated.deepcopy.go @@ -588,51 +588,3 @@ func (in *ManagedControlPlaneVirtualNetwork) DeepCopy() *ManagedControlPlaneVirt in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VMSS) DeepCopyInto(out *VMSS) { - *out = *in - if in.Zones != nil { - in, out := &in.Zones, &out.Zones - *out = make([]string, len(*in)) - copy(*out, *in) - } - in.Image.DeepCopyInto(&out.Image) - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make(apiv1alpha3.Tags, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Instances != nil { - in, out := &in.Instances, &out.Instances - *out = make([]VMSSVM, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMSS. 
-func (in *VMSS) DeepCopy() *VMSS { - if in == nil { - return nil - } - out := new(VMSS) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VMSSVM) DeepCopyInto(out *VMSSVM) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMSSVM. -func (in *VMSSVM) DeepCopy() *VMSSVM { - if in == nil { - return nil - } - out := new(VMSSVM) - in.DeepCopyInto(out) - return out -} diff --git a/exp/api/v1alpha4/azuremachinepool_default_test.go b/exp/api/v1alpha4/azuremachinepool_default_test.go index 2ff2ef4271a..0cce2782c7d 100644 --- a/exp/api/v1alpha4/azuremachinepool_default_test.go +++ b/exp/api/v1alpha4/azuremachinepool_default_test.go @@ -33,8 +33,8 @@ func TestAzureMachinePool_SetDefaultSSHPublicKey(t *testing.T) { } existingPublicKey := "testpublickey" - publicKeyExistTest := test{amp: createMachinePoolWithSSHPublicKey(t, existingPublicKey)} - publicKeyNotExistTest := test{amp: createMachinePoolWithSSHPublicKey(t, "")} + publicKeyExistTest := test{amp: createMachinePoolWithSSHPublicKey(existingPublicKey)} + publicKeyNotExistTest := test{amp: createMachinePoolWithSSHPublicKey("")} err := publicKeyExistTest.amp.SetDefaultSSHPublicKey() g.Expect(err).To(BeNil()) @@ -77,7 +77,7 @@ func TestAzureMachinePool_SetIdentityDefaults(t *testing.T) { g.Expect(notSystemAssignedTest.machinePool.Spec.RoleAssignmentName).To(BeEmpty()) } -func createMachinePoolWithSSHPublicKey(t *testing.T, sshPublicKey string) *AzureMachinePool { +func createMachinePoolWithSSHPublicKey(sshPublicKey string) *AzureMachinePool { return hardcodedAzureMachinePoolWithSSHKey(sshPublicKey) } diff --git a/exp/api/v1alpha4/azuremachinepool_types.go b/exp/api/v1alpha4/azuremachinepool_types.go index 42ca4235971..b6b93c93029 100644 --- a/exp/api/v1alpha4/azuremachinepool_types.go +++ b/exp/api/v1alpha4/azuremachinepool_types.go @@ -18,12 +18,30 @@ package v1alpha4 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" "sigs.k8s.io/cluster-api/errors" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha4" ) +const ( + // MachinePoolNameLabel indicates the AzureMachinePool name the AzureMachinePoolMachine belongs + MachinePoolNameLabel = "azuremachinepool.infrastructure.cluster.x-k8s.io/machine-pool" + + // RollingUpdateAzureMachinePoolDeploymentStrategyType replaces AzureMachinePoolMachines with older models with + // AzureMachinePoolMachines based on the latest model. + // i.e. gradually scale down the old AzureMachinePoolMachines and scale up the new ones. + RollingUpdateAzureMachinePoolDeploymentStrategyType AzureMachinePoolDeploymentStrategyType = "RollingUpdate" + + // OldestDeletePolicyType will delete machines with the oldest creation date first + OldestDeletePolicyType AzureMachinePoolDeletePolicyType = "Oldest" + // NewestDeletePolicyType will delete machines with the newest creation date first + NewestDeletePolicyType AzureMachinePoolDeletePolicyType = "Newest" + // RandomDeletePolicyType will delete machines in random order + RandomDeletePolicyType AzureMachinePoolDeletePolicyType = "Random" +) + type ( // AzureMachinePoolMachineTemplate defines the template for an AzureMachine. 
AzureMachinePoolMachineTemplate struct { @@ -31,7 +49,7 @@ type ( // See https://docs.microsoft.com/en-us/rest/api/compute/virtualmachines/createorupdate#virtualmachinesizetypes VMSize string `json:"vmSize"` - // Image is used to provide details of an image to use during Virtual Machine creation. + // Image is used to provide details of an image to use during VM creation. // If image details are omitted the image will default the Azure Marketplace "capi" offer, // which is based on Ubuntu. // +kubebuilder:validation:nullable @@ -111,6 +129,78 @@ type ( // If not specified, a random GUID will be generated. // +optional RoleAssignmentName string `json:"roleAssignmentName,omitempty"` + + // The deployment strategy to use to replace existing AzureMachinePoolMachines with new ones. + // +optional + // +kubebuilder:default={type: "RollingUpdate", rollingUpdate: {maxSurge: 1, maxUnavailable: 0, deletePolicy: Oldest}} + Strategy AzureMachinePoolDeploymentStrategy `json:"strategy,omitempty"` + } + + // AzureMachinePoolDeploymentStrategyType is the type of deployment strategy employed to rollout a new version of + // the AzureMachinePool + AzureMachinePoolDeploymentStrategyType string + + // AzureMachinePoolDeploymentStrategy describes how to replace existing machines with new ones. + AzureMachinePoolDeploymentStrategy struct { + // Type of deployment. Currently the only supported strategy is RollingUpdate + // +optional + // +kubebuilder:validation:Enum=RollingUpdate + // +optional + // +kubebuilder:default=RollingUpdate + Type AzureMachinePoolDeploymentStrategyType `json:"type,omitempty"` + + // Rolling update config params. Present only if + // MachineDeploymentStrategyType = RollingUpdate. + // +optional + RollingUpdate *MachineRollingUpdateDeployment `json:"rollingUpdate,omitempty"` + } + + // AzureMachinePoolDeletePolicyType is the type of DeletePolicy employed to select machines to be deleted during an + // upgrade + AzureMachinePoolDeletePolicyType string + + // MachineRollingUpdateDeployment is used to control the desired behavior of rolling update. + MachineRollingUpdateDeployment struct { + // The maximum number of machines that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired + // machines (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // Defaults to 0. + // Example: when this is set to 30%, the old MachineSet can be scaled + // down to 70% of desired machines immediately when the rolling update + // starts. Once new machines are ready, old MachineSet can be scaled + // down further, followed by scaling up the new MachineSet, ensuring + // that the total number of machines available at all times + // during the update is at least 70% of desired machines. + // +optional + // +kubebuilder:default:=0 + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` + + // The maximum number of machines that can be scheduled above the + // desired number of machines. + // Value can be an absolute number (ex: 5) or a percentage of + // desired machines (ex: 10%). + // This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // Defaults to 1. + // Example: when this is set to 30%, the new MachineSet can be scaled + // up immediately when the rolling update starts, such that the total + // number of old and new machines do not exceed 130% of desired + // machines. 
Once old machines have been killed, new MachineSet can + // be scaled up further, ensuring that total number of machines running + // at any time during the update is at most 130% of desired machines. + // +optional + // +kubebuilder:default:=1 + MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` + + // DeletePolicy defines the policy used by the MachineDeployment to identify nodes to delete when downscaling. + // Valid values are "Random, "Newest", "Oldest" + // When no value is supplied, the default is Oldest + // +optional + // +kubebuilder:validation:Enum=Random;Newest;Oldest + // +kubebuilder:default:=Oldest + DeletePolicy AzureMachinePoolDeletePolicyType `json:"deletePolicy,omitempty"` } // AzureMachinePoolStatus defines the observed state of AzureMachinePool @@ -127,6 +217,11 @@ type ( // +optional Instances []*AzureMachinePoolInstanceStatus `json:"instances,omitempty"` + // Image is the current image used in the AzureMachinePool. When the spec image is nil, this image is populated + // with the details of the defaulted Azure Marketplace "capi" offer. + // +optional + Image *infrav1.Image `json:"image,omitempty"` + // Version is the Kubernetes version for the current VMSS model // +optional Version string `json:"version"` diff --git a/exp/api/v1alpha4/azuremachinepool_webhook.go b/exp/api/v1alpha4/azuremachinepool_webhook.go index 1e4ea680022..9b1d4ed1a73 100644 --- a/exp/api/v1alpha4/azuremachinepool_webhook.go +++ b/exp/api/v1alpha4/azuremachinepool_webhook.go @@ -23,6 +23,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/validation/field" ctrl "sigs.k8s.io/controller-runtime" logf "sigs.k8s.io/controller-runtime/pkg/log" @@ -84,6 +85,7 @@ func (amp *AzureMachinePool) Validate(old runtime.Object) error { amp.ValidateTerminateNotificationTimeout, amp.ValidateSSHKey, amp.ValidateUserAssignedIdentity, + amp.ValidateStrategy(), amp.ValidateSystemAssignedIdentity(old), } @@ -151,6 +153,23 @@ func (amp *AzureMachinePool) ValidateUserAssignedIdentity() error { return nil } +// ValidateStrategy validates the strategy +func (amp *AzureMachinePool) ValidateStrategy() func() error { + return func() error { + if amp.Spec.Strategy.Type == RollingUpdateAzureMachinePoolDeploymentStrategyType && amp.Spec.Strategy.RollingUpdate != nil { + rollingUpdateStrategy := amp.Spec.Strategy.RollingUpdate + maxSurge := rollingUpdateStrategy.MaxSurge + maxUnavailable := rollingUpdateStrategy.MaxUnavailable + if maxSurge.Type == intstr.Int && maxSurge.IntVal == 0 && + maxUnavailable.Type == intstr.Int && maxUnavailable.IntVal == 0 { + return errors.New("rolling update strategy MaxUnavailable must not be 0 if MaxSurge is 0") + } + } + + return nil + } +} + // ValidateSystemAssignedIdentity validates system-assigned identity role func (amp *AzureMachinePool) ValidateSystemAssignedIdentity(old runtime.Object) func() error { return func() error { diff --git a/exp/api/v1alpha4/azuremachinepool_webhook_test.go b/exp/api/v1alpha4/azuremachinepool_webhook_test.go index 98a62842b13..97db5f47fca 100644 --- a/exp/api/v1alpha4/azuremachinepool_webhook_test.go +++ b/exp/api/v1alpha4/azuremachinepool_webhook_test.go @@ -22,6 +22,7 @@ import ( "encoding/base64" "testing" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/uuid" "github.com/Azure/go-autorest/autorest/to" @@ -38,6 +39,11 @@ var ( func TestAzureMachinePool_ValidateCreate(t *testing.T) { g := NewWithT(t) + var ( + zero = 
intstr.FromInt(0) + one = intstr.FromInt(1) + ) + tests := []struct { name string amp *AzureMachinePool @@ -45,69 +51,91 @@ func TestAzureMachinePool_ValidateCreate(t *testing.T) { }{ { name: "azuremachinepool with marketplace image - full", - amp: createMachinePoolWithtMarketPlaceImage(t, "PUB1234", "OFFER1234", "SKU1234", "1.0.0", to.IntPtr(10)), + amp: createMachinePoolWithtMarketPlaceImage("PUB1234", "OFFER1234", "SKU1234", "1.0.0", to.IntPtr(10)), wantErr: false, }, { name: "azuremachinepool with marketplace image - missing publisher", - amp: createMachinePoolWithtMarketPlaceImage(t, "", "OFFER1234", "SKU1234", "1.0.0", to.IntPtr(10)), + amp: createMachinePoolWithtMarketPlaceImage("", "OFFER1234", "SKU1234", "1.0.0", to.IntPtr(10)), wantErr: true, }, { name: "azuremachinepool with shared gallery image - full", - amp: createMachinePoolWithSharedImage(t, "SUB123", "RG123", "NAME123", "GALLERY1", "1.0.0", to.IntPtr(10)), + amp: createMachinePoolWithSharedImage("SUB123", "RG123", "NAME123", "GALLERY1", "1.0.0", to.IntPtr(10)), wantErr: false, }, { name: "azuremachinepool with marketplace image - missing subscription", - amp: createMachinePoolWithSharedImage(t, "", "RG123", "NAME123", "GALLERY1", "1.0.0", to.IntPtr(10)), + amp: createMachinePoolWithSharedImage("", "RG123", "NAME123", "GALLERY1", "1.0.0", to.IntPtr(10)), wantErr: true, }, { name: "azuremachinepool with image by - with id", - amp: createMachinePoolWithImageByID(t, "ID123", to.IntPtr(10)), + amp: createMachinePoolWithImageByID("ID123", to.IntPtr(10)), wantErr: false, }, { name: "azuremachinepool with image by - without id", - amp: createMachinePoolWithImageByID(t, "", to.IntPtr(10)), + amp: createMachinePoolWithImageByID("", to.IntPtr(10)), wantErr: true, }, { name: "azuremachinepool with valid SSHPublicKey", - amp: createMachinePoolWithSSHPublicKey(t, validSSHPublicKey), + amp: createMachinePoolWithSSHPublicKey(validSSHPublicKey), wantErr: false, }, { name: "azuremachinepool with invalid SSHPublicKey", - amp: createMachinePoolWithSSHPublicKey(t, "invalid ssh key"), + amp: createMachinePoolWithSSHPublicKey("invalid ssh key"), wantErr: true, }, { name: "azuremachinepool with wrong terminate notification", - amp: createMachinePoolWithSharedImage(t, "SUB123", "RG123", "NAME123", "GALLERY1", "1.0.0", to.IntPtr(35)), + amp: createMachinePoolWithSharedImage("SUB123", "RG123", "NAME123", "GALLERY1", "1.0.0", to.IntPtr(35)), wantErr: true, }, { name: "azuremachinepool with system assigned identity", - amp: createMachinePoolWithSystemAssignedIdentity(t, string(uuid.NewUUID())), + amp: createMachinePoolWithSystemAssignedIdentity(string(uuid.NewUUID())), wantErr: false, }, { name: "azuremachinepool with system assigned identity, but invalid role", - amp: createMachinePoolWithSystemAssignedIdentity(t, "not_a_uuid"), + amp: createMachinePoolWithSystemAssignedIdentity("not_a_uuid"), wantErr: true, }, { name: "azuremachinepool with user assigned identity", - amp: createMachinePoolWithUserAssignedIdentity(t, []string{"azure:://id1", "azure:://id2"}), + amp: createMachinePoolWithUserAssignedIdentity([]string{"azure:://id1", "azure:://id2"}), wantErr: false, }, { name: "azuremachinepool with user assigned identity, but without any provider ids", - amp: createMachinePoolWithUserAssignedIdentity(t, []string{}), + amp: createMachinePoolWithUserAssignedIdentity([]string{}), + wantErr: true, + }, + { + name: "azuremachinepool with invalid MaxSurge and MaxUnavailable rolling upgrade configuration", + amp: 
createMachinePoolWithStrategy(AzureMachinePoolDeploymentStrategy{ + Type: RollingUpdateAzureMachinePoolDeploymentStrategyType, + RollingUpdate: &MachineRollingUpdateDeployment{ + MaxSurge: &zero, + MaxUnavailable: &zero, + }, + }), wantErr: true, }, + { + name: "azuremachinepool with valid MaxSurge and MaxUnavailable rolling upgrade configuration", + amp: createMachinePoolWithStrategy(AzureMachinePoolDeploymentStrategy{ + Type: RollingUpdateAzureMachinePoolDeploymentStrategyType, + RollingUpdate: &MachineRollingUpdateDeployment{ + MaxSurge: &zero, + MaxUnavailable: &one, + }, + }), + wantErr: false, + }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { @@ -124,6 +152,11 @@ func TestAzureMachinePool_ValidateCreate(t *testing.T) { func TestAzureMachinePool_ValidateUpdate(t *testing.T) { g := NewWithT(t) + var ( + zero = intstr.FromInt(0) + one = intstr.FromInt(1) + ) + tests := []struct { name string oldAMP *AzureMachinePool @@ -131,29 +164,53 @@ func TestAzureMachinePool_ValidateUpdate(t *testing.T) { wantErr bool }{ { - name: "azuremachine with valid SSHPublicKey", - oldAMP: createMachinePoolWithSSHPublicKey(t, ""), - amp: createMachinePoolWithSSHPublicKey(t, validSSHPublicKey), + name: "azuremachinepool with valid SSHPublicKey", + oldAMP: createMachinePoolWithSSHPublicKey(""), + amp: createMachinePoolWithSSHPublicKey(validSSHPublicKey), wantErr: false, }, { - name: "azuremachine with invalid SSHPublicKey", - oldAMP: createMachinePoolWithSSHPublicKey(t, ""), - amp: createMachinePoolWithSSHPublicKey(t, "invalid ssh key"), + name: "azuremachinepool with invalid SSHPublicKey", + oldAMP: createMachinePoolWithSSHPublicKey(""), + amp: createMachinePoolWithSSHPublicKey("invalid ssh key"), wantErr: true, }, { - name: "azuremachine with system-assigned identity, and role unchanged", - oldAMP: createMachinePoolWithSystemAssignedIdentity(t, "30a757d8-fcf0-4c8b-acf0-9253a7e093ea"), - amp: createMachinePoolWithSystemAssignedIdentity(t, "30a757d8-fcf0-4c8b-acf0-9253a7e093ea"), + name: "azuremachinepool with system-assigned identity, and role unchanged", + oldAMP: createMachinePoolWithSystemAssignedIdentity("30a757d8-fcf0-4c8b-acf0-9253a7e093ea"), + amp: createMachinePoolWithSystemAssignedIdentity("30a757d8-fcf0-4c8b-acf0-9253a7e093ea"), wantErr: false, }, { - name: "azuremachine with system-assigned identity, and role changed", - oldAMP: createMachinePoolWithSystemAssignedIdentity(t, string(uuid.NewUUID())), - amp: createMachinePoolWithSystemAssignedIdentity(t, string(uuid.NewUUID())), + name: "azuremachinepool with system-assigned identity, and role changed", + oldAMP: createMachinePoolWithSystemAssignedIdentity(string(uuid.NewUUID())), + amp: createMachinePoolWithSystemAssignedIdentity(string(uuid.NewUUID())), + wantErr: true, + }, + { + name: "azuremachinepool with invalid MaxSurge and MaxUnavailable rolling upgrade configuration", + oldAMP: createMachinePoolWithStrategy(AzureMachinePoolDeploymentStrategy{}), + amp: createMachinePoolWithStrategy(AzureMachinePoolDeploymentStrategy{ + Type: RollingUpdateAzureMachinePoolDeploymentStrategyType, + RollingUpdate: &MachineRollingUpdateDeployment{ + MaxSurge: &zero, + MaxUnavailable: &zero, + }, + }), wantErr: true, }, + { + name: "azuremachinepool with valid MaxSurge and MaxUnavailable rolling upgrade configuration", + oldAMP: createMachinePoolWithStrategy(AzureMachinePoolDeploymentStrategy{}), + amp: createMachinePoolWithStrategy(AzureMachinePoolDeploymentStrategy{ + Type: RollingUpdateAzureMachinePoolDeploymentStrategyType, + 
RollingUpdate: &MachineRollingUpdateDeployment{ + MaxSurge: &zero, + MaxUnavailable: &one, + }, + }), + wantErr: false, + }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { @@ -175,18 +232,18 @@ func TestAzureMachine_Default(t *testing.T) { } existingPublicKey := validSSHPublicKey - publicKeyExistTest := test{amp: createMachinePoolWithSSHPublicKey(t, existingPublicKey)} - publicKeyNotExistTest := test{amp: createMachinePoolWithSSHPublicKey(t, "")} + publicKeyExistTest := test{amp: createMachinePoolWithSSHPublicKey(existingPublicKey)} + publicKeyNotExistTest := test{amp: createMachinePoolWithSSHPublicKey("")} publicKeyExistTest.amp.Default() g.Expect(publicKeyExistTest.amp.Spec.Template.SSHPublicKey).To(Equal(existingPublicKey)) publicKeyNotExistTest.amp.Default() - g.Expect(publicKeyNotExistTest.amp.Spec.Template.SSHPublicKey).NotTo((BeEmpty())) + g.Expect(publicKeyNotExistTest.amp.Spec.Template.SSHPublicKey).NotTo(BeEmpty()) } -func createMachinePoolWithtMarketPlaceImage(t *testing.T, publisher, offer, sku, version string, terminateNotificationTimeout *int) *AzureMachinePool { - image := &infrav1.Image{ +func createMachinePoolWithtMarketPlaceImage(publisher, offer, sku, version string, terminateNotificationTimeout *int) *AzureMachinePool { + image := infrav1.Image{ Marketplace: &infrav1.AzureMarketplaceImage{ Publisher: publisher, Offer: offer, @@ -198,7 +255,7 @@ func createMachinePoolWithtMarketPlaceImage(t *testing.T, publisher, offer, sku, return &AzureMachinePool{ Spec: AzureMachinePoolSpec{ Template: AzureMachinePoolMachineTemplate{ - Image: image, + Image: &image, SSHPublicKey: validSSHPublicKey, TerminateNotificationTimeout: terminateNotificationTimeout, }, @@ -206,8 +263,8 @@ func createMachinePoolWithtMarketPlaceImage(t *testing.T, publisher, offer, sku, } } -func createMachinePoolWithSharedImage(t *testing.T, subscriptionID, resourceGroup, name, gallery, version string, terminateNotificationTimeout *int) *AzureMachinePool { - image := &infrav1.Image{ +func createMachinePoolWithSharedImage(subscriptionID, resourceGroup, name, gallery, version string, terminateNotificationTimeout *int) *AzureMachinePool { + image := infrav1.Image{ SharedGallery: &infrav1.AzureSharedGalleryImage{ SubscriptionID: subscriptionID, ResourceGroup: resourceGroup, @@ -220,7 +277,7 @@ func createMachinePoolWithSharedImage(t *testing.T, subscriptionID, resourceGrou return &AzureMachinePool{ Spec: AzureMachinePoolSpec{ Template: AzureMachinePoolMachineTemplate{ - Image: image, + Image: &image, SSHPublicKey: validSSHPublicKey, TerminateNotificationTimeout: terminateNotificationTimeout, }, @@ -228,15 +285,15 @@ func createMachinePoolWithSharedImage(t *testing.T, subscriptionID, resourceGrou } } -func createMachinePoolWithImageByID(t *testing.T, imageID string, terminateNotificationTimeout *int) *AzureMachinePool { - image := &infrav1.Image{ +func createMachinePoolWithImageByID(imageID string, terminateNotificationTimeout *int) *AzureMachinePool { + image := infrav1.Image{ ID: &imageID, } return &AzureMachinePool{ Spec: AzureMachinePoolSpec{ Template: AzureMachinePoolMachineTemplate{ - Image: image, + Image: &image, SSHPublicKey: validSSHPublicKey, TerminateNotificationTimeout: terminateNotificationTimeout, }, @@ -244,7 +301,7 @@ func createMachinePoolWithImageByID(t *testing.T, imageID string, terminateNotif } } -func createMachinePoolWithSystemAssignedIdentity(t *testing.T, role string) *AzureMachinePool { +func createMachinePoolWithSystemAssignedIdentity(role string) *AzureMachinePool { 
return &AzureMachinePool{ Spec: AzureMachinePoolSpec{ Identity: infrav1.VMIdentitySystemAssigned, @@ -253,7 +310,7 @@ func createMachinePoolWithSystemAssignedIdentity(t *testing.T, role string) *Azu } } -func createMachinePoolWithUserAssignedIdentity(t *testing.T, providerIds []string) *AzureMachinePool { +func createMachinePoolWithUserAssignedIdentity(providerIds []string) *AzureMachinePool { userAssignedIdentities := make([]infrav1.UserAssignedIdentity, len(providerIds)) for _, providerID := range providerIds { @@ -278,3 +335,11 @@ func generateSSHPublicKey(b64Enconded bool) string { } return string(ssh.MarshalAuthorizedKey(publicRsaKey)) } + +func createMachinePoolWithStrategy(strategy AzureMachinePoolDeploymentStrategy) *AzureMachinePool { + return &AzureMachinePool{ + Spec: AzureMachinePoolSpec{ + Strategy: strategy, + }, + } +} diff --git a/exp/api/v1alpha4/azuremachinepoolmachine_types.go b/exp/api/v1alpha4/azuremachinepoolmachine_types.go new file mode 100644 index 00000000000..df5af465532 --- /dev/null +++ b/exp/api/v1alpha4/azuremachinepoolmachine_types.go @@ -0,0 +1,141 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha4 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" + "sigs.k8s.io/cluster-api/errors" + + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha4" +) + +const ( + // AzureMachinePoolMachineFinalizer is used to ensure deletion of dependencies (nodes, infra). + AzureMachinePoolMachineFinalizer = "azuremachinepoolmachine.infrastructure.cluster.x-k8s.io" +) + +type ( + + // AzureMachinePoolMachineSpec defines the desired state of AzureMachinePoolMachine + AzureMachinePoolMachineSpec struct { + // ProviderID is the identification ID of the Virtual Machine Scale Set + ProviderID string `json:"providerID"` + + // InstanceID is the identification of the Machine Instance within the VMSS + InstanceID string `json:"instanceID"` + } + + // AzureMachinePoolMachineStatus defines the observed state of AzureMachinePoolMachine + AzureMachinePoolMachineStatus struct { + // NodeRef will point to the corresponding Node if it exists. + // +optional + NodeRef *corev1.ObjectReference `json:"nodeRef,omitempty"` + + // Version defines the Kubernetes version for the VM Instance + // +optional + Version string `json:"version"` + + // ProvisioningState is the provisioning state of the Azure virtual machine instance. + // +optional + ProvisioningState *infrav1.ProvisioningState `json:"provisioningState"` + + // InstanceName is the name of the Machine Instance within the VMSS + // +optional + InstanceName string `json:"instanceName"` + + // FailureReason will be set in the event that there is a terminal problem + // reconciling the MachinePool machine and will contain a succinct value suitable + // for machine interpretation. 
+ // + // Any transient errors that occur during the reconciliation of MachinePools + // can be added as events to the MachinePool object and/or logged in the + // controller's output. + // +optional + FailureReason *errors.MachineStatusError `json:"failureReason,omitempty"` + + // FailureMessage will be set in the event that there is a terminal problem + // reconciling the MachinePool and will contain a more verbose string suitable + // for logging and human consumption. + // + // Any transient errors that occur during the reconciliation of MachinePools + // can be added as events to the MachinePool object and/or logged in the + // controller's output. + // +optional + FailureMessage *string `json:"failureMessage,omitempty"` + + // Conditions defines current service state of the AzureMachinePoolMachine. + // +optional + Conditions clusterv1.Conditions `json:"conditions,omitempty"` + + // LongRunningOperationState saves the state for an Azure long-running operation so it can be continued on the + // next reconciliation loop. + // +optional + LongRunningOperationState *infrav1.Future `json:"longRunningOperationState,omitempty"` + + // LatestModelApplied indicates the instance is running the most up-to-date VMSS model. A VMSS model describes + // the image version the VM is running. If the instance is not running the latest model, it means the instance + // may not be running the version of Kubernetes the Machine Pool has specified and needs to be updated. + LatestModelApplied bool `json:"latestModelApplied"` + + // Ready is true when the provider resource is ready. + // +optional + Ready bool `json:"ready"` + } + + // +kubebuilder:object:root=true + // +kubebuilder:subresource:status + // +kubebuilder:resource:path=azuremachinepoolmachines,scope=Namespaced,categories=cluster-api,shortName=ampm + // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".status.version",description="Kubernetes version" + // +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Flag indicating infrastructure is successfully provisioned" + // +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.provisioningState",description="Azure VMSS VM provisioning state" + // +kubebuilder:printcolumn:name="Cluster",type="string",priority=1,JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this AzureMachinePoolMachine belongs" + // +kubebuilder:printcolumn:name="VMSS VM ID",type="string",priority=1,JSONPath=".spec.providerID",description="Azure VMSS VM ID" + + // AzureMachinePoolMachine is the Schema for the azuremachinepoolmachines API + AzureMachinePoolMachine struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AzureMachinePoolMachineSpec `json:"spec,omitempty"` + Status AzureMachinePoolMachineStatus `json:"status,omitempty"` + } + + // +kubebuilder:object:root=true + + // AzureMachinePoolMachineList contains a list of AzureMachinePoolMachine + AzureMachinePoolMachineList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AzureMachinePoolMachine `json:"items"` + } +) + +// GetConditions returns the list of conditions for an AzureMachinePoolMachine API object. 
+func (ampm *AzureMachinePoolMachine) GetConditions() clusterv1.Conditions { + return ampm.Status.Conditions +} + +// SetConditions will set the given conditions on an AzureMachinePoolMachine object +func (ampm *AzureMachinePoolMachine) SetConditions(conditions clusterv1.Conditions) { + ampm.Status.Conditions = conditions +} + +func init() { + SchemeBuilder.Register(&AzureMachinePoolMachine{}, &AzureMachinePoolMachineList{}) +} diff --git a/exp/api/v1alpha4/azuremachinepoolmachine_webhook.go b/exp/api/v1alpha4/azuremachinepoolmachine_webhook.go new file mode 100644 index 00000000000..b6d26eb7df2 --- /dev/null +++ b/exp/api/v1alpha4/azuremachinepoolmachine_webhook.go @@ -0,0 +1,66 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha4 + +import ( + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" +) + +// log is for logging in this package. +var azuremachinepoolmachinelog = logf.Log.WithName("azuremachinepoolmachine-resource") + +// SetupWebhookWithManager sets up and registers the webhook with the manager. +func (ampm *AzureMachinePoolMachine) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(ampm). + Complete() +} + +// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1alpha4-azuremachinepoolmachine,mutating=false,failurePolicy=fail,groups=infrastructure.cluster.x-k8s.io,resources=azuremachinepoolmachines,versions=v1alpha4,name=azuremachinepoolmachine.kb.io,sideEffects=None,admissionReviewVersions=v1beta1 + +var _ webhook.Validator = &AzureMachinePoolMachine{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (ampm *AzureMachinePoolMachine) ValidateCreate() error { + azuremachinepoolmachinelog.Info("validate create", "name", ampm.Name) + return nil +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (ampm *AzureMachinePoolMachine) ValidateUpdate(old runtime.Object) error { + azuremachinepoolmachinelog.Info("validate update", "name", ampm.Name) + oldMachine, ok := old.(*AzureMachinePoolMachine) + if !ok { + return errors.New("expected an AzureMachinePoolMachine") + } + + if oldMachine.Spec.ProviderID != "" && ampm.Spec.ProviderID != oldMachine.Spec.ProviderID { + return errors.New("providerID is immutable") + } + + return nil +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (ampm *AzureMachinePoolMachine) ValidateDelete() error { + azuremachinepoolmachinelog.Info("validate delete", "name", ampm.Name) + return nil +} diff --git a/exp/api/v1alpha4/types.go b/exp/api/v1alpha4/types.go deleted file mode 100644 index 7b8ad770324..00000000000 --- a/exp/api/v1alpha4/types.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha4 - -import ( - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha4" -) - -type ( - // VMSSVM defines a VM in a virtual machine scale set. - VMSSVM struct { - ID string `json:"id,omitempty"` - InstanceID string `json:"instanceID,omitempty"` - Name string `json:"name,omitempty"` - AvailabilityZone string `json:"availabilityZone,omitempty"` - State infrav1.ProvisioningState `json:"vmState,omitempty"` - LatestModelApplied bool `json:"latestModelApplied,omitempty"` - } - - // VMSS defines a virtual machine scale set. - VMSS struct { - ID string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - Sku string `json:"sku,omitempty"` - Capacity int64 `json:"capacity,omitempty"` - Zones []string `json:"zones,omitempty"` - Image infrav1.Image `json:"image,omitempty"` - State infrav1.ProvisioningState `json:"vmState,omitempty"` - Identity infrav1.VMIdentity `json:"identity,omitempty"` - Tags infrav1.Tags `json:"tags,omitempty"` - Instances []VMSSVM `json:"instances,omitempty"` - } -) diff --git a/exp/api/v1alpha4/zz_generated.deepcopy.go b/exp/api/v1alpha4/zz_generated.deepcopy.go index 46b484d592c..845ed5af536 100644 --- a/exp/api/v1alpha4/zz_generated.deepcopy.go +++ b/exp/api/v1alpha4/zz_generated.deepcopy.go @@ -21,7 +21,9 @@ limitations under the License. package v1alpha4 import ( + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" apiv1alpha4 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha4" cluster_apiapiv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4" "sigs.k8s.io/cluster-api/errors" @@ -54,6 +56,26 @@ func (in *AzureMachinePool) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureMachinePoolDeploymentStrategy) DeepCopyInto(out *AzureMachinePoolDeploymentStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(MachineRollingUpdateDeployment) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachinePoolDeploymentStrategy. +func (in *AzureMachinePoolDeploymentStrategy) DeepCopy() *AzureMachinePoolDeploymentStrategy { + if in == nil { + return nil + } + out := new(AzureMachinePoolDeploymentStrategy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AzureMachinePoolInstanceStatus) DeepCopyInto(out *AzureMachinePoolInstanceStatus) { *out = *in @@ -106,6 +128,127 @@ func (in *AzureMachinePoolList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureMachinePoolMachine) DeepCopyInto(out *AzureMachinePoolMachine) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachinePoolMachine. +func (in *AzureMachinePoolMachine) DeepCopy() *AzureMachinePoolMachine { + if in == nil { + return nil + } + out := new(AzureMachinePoolMachine) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AzureMachinePoolMachine) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureMachinePoolMachineList) DeepCopyInto(out *AzureMachinePoolMachineList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AzureMachinePoolMachine, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachinePoolMachineList. +func (in *AzureMachinePoolMachineList) DeepCopy() *AzureMachinePoolMachineList { + if in == nil { + return nil + } + out := new(AzureMachinePoolMachineList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AzureMachinePoolMachineList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureMachinePoolMachineSpec) DeepCopyInto(out *AzureMachinePoolMachineSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachinePoolMachineSpec. +func (in *AzureMachinePoolMachineSpec) DeepCopy() *AzureMachinePoolMachineSpec { + if in == nil { + return nil + } + out := new(AzureMachinePoolMachineSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureMachinePoolMachineStatus) DeepCopyInto(out *AzureMachinePoolMachineStatus) { + *out = *in + if in.NodeRef != nil { + in, out := &in.NodeRef, &out.NodeRef + *out = new(v1.ObjectReference) + **out = **in + } + if in.ProvisioningState != nil { + in, out := &in.ProvisioningState, &out.ProvisioningState + *out = new(apiv1alpha4.ProvisioningState) + **out = **in + } + if in.FailureReason != nil { + in, out := &in.FailureReason, &out.FailureReason + *out = new(errors.MachineStatusError) + **out = **in + } + if in.FailureMessage != nil { + in, out := &in.FailureMessage, &out.FailureMessage + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(cluster_apiapiv1alpha4.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LongRunningOperationState != nil { + in, out := &in.LongRunningOperationState, &out.LongRunningOperationState + *out = new(apiv1alpha4.Future) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachinePoolMachineStatus. +func (in *AzureMachinePoolMachineStatus) DeepCopy() *AzureMachinePoolMachineStatus { + if in == nil { + return nil + } + out := new(AzureMachinePoolMachineStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AzureMachinePoolMachineTemplate) DeepCopyInto(out *AzureMachinePoolMachineTemplate) { *out = *in @@ -175,6 +318,7 @@ func (in *AzureMachinePoolSpec) DeepCopyInto(out *AzureMachinePoolSpec) { *out = make([]apiv1alpha4.UserAssignedIdentity, len(*in)) copy(*out, *in) } + in.Strategy.DeepCopyInto(&out.Strategy) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachinePoolSpec. @@ -201,6 +345,11 @@ func (in *AzureMachinePoolStatus) DeepCopyInto(out *AzureMachinePoolStatus) { } } } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(apiv1alpha4.Image) + (*in).DeepCopyInto(*out) + } if in.ProvisioningState != nil { in, out := &in.ProvisioningState, &out.ProvisioningState *out = new(apiv1alpha4.ProvisioningState) @@ -559,80 +708,57 @@ func (in *AzureManagedMachinePoolStatus) DeepCopy() *AzureManagedMachinePoolStat } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ManagedControlPlaneSubnet) DeepCopyInto(out *ManagedControlPlaneSubnet) { +func (in *MachineRollingUpdateDeployment) DeepCopyInto(out *MachineRollingUpdateDeployment) { *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedControlPlaneSubnet. -func (in *ManagedControlPlaneSubnet) DeepCopy() *ManagedControlPlaneSubnet { - if in == nil { - return nil + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + *out = new(intstr.IntOrString) + **out = **in } - out := new(ManagedControlPlaneSubnet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ManagedControlPlaneVirtualNetwork) DeepCopyInto(out *ManagedControlPlaneVirtualNetwork) { - *out = *in - out.Subnet = in.Subnet } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedControlPlaneVirtualNetwork. -func (in *ManagedControlPlaneVirtualNetwork) DeepCopy() *ManagedControlPlaneVirtualNetwork { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineRollingUpdateDeployment. +func (in *MachineRollingUpdateDeployment) DeepCopy() *MachineRollingUpdateDeployment { if in == nil { return nil } - out := new(ManagedControlPlaneVirtualNetwork) + out := new(MachineRollingUpdateDeployment) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VMSS) DeepCopyInto(out *VMSS) { +func (in *ManagedControlPlaneSubnet) DeepCopyInto(out *ManagedControlPlaneSubnet) { *out = *in - if in.Zones != nil { - in, out := &in.Zones, &out.Zones - *out = make([]string, len(*in)) - copy(*out, *in) - } - in.Image.DeepCopyInto(&out.Image) - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make(apiv1alpha4.Tags, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Instances != nil { - in, out := &in.Instances, &out.Instances - *out = make([]VMSSVM, len(*in)) - copy(*out, *in) - } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMSS. -func (in *VMSS) DeepCopy() *VMSS { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedControlPlaneSubnet. +func (in *ManagedControlPlaneSubnet) DeepCopy() *ManagedControlPlaneSubnet { if in == nil { return nil } - out := new(VMSS) + out := new(ManagedControlPlaneSubnet) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VMSSVM) DeepCopyInto(out *VMSSVM) { +func (in *ManagedControlPlaneVirtualNetwork) DeepCopyInto(out *ManagedControlPlaneVirtualNetwork) { *out = *in + out.Subnet = in.Subnet } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMSSVM. -func (in *VMSSVM) DeepCopy() *VMSSVM { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedControlPlaneVirtualNetwork. +func (in *ManagedControlPlaneVirtualNetwork) DeepCopy() *ManagedControlPlaneVirtualNetwork { if in == nil { return nil } - out := new(VMSSVM) + out := new(ManagedControlPlaneVirtualNetwork) in.DeepCopyInto(out) return out } diff --git a/exp/controllers/azuremachinepool_annotations.go b/exp/controllers/azuremachinepool_annotations.go index aae02d020fd..be17abc1757 100644 --- a/exp/controllers/azuremachinepool_annotations.go +++ b/exp/controllers/azuremachinepool_annotations.go @@ -23,10 +23,10 @@ import ( // AnnotationJSON returns a map[string]interface from a JSON annotation. // This method gets the given `annotation` from an `annotationReaderWriter` and unmarshalls it // from a JSON string into a `map[string]interface{}`. 
-func (r *AzureMachinePoolReconciler) AnnotationJSON(rw annotationReaderWriter, annotation string) (map[string]interface{}, error) { +func (ampr *AzureMachinePoolReconciler) AnnotationJSON(rw annotationReaderWriter, annotation string) (map[string]interface{}, error) { out := map[string]interface{}{} - jsonAnnotation := r.Annotation(rw, annotation) + jsonAnnotation := ampr.Annotation(rw, annotation) if len(jsonAnnotation) == 0 { return out, nil } @@ -40,6 +40,6 @@ func (r *AzureMachinePoolReconciler) AnnotationJSON(rw annotationReaderWriter, a } // Annotation fetches the specific machine annotation. -func (r *AzureMachinePoolReconciler) Annotation(rw annotationReaderWriter, annotation string) string { +func (ampr *AzureMachinePoolReconciler) Annotation(rw annotationReaderWriter, annotation string) string { return rw.GetAnnotations()[annotation] } diff --git a/exp/controllers/azuremachinepool_controller.go b/exp/controllers/azuremachinepool_controller.go index 8bf6f06be59..4d56ac0db39 100644 --- a/exp/controllers/azuremachinepool_controller.go +++ b/exp/controllers/azuremachinepool_controller.go @@ -20,8 +20,6 @@ import ( "context" "time" - "sigs.k8s.io/cluster-api/util/conditions" - "github.com/go-logr/logr" "github.com/pkg/errors" "go.opentelemetry.io/otel/api/trace" @@ -30,15 +28,14 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/record" + "sigs.k8s.io/cluster-api-provider-azure/pkg/coalescing" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" - capierrors "sigs.k8s.io/cluster-api/errors" capiv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha4" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -90,18 +87,26 @@ func NewAzureMachinePoolReconciler(client client.Client, log logr.Logger, record } // SetupWithManager initializes this controller with a manager. -func (r *AzureMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { - log := r.Log.WithValues("controller", "AzureMachinePool") +func (ampr *AzureMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options infracontroller.Options) error { + ctx, span := tele.Tracer().Start(ctx, "controllers.AzureMachinePoolReconciler.SetupWithManager") + defer span.End() + + log := ampr.Log.WithValues("controller", "AzureMachinePool") + var r reconcile.Reconciler = ampr + if options.Cache != nil { + r = coalescing.NewReconciler(ampr, options.Cache, log) + } + // create mapper to transform incoming AzureClusters into AzureMachinePool requests - azureClusterMapper, err := AzureClusterToAzureMachinePoolsMapper(ctx, r.Client, mgr.GetScheme(), log) + azureClusterMapper, err := AzureClusterToAzureMachinePoolsMapper(ctx, ampr.Client, mgr.GetScheme(), log) if err != nil { - return errors.Wrap(err, "failed to create AzureCluster to AzureMachinePools mapper") + return errors.Wrapf(err, "failed to create AzureCluster to AzureMachinePools mapper") } c, err := ctrl.NewControllerManagedBy(mgr). - WithOptions(options). + WithOptions(options.Options). For(&infrav1exp.AzureMachinePool{}). 
- WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). + WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), ampr.WatchFilterValue)). // watch for changes in CAPI MachinePool resources Watches( &source.Kind{Type: &capiv1exp.MachinePool{}}, @@ -117,7 +122,15 @@ func (r *AzureMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr c return errors.Wrap(err, "error creating controller") } - azureMachinePoolMapper, err := util.ClusterToObjectsMapper(r.Client, &infrav1exp.AzureMachinePoolList{}, mgr.GetScheme()) + if err := c.Watch( + &source.Kind{Type: &infrav1exp.AzureMachinePoolMachine{}}, + handler.EnqueueRequestsFromMapFunc(AzureMachinePoolMachineMapper(mgr.GetScheme(), log)), + MachinePoolMachineHasStateOrVersionChange(log), + ); err != nil { + return errors.Wrap(err, "failed adding a watch for AzureMachinePoolMachine") + } + + azureMachinePoolMapper, err := util.ClusterToObjectsMapper(ampr.Client, &infrav1exp.AzureMachinePoolList{}, mgr.GetScheme()) if err != nil { return errors.Wrap(err, "failed to create mapper for Cluster to AzureMachines") } @@ -136,16 +149,19 @@ func (r *AzureMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr c // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azuremachinepools,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azuremachinepools/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azuremachinepoolmachines,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azuremachinepoolmachines/status,verbs=get // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinepools;machinepools/status,verbs=get;list;watch // +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch // +kubebuilder:rbac:groups="",resources=secrets;,verbs=get;list;watch // +kubebuilder:rbac:groups=core,resources=nodes,verbs=get;list;watch // Reconcile idempotently gets, creates, and updates a machine pool. -func (r *AzureMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { - ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultedLoopTimeout(r.ReconcileTimeout)) +func (ampr *AzureMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { + ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultedLoopTimeout(ampr.ReconcileTimeout)) defer cancel() - logger := r.Log.WithValues("namespace", req.Namespace, "azureMachinePool", req.Name) + + logger := ampr.Log.WithValues("namespace", req.Namespace, "azureMachinePool", req.Name) ctx, span := tele.Tracer().Start(ctx, "controllers.AzureMachinePoolReconciler.Reconcile", trace.WithAttributes( @@ -156,7 +172,7 @@ func (r *AzureMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Req defer span.End() azMachinePool := &infrav1exp.AzureMachinePool{} - err := r.Get(ctx, req.NamespacedName, azMachinePool) + err := ampr.Get(ctx, req.NamespacedName, azMachinePool) if err != nil { if apierrors.IsNotFound(err) { return reconcile.Result{}, nil @@ -165,21 +181,21 @@ func (r *AzureMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Req } // Fetch the CAPI MachinePool. 
- machinePool, err := infracontroller.GetOwnerMachinePool(ctx, r.Client, azMachinePool.ObjectMeta) + machinePool, err := infracontroller.GetOwnerMachinePool(ctx, ampr.Client, azMachinePool.ObjectMeta) if err != nil { return reconcile.Result{}, err } if machinePool == nil { - logger.Info("MachinePool Controller has not yet set OwnerRef") + logger.V(2).Info("MachinePool Controller has not yet set OwnerRef") return reconcile.Result{}, nil } logger = logger.WithValues("machinePool", machinePool.Name) // Fetch the Cluster. - cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machinePool.ObjectMeta) + cluster, err := util.GetClusterFromMetadata(ctx, ampr.Client, machinePool.ObjectMeta) if err != nil { - logger.Info("MachinePool is missing cluster label or cluster does not exist") + logger.V(2).Info("MachinePool is missing cluster label or cluster does not exist") return reconcile.Result{}, nil } @@ -187,7 +203,7 @@ func (r *AzureMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Req // Return early if the object or Cluster is paused. if annotations.IsPaused(cluster, azMachinePool) { - logger.Info("AzureMachinePool or linked Cluster is marked as paused. Won't reconcile") + logger.V(2).Info("AzureMachinePool or linked Cluster is marked as paused. Won't reconcile") return ctrl.Result{}, nil } @@ -196,8 +212,8 @@ func (r *AzureMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Req Name: cluster.Spec.InfrastructureRef.Name, } azureCluster := &infrav1.AzureCluster{} - if err := r.Client.Get(ctx, azureClusterName, azureCluster); err != nil { - logger.Info("AzureCluster is not available yet") + if err := ampr.Client.Get(ctx, azureClusterName, azureCluster); err != nil { + logger.V(2).Info("AzureCluster is not available yet") return reconcile.Result{}, nil } @@ -205,7 +221,7 @@ func (r *AzureMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Req // Create the cluster scope clusterScope, err := scope.NewClusterScope(ctx, scope.ClusterScopeParams{ - Client: r.Client, + Client: ampr.Client, Logger: logger, Cluster: cluster, AzureCluster: azureCluster, @@ -217,7 +233,7 @@ func (r *AzureMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Req // Create the machine pool scope machinePoolScope, err := scope.NewMachinePoolScope(scope.MachinePoolScopeParams{ Logger: logger, - Client: r.Client, + Client: ampr.Client, MachinePool: machinePool, AzureMachinePool: azMachinePool, ClusterScope: clusterScope, @@ -235,14 +251,14 @@ func (r *AzureMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Req // Handle deleted machine pools if !azMachinePool.ObjectMeta.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx, machinePoolScope, clusterScope) + return ampr.reconcileDelete(ctx, machinePoolScope, clusterScope) } // Handle non-deleted machine pools - return r.reconcileNormal(ctx, machinePoolScope, clusterScope) + return ampr.reconcileNormal(ctx, machinePoolScope, clusterScope) } -func (r *AzureMachinePoolReconciler) reconcileNormal(ctx context.Context, machinePoolScope *scope.MachinePoolScope, clusterScope *scope.ClusterScope) (_ reconcile.Result, reterr error) { +func (ampr *AzureMachinePoolReconciler) reconcileNormal(ctx context.Context, machinePoolScope *scope.MachinePoolScope, clusterScope *scope.ClusterScope) (_ reconcile.Result, reterr error) { ctx, span := tele.Tracer().Start(ctx, "controllers.AzureMachinePoolReconciler.reconcileNormal") defer span.End() @@ -271,7 +287,7 @@ func (r *AzureMachinePoolReconciler) reconcileNormal(ctx 
context.Context, machin return reconcile.Result{}, nil } - ams, err := r.createAzureMachinePoolService(machinePoolScope) + ams, err := ampr.createAzureMachinePoolService(machinePoolScope) if err != nil { return reconcile.Result{}, errors.Wrap(err, "failed creating a newAzureMachinePoolService") } @@ -296,73 +312,50 @@ func (r *AzureMachinePoolReconciler) reconcileNormal(ctx context.Context, machin return reconcile.Result{}, err } + machinePoolScope.V(2).Info("Scale Set reconciled", "id", + machinePoolScope.ProviderID(), "state", machinePoolScope.ProvisioningState()) + switch machinePoolScope.ProvisioningState() { - case infrav1.Succeeded: - machinePoolScope.V(2).Info("Scale Set is running", "id", machinePoolScope.ProviderID()) - conditions.MarkTrue(machinePoolScope.AzureMachinePool, infrav1.ScaleSetRunningCondition) - machinePoolScope.SetReady() - case infrav1.Creating: - machinePoolScope.V(2).Info("Scale Set is creating", "id", machinePoolScope.ProviderID()) - conditions.MarkFalse(machinePoolScope.AzureMachinePool, infrav1.ScaleSetRunningCondition, infrav1.ScaleSetCreatingReason, clusterv1.ConditionSeverityInfo, "") - machinePoolScope.SetNotReady() - case infrav1.Updating: - machinePoolScope.V(2).Info("Scale Set is updating", "id", machinePoolScope.ProviderID()) - conditions.MarkFalse(machinePoolScope.AzureMachinePool, infrav1.ScaleSetRunningCondition, infrav1.ScaleSetUpdatingReason, clusterv1.ConditionSeverityInfo, "") - machinePoolScope.SetNotReady() - // we may still be scaling up, so check back in a bit - return reconcile.Result{ - RequeueAfter: 30 * time.Second, - }, nil case infrav1.Deleting: machinePoolScope.Info("Unexpected scale set deletion", "id", machinePoolScope.ProviderID()) - r.Recorder.Eventf(machinePoolScope.AzureMachinePool, corev1.EventTypeWarning, "UnexpectedVMDeletion", "Unexpected Azure scale set deletion") - conditions.MarkFalse(machinePoolScope.AzureMachinePool, infrav1.VMRunningCondition, infrav1.ScaleSetDeletingReason, clusterv1.ConditionSeverityWarning, "") - machinePoolScope.SetNotReady() + ampr.Recorder.Eventf(machinePoolScope.AzureMachinePool, corev1.EventTypeWarning, "UnexpectedVMDeletion", "Unexpected Azure scale set deletion") case infrav1.Failed: - machinePoolScope.SetNotReady() - machinePoolScope.Error(errors.New("Failed to create or update scale set"), "Scale Set is in failed state", "id", machinePoolScope.ProviderID()) - r.Recorder.Eventf(machinePoolScope.AzureMachinePool, corev1.EventTypeWarning, "FailedVMState", "Azure scale set is in failed state") - machinePoolScope.SetFailureReason(capierrors.UpdateMachineError) - machinePoolScope.SetFailureMessage(errors.Errorf("Azure VM state is %s", machinePoolScope.ProvisioningState())) - conditions.MarkFalse(machinePoolScope.AzureMachinePool, infrav1.ScaleSetRunningCondition, infrav1.ScaleSetProvisionFailedReason, clusterv1.ConditionSeverityError, "") - // If scale set failed provisioning, delete it so it can be recreated err := ams.Delete(ctx) if err != nil { return reconcile.Result{}, errors.Wrap(err, "failed to delete scale set in a failed state") } return reconcile.Result{}, errors.Wrap(err, "Scale set deleted, retry creating in next reconcile") - default: - machinePoolScope.SetNotReady() - conditions.MarkUnknown(machinePoolScope.AzureMachinePool, infrav1.ScaleSetRunningCondition, "", "") - return reconcile.Result{}, nil + } + + if machinePoolScope.NeedsRequeue() { + return reconcile.Result{ + RequeueAfter: 30 * time.Second, + }, nil } return reconcile.Result{}, nil } -func (r *AzureMachinePoolReconciler) 
reconcileDelete(ctx context.Context, machinePoolScope *scope.MachinePoolScope, clusterScope *scope.ClusterScope) (_ reconcile.Result, reterr error) { +func (ampr *AzureMachinePoolReconciler) reconcileDelete(ctx context.Context, machinePoolScope *scope.MachinePoolScope, clusterScope *scope.ClusterScope) (reconcile.Result, error) { ctx, span := tele.Tracer().Start(ctx, "controllers.AzureMachinePoolReconciler.reconcileDelete") defer span.End() - machinePoolScope.Info("Handling deleted AzureMachinePool") + machinePoolScope.V(2).Info("handling deleted AzureMachinePool") if infracontroller.ShouldDeleteIndividualResources(ctx, clusterScope) { - amps, err := r.createAzureMachinePoolService(machinePoolScope) + amps, err := ampr.createAzureMachinePoolService(machinePoolScope) if err != nil { return reconcile.Result{}, errors.Wrap(err, "failed creating a new AzureMachinePoolService") } + machinePoolScope.V(4).Info("deleting AzureMachinePool resource individually") if err := amps.Delete(ctx); err != nil { - return reconcile.Result{}, errors.Wrapf(err, "error deleting AzureCluster %s/%s", clusterScope.Namespace(), clusterScope.ClusterName()) + return reconcile.Result{}, errors.Wrapf(err, "error deleting AzureMachinePool %s/%s", clusterScope.Namespace(), machinePoolScope.Name()) } } - defer func() { - if reterr == nil { - // VM is deleted so remove the finalizer. - controllerutil.RemoveFinalizer(machinePoolScope.AzureMachinePool, capiv1exp.MachinePoolFinalizer) - } - }() - + // Delete succeeded, remove finalizer + machinePoolScope.V(4).Info("removing finalizer for AzureMachinePool") + controllerutil.RemoveFinalizer(machinePoolScope.AzureMachinePool, capiv1exp.MachinePoolFinalizer) return reconcile.Result{}, nil } diff --git a/exp/controllers/azuremachinepool_reconciler.go b/exp/controllers/azuremachinepool_reconciler.go index efa9277ff6c..4b826d91e96 100644 --- a/exp/controllers/azuremachinepool_reconciler.go +++ b/exp/controllers/azuremachinepool_reconciler.go @@ -19,8 +19,6 @@ package controllers import ( "context" - "sigs.k8s.io/cluster-api-provider-azure/azure/services/vmssextensions" - "github.com/pkg/errors" "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -28,6 +26,7 @@ import ( "sigs.k8s.io/cluster-api-provider-azure/azure/services/resourceskus" "sigs.k8s.io/cluster-api-provider-azure/azure/services/roleassignments" "sigs.k8s.io/cluster-api-provider-azure/azure/services/scalesets" + "sigs.k8s.io/cluster-api-provider-azure/azure/services/vmssextensions" "sigs.k8s.io/cluster-api-provider-azure/util/tele" ) diff --git a/exp/controllers/azuremachinepoolmachine_controller.go b/exp/controllers/azuremachinepoolmachine_controller.go new file mode 100644 index 00000000000..9bb7e218769 --- /dev/null +++ b/exp/controllers/azuremachinepoolmachine_controller.go @@ -0,0 +1,389 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "context" + "fmt" + "time" + + "github.com/go-logr/logr" + "github.com/pkg/errors" + "go.opentelemetry.io/otel/api/trace" + "go.opentelemetry.io/otel/label" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/cluster-api-provider-azure/pkg/coalescing" + capierrors "sigs.k8s.io/cluster-api/errors" + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/annotations" + "sigs.k8s.io/cluster-api/util/predicates" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha4" + "sigs.k8s.io/cluster-api-provider-azure/azure" + "sigs.k8s.io/cluster-api-provider-azure/azure/scope" + "sigs.k8s.io/cluster-api-provider-azure/azure/services/scalesetvms" + infracontroller "sigs.k8s.io/cluster-api-provider-azure/controllers" + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha4" + "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" + "sigs.k8s.io/cluster-api-provider-azure/util/tele" +) + +type ( + azureMachinePoolMachineReconcilerFactory func(*scope.MachinePoolMachineScope) azure.Reconciler + + // AzureMachinePoolMachineController handles Kubernetes change events for a AzureMachinePoolMachine resources + AzureMachinePoolMachineController struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + Recorder record.EventRecorder + ReconcileTimeout time.Duration + WatchFilterValue string + reconcilerFactory azureMachinePoolMachineReconcilerFactory + } + + azureMachinePoolMachineReconciler struct { + Scope *scope.MachinePoolMachineScope + scalesetVMsService *scalesetvms.Service + } +) + +// NewAzureMachinePoolMachineController creates a new AzureMachinePoolMachineController to handle updates to Azure Machine Pool Machines. +func NewAzureMachinePoolMachineController(c client.Client, log logr.Logger, recorder record.EventRecorder, reconcileTimeout time.Duration, watchFilterValue string) *AzureMachinePoolMachineController { + return &AzureMachinePoolMachineController{ + Client: c, + Log: log, + Recorder: recorder, + ReconcileTimeout: reconcileTimeout, + WatchFilterValue: watchFilterValue, + reconcilerFactory: newAzureMachinePoolMachineReconciler, + } +} + +// SetupWithManager initializes this controller with a manager. +func (ampmr *AzureMachinePoolMachineController) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options infracontroller.Options) error { + ctx, span := tele.Tracer().Start(ctx, "controllers.AzureMachinePoolMachineController.SetupWithManager") + defer span.End() + + log := ampmr.Log.WithValues("controller", "AzureMachinePoolMachine") + + var r reconcile.Reconciler = ampmr + if options.Cache != nil { + r = coalescing.NewReconciler(ampmr, options.Cache, log) + } + + c, err := ctrl.NewControllerManagedBy(mgr). + WithOptions(options.Options). + For(&infrav1exp.AzureMachinePoolMachine{}). + WithEventFilter(predicates.ResourceNotPaused(log)). 
// don't queue reconcile if resource is paused + Build(r) + if err != nil { + return errors.Wrapf(err, "error creating controller") + } + + // Add a watch on AzureMachinePool for model changes + if err := c.Watch( + &source.Kind{Type: &infrav1exp.AzureMachinePool{}}, + handler.EnqueueRequestsFromMapFunc(AzureMachinePoolToAzureMachinePoolMachines(ctx, mgr.GetClient(), log)), + MachinePoolModelHasChanged(log), + ); err != nil { + return errors.Wrapf(err, "failed adding a watch for AzureMachinePool model changes") + } + + return nil +} + +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azuremachinepools,verbs=get;list;watch +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azuremachinepools/status,verbs=get +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azuremachinepoolmachines,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azuremachinepoolmachines/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinepools;machinepools/status,verbs=get +// +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch +// +kubebuilder:rbac:groups="",resources=secrets;,verbs=get;list;watch +// +kubebuilder:rbac:groups=core,resources=nodes,verbs=get;list;watch + +// Reconcile idempotently gets, creates, and updates a machine pool. +func (ampmr *AzureMachinePoolMachineController) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { + ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultedLoopTimeout(ampmr.ReconcileTimeout)) + defer cancel() + logger := ampmr.Log.WithValues("namespace", req.Namespace, "azureMachinePoolMachine", req.Name) + + ctx, span := tele.Tracer().Start(ctx, "controllers.AzureMachinePoolMachineController.Reconcile", + trace.WithAttributes( + label.String("namespace", req.Namespace), + label.String("name", req.Name), + label.String("kind", "AzureMachinePoolMachine"), + )) + defer span.End() + + machine := &infrav1exp.AzureMachinePoolMachine{} + err := ampmr.Get(ctx, req.NamespacedName, machine) + if err != nil { + if apierrors.IsNotFound(err) { + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + + // Fetch the owning AzureMachinePool (VMSS) + azureMachinePool, err := infracontroller.GetOwnerAzureMachinePool(ctx, ampmr.Client, machine.ObjectMeta) + if err != nil { + if apierrors.IsNotFound(err) { + controllerutil.RemoveFinalizer(machine, infrav1exp.AzureMachinePoolMachineFinalizer) + return reconcile.Result{}, ampmr.Client.Update(ctx, machine) + } + return reconcile.Result{}, err + } + + if azureMachinePool != nil { + logger = logger.WithValues("azureMachinePool", azureMachinePool.Name) + } + + // Fetch the CAPI MachinePool. + machinePool, err := infracontroller.GetOwnerMachinePool(ctx, ampmr.Client, azureMachinePool.ObjectMeta) + if err != nil && !apierrors.IsNotFound(err) { + return reconcile.Result{}, err + } + + if machinePool != nil { + logger = logger.WithValues("machinePool", machinePool.Name) + } + + // Fetch the Cluster. + cluster, err := util.GetClusterFromMetadata(ctx, ampmr.Client, machinePool.ObjectMeta) + if err != nil { + logger.Info("MachinePool is missing cluster label or cluster does not exist") + return reconcile.Result{}, nil + } + + logger = logger.WithValues("cluster", cluster.Name) + + // Return early if the object or Cluster is paused. 
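Before building any scopes, the new controller resolves the ownership chain AzureMachinePoolMachine → AzureMachinePool → MachinePool → Cluster. The helpers GetOwnerAzureMachinePool and GetOwnerMachinePool do that through owner references; a rough stand-in is sketched below (getOwnerByKind is illustrative only, not the infracontroller implementation, and a real lookup would also verify the owner's API group):

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// getOwnerByKind fetches the owner of the given Kind referenced by obj into "into".
// It returns false when obj has no owner reference of that Kind.
func getOwnerByKind(ctx context.Context, c client.Client, obj metav1.ObjectMeta, kind string, into client.Object) (bool, error) {
	for _, ref := range obj.OwnerReferences {
		if ref.Kind != kind {
			continue
		}
		key := client.ObjectKey{Namespace: obj.Namespace, Name: ref.Name}
		if err := c.Get(ctx, key, into); err != nil {
			return false, err
		}
		return true, nil
	}
	return false, nil
}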
+ if annotations.IsPaused(cluster, machine) { + logger.Info("AzureMachinePoolMachine or linked Cluster is marked as paused. Won't reconcile") + return ctrl.Result{}, nil + } + + azureClusterName := client.ObjectKey{ + Namespace: machine.Namespace, + Name: cluster.Spec.InfrastructureRef.Name, + } + + azureCluster := &infrav1.AzureCluster{} + if err := ampmr.Client.Get(ctx, azureClusterName, azureCluster); err != nil { + logger.Info("AzureCluster is not available yet") + return reconcile.Result{}, nil + } + + logger = logger.WithValues("AzureCluster", azureCluster.Name) + + // Create the cluster scope + clusterScope, err := scope.NewClusterScope(ctx, scope.ClusterScopeParams{ + Client: ampmr.Client, + Logger: logger, + Cluster: cluster, + AzureCluster: azureCluster, + }) + if err != nil { + return reconcile.Result{}, err + } + + // Create the machine pool scope + machineScope, err := scope.NewMachinePoolMachineScope(scope.MachinePoolMachineScopeParams{ + Logger: logger, + Client: ampmr.Client, + MachinePool: machinePool, + AzureMachinePool: azureMachinePool, + AzureMachinePoolMachine: machine, + ClusterScope: clusterScope, + }) + if err != nil { + return reconcile.Result{}, errors.Errorf("failed to create scope: %+v", err) + } + + // Always close the scope when exiting this function so we can persist any AzureMachine changes. + defer func() { + if err := machineScope.Close(ctx); err != nil && reterr == nil { + reterr = err + } + }() + + // Handle deleted machine pools machine + if !machine.ObjectMeta.DeletionTimestamp.IsZero() { + return ampmr.reconcileDelete(ctx, machineScope) + } + + if !clusterScope.Cluster.Status.InfrastructureReady { + machineScope.Info("Cluster infrastructure is not ready yet") + return reconcile.Result{}, nil + } + + // Handle non-deleted machine pools + return ampmr.reconcileNormal(ctx, machineScope) +} + +func (ampmr *AzureMachinePoolMachineController) reconcileNormal(ctx context.Context, machineScope *scope.MachinePoolMachineScope) (_ reconcile.Result, reterr error) { + ctx, span := tele.Tracer().Start(ctx, "controllers.AzureMachinePoolMachineController.reconcileNormal") + defer span.End() + + machineScope.Info("Reconciling AzureMachinePoolMachine") + // If the AzureMachine is in an error state, return early. 
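The deferred machineScope.Close above is the usual scope/patch pattern: persist spec and status on every exit path without masking the primary reconcile error. A minimal sketch of the pattern, assuming only that the scope exposes Close(ctx) error (reconcileWithScope is a hypothetical name):

// The named return lets the deferred Close report a persistence failure
// only when the reconcile itself succeeded, so the original error is not hidden.
func reconcileWithScope(ctx context.Context, scope interface{ Close(context.Context) error }) (reterr error) {
	defer func() {
		if err := scope.Close(ctx); err != nil && reterr == nil {
			reterr = err
		}
	}()

	// ... reconcile work that may set reterr ...
	return reterr
}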
+ if machineScope.AzureMachinePool.Status.FailureReason != nil || machineScope.AzureMachinePool.Status.FailureMessage != nil { + machineScope.Info("Error state detected, skipping reconciliation") + return reconcile.Result{}, nil + } + + ampms := ampmr.reconcilerFactory(machineScope) + if err := ampms.Reconcile(ctx); err != nil { + // Handle transient and terminal errors + var reconcileError azure.ReconcileError + if errors.As(err, &reconcileError) { + if reconcileError.IsTerminal() { + machineScope.Error(err, "failed to reconcile AzureMachinePool", "name", machineScope.Name()) + return reconcile.Result{}, nil + } + + if reconcileError.IsTransient() { + machineScope.V(4).Info("failed to reconcile AzureMachinePoolMachine", "name", machineScope.Name(), "transient_error", err) + return reconcile.Result{RequeueAfter: reconcileError.RequeueAfter()}, nil + } + + return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile AzureMachinePool") + } + + return reconcile.Result{}, err + } + + state := machineScope.ProvisioningState() + switch state { + case infrav1.Failed: + ampmr.Recorder.Eventf(machineScope.AzureMachinePoolMachine, corev1.EventTypeWarning, "FailedVMState", "Azure scale set VM is in failed state") + machineScope.SetFailureReason(capierrors.UpdateMachineError) + machineScope.SetFailureMessage(errors.Errorf("Azure VM state is %s", state)) + case infrav1.Deleting: + if err := ampmr.Client.Delete(ctx, machineScope.AzureMachinePoolMachine); err != nil { + return reconcile.Result{}, errors.Wrap(err, "machine pool machine failed to be deleted when deleting") + } + } + + machineScope.V(2).Info(fmt.Sprintf("Scale Set VM is %s", state), "id", machineScope.ProviderID()) + + if !infrav1.IsTerminalProvisioningState(state) || !machineScope.IsReady() { + machineScope.V(2).Info("Requeuing", "state", state, "ready", machineScope.IsReady()) + // we are in a non-terminal state, retry in a bit + return reconcile.Result{ + RequeueAfter: 30 * time.Second, + }, nil + } + + return reconcile.Result{}, nil +} + +func (ampmr *AzureMachinePoolMachineController) reconcileDelete(ctx context.Context, machineScope *scope.MachinePoolMachineScope) (_ reconcile.Result, reterr error) { + ctx, span := tele.Tracer().Start(ctx, "controllers.AzureMachinePoolMachineController.reconcileDelete") + defer span.End() + + machineScope.Info("Handling deleted AzureMachinePoolMachine") + + if machineScope.AzureMachinePool == nil || !machineScope.AzureMachinePool.ObjectMeta.DeletionTimestamp.IsZero() { + // deleting the entire VMSS, so just remove finalizer and VMSS delete remove the underlying infrastructure. 
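reconcileNormal above (and reconcileDelete just below) classify failures through azure.ReconcileError: terminal errors stop requeueing, transient errors requeue after the hint carried by the error, and anything else falls back to controller-runtime's backoff. A condensed sketch of that branching, using only the IsTerminal/IsTransient/RequeueAfter methods exercised in this change (resultForAzureError is a hypothetical helper, not part of the codebase):

import (
	"github.com/go-logr/logr"
	"github.com/pkg/errors"

	"sigs.k8s.io/cluster-api-provider-azure/azure"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

func resultForAzureError(log logr.Logger, err error) (reconcile.Result, error) {
	var reconcileError azure.ReconcileError
	if !errors.As(err, &reconcileError) {
		// Unknown error: return it and let controller-runtime retry with backoff.
		return reconcile.Result{}, err
	}
	if reconcileError.IsTerminal() {
		// Terminal: do not requeue; the failure is surfaced via status.
		log.Error(err, "terminal reconcile failure")
		return reconcile.Result{}, nil
	}
	if reconcileError.IsTransient() {
		// Transient: requeue after the duration suggested by the error.
		log.V(4).Info("transient reconcile failure", "requeueAfter", reconcileError.RequeueAfter())
		return reconcile.Result{RequeueAfter: reconcileError.RequeueAfter()}, nil
	}
	return reconcile.Result{}, err
}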
+ controllerutil.RemoveFinalizer(machineScope.AzureMachinePoolMachine, infrav1exp.AzureMachinePoolMachineFinalizer) + return reconcile.Result{}, nil + } + + // deleting a single machine + // 1) drain the node (TODO: @devigned) + // 2) after drained, delete the infrastructure + // 3) remove finalizer + + ampms := ampmr.reconcilerFactory(machineScope) + if err := ampms.Delete(ctx); err != nil { + // Handle transient and terminal errors + var reconcileError azure.ReconcileError + if errors.As(err, &reconcileError) { + if reconcileError.IsTerminal() { + machineScope.Error(err, "failed to delete AzureMachinePoolMachine", "name", machineScope.Name()) + return reconcile.Result{}, nil + } + + if reconcileError.IsTransient() { + machineScope.V(4).Info("failed to delete AzureMachinePoolMachine", "name", machineScope.Name(), "transient_error", err) + return reconcile.Result{RequeueAfter: reconcileError.RequeueAfter()}, nil + } + + return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile AzureMachinePool") + } + + return reconcile.Result{}, err + } + + return reconcile.Result{}, nil +} + +func newAzureMachinePoolMachineReconciler(scope *scope.MachinePoolMachineScope) azure.Reconciler { + return &azureMachinePoolMachineReconciler{ + Scope: scope, + scalesetVMsService: scalesetvms.NewService(scope), + } +} + +// Reconcile will reconcile the state of the Machine Pool Machine with the state of the Azure VMSS VM +func (r *azureMachinePoolMachineReconciler) Reconcile(ctx context.Context) error { + ctx, span := tele.Tracer().Start(ctx, "controllers.azureMachinePoolMachineReconciler.Reconcile") + defer span.End() + + if err := r.scalesetVMsService.Reconcile(ctx); err != nil { + return errors.Wrap(err, "failed to reconcile scalesetVMs") + } + + if err := r.Scope.UpdateStatus(ctx); err != nil { + return errors.Wrap(err, "failed to update vmss vm status") + } + + return nil +} + +// Delete will attempt to drain and delete the Azure VMSS VM +func (r *azureMachinePoolMachineReconciler) Delete(ctx context.Context) error { + ctx, span := tele.Tracer().Start(ctx, "controllers.azureMachinePoolMachineReconciler.Delete") + defer span.End() + + defer func() { + if err := r.Scope.UpdateStatus(ctx); err != nil { + r.Scope.V(4).Info("failed tup update vmss vm status during delete") + } + }() + + err := r.scalesetVMsService.Delete(ctx) + if err != nil { + return errors.Wrap(err, "failed to reconcile scalesetVMs") + } + + // no long running operation, so we are finished deleting the resource. Remove the finalizer. + controllerutil.RemoveFinalizer(r.Scope.AzureMachinePoolMachine, infrav1exp.AzureMachinePoolMachineFinalizer) + + return nil +} diff --git a/exp/controllers/azuremachinepoolmachine_controller_test.go b/exp/controllers/azuremachinepoolmachine_controller_test.go new file mode 100644 index 00000000000..5ba4f44a3b9 --- /dev/null +++ b/exp/controllers/azuremachinepoolmachine_controller_test.go @@ -0,0 +1,180 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/v2/klogr" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha4" + "sigs.k8s.io/cluster-api-provider-azure/azure" + "sigs.k8s.io/cluster-api-provider-azure/azure/mocks" + "sigs.k8s.io/cluster-api-provider-azure/azure/scope" + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha4" + gomock2 "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" + clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha4" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestAzureMachinePoolMachineReconciler_Reconcile(t *testing.T) { + cases := []struct { + Name string + Setup func(cb *fake.ClientBuilder, reconciler *mocks.MockReconcilerMockRecorder) + Verify func(g *WithT, result ctrl.Result, err error) + }{ + { + Name: "should successfully reconcile", + Setup: func(cb *fake.ClientBuilder, reconciler *mocks.MockReconcilerMockRecorder) { + cluster, azCluster, mp, amp, ampm := getAReadyMachinePoolMachineCluster() + reconciler.Reconcile(gomock2.AContext()).Return(nil) + cb.WithObjects(cluster, azCluster, mp, amp, ampm) + }, + Verify: func(g *WithT, result ctrl.Result, err error) { + g.Expect(err).ToNot(HaveOccurred()) + }, + }, + { + Name: "should successfully delete", + Setup: func(cb *fake.ClientBuilder, reconciler *mocks.MockReconcilerMockRecorder) { + cluster, azCluster, mp, amp, ampm := getAReadyMachinePoolMachineCluster() + ampm.DeletionTimestamp = &metav1.Time{ + Time: time.Now(), + } + reconciler.Delete(gomock2.AContext()).Return(nil) + cb.WithObjects(cluster, azCluster, mp, amp, ampm) + }, + Verify: func(g *WithT, result ctrl.Result, err error) { + g.Expect(err).ToNot(HaveOccurred()) + }, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + var ( + g = NewWithT(t) + mockCtrl = gomock.NewController(t) + reconciler = mocks.NewMockReconciler(mockCtrl) + scheme = func() *runtime.Scheme { + s := runtime.NewScheme() + for _, addTo := range []func(s *runtime.Scheme) error{ + clusterv1.AddToScheme, + clusterv1exp.AddToScheme, + infrav1.AddToScheme, + infrav1exp.AddToScheme, + } { + g.Expect(addTo(s)).To(Succeed()) + } + + return s + }() + cb = fake.NewClientBuilder().WithScheme(scheme) + ) + defer mockCtrl.Finish() + + c.Setup(cb, reconciler.EXPECT()) + controller := NewAzureMachinePoolMachineController(cb.Build(), klogr.New(), nil, 30*time.Second, "foo") + controller.reconcilerFactory = func(_ *scope.MachinePoolMachineScope) azure.Reconciler { + return reconciler + } + res, err := controller.Reconcile(context.TODO(), ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "ampm1", + Namespace: "default", + }, + }) + c.Verify(g, res, err) + }) + } +} + +func getAReadyMachinePoolMachineCluster() (*clusterv1.Cluster, *infrav1.AzureCluster, *clusterv1exp.MachinePool, *infrav1exp.AzureMachinePool, *infrav1exp.AzureMachinePoolMachine) { + azCluster := &infrav1.AzureCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "azCluster1", + Namespace: "default", + }, + Spec: infrav1.AzureClusterSpec{ + SubscriptionID: "subID", + }, + } + + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + Namespace: "default", + }, + Spec: 
clusterv1.ClusterSpec{ + InfrastructureRef: &corev1.ObjectReference{ + Name: azCluster.Name, + }, + }, + Status: clusterv1.ClusterStatus{ + InfrastructureReady: true, + }, + } + + mp := &clusterv1exp.MachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mp1", + Namespace: "default", + Labels: map[string]string{ + "cluster.x-k8s.io/cluster-name": cluster.Name, + }, + }, + } + + amp := &infrav1exp.AzureMachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "amp1", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + Name: mp.Name, + Kind: "MachinePool", + APIVersion: clusterv1exp.GroupVersion.String(), + }, + }, + }, + } + + ampm := &infrav1exp.AzureMachinePoolMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ampm1", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + Name: amp.Name, + Kind: "AzureMachinePool", + APIVersion: infrav1exp.GroupVersion.String(), + }, + }, + }, + } + + return cluster, azCluster, mp, amp, ampm +} diff --git a/exp/controllers/helpers.go b/exp/controllers/helpers.go index d7fc8c7ec68..041e777c693 100644 --- a/exp/controllers/helpers.go +++ b/exp/controllers/helpers.go @@ -21,23 +21,27 @@ import ( "fmt" "github.com/go-logr/logr" + "github.com/google/go-cmp/cmp" "github.com/pkg/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha4" - "sigs.k8s.io/cluster-api-provider-azure/controllers" - infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha4" - "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha4" "sigs.k8s.io/cluster-api/util" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" + + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha4" + "sigs.k8s.io/cluster-api-provider-azure/controllers" + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha4" + "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" ) // AzureClusterToAzureMachinePoolsMapper creates a mapping handler to transform AzureClusters into AzureMachinePools. 
The transform @@ -69,7 +73,7 @@ func AzureClusterToAzureMachinePoolsMapper(ctx context.Context, c client.Client, clusterName, ok := controllers.GetOwnerClusterName(azCluster.ObjectMeta) if !ok { - log.Info("unable to get the owner cluster") + log.V(4).Info("unable to get the owner cluster") return nil } @@ -77,6 +81,7 @@ func AzureClusterToAzureMachinePoolsMapper(ctx context.Context, c client.Client, machineList.SetGroupVersionKind(gvk) // list all of the requested objects within the cluster namespace with the cluster name label if err := c.List(ctx, machineList, client.InNamespace(azCluster.Namespace), client.MatchingLabels{clusterv1.ClusterLabelName: clusterName}); err != nil { + log.V(4).Info(fmt.Sprintf("unable to list machine pools in cluster %s", clusterName)) return nil } @@ -92,6 +97,49 @@ func AzureClusterToAzureMachinePoolsMapper(ctx context.Context, c client.Client, }, nil } +// AzureMachinePoolMachineMapper creates a mapping handler to transform AzureMachinePoolMachine to AzureMachinePools +func AzureMachinePoolMachineMapper(scheme *runtime.Scheme, log logr.Logger) handler.MapFunc { + return func(o client.Object) []ctrl.Request { + gvk, err := apiutil.GVKForObject(new(infrav1exp.AzureMachinePool), scheme) + if err != nil { + log.Error(errors.WithStack(err), "failed to find GVK for AzureMachinePool") + return nil + } + + azureMachinePoolMachine, ok := o.(*infrav1exp.AzureMachinePoolMachine) + if !ok { + log.Error(errors.Errorf("expected an AzureCluster, got %T instead", o), "failed to map AzureMachinePoolMachine") + return nil + } + + log = log.WithValues("AzureMachinePoolMachine", azureMachinePoolMachine.Name, "Namespace", azureMachinePoolMachine.Namespace) + for _, ref := range azureMachinePoolMachine.OwnerReferences { + if ref.Kind != gvk.Kind { + continue + } + + gv, err := schema.ParseGroupVersion(ref.APIVersion) + if err != nil { + log.Error(errors.WithStack(err), "unable to parse group version", "APIVersion", ref.APIVersion) + return nil + } + + if gv.Group == gvk.Group { + return []ctrl.Request{ + { + NamespacedName: types.NamespacedName{ + Name: ref.Name, + Namespace: azureMachinePoolMachine.Namespace, + }, + }, + } + } + } + + return nil + } +} + // AzureManagedClusterToAzureManagedMachinePoolsMapper creates a mapping handler to transform AzureManagedClusters into // AzureManagedMachinePools. The transform requires AzureManagedCluster to map to the owning Cluster, then from the // Cluster, collect the MachinePools belonging to the cluster, then finally projecting the infrastructure reference @@ -122,7 +170,7 @@ func AzureManagedClusterToAzureManagedMachinePoolsMapper(ctx context.Context, c clusterName, ok := controllers.GetOwnerClusterName(azCluster.ObjectMeta) if !ok { - log.Info("unable to get the owner cluster") + log.V(4).Info("unable to get the owner cluster") return nil } @@ -386,7 +434,7 @@ func MachinePoolToInfrastructureMapFunc(gvk schema.GroupVersionKind, log logr.Lo return func(o client.Object) []reconcile.Request { m, ok := o.(*clusterv1exp.MachinePool) if !ok { - log.Info("attempt to map incorrect type", "type", fmt.Sprintf("%T", o)) + log.V(4).Info("attempt to map incorrect type", "type", fmt.Sprintf("%T", o)) return nil } @@ -395,7 +443,7 @@ func MachinePoolToInfrastructureMapFunc(gvk schema.GroupVersionKind, log logr.Lo // Return early if the GroupKind doesn't match what we expect. 
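As a usage sketch for the mapper above (hypothetical objects and helper name, assumed to live alongside the exp/controllers package; the scheme must have the infrav1exp types registered): an AzureMachinePoolMachine owned by an AzureMachinePool maps to exactly one request for that pool, keyed by the owner's name and the machine's namespace.

import (
	"github.com/go-logr/logr"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"

	infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha4"
)

// mapMachineToOwningPool is illustrative only: the result should be a single
// request for default/amp-0.
func mapMachineToOwningPool(scheme *runtime.Scheme, log logr.Logger) []ctrl.Request {
	mapFn := AzureMachinePoolMachineMapper(scheme, log)
	ampm := &infrav1exp.AzureMachinePoolMachine{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "ampm-0",
			Namespace: "default",
			OwnerReferences: []metav1.OwnerReference{{
				Name:       "amp-0",
				Kind:       "AzureMachinePool",
				APIVersion: infrav1exp.GroupVersion.String(),
			}},
		},
	}
	return mapFn(ampm)
}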
infraGK := ref.GroupVersionKind().GroupKind() if gk != infraGK { - log.Info("gk does not match", "gk", gk, "infraGK", infraGK) + log.V(4).Info("gk does not match", "gk", gk, "infraGK", infraGK) return nil } @@ -427,7 +475,7 @@ func AzureClusterToAzureMachinePoolsFunc(ctx context.Context, kClient client.Cli cluster, err := util.GetOwnerCluster(ctx, kClient, c.ObjectMeta) switch { case apierrors.IsNotFound(err) || cluster == nil: - logWithValues.Info("owning cluster not found") + logWithValues.V(4).Info("owning cluster not found") return nil case err != nil: logWithValues.Error(err, "failed to get owning cluster") @@ -454,3 +502,109 @@ func AzureClusterToAzureMachinePoolsFunc(ctx context.Context, kClient client.Cli return result } } + +// AzureMachinePoolToAzureMachinePoolMachines maps an AzureMachinePool to it's child AzureMachinePoolMachines through +// Cluster and MachinePool labels +func AzureMachinePoolToAzureMachinePoolMachines(ctx context.Context, kClient client.Client, log logr.Logger) handler.MapFunc { + return func(o client.Object) []reconcile.Request { + ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultMappingTimeout) + defer cancel() + + amp, ok := o.(*infrav1exp.AzureMachinePool) + if !ok { + log.Error(errors.Errorf("expected a AzureMachinePool but got a %T", o), "failed to get AzureMachinePool") + return nil + } + logWithValues := log.WithValues("AzureMachinePool", amp.Name, "Namespace", amp.Namespace) + + labels := map[string]string{ + clusterv1.ClusterLabelName: amp.Labels[clusterv1.ClusterLabelName], + infrav1exp.MachinePoolNameLabel: amp.Name, + } + ampml := &infrav1exp.AzureMachinePoolMachineList{} + if err := kClient.List(ctx, ampml, client.InNamespace(amp.Namespace), client.MatchingLabels(labels)); err != nil { + logWithValues.Error(err, "failed to list AzureMachinePoolMachines") + return nil + } + + logWithValues.Info("mapping from AzureMachinePool", "count", len(ampml.Items)) + var result []reconcile.Request + for _, m := range ampml.Items { + result = append(result, reconcile.Request{ + NamespacedName: client.ObjectKey{ + Namespace: m.Namespace, + Name: m.Name, + }, + }) + } + + return result + } +} + +// MachinePoolModelHasChanged predicates any events based on changes to the AzureMachinePool model +func MachinePoolModelHasChanged(logger logr.Logger) predicate.Funcs { + return predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + log := logger.WithValues("predicate", "MachinePoolModelHasChanged", "eventType", "update") + + oldAmp, ok := e.ObjectOld.(*infrav1exp.AzureMachinePool) + if !ok { + + log.V(4).Info("Expected AzureMachinePool", "type", e.ObjectOld.GetObjectKind().GroupVersionKind().String()) + return false + } + log = log.WithValues("namespace", oldAmp.Namespace, "azureMachinePool", oldAmp.Name) + + newAmp := e.ObjectNew.(*infrav1exp.AzureMachinePool) + + // if any of these are not equal, run the update + shouldUpdate := !cmp.Equal(oldAmp.Spec.Identity, newAmp.Spec.Identity) || + !cmp.Equal(oldAmp.Spec.Template, newAmp.Spec.Template) || + !cmp.Equal(oldAmp.Spec.UserAssignedIdentities, newAmp.Spec.UserAssignedIdentities) || + !cmp.Equal(oldAmp.Status.ProvisioningState, newAmp.Status.ProvisioningState) + + //if shouldUpdate { + log.Info("machine pool predicate", "shouldUpdate", shouldUpdate) + //} + return shouldUpdate + }, + CreateFunc: func(e event.CreateEvent) bool { return false }, + DeleteFunc: func(e event.DeleteEvent) bool { return false }, + GenericFunc: func(e event.GenericEvent) bool { return false }, + } +} + +// 
MachinePoolMachineHasStateOrVersionChange predicates any events based on changes to the AzureMachinePoolMachine status +// relevant for the AzureMachinePool controller +func MachinePoolMachineHasStateOrVersionChange(logger logr.Logger) predicate.Funcs { + return predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + log := logger.WithValues("predicate", "MachinePoolModelHasChanged", "eventType", "update") + + oldAmp, ok := e.ObjectOld.(*infrav1exp.AzureMachinePoolMachine) + if !ok { + + log.V(4).Info("Expected AzureMachinePoolMachine", "type", e.ObjectOld.GetObjectKind().GroupVersionKind().String()) + return false + } + log = log.WithValues("namespace", oldAmp.Namespace, "machinePoolMachine", oldAmp.Name) + + newAmp := e.ObjectNew.(*infrav1exp.AzureMachinePoolMachine) + + // if any of these are not equal, run the update + shouldUpdate := oldAmp.Status.LatestModelApplied != newAmp.Status.LatestModelApplied || + oldAmp.Status.Version != newAmp.Status.Version || + oldAmp.Status.ProvisioningState != newAmp.Status.ProvisioningState || + oldAmp.Status.Ready != newAmp.Status.Ready + + if shouldUpdate { + log.Info("machine pool machine predicate", "shouldUpdate", shouldUpdate) + } + return shouldUpdate + }, + CreateFunc: func(e event.CreateEvent) bool { return false }, + DeleteFunc: func(e event.DeleteEvent) bool { return false }, + GenericFunc: func(e event.GenericEvent) bool { return false }, + } +} diff --git a/exp/controllers/helpers_test.go b/exp/controllers/helpers_test.go index fc555b0ada5..3144a73f08d 100644 --- a/exp/controllers/helpers_test.go +++ b/exp/controllers/helpers_test.go @@ -58,6 +58,7 @@ func TestAzureClusterToAzureMachinePoolsMapper(t *testing.T) { log := mock_log.NewMockLogger(gomock.NewController(t)) log.EXPECT().WithValues("AzureCluster", "my-cluster", "Namespace", "default").Return(log) + log.EXPECT().V(4).Return(log) log.EXPECT().Info("gk does not match", "gk", gomock.Any(), "infraGK", gomock.Any()) mapper, err := AzureClusterToAzureMachinePoolsMapper(context.Background(), fakeClient, scheme, log) g.Expect(err).NotTo(HaveOccurred()) @@ -93,6 +94,7 @@ func TestAzureManagedClusterToAzureManagedMachinePoolsMapper(t *testing.T) { log := mock_log.NewMockLogger(gomock.NewController(t)) log.EXPECT().WithValues("AzureManagedCluster", "my-cluster", "Namespace", "default").Return(log) + log.EXPECT().V(4).Return(log) log.EXPECT().Info("gk does not match", "gk", gomock.Any(), "infraGK", gomock.Any()) mapper, err := AzureManagedClusterToAzureManagedMachinePoolsMapper(context.Background(), fakeClient, scheme, log) g.Expect(err).NotTo(HaveOccurred()) @@ -155,6 +157,7 @@ func TestAzureManagedControlPlaneToAzureManagedMachinePoolsMapper(t *testing.T) log := mock_log.NewMockLogger(gomock.NewController(t)) log.EXPECT().WithValues("AzureManagedControlPlane", cpName, "Namespace", cluster.Namespace).Return(log) + log.EXPECT().V(4).Return(log) log.EXPECT().Info("gk does not match", "gk", gomock.Any(), "infraGK", gomock.Any()) mapper, err := AzureManagedControlPlaneToAzureManagedMachinePoolsMapper(context.Background(), fakeClient, scheme, log) g.Expect(err).NotTo(HaveOccurred()) @@ -433,6 +436,7 @@ func Test_MachinePoolToInfrastructureMapFunc(t *testing.T) { }, Setup: func(logMock *mock_log.MockLogger) { ampGK := infrav1exp.GroupVersion.WithKind("AzureMachinePool").GroupKind() + logMock.EXPECT().V(4).Return(logMock) logMock.EXPECT().Info("gk does not match", "gk", ampGK, "infraGK", gomock.Any()) }, Expect: func(g *GomegaWithT, reqs []reconcile.Request) { @@ -445,6 +449,7 @@ func 
Test_MachinePoolToInfrastructureMapFunc(t *testing.T) { return newCluster("azureCluster") }, Setup: func(logMock *mock_log.MockLogger) { + logMock.EXPECT().V(4).Return(logMock) logMock.EXPECT().Info("attempt to map incorrect type", "type", "*v1alpha4.Cluster") }, Expect: func(g *GomegaWithT, reqs []reconcile.Request) { @@ -505,10 +510,10 @@ func Test_azureClusterToAzureMachinePoolsFunc(t *testing.T) { Setup: func(g *GomegaWithT, t *testing.T) (*mock_log.MockLogger, *gomock.Controller, client.Client) { mockCtrl := gomock.NewController(t) log := mock_log.NewMockLogger(mockCtrl) - logWithValues := mock_log.NewMockLogger(mockCtrl) kClient := fake.NewClientBuilder().WithScheme(newScheme(g)).Build() - log.EXPECT().WithValues("AzureCluster", "azurefoo", "Namespace", "default").Return(logWithValues) - logWithValues.EXPECT().Info("owning cluster not found") + log.EXPECT().WithValues("AzureCluster", "azurefoo", "Namespace", "default").Return(log) + log.EXPECT().V(4).Return(log) + log.EXPECT().Info("owning cluster not found") return log, mockCtrl, kClient }, Expect: func(g *GomegaWithT, reqs []reconcile.Request) { diff --git a/exp/controllers/mocks/doc.go b/exp/controllers/mocks/doc.go new file mode 100644 index 00000000000..2180e918083 --- /dev/null +++ b/exp/controllers/mocks/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Run go generate to regenerate this mock. +//go:generate ../../../hack/tools/bin/mockgen -destination reconciler_mock.go -package mock_controllers sigs.k8s.io/cluster-api-provider-azure/azure Reconciler +//go:generate /usr/bin/env bash -c "cat ../../../hack/boilerplate/boilerplate.generatego.txt reconciler_mock.go > _reconciler_mock.go && mv _reconciler_mock.go reconciler_mock.go" +package mock_controllers //nolint diff --git a/exp/controllers/mocks/reconciler_mock.go b/exp/controllers/mocks/reconciler_mock.go new file mode 100644 index 00000000000..c4edded2afa --- /dev/null +++ b/exp/controllers/mocks/reconciler_mock.go @@ -0,0 +1,79 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by MockGen. DO NOT EDIT. +// Source: sigs.k8s.io/cluster-api-provider-azure/azure (interfaces: Reconciler) + +// Package mock_controllers is a generated GoMock package. +package mock_controllers + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockReconciler is a mock of Reconciler interface. 
+type MockReconciler struct { + ctrl *gomock.Controller + recorder *MockReconcilerMockRecorder +} + +// MockReconcilerMockRecorder is the mock recorder for MockReconciler. +type MockReconcilerMockRecorder struct { + mock *MockReconciler +} + +// NewMockReconciler creates a new mock instance. +func NewMockReconciler(ctrl *gomock.Controller) *MockReconciler { + mock := &MockReconciler{ctrl: ctrl} + mock.recorder = &MockReconcilerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockReconciler) EXPECT() *MockReconcilerMockRecorder { + return m.recorder +} + +// Delete mocks base method. +func (m *MockReconciler) Delete(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockReconcilerMockRecorder) Delete(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockReconciler)(nil).Delete), arg0) +} + +// Reconcile mocks base method. +func (m *MockReconciler) Reconcile(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Reconcile", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Reconcile indicates an expected call of Reconcile. +func (mr *MockReconcilerMockRecorder) Reconcile(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reconcile", reflect.TypeOf((*MockReconciler)(nil).Reconcile), arg0) +} diff --git a/exp/controllers/suite_test.go b/exp/controllers/suite_test.go index 5b0d4754f2c..c908a62d886 100644 --- a/exp/controllers/suite_test.go +++ b/exp/controllers/suite_test.go @@ -23,6 +23,7 @@ import ( . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" v1 "k8s.io/api/core/v1" + "sigs.k8s.io/cluster-api-provider-azure/controllers" "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/envtest/printer" @@ -66,7 +67,10 @@ var _ = BeforeSuite(func(done Done) { reconciler.DefaultLoopTimeout, "").SetupWithManager(context.Background(), testEnv.Manager, controller.Options{MaxConcurrentReconciles: 1})).To(Succeed()) Expect(NewAzureMachinePoolReconciler(testEnv, testEnv.Log, testEnv.GetEventRecorderFor("azuremachinepool-reconciler"), - reconciler.DefaultLoopTimeout, "").SetupWithManager(context.Background(), testEnv.Manager, controller.Options{MaxConcurrentReconciles: 1})).To(Succeed()) + reconciler.DefaultLoopTimeout, "").SetupWithManager(context.Background(), testEnv.Manager, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed()) + + Expect(NewAzureMachinePoolMachineController(testEnv, testEnv.Log, testEnv.GetEventRecorderFor("azuremachinepoolmachine-reconciler"), + reconciler.DefaultLoopTimeout, "").SetupWithManager(context.Background(), testEnv.Manager, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: 1}})).To(Succeed()) // +kubebuilder:scaffold:scheme diff --git a/internal/test/matchers/gomega/matchers.go b/internal/test/matchers/gomega/matchers.go index cbd01fcc1e0..8f2dd9669f7 100644 --- a/internal/test/matchers/gomega/matchers.go +++ b/internal/test/matchers/gomega/matchers.go @@ -20,6 +20,7 @@ import ( "fmt" "strings" + "github.com/google/go-cmp/cmp" "github.com/onsi/gomega/matchers" "github.com/onsi/gomega/types" @@ -38,8 +39,33 @@ type ( WithLevel(int) LogMatcher WithLogFunc(string) LogMatcher } + + cmpMatcher struct { + x interface{} + diff string + } ) +// DiffEq will verify cmp.Diff(expected, actual) == "" using github.com/google/go-cmp/cmp +func DiffEq(x interface{}) types.GomegaMatcher { + return &cmpMatcher{ + x: x, + } +} + +func (c *cmpMatcher) Match(actual interface{}) (bool, error) { + c.diff = cmp.Diff(actual, c.x) + return c.diff == "", nil +} + +func (c *cmpMatcher) FailureMessage(_ interface{}) string { + return c.diff +} + +func (c *cmpMatcher) NegatedFailureMessage(_ interface{}) string { + return c.diff +} + func LogContains(values ...interface{}) LogMatcher { return &logEntryMactcher{ values: values, diff --git a/main.go b/main.go index a691dc10da0..85f8cfbb7cc 100644 --- a/main.go +++ b/main.go @@ -17,6 +17,7 @@ limitations under the License. 
package main import ( + "context" "flag" "fmt" "net/http" @@ -39,6 +40,7 @@ import ( cgrecord "k8s.io/client-go/tools/record" "k8s.io/klog/v2" "k8s.io/klog/v2/klogr" + "sigs.k8s.io/cluster-api-provider-azure/pkg/coalescing" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha4" capifeature "sigs.k8s.io/cluster-api/feature" @@ -46,9 +48,11 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/metrics" aadpodv1 "github.com/Azure/aad-pod-identity/pkg/apis/aadpodidentity/v1" + infrav1alpha3 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3" infrav1alpha4 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha4" "sigs.k8s.io/cluster-api-provider-azure/controllers" @@ -93,23 +97,24 @@ func init() { } var ( - metricsAddr string - enableLeaderElection bool - leaderElectionNamespace string - leaderElectionLeaseDuration time.Duration - leaderElectionRenewDeadline time.Duration - leaderElectionRetryPeriod time.Duration - watchNamespace string - watchFilterValue string - profilerAddress string - azureClusterConcurrency int - azureMachineConcurrency int - azureMachinePoolConcurrency int - syncPeriod time.Duration - healthAddr string - webhookPort int - reconcileTimeout time.Duration - enableTracing bool + metricsAddr string + enableLeaderElection bool + leaderElectionNamespace string + leaderElectionLeaseDuration time.Duration + leaderElectionRenewDeadline time.Duration + leaderElectionRetryPeriod time.Duration + watchNamespace string + watchFilterValue string + profilerAddress string + azureClusterConcurrency int + azureMachineConcurrency int + azureMachinePoolConcurrency int + azureMachinePoolMachineConcurrency int + syncPeriod time.Duration + healthAddr string + webhookPort int + reconcileTimeout time.Duration + enableTracing bool ) // InitFlags initializes all command-line flags. @@ -194,6 +199,11 @@ func InitFlags(fs *pflag.FlagSet) { 10, "Number of AzureMachinePools to process simultaneously") + fs.IntVar(&azureMachinePoolMachineConcurrency, + "azuremachinepoolmachine-concurrency", + 10, + "Number of AzureMachinePoolMachines to process simultaneously") + fs.DurationVar(&syncPeriod, "sync-period", 10*time.Minute, @@ -295,10 +305,28 @@ func main() { // Setup the context that's going to be used in controllers and for the manager. 
 	ctx := ctrl.SetupSignalHandler()
+	registerControllers(ctx, mgr)
+
 	// +kubebuilder:scaffold:builder
-	if err = controllers.NewAzureMachineReconciler(
-		mgr.GetClient(),
-		ctrl.Log.WithName("controllers").WithName("AzureMachine"),
+	if err := mgr.AddReadyzCheck("ping", healthz.Ping); err != nil {
+		setupLog.Error(err, "unable to create ready check")
+		os.Exit(1)
+	}
+
+	if err := mgr.AddHealthzCheck("ping", healthz.Ping); err != nil {
+		setupLog.Error(err, "unable to create health check")
+		os.Exit(1)
+	}
+
+	setupLog.Info("starting manager", "version", version.Get().String())
+	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
+		setupLog.Error(err, "problem running manager")
+		os.Exit(1)
+	}
+}
+
+func registerControllers(ctx context.Context, mgr manager.Manager) {
+	if err := controllers.NewAzureMachineReconciler(mgr.GetClient(),
 		ctrl.Log.WithName("controllers").WithName("AzureMachine"),
 		mgr.GetEventRecorderFor("azuremachine-reconciler"),
 		reconcileTimeout,
 		watchFilterValue,
@@ -306,7 +334,8 @@ func main() {
 		setupLog.Error(err, "unable to create controller", "controller", "AzureMachine")
 		os.Exit(1)
 	}
-	if err = controllers.NewAzureClusterReconciler(
+
+	if err := controllers.NewAzureClusterReconciler(
 		mgr.GetClient(),
 		ctrl.Log.WithName("controllers").WithName("AzureCluster"),
 		mgr.GetEventRecorderFor("azurecluster-reconciler"),
@@ -316,7 +345,8 @@ func main() {
 		setupLog.Error(err, "unable to create controller", "controller", "AzureCluster")
 		os.Exit(1)
 	}
-	if err = (&controllers.AzureJSONTemplateReconciler{
+
+	if err := (&controllers.AzureJSONTemplateReconciler{
 		Client:   mgr.GetClient(),
 		Log:      ctrl.Log.WithName("controllers").WithName("AzureJSONTemplate"),
 		Recorder: mgr.GetEventRecorderFor("azurejsontemplate-reconciler"),
@@ -325,7 +355,8 @@ func main() {
 		setupLog.Error(err, "unable to create controller", "controller", "AzureJSONTemplate")
 		os.Exit(1)
 	}
-	if err = (&controllers.AzureJSONMachineReconciler{
+
+	if err := (&controllers.AzureJSONMachineReconciler{
 		Client:   mgr.GetClient(),
 		Log:      ctrl.Log.WithName("controllers").WithName("AzureJSONMachine"),
 		Recorder: mgr.GetEventRecorderFor("azurejsonmachine-reconciler"),
@@ -334,7 +365,8 @@ func main() {
 		setupLog.Error(err, "unable to create controller", "controller", "AzureJSONMachine")
 		os.Exit(1)
 	}
-	if err = (&controllers.AzureIdentityReconciler{
+
+	if err := (&controllers.AzureIdentityReconciler{
 		Client:   mgr.GetClient(),
 		Log:      ctrl.Log.WithName("controllers").WithName("AzureIdentity"),
 		Recorder: mgr.GetEventRecorderFor("azureidentity-reconciler"),
@@ -344,20 +376,43 @@ func main() {
 		setupLog.Error(err, "unable to create controller", "controller", "AzureIdentity")
 		os.Exit(1)
 	}
+
 	// just use CAPI MachinePool feature flag rather than create a new one
 	setupLog.V(1).Info(fmt.Sprintf("%+v\n", feature.Gates))
 	if feature.Gates.Enabled(capifeature.MachinePool) {
-		if err = infrav1controllersexp.NewAzureMachinePoolReconciler(
+		mpCache, err := coalescing.NewRequestCache(20 * time.Second)
+		if err != nil {
+			setupLog.Error(err, "failed to build mpCache ReconcileCache")
+		}
+
+		if err := infrav1controllersexp.NewAzureMachinePoolReconciler(
 			mgr.GetClient(),
 			ctrl.Log.WithName("controllers").WithName("AzureMachinePool"),
 			mgr.GetEventRecorderFor("azuremachinepool-reconciler"),
 			reconcileTimeout,
 			watchFilterValue,
-		).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: azureMachinePoolConcurrency}); err != nil {
+		).SetupWithManager(ctx, mgr, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: azureMachinePoolConcurrency}, Cache: mpCache}); err != nil {
 			setupLog.Error(err, "unable to create controller", "controller", "AzureMachinePool")
 			os.Exit(1)
 		}
-		if err = (&controllers.AzureJSONMachinePoolReconciler{
+
+		mpmCache, err := coalescing.NewRequestCache(10 * time.Second)
+		if err != nil {
+			setupLog.Error(err, "failed to build mpmCache ReconcileCache")
+		}
+
+		if err := infrav1controllersexp.NewAzureMachinePoolMachineController(
+			mgr.GetClient(),
+			ctrl.Log.WithName("controllers").WithName("AzureMachinePoolMachine"),
+			mgr.GetEventRecorderFor("azuremachinepoolmachine-reconciler"),
+			reconcileTimeout,
+			watchFilterValue,
+		).SetupWithManager(ctx, mgr, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: azureMachinePoolMachineConcurrency}, Cache: mpmCache}); err != nil {
+			setupLog.Error(err, "unable to create controller", "controller", "AzureMachinePoolMachine")
+			os.Exit(1)
+		}
+
+		if err := (&controllers.AzureJSONMachinePoolReconciler{
 			Client:   mgr.GetClient(),
 			Log:      ctrl.Log.WithName("controllers").WithName("AzureJSONMachinePool"),
 			Recorder: mgr.GetEventRecorderFor("azurejsonmachinepool-reconciler"),
@@ -367,7 +422,7 @@ func main() {
 			os.Exit(1)
 		}
 		if feature.Gates.Enabled(feature.AKS) {
-			if err = infrav1controllersexp.NewAzureManagedMachinePoolReconciler(
+			if err := infrav1controllersexp.NewAzureManagedMachinePoolReconciler(
 				mgr.GetClient(),
 				ctrl.Log.WithName("controllers").WithName("AzureManagedMachinePool"),
 				mgr.GetEventRecorderFor("azuremachine-reconciler"),
@@ -377,7 +432,8 @@ func main() {
 				setupLog.Error(err, "unable to create controller", "controller", "AzureManagedMachinePool")
 				os.Exit(1)
 			}
-			if err = (&infrav1controllersexp.AzureManagedClusterReconciler{
+
+			if err := (&infrav1controllersexp.AzureManagedClusterReconciler{
 				Client:   mgr.GetClient(),
 				Log:      ctrl.Log.WithName("controllers").WithName("AzureManagedCluster"),
 				Recorder: mgr.GetEventRecorderFor("azuremanagedcluster-reconciler"),
@@ -387,7 +443,8 @@ func main() {
 				setupLog.Error(err, "unable to create controller", "controller", "AzureManagedCluster")
 				os.Exit(1)
 			}
-			if err = (&infrav1controllersexp.AzureManagedControlPlaneReconciler{
+
+			if err := (&infrav1controllersexp.AzureManagedControlPlaneReconciler{
 				Client:   mgr.GetClient(),
 				Log:      ctrl.Log.WithName("controllers").WithName("AzureManagedControlPlane"),
 				Recorder: mgr.GetEventRecorderFor("azuremanagedcontrolplane-reconciler"),
@@ -400,27 +457,35 @@ func main() {
 		}
 	}

-	if err = (&infrav1alpha4.AzureCluster{}).SetupWebhookWithManager(mgr); err != nil {
+	if err := (&infrav1alpha4.AzureCluster{}).SetupWebhookWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create webhook", "webhook", "AzureCluster")
 		os.Exit(1)
 	}
-	if err = (&infrav1alpha4.AzureMachine{}).SetupWebhookWithManager(mgr); err != nil {
+
+	if err := (&infrav1alpha4.AzureMachine{}).SetupWebhookWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create webhook", "webhook", "AzureMachine")
 		os.Exit(1)
 	}
-	if err = (&infrav1alpha4.AzureMachineTemplate{}).SetupWebhookWithManager(mgr); err != nil {
+
+	if err := (&infrav1alpha4.AzureMachineTemplate{}).SetupWebhookWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create webhook", "webhook", "AzureMachineTemplate")
 		os.Exit(1)
 	}
 	// just use CAPI MachinePool feature flag rather than create a new one
 	if feature.Gates.Enabled(capifeature.MachinePool) {
-		if err = (&infrav1alpha4exp.AzureMachinePool{}).SetupWebhookWithManager(mgr); err != nil {
+		if err := (&infrav1alpha4exp.AzureMachinePool{}).SetupWebhookWithManager(mgr); err != nil {
 			setupLog.Error(err, "unable to create webhook", "webhook", "AzureMachinePool")
 			os.Exit(1)
 		}
+
+		if err := (&infrav1alpha4exp.AzureMachinePoolMachine{}).SetupWebhookWithManager(mgr); err != nil {
+			setupLog.Error(err, "unable to create webhook", "webhook", "AzureMachinePoolMachine")
+			os.Exit(1)
+		}
 	}
+
 	if feature.Gates.Enabled(feature.AKS) {
-		if err = (&infrav1alpha4exp.AzureManagedControlPlane{}).SetupWebhookWithManager(mgr); err != nil {
+		if err := (&infrav1alpha4exp.AzureManagedControlPlane{}).SetupWebhookWithManager(mgr); err != nil {
 			setupLog.Error(err, "unable to create webhook", "webhook", "AzureManagedControlPlane")
 			os.Exit(1)
 		}
diff --git a/pkg/coalescing/mocks/doc.go b/pkg/coalescing/mocks/doc.go
index 71b017d81c7..f16ff25492f 100644
--- a/pkg/coalescing/mocks/doc.go
+++ b/pkg/coalescing/mocks/doc.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2019 The Kubernetes Authors.
+Copyright 2021 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/pkg/coalescing/reconciler.go b/pkg/coalescing/reconciler.go
index feff14289f2..9065a1cf103 100644
--- a/pkg/coalescing/reconciler.go
+++ b/pkg/coalescing/reconciler.go
@@ -53,7 +53,7 @@ type (
 	}
 )

-// NewRequestCache creates a new instance of a ReconcileCache given a specified window of expiration
+// NewRequestCache creates a new instance of a ReconcileCache given a specified window of expiration.
 func NewRequestCache(window time.Duration) (*ReconcileCache, error) {
 	cache, err := ttllru.New(1024, window)
 	if err != nil {
diff --git a/test/e2e/azure_lb.go b/test/e2e/azure_lb.go
index 633c42913c7..3c8722cc855 100644
--- a/test/e2e/azure_lb.go
+++ b/test/e2e/azure_lb.go
@@ -22,6 +22,7 @@ import (
 	"context"
 	"fmt"
 	"net"
+	"time"

 	"sigs.k8s.io/cluster-api/util"

@@ -81,8 +82,12 @@ func AzureLBSpec(ctx context.Context, inputGetter func() AzureLBSpecInput) {
 	webDeployment.AddContainerPort("http", "http", 80, corev1.ProtocolTCP)

 	if input.Windows {
-		windowsVersion, err := node.GetWindowsVersion(ctx, clientset)
-		Expect(err).NotTo(HaveOccurred())
+		var windowsVersion windows.OSVersion
+		Eventually(func() error {
+			version, err := node.GetWindowsVersion(ctx, clientset)
+			windowsVersion = version
+			return err
+		}, 300*time.Second, 5*time.Second).Should(Succeed())
 		iisImage := windows.GetWindowsImage(windows.Httpd, windowsVersion)
 		webDeployment.SetImage(deploymentName, iisImage)
 		webDeployment.AddWindowsSelectors()
diff --git a/test/e2e/azure_timesync.go b/test/e2e/azure_timesync.go
index e6fc4483c96..4252d5a38f3 100644
--- a/test/e2e/azure_timesync.go
+++ b/test/e2e/azure_timesync.go
@@ -44,17 +44,14 @@ func AzureTimeSyncSpec(ctx context.Context, inputGetter func() AzureTimeSyncSpec
 	var (
 		specName = "azure-timesync"
 		input    AzureTimeSyncSpecInput
-		thirtySeconds = 30*time.Second
+		thirty   = 30 * time.Second
 	)

 	input = inputGetter()
 	Expect(input.BootstrapClusterProxy).NotTo(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName)
-
-	namespace, name := input.Namespace.Name, input.ClusterName
-	managementClusterClient := input.BootstrapClusterProxy.GetClient()
-
+	namespace, clusterName := input.Namespace.Name, input.ClusterName
 	Eventually(func() error {
-		sshInfo, err := getClusterSSHInfo(ctx, managementClusterClient, namespace, name)
+		sshInfo, err := getClusterSSHInfo(ctx, input.BootstrapClusterProxy, namespace, clusterName)
 		if err != nil {
 			return err
 		}
@@ -95,5 +92,5 @@ func AzureTimeSyncSpec(ctx context.Context, inputGetter func() AzureTimeSyncSpec
 		}

 		return kinderrors.AggregateConcurrent(testFuncs)
-	}, thirtySeconds, thirtySeconds).Should(Succeed())
+	}, thirty, thirty).Should(Succeed())
 }
diff --git a/test/e2e/config/azure-dev.yaml b/test/e2e/config/azure-dev.yaml
index 299c5e5b3cf..d37f024e6dc 100644
--- a/test/e2e/config/azure-dev.yaml
+++ b/test/e2e/config/azure-dev.yaml
@@ -91,7 +91,7 @@ variables:
   KUBERNETES_VERSION: "${KUBERNETES_VERSION:-v1.19.7}"
   ETCD_VERSION_UPGRADE_TO: "3.4.3-0"
   COREDNS_VERSION_UPGRADE_TO: "1.6.7"
-  KUBERNETES_VERSION_UPGRADE_TO: "${KUBERNETES_VERSION_UPGRADE_TO:-v1.19.7}"
+  KUBERNETES_VERSION_UPGRADE_TO: "${KUBERNETES_VERSION_UPGRADE_TO:-v1.19.11}"
   KUBERNETES_VERSION_UPGRADE_FROM: "${KUBERNETES_VERSION_UPGRADE_FROM:-v1.18.15}"
   CNI: "${PWD}/templates/addons/calico.yaml"
   REDACT_LOG_SCRIPT: "${PWD}/hack/log/redact.sh"
@@ -117,4 +117,4 @@ intervals:
   default/wait-deployment: ["15m", "10s"]
   default/wait-job: ["5m", "10s"]
   default/wait-service: ["5m", "10s"]
-  default/wait-machine-pool-nodes: ["20m", "10s"]
+  default/wait-machine-pool-nodes: ["30m", "10s"]
diff --git a/test/e2e/helpers.go b/test/e2e/helpers.go
index e92304ce358..83978626dd3 100644
--- a/test/e2e/helpers.go
+++ b/test/e2e/helpers.go
@@ -47,16 +47,13 @@ import (
 	typedappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
 	typedbatchv1 "k8s.io/client-go/kubernetes/typed/batch/v1"
 	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
-	"k8s.io/client-go/tools/clientcmd"

 	"sigs.k8s.io/cluster-api-provider-azure/azure"
-	"sigs.k8s.io/cluster-api-provider-azure/util/tele"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4"
 	"sigs.k8s.io/cluster-api/controllers/noderefutil"
 	clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha4"
 	"sigs.k8s.io/cluster-api/test/framework"
 	"sigs.k8s.io/cluster-api/test/framework/kubernetesversions"
 	"sigs.k8s.io/cluster-api/util"
-	utilkubeconfig "sigs.k8s.io/cluster-api/util/kubeconfig"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )

@@ -315,16 +312,20 @@ type nodeSSHInfo struct {
 // getClusterSSHInfo returns the information needed to establish a SSH connection through a
 // control plane endpoint to each node in the cluster.
-func getClusterSSHInfo(ctx context.Context, c client.Client, namespace, name string) ([]nodeSSHInfo, error) {
-	var sshInfo []nodeSSHInfo
+func getClusterSSHInfo(ctx context.Context, mgmtClusterProxy framework.ClusterProxy, namespace, clusterName string) ([]nodeSSHInfo, error) {
+	var (
+		sshInfo               []nodeSSHInfo
+		mgmtClusterClient     = mgmtClusterProxy.GetClient()
+		workloadClusterClient = mgmtClusterProxy.GetWorkloadCluster(ctx, namespace, clusterName).GetClient()
+	)
 	// Collect the info for each VM / Machine.
-	machines, err := getMachinesInCluster(ctx, c, namespace, name)
+	machines, err := getMachinesInCluster(ctx, mgmtClusterClient, namespace, clusterName)
 	if err != nil {
 		return sshInfo, errors.Wrap(err, "failed to get machines in the cluster")
 	}
 	for i := range machines.Items {
 		m := &machines.Items[i]
-		cluster, err := util.GetClusterFromMetadata(ctx, c, m.ObjectMeta)
+		cluster, err := util.GetClusterFromMetadata(ctx, mgmtClusterClient, m.ObjectMeta)
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to get cluster from metadata")
 		}
@@ -336,25 +337,19 @@ func getClusterSSHInfo(ctx context.Context, c client.Client, namespace, name str
 	}

 	// Collect the info for each instance in a VMSS / MachinePool.
-	machinePools, err := getMachinePoolsInCluster(ctx, c, namespace, name)
+	machinePools, err := getMachinePoolsInCluster(ctx, mgmtClusterClient, namespace, clusterName)
 	if err != nil {
 		return sshInfo, errors.Wrap(err, "failed to find machine pools in cluster")
 	}

-	// make a workload client to access the workload cluster
-	workloadClient, err := getWorkloadClient(ctx, c, namespace, name)
-	if err != nil {
-		return sshInfo, errors.Wrap(err, "failed to get workload client")
-	}
-
 	for i := range machinePools.Items {
 		p := &machinePools.Items[i]
-		cluster, err := util.GetClusterFromMetadata(ctx, c, p.ObjectMeta)
+		cluster, err := util.GetClusterFromMetadata(ctx, mgmtClusterClient, p.ObjectMeta)
 		if err != nil {
 			return sshInfo, errors.Wrap(err, "failed to get cluster from metadata")
 		}
-		nodes, err := getReadyNodes(ctx, workloadClient, p.Status.NodeRefs)
+		nodes, err := getReadyNodes(ctx, workloadClusterClient, p.Status.NodeRefs)
 		if err != nil {
 			return sshInfo, errors.Wrap(err, "failed to get ready nodes")
 		}
@@ -404,32 +399,6 @@ func getReadyNodes(ctx context.Context, c client.Client, refs []corev1.ObjectRef
 	return nodes, nil
 }

-func getWorkloadClient(ctx context.Context, c client.Client, namespace, clusterName string) (client.Client, error) {
-	ctx, span := tele.Tracer().Start(ctx, "scope.MachinePoolMachineScope.getWorkloadClient")
-	defer span.End()
-
-	obj := client.ObjectKey{
-		Namespace: namespace,
-		Name:      clusterName,
-	}
-	dataBytes, err := utilkubeconfig.FromSecret(ctx, c, obj)
-	if err != nil {
-		return nil, errors.Wrapf(err, "\"%s-kubeconfig\" not found in namespace %q", obj.Name, obj.Namespace)
-	}
-
-	cfg, err := clientcmd.Load(dataBytes)
-	if err != nil {
-		return nil, errors.Wrapf(err, "failed to load \"%s-kubeconfig\" in namespace %q", obj.Name, obj.Namespace)
-	}
-
-	restConfig, err := clientcmd.NewDefaultClientConfig(*cfg, &clientcmd.ConfigOverrides{}).ClientConfig()
-	if err != nil {
-		return nil, errors.Wrapf(err, "failed transform config \"%s-kubeconfig\" in namespace %q", obj.Name, obj.Namespace)
-	}
-
-	return client.New(restConfig, client.Options{})
-}
-
 // getMachinesInCluster returns a list of all machines in the given cluster.
 // This is adapted from CAPI's test/framework/cluster_proxy.go.
 func getMachinesInCluster(ctx context.Context, c framework.Lister, namespace, name string) (*clusterv1.MachineList, error) {
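
Reviewer note (not part of the patch): the machine pool controllers above are now registered through controllers.Options, which wraps controller-runtime's controller.Options and adds a coalescing.ReconcileCache built by NewRequestCache, so repeat reconcile requests for the same object that arrive within the cache window are intended to be coalesced rather than re-queued immediately. The following Go sketch only restates that wiring in one place for readability; it reuses identifiers that already appear in the hunks (mgr, ctx, setupLog, reconcileTimeout, watchFilterValue, azureMachinePoolMachineConcurrency are assumed to come from the surrounding main.go) and is illustrative, not an addition to the diff.

	// Illustrative only: mirrors the AzureMachinePoolMachine registration added in main.go above.
	mpmCache, err := coalescing.NewRequestCache(10 * time.Second) // requests for the same object within this window are candidates for coalescing
	if err != nil {
		setupLog.Error(err, "failed to build mpmCache ReconcileCache")
	}

	if err := infrav1controllersexp.NewAzureMachinePoolMachineController(
		mgr.GetClient(),
		ctrl.Log.WithName("controllers").WithName("AzureMachinePoolMachine"),
		mgr.GetEventRecorderFor("azuremachinepoolmachine-reconciler"),
		reconcileTimeout,
		watchFilterValue,
	).SetupWithManager(ctx, mgr, controllers.Options{
		Options: controller.Options{MaxConcurrentReconciles: azureMachinePoolMachineConcurrency}, // plain controller-runtime options
		Cache:   mpmCache,                                                                        // coalescing cache consulted by the wrapped reconciler
	}); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "AzureMachinePoolMachine")
		os.Exit(1)
	}

The AzureMachinePool reconciler follows the same pattern with a 20 second window; per the reconciler.go hunk, NewRequestCache fixes the LRU size at 1024 entries, so the window duration is the only per-controller tuning knob.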