Commit
Do not set cascade delete for resources not part of VM creation (#142)
* do not set cascade delete for resources not part of VM creation

* fix errors

* address review comments
rishabh-11 committed Apr 13, 2024
1 parent 6ae8a9f commit 7d00496
Showing 4 changed files with 127 additions and 31 deletions.
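For orientation before the diff: "cascade delete" means stamping DeleteOption "Delete" onto the VM's NIC and disk references, so that Azure removes those resources in the same call that deletes the VM. This commit restricts that stamping to the NIC and disks that were created as part of the VM, leaving anything attached later alone. The sketch below is illustrative only, not code from this commit; it assumes the armcompute v5 SDK, and nicID is a hypothetical resource ID.

package example

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

// cascadeDeleteUpdate builds a minimal VirtualMachineUpdate that marks one NIC
// and the OSDisk for deletion together with the VM (illustrative sketch only).
func cascadeDeleteUpdate(nicID string) *armcompute.VirtualMachineUpdate {
	return &armcompute.VirtualMachineUpdate{
		Properties: &armcompute.VirtualMachineProperties{
			NetworkProfile: &armcompute.NetworkProfile{
				NetworkInterfaces: []*armcompute.NetworkInterfaceReference{{
					ID: to.Ptr(nicID),
					Properties: &armcompute.NetworkInterfaceReferenceProperties{
						DeleteOption: to.Ptr(armcompute.DeleteOptionsDelete),
					},
				}},
			},
			StorageProfile: &armcompute.StorageProfile{
				OSDisk: &armcompute.OSDisk{
					DeleteOption: to.Ptr(armcompute.DiskDeleteOptionTypesDelete),
				},
			},
		},
	}
}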
51 changes: 32 additions & 19 deletions pkg/azure/provider/helpers/driver.go
@@ -12,6 +12,7 @@ import (
 	"encoding/base64"
 	"errors"
 	"fmt"
+	"slices"
 	"strconv"
 	"strings"

@@ -115,11 +116,18 @@ func GetDiskNames(providerSpec api.AzureProviderSpec, vmName string) []string {
 	dataDisks := providerSpec.Properties.StorageProfile.DataDisks
 	diskNames := make([]string, 0, len(dataDisks)+1)
 	diskNames = append(diskNames, utils.CreateOSDiskName(vmName))
-	if !utils.IsSliceNilOrEmpty(dataDisks) {
-		for _, disk := range dataDisks {
-			diskName := utils.CreateDataDiskName(vmName, disk)
-			diskNames = append(diskNames, diskName)
-		}
-	}
+	dataDiskNames := createDataDiskNames(providerSpec, vmName)
+	diskNames = append(diskNames, dataDiskNames...)
 	return diskNames
 }
+
+// createDataDiskNames creates disk names for all configured DataDisks in the provider spec.
+func createDataDiskNames(providerSpec api.AzureProviderSpec, vmName string) []string {
+	dataDisks := providerSpec.Properties.StorageProfile.DataDisks
+	diskNames := make([]string, 0, len(dataDisks))
+	for _, disk := range dataDisks {
+		diskName := utils.CreateDataDiskName(vmName, disk)
+		diskNames = append(diskNames, diskName)
+	}
+	return diskNames
+}
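GetDiskNames now delegates to createDataDiskNames, which gives DeleteMachine an authoritative list of the data-disk names that were created together with the VM; any attached disk whose name is not on that list is left untouched. A self-contained sketch of that filtering idea, with invented disk names (the real format comes from utils.CreateDataDiskName):

package main

import (
	"fmt"
	"slices"
)

func main() {
	// Names derived from the provider spec; only these were created with the VM
	// (the names here are invented for illustration).
	createdWithVM := []string{"vm-0-1-data-disk", "vm-0-2-data-disk"}

	// Disks currently attached to the VM, including one attached after creation.
	attached := []string{"vm-0-1-data-disk", "vm-0-2-data-disk", "sample-disk"}

	for _, name := range attached {
		if slices.Contains(createdWithVM, name) {
			fmt.Printf("%s: mark for cascade delete\n", name)
		} else {
			fmt.Printf("%s: leave untouched\n", name)
		}
	}
}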
@@ -153,11 +161,11 @@ func CheckAndDeleteLeftoverNICsAndDisks(ctx context.Context, factory access.Fact
 	return nil
 }

-// UpdateCascadeDeleteOptions updates the VirtualMachine properties and sets cascade delete options for NIC's and DISK's if it is not already set.
+// UpdateCascadeDeleteOptions updates the VirtualMachine properties and sets cascade delete options for the NIC and disks if not already set.
 // Once that is set then it deletes the VM. This will ensure that no separate calls to delete each NIC and DISK are made as they will get deleted along with the VM in one single atomic call.
-func UpdateCascadeDeleteOptions(ctx context.Context, vmAccess *armcompute.VirtualMachinesClient, resourceGroup string, vm *armcompute.VirtualMachine) error {
+func UpdateCascadeDeleteOptions(ctx context.Context, providerSpec api.AzureProviderSpec, vmAccess *armcompute.VirtualMachinesClient, resourceGroup string, vm *armcompute.VirtualMachine) error {
 	vmName := *vm.Name
-	vmUpdateParams := computeDeleteOptionUpdatesForNICsAndDisksIfRequired(resourceGroup, vm)
+	vmUpdateParams := computeDeleteOptionUpdatesForNICsAndDisksIfRequired(resourceGroup, vm, providerSpec)
 	if vmUpdateParams != nil {
 		// update the VM and set cascade delete on NIC and Disks (OSDisk and DataDisks) if not already set and then trigger VM deletion.
 		klog.V(4).Infof("Updating cascade deletion options for VM: [ResourceGroup: %s, Name: %s] resources", resourceGroup, vmName)
@@ -191,7 +199,7 @@ func CanUpdateVirtualMachine(vm *armcompute.VirtualMachine) bool {

 // computeDeleteOptionUpdatesForNICsAndDisksIfRequired computes changes required to set cascade delete options for NICs, OSDisk and DataDisks.
 // If there are no changes then a nil is returned. If there are changes then delta changes are captured in armcompute.VirtualMachineUpdate
-func computeDeleteOptionUpdatesForNICsAndDisksIfRequired(resourceGroup string, vm *armcompute.VirtualMachine) *armcompute.VirtualMachineUpdate {
+func computeDeleteOptionUpdatesForNICsAndDisksIfRequired(resourceGroup string, vm *armcompute.VirtualMachine, providerSpec api.AzureProviderSpec) *armcompute.VirtualMachineUpdate {
 	var (
 		vmUpdateParams       *armcompute.VirtualMachineUpdate
 		updatedNicReferences []*armcompute.NetworkInterfaceReference
@@ -206,10 +214,12 @@ func computeDeleteOptionUpdatesForNICsAndDisksIfRequired(resourceGroup string, v
 		return vmUpdateParams
 	}

-	updatedNicReferences = getNetworkInterfaceReferencesToUpdate(vm.Properties.NetworkProfile)
-	updatedOSDisk = getOSDiskToUpdate(vm.Properties.StorageProfile)
-	updatedDataDisks = getDataDisksToUpdate(vm.Properties.StorageProfile)
+	dataDisksToUpdate := createDataDiskNames(providerSpec, vmName)
+	nicToUpdate := utils.CreateNICName(vmName)
+
+	updatedNicReferences = getNetworkInterfaceReferencesToUpdate(vm.Properties.NetworkProfile, nicToUpdate)
+	updatedOSDisk = getOSDiskToUpdate(vm.Properties.StorageProfile)
+	updatedDataDisks = getDataDisksToUpdate(vm.Properties.StorageProfile, dataDisksToUpdate)
 	// If there are no updates on NIC(s), OSDisk and DataDisk(s) then just return early.
 	if utils.IsSliceNilOrEmpty(updatedNicReferences) && updatedOSDisk == nil && utils.IsSliceNilOrEmpty(updatedDataDisks) {
 		klog.Infof("All configured NICs, OSDisk and DataDisks have cascade delete already set for VM: [ResourceGroup: %s, Name: %s]", resourceGroup, vmName)
@@ -239,16 +249,16 @@ func computeDeleteOptionUpdatesForNICsAndDisksIfRequired(resourceGroup string, v
 	return vmUpdateParams
 }

-// getNetworkInterfaceReferencesToUpdate checks if there are still NICs which do not have cascade delete set. These are captured and changed
-// NetworkInterfaceReference's are then returned with cascade delete option set.
-func getNetworkInterfaceReferencesToUpdate(networkProfile *armcompute.NetworkProfile) []*armcompute.NetworkInterfaceReference {
+// getNetworkInterfaceReferencesToUpdate checks whether the NIC that was created during VM creation still lacks the cascade delete option.
+// If it does, the changed NetworkInterfaceReference is returned with the cascade delete option set.
+func getNetworkInterfaceReferencesToUpdate(networkProfile *armcompute.NetworkProfile, nicToUpdate string) []*armcompute.NetworkInterfaceReference {
 	if networkProfile == nil || utils.IsSliceNilOrEmpty(networkProfile.NetworkInterfaces) {
 		return nil
 	}
 	updatedNicRefs := make([]*armcompute.NetworkInterfaceReference, 0, len(networkProfile.NetworkInterfaces))
 	for _, nicRef := range networkProfile.NetworkInterfaces {
 		updatedNicRef := &armcompute.NetworkInterfaceReference{ID: nicRef.ID}
-		if !isNicCascadeDeleteSet(nicRef) {
+		if *nicRef.ID == nicToUpdate && !isNicCascadeDeleteSet(nicRef) {
 			if updatedNicRef.Properties == nil {
 				updatedNicRef.Properties = &armcompute.NetworkInterfaceReferenceProperties{}
 			}
@@ -285,15 +295,18 @@ func getOSDiskToUpdate(storageProfile *armcompute.StorageProfile) *armcompute.OS
 	return updatedOSDisk
 }

-// getDataDisksToUpdate checks if cascade delete option set for all DataDisks attached to the Virtual machine.
+// getDataDisksToUpdate checks if the cascade delete option is set for all DataDisks attached to the virtual machine that were part of VM creation.
 // All data disks that do not have that set, it will set the appropriate DeleteOption and return the updated
 // DataDisks else it will return nil
-func getDataDisksToUpdate(storageProfile *armcompute.StorageProfile) []*armcompute.DataDisk {
+func getDataDisksToUpdate(storageProfile *armcompute.StorageProfile, dataDisksToUpdate []string) []*armcompute.DataDisk {
 	var updatedDataDisks []*armcompute.DataDisk
+	if utils.IsSliceNilOrEmpty(dataDisksToUpdate) {
+		return updatedDataDisks
+	}
 	if storageProfile != nil && !utils.IsSliceNilOrEmpty(storageProfile.DataDisks) {
 		updatedDataDisks = make([]*armcompute.DataDisk, 0, len(storageProfile.DataDisks))
 		for _, dataDisk := range storageProfile.DataDisks {
-			if dataDisk.DeleteOption == nil || *dataDisk.DeleteOption != armcompute.DiskDeleteOptionTypesDelete {
+			if slices.Contains(dataDisksToUpdate, *dataDisk.Name) && (dataDisk.DeleteOption == nil || *dataDisk.DeleteOption != armcompute.DiskDeleteOptionTypesDelete) {
 				updatedDataDisk := &armcompute.DataDisk{
 					Lun:          dataDisk.Lun,
 					DeleteOption: to.Ptr(armcompute.DiskDeleteOptionTypesDelete),
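Taken together, the driver.go changes feed a narrower VirtualMachineUpdate into the existing update-then-delete sequence. Below is a minimal sketch of that sequence, assuming armcompute v5-style poller APIs; it is not the repo's actual code, and error wrapping, logging and retries are omitted.

package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

// updateThenDelete sketches the flow UpdateCascadeDeleteOptions drives: first
// patch the VM so the selected NIC/disk references carry DeleteOption "Delete",
// then delete the VM so those resources are removed in one cascade.
func updateThenDelete(ctx context.Context, client *armcompute.VirtualMachinesClient, rg, vmName string, params *armcompute.VirtualMachineUpdate) error {
	if params != nil {
		updPoller, err := client.BeginUpdate(ctx, rg, vmName, *params, nil)
		if err != nil {
			return err
		}
		if _, err = updPoller.PollUntilDone(ctx, nil); err != nil {
			return err
		}
	}
	delPoller, err := client.BeginDelete(ctx, rg, vmName, nil)
	if err != nil {
		return err
	}
	_, err = delPoller.PollUntilDone(ctx, nil)
	return err
}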
2 changes: 1 addition & 1 deletion pkg/azure/provider/provider.go
@@ -133,7 +133,7 @@ func (d defaultDriver) DeleteMachine(ctx context.Context, req *driver.DeleteMach
 		}
 	} else {
 		if helpers.CanUpdateVirtualMachine(vm) {
-			if err = helpers.UpdateCascadeDeleteOptions(ctx, vmAccess, resourceGroup, vm); err != nil {
+			if err = helpers.UpdateCascadeDeleteOptions(ctx, providerSpec, vmAccess, resourceGroup, vm); err != nil {
 				return
 			}
 			if err = helpers.DeleteVirtualMachine(ctx, vmAccess, resourceGroup, vmName); err != nil {
65 changes: 65 additions & 0 deletions pkg/azure/provider/provider_test.go
@@ -159,6 +159,71 @@ func TestDeleteMachineWhenVMExists(t *testing.T) {
 	}
 }

+func TestDeleteMachineWhenDataDiskIsAttachedAfterVMCreation(t *testing.T) {
+	const (
+		vmName                       = "vm-0"
+		diskNameToAttachOnExistingVM = "sample-disk"
+	)
+
+	checkClusterStateFn := func(g *WithT, ctx context.Context, factory fakes.Factory, vmName string, dataDiskNames []string) {
+		checkClusterStateAndGetMachineResources(g, ctx, factory, vmName, false, false, false, dataDiskNames, false, true)
+		checkAndGetDataDisks(g, ctx, factory, []string{diskNameToAttachOnExistingVM}, true, false)
+	}
+
+	g := NewWithT(t)
+	ctx := context.Background()
+
+	// initialize cluster state
+	//----------------------------------------------------------------------------
+	// create provider spec
+	providerSpecBuilder := testhelp.NewProviderSpecBuilder(testResourceGroupName, testShootNs, testWorkerPool0Name).WithDefaultValues()
+	// Add one data disk
+	providerSpecBuilder.WithDataDisks(testDataDiskName, 1)
+	providerSpec := providerSpecBuilder.Build()
+
+	// create cluster state
+	clusterState := fakes.NewClusterState(providerSpec)
+	m := fakes.NewMachineResourcesBuilder(providerSpec, vmName).WithCascadeDeleteOptions(fakes.CascadeDeleteAllResources).BuildAllResources()
+
+	// Attach a new data disk to the VM
+	err := m.AttachDataDisk(providerSpec, diskNameToAttachOnExistingVM, armcompute.DiskDeleteOptionTypesDetach)
+	g.Expect(err).To(BeNil())
+	clusterState.AddMachineResources(m)
+
+	// create fake factory
+	fakeFactory := createDefaultFakeFactoryForDeleteMachine(g, providerSpec.ResourceGroup, clusterState)
+
+	// Create machine and machine class to be used to create DeleteMachineRequest
+	machineClass, err := fakes.CreateMachineClass(providerSpec, nil)
+	g.Expect(err).To(BeNil())
+	machine := &v1alpha1.Machine{
+		ObjectMeta: fakes.NewMachineObjectMeta(testShootNs, vmName),
+	}
+
+	// Test environment before running actual test
+	//----------------------------------------------------------------------------
+	_, err = fakeFactory.VMAccess.Get(ctx, providerSpec.ResourceGroup, vmName, nil)
+	g.Expect(err).To(BeNil())
+
+	// Test
+	//----------------------------------------------------------------------------
+	testDriver := NewDefaultDriver(fakeFactory)
+	_, err = testDriver.DeleteMachine(ctx, &driver.DeleteMachineRequest{
+		Machine:      machine,
+		MachineClass: machineClass,
+		Secret:       fakes.CreateProviderSecret(),
+	})
+	g.Expect(err).To(BeNil())
+
+	var dataDiskNames []string
+	if !utils.IsSliceNilOrEmpty(providerSpec.Properties.StorageProfile.DataDisks) {
+		dataDiskNames = testhelp.CreateDataDiskNames(vmName, providerSpec)
+	}
+	// evaluate cluster state post delete machine operation
+	checkClusterStateFn(g, ctx, *fakeFactory, vmName, dataDiskNames)
+}
+
 func TestDeleteMachineWhenVMDoesNotExist(t *testing.T) {
 	const vmName = "test-vm-0"
 	testVMID := fakes.CreateVirtualMachineID(testhelp.SubscriptionID, testResourceGroupName, vmName)
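The new test encodes the commit's core invariant: a disk attached after VM creation (with DeleteOption "Detach") must survive DeleteMachine, while the disks from the provider spec are removed with the VM. A toy, self-contained model of that invariant, using a plain map instead of the fakes package and invented disk names:

package main

import "fmt"

func main() {
	// DeleteOption recorded per disk, as the fake cluster state would see it.
	disks := map[string]string{
		"vm-0-1-data-disk": "Delete", // created with the VM (hypothetical name)
		"sample-disk":      "Detach", // attached after VM creation
	}
	// Simulate VM deletion: the cascade removes only disks marked "Delete".
	for name, opt := range disks {
		if opt == "Delete" {
			delete(disks, name)
		}
	}
	fmt.Println(disks) // map[sample-disk:Detach]: the later-attached disk survives
}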
40 changes: 29 additions & 11 deletions pkg/azure/testhelp/fakes/machineresources.go
@@ -5,6 +5,7 @@
 package fakes

 import (
+	"fmt"
 	"strings"

 	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
@@ -151,6 +152,18 @@ func (m *MachineResources) UpdateDataDisksDeleteOpt(deleteOpt *armcompute.DiskDe
 	}
 }

+// AttachDataDisk attaches a data disk to the VM
+func (m *MachineResources) AttachDataDisk(spec api.AzureProviderSpec, diskName string, deleteOption armcompute.DiskDeleteOptionTypes) error {
+	if _, ok := m.DataDisks[diskName]; ok {
+		return fmt.Errorf("disk %s already exists, cannot create a new disk with the same name", diskName)
+	}
+	dataDisk := createDataDisk(int32(len(m.DataDisks)+1), "None", &deleteOption, 20, testhelp.StorageAccountType, diskName)
+	d := createDiskResource(spec, diskName, m.VM.ID, nil)
+	m.DataDisks[diskName] = d
+	m.VM.Properties.StorageProfile.DataDisks = append(m.VM.Properties.StorageProfile.DataDisks, dataDisk)
+	return nil
+}
+
 func isCascadeDeleteSetForAllDataDisks(dataDiskDeleteOptsMap map[string]*armcompute.DiskDeleteOptionTypes) bool {
 	for _, deleteOpt := range dataDiskDeleteOptsMap {
 		if *deleteOpt != armcompute.DiskDeleteOptionTypesDelete {
@@ -429,18 +442,23 @@ func createDataDisks(spec api.AzureProviderSpec, vmName string, deleteOption *ar
 	}
 	dataDisks := make([]*armcompute.DataDisk, 0, len(specDataDisks))
 	for _, disk := range specDataDisks {
-		d := &armcompute.DataDisk{
-			CreateOption: to.Ptr(armcompute.DiskCreateOptionTypesEmpty),
-			Lun:          disk.Lun,
-			Caching:      to.Ptr(armcompute.CachingTypes(disk.Caching)),
-			DeleteOption: deleteOption,
-			DiskSizeGB:   pointer.Int32(disk.DiskSizeGB),
-			ManagedDisk: &armcompute.ManagedDiskParameters{
-				StorageAccountType: to.Ptr(armcompute.StorageAccountTypes(disk.StorageAccountType)),
-			},
-			Name: to.Ptr(utils.CreateDataDiskName(vmName, disk)),
-		}
+		diskName := utils.CreateDataDiskName(vmName, disk)
+		d := createDataDisk(*disk.Lun, armcompute.CachingTypes(disk.Caching), deleteOption, disk.DiskSizeGB, armcompute.StorageAccountTypes(disk.StorageAccountType), diskName)
 		dataDisks = append(dataDisks, d)
 	}
 	return dataDisks
 }
+
+func createDataDisk(lun int32, caching armcompute.CachingTypes, deleteOption *armcompute.DiskDeleteOptionTypes, diskSize int32, storageAccountType armcompute.StorageAccountTypes, diskName string) *armcompute.DataDisk {
+	return &armcompute.DataDisk{
+		CreateOption: to.Ptr(armcompute.DiskCreateOptionTypesEmpty),
+		Lun:          to.Ptr(lun),
+		Caching:      to.Ptr(caching),
+		DeleteOption: deleteOption,
+		DiskSizeGB:   pointer.Int32(diskSize),
+		ManagedDisk: &armcompute.ManagedDiskParameters{
+			StorageAccountType: to.Ptr(storageAccountType),
+		},
+		Name: to.Ptr(diskName),
+	}
+}
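For reference, a usage sketch of the new createDataDisk helper as it might be called inside package fakes. The argument values mirror what AttachDataDisk passes (LUN 1, "None" caching, 20 GiB) with an illustrative storage account type; exampleAttachableDisk is a hypothetical name, not part of this commit.

// Hypothetical usage inside package fakes (illustrative only).
func exampleAttachableDisk() *armcompute.DataDisk {
	detach := armcompute.DiskDeleteOptionTypesDetach
	// DeleteOption "Detach" means a cascading VM delete will not touch this disk.
	return createDataDisk(1, armcompute.CachingTypesNone, &detach, 20, armcompute.StorageAccountTypesStandardLRS, "sample-disk")
}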
