diff --git a/images/virtualization-artifact/pkg/builder/vmbda/option.go b/images/virtualization-artifact/pkg/builder/vmbda/option.go new file mode 100644 index 0000000000..ecb902244a --- /dev/null +++ b/images/virtualization-artifact/pkg/builder/vmbda/option.go @@ -0,0 +1,50 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vmbda + +import ( + "github.com/deckhouse/virtualization-controller/pkg/builder/meta" + "github.com/deckhouse/virtualization/api/core/v1alpha2" +) + +type Option func(vd *v1alpha2.VirtualMachineBlockDeviceAttachment) + +var ( + WithName = meta.WithName[*v1alpha2.VirtualMachineBlockDeviceAttachment] + WithNamespace = meta.WithNamespace[*v1alpha2.VirtualMachineBlockDeviceAttachment] + WithGenerateName = meta.WithGenerateName[*v1alpha2.VirtualMachineBlockDeviceAttachment] + WithLabel = meta.WithLabel[*v1alpha2.VirtualMachineBlockDeviceAttachment] + WithLabels = meta.WithLabels[*v1alpha2.VirtualMachineBlockDeviceAttachment] + WithAnnotation = meta.WithAnnotation[*v1alpha2.VirtualMachineBlockDeviceAttachment] + WithAnnotations = meta.WithAnnotations[*v1alpha2.VirtualMachineBlockDeviceAttachment] + WithFinalizer = meta.WithFinalizer[*v1alpha2.VirtualMachineBlockDeviceAttachment] +) + +func WithVirtualMachineName(name string) func(vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) { + return func(vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) { + vmbda.Spec.VirtualMachineName = name + } +} + +func WithBlockDeviceRef(kind v1alpha2.VMBDAObjectRefKind, name string) func(vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) { + return func(vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) { + vmbda.Spec.BlockDeviceRef = v1alpha2.VMBDAObjectRef{ + Kind: kind, + Name: name, + } + } +} diff --git a/images/virtualization-artifact/pkg/builder/vmbda/vd.go b/images/virtualization-artifact/pkg/builder/vmbda/vd.go new file mode 100644 index 0000000000..e5f9f6c075 --- /dev/null +++ b/images/virtualization-artifact/pkg/builder/vmbda/vd.go @@ -0,0 +1,51 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vmbda + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/deckhouse/virtualization/api/core/v1alpha2" +) + +func New(options ...Option) *v1alpha2.VirtualMachineBlockDeviceAttachment { + vmbda := NewEmpty("", "") + ApplyOptions(vmbda, options...) 
+ return vmbda +} + +func ApplyOptions(vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment, opts ...Option) { + if vmbda == nil { + return + } + for _, opt := range opts { + opt(vmbda) + } +} + +func NewEmpty(name, namespace string) *v1alpha2.VirtualMachineBlockDeviceAttachment { + return &v1alpha2.VirtualMachineBlockDeviceAttachment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: v1alpha2.SchemeGroupVersion.String(), + Kind: v1alpha2.VirtualMachineBlockDeviceAttachmentKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } +} diff --git a/images/virtualization-artifact/pkg/common/vmop/vmop.go b/images/virtualization-artifact/pkg/common/vmop/vmop.go index 882d51570d..3cac1a3d9d 100644 --- a/images/virtualization-artifact/pkg/common/vmop/vmop.go +++ b/images/virtualization-artifact/pkg/common/vmop/vmop.go @@ -17,7 +17,11 @@ limitations under the License. package vmop import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vmopcondition" ) func IsInProgressOrPending(vmop *v1alpha2.VirtualMachineOperation) bool { @@ -44,3 +48,8 @@ func InProgressOrPendingExists(vmops []v1alpha2.VirtualMachineOperation) bool { } return false } + +func IsOperationInProgress(vmop *v1alpha2.VirtualMachineOperation) bool { + sent, _ := conditions.GetCondition(vmopcondition.TypeSignalSent, vmop.Status.Conditions) + return sent.Status == metav1.ConditionTrue && !IsFinished(vmop) +} diff --git a/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm_utils.go b/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm_utils.go index 4ac6ae376f..324ba440db 100644 --- a/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm_utils.go +++ b/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm_utils.go @@ -259,7 +259,7 @@ func ApplyVirtualMachineSpec( func ApplyMigrationVolumes(kvvm *KVVM, vm *virtv2.VirtualMachine, vdsByName map[string]*virtv2.VirtualDisk) error { bootOrder := uint(1) - var updateVolumesStrategy *virtv1.UpdateVolumesStrategy + var updateVolumesStrategy *virtv1.UpdateVolumesStrategy = nil for _, bd := range vm.Spec.BlockDeviceRefs { if bd.Kind != virtv2.DiskDevice { diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/life_cycle.go b/images/virtualization-artifact/pkg/controller/vd/internal/life_cycle.go index cd7a2be6f6..b9d69dd1eb 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/life_cycle.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/life_cycle.go @@ -19,7 +19,6 @@ package internal import ( "context" "fmt" - "time" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -153,10 +152,5 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (r return reconcile.Result{}, fmt.Errorf("failed to sync virtual disk data source %s: %w", ds.Name(), err) } - readyConditionAfterSync, _ := conditions.GetCondition(vdcondition.ReadyType, vd.Status.Conditions) - if readyConditionAfterSync.Status == metav1.ConditionTrue && conditions.IsLastUpdated(readyConditionAfterSync, vd) { - return reconcile.Result{RequeueAfter: 1 * time.Second}, nil - } - return result, nil } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/migration.go b/images/virtualization-artifact/pkg/controller/vd/internal/migration.go index 468b374fed..d9d7407b0e 100644 --- 
a/images/virtualization-artifact/pkg/controller/vd/internal/migration.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/migration.go @@ -410,7 +410,7 @@ func (h MigrationHandler) handleMigratePrepareTarget(ctx context.Context, vd *v1 StartTimestamp: metav1.Now(), } - cb.Status(metav1.ConditionTrue). + cb.Status(metav1.ConditionFalse). Reason(vdcondition.MigratingWaitForTargetReadyReason). Message("Migration started.") conditions.SetCondition(cb, &vd.Status.Conditions) @@ -426,19 +426,17 @@ func (h MigrationHandler) handleMigrateSync(ctx context.Context, vd *v1alpha2.Vi cb := conditions.NewConditionBuilder(vdcondition.MigratingType). Generation(vd.Generation). - Status(metav1.ConditionTrue). + Status(metav1.ConditionFalse). Reason(vdcondition.MigratingWaitForTargetReadyReason) if pvc == nil { - cb.Status(metav1.ConditionFalse). - Reason(vdcondition.MigratingWaitForTargetReadyReason). - Message("Target persistent volume claim is not found.") + cb.Message("Target persistent volume claim is not found.") conditions.SetCondition(cb, &vd.Status.Conditions) return nil } if pvc.Status.Phase == corev1.ClaimBound { - cb.Reason(vdcondition.InProgress).Message("Target persistent volume claim is bound.") + cb.Status(metav1.ConditionTrue).Reason(vdcondition.InProgress).Message("Target persistent volume claim is bound.") conditions.SetCondition(cb, &vd.Status.Conditions) return nil } @@ -467,15 +465,13 @@ func (h MigrationHandler) handleMigrateSync(ctx context.Context, vd *v1alpha2.Vi isWaitForFistConsumer := sc.VolumeBindingMode == nil || *sc.VolumeBindingMode == storev1.VolumeBindingWaitForFirstConsumer if isWaitForFistConsumer { - cb.Reason(vdcondition.InProgress).Message("Target persistent volume claim is waiting for first consumer.") + cb.Status(metav1.ConditionTrue).Reason(vdcondition.InProgress).Message("Target persistent volume claim is waiting for first consumer.") conditions.SetCondition(cb, &vd.Status.Conditions) return nil } } - cb.Status(metav1.ConditionFalse). - Reason(vdcondition.MigratingWaitForTargetReadyReason). - Message("Target persistent volume claim is not bound or not waiting for first consumer.") + cb.Message("Target persistent volume claim is not bound or not waiting for first consumer.") conditions.SetCondition(cb, &vd.Status.Conditions) return nil } diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/migrating.go b/images/virtualization-artifact/pkg/controller/vm/internal/migrating.go index 093fc34ca7..312d2f732b 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/migrating.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/migrating.go @@ -17,8 +17,10 @@ limitations under the License. 
package internal import ( + "cmp" "context" "fmt" + "slices" "strings" corev1 "k8s.io/api/core/v1" @@ -285,21 +287,30 @@ func (h *MigratingHandler) getVMOPCandidate(ctx context.Context, s state.Virtual return nil, err } - var candidate *virtv2.VirtualMachineOperation - if len(vmops) > 0 { - candidate = vmops[0] + if len(vmops) == 0 { + return nil, nil + } - for _, vmop := range vmops { - if !commonvmop.IsMigration(vmop) { - continue - } - if vmop.GetCreationTimestamp().Time.After(candidate.GetCreationTimestamp().Time) { - candidate = vmop - } + // sort vmops from the oldest to the newest + slices.SortFunc(vmops, func(a, b *virtv2.VirtualMachineOperation) int { + return cmp.Compare(a.GetCreationTimestamp().UnixNano(), b.GetCreationTimestamp().UnixNano()) + }) + + migrations := slices.DeleteFunc(vmops, func(vmop *virtv2.VirtualMachineOperation) bool { + return !commonvmop.IsMigration(vmop) + }) + + for _, migration := range migrations { + if commonvmop.IsInProgressOrPending(migration) { + return migration, nil } } - return candidate, nil + if len(migrations) > 0 { + return migrations[len(migrations)-1], nil + } + + return nil, nil } func (h *MigratingHandler) syncMigratable(ctx context.Context, s state.VirtualMachineState, vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine) error { diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/service/migration_volumes.go b/images/virtualization-artifact/pkg/controller/vm/internal/service/migration_volumes.go index 559233f7d5..b74513bb6a 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/service/migration_volumes.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/service/migration_volumes.go @@ -17,9 +17,11 @@ limitations under the License. package service import ( + "cmp" "context" "fmt" "log/slog" + "slices" "time" corev1 "k8s.io/api/core/v1" @@ -35,12 +37,14 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/patch" commonvd "github.com/deckhouse/virtualization-controller/pkg/common/vd" commonvm "github.com/deckhouse/virtualization-controller/pkg/common/vm" + commonvmop "github.com/deckhouse/virtualization-controller/pkg/common/vmop" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/kvbuilder" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" "github.com/deckhouse/virtualization-controller/pkg/logger" "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vmopcondition" ) type MigrationVolumesService struct { @@ -79,6 +83,23 @@ func (s MigrationVolumesService) SyncVolumes(ctx context.Context, vmState state. 
return reconcile.Result{}, nil } + if migrating.Reason == vmcondition.ReasonReadyToMigrate.String() { + return reconcile.Result{}, nil + } + + vmop, err := s.getVMOPCandidate(ctx, vmState) + if err != nil { + return reconcile.Result{}, err + } + + if vmop != nil { + completed, _ := conditions.GetCondition(vmopcondition.TypeCompleted, vmop.Status.Conditions) + switch completed.Reason { + case vmopcondition.ReasonMigrationPrepareTarget.String(), vmopcondition.ReasonMigrationTargetReady.String(), vmopcondition.ReasonMigrationRunning.String(): + return reconcile.Result{}, nil + } + } + kvvmInCluster, builtKVVM, builtKVVMWithMigrationVolumes, kvvmiInCluster, err := s.getMachines(ctx, vmState) if err != nil { return reconcile.Result{}, err @@ -101,33 +122,37 @@ func (s MigrationVolumesService) SyncVolumes(ctx context.Context, vmState state. return reconcile.Result{}, err } - // Check disks in generated KVVM before running kvvmSynced check: detect non-migratable disks and disks with changed storage class. - if !s.areDisksSynced(builtKVVMWithMigrationVolumes, readWriteOnceDisks) { - log.Info("ReadWriteOnce disks are not synced yet, skip volume migration.") - return reconcile.Result{}, nil - } - if !s.areDisksSynced(builtKVVMWithMigrationVolumes, storageClassChangedDisks) { - log.Info("Storage class changed disks are not synced yet, skip volume migration.") - return reconcile.Result{}, nil - } + readWriteOnceDisksSynced := s.areDisksSynced(builtKVVMWithMigrationVolumes, readWriteOnceDisks) + storageClassChangedDisksSynced := s.areDisksSynced(builtKVVMWithMigrationVolumes, storageClassChangedDisks) kvvmSynced := equality.Semantic.DeepEqual(builtKVVMWithMigrationVolumes.Spec.Template.Spec.Volumes, kvvmInCluster.Spec.Template.Spec.Volumes) if kvvmSynced { + if vmop != nil && !(readWriteOnceDisksSynced && storageClassChangedDisksSynced) { + return reconcile.Result{RequeueAfter: 10 * time.Second}, nil + } // we already synced our vm with kvvm log.Info("kvvm volumes are already synced, skip volume migration.") return reconcile.Result{}, nil } + if !equality.Semantic.DeepEqual(builtKVVM.Spec.Template.Spec.Volumes, kvvmiInCluster.Spec.Volumes) { + return reconcile.Result{}, s.patchVolumes(ctx, builtKVVM) + } + migrationRequested := builtKVVMWithMigrationVolumes.Spec.UpdateVolumesStrategy != nil && *builtKVVMWithMigrationVolumes.Spec.UpdateVolumesStrategy == virtv1.UpdateVolumesStrategyMigration - migrationInProgress := len(kvvmiInCluster.Status.MigratedVolumes) > 0 - if !migrationRequested && !migrationInProgress { - log.Info("Migration is not requested and not in progress, skip volume migration.") - return reconcile.Result{}, nil + // Check disks in generated KVVM before running kvvmSynced check: detect non-migratable disks and disks with changed storage class. + if !readWriteOnceDisksSynced { + log.Info("ReadWriteOnce disks are not synced yet, skip volume migration.") + return reconcile.Result{RequeueAfter: 10 * time.Second}, nil + } + if !storageClassChangedDisksSynced { + log.Info("Storage class changed disks are not synced yet, skip volume migration.") + return reconcile.Result{RequeueAfter: 10 * time.Second}, nil } - if migrationRequested && !migrationInProgress { - // We should wait 10 seconds. This delay allows user to change storage class on other volumes + if migrationRequested { + // We should wait delayDuration seconds. 
This delay allows the user to change the storage class on other volumes if len(storageClassChangedDisks) > 0 { delay, exists := s.delay[vm.UID] if !exists { @@ -167,6 +192,16 @@ func (s MigrationVolumesService) SyncVolumes(ctx context.Context, vmState state. // migration in progress // if some volumes is different, we should revert all and sync again in next reconcile + if s.shouldRevert(kvvmiInCluster, readWriteOnceDisks, storageClassChangedDisks) { + return reconcile.Result{}, s.patchVolumes(ctx, builtKVVM) + } + + return reconcile.Result{}, nil +} + +// if any volume in the kvvmi does not exist in readWriteOnceDisks or storageClassChangedDisks, +// this indicates that the in-cluster volumes no longer match the desired migration targets and should be reverted +func (s MigrationVolumesService) shouldRevert(kvvmi *virtv1.VirtualMachineInstance, readWriteOnceDisks, storageClassChangedDisks map[string]*v1alpha2.VirtualDisk) bool { migratedPVCNames := make(map[string]struct{}) for _, vd := range readWriteOnceDisks { @@ -176,21 +211,14 @@ func (s MigrationVolumesService) SyncVolumes(ctx context.Context, vmState state. migratedPVCNames[vd.Status.MigrationState.TargetPVC] = struct{}{} } - shouldRevert := false - for _, v := range kvvmiInCluster.Status.MigratedVolumes { - if v.DestinationPVCInfo != nil { - if _, ok := migratedPVCNames[v.DestinationPVCInfo.ClaimName]; !ok { - shouldRevert = true - break + for _, v := range kvvmi.Spec.Volumes { + if v.PersistentVolumeClaim != nil { + if _, ok := migratedPVCNames[v.PersistentVolumeClaim.ClaimName]; !ok { + return true } } } - - if shouldRevert { - return reconcile.Result{}, s.patchVolumes(ctx, builtKVVM) - } - - return reconcile.Result{}, nil + return false } func (s MigrationVolumesService) patchVolumes(ctx context.Context, kvvm *virtv1.VirtualMachine) error { @@ -452,3 +480,30 @@ func (s MigrationVolumesService) areDisksSynced(kvvm *virtv1.VirtualMachine, dis return true } + +func (s MigrationVolumesService) getVMOPCandidate(ctx context.Context, vmState state.VirtualMachineState) (*v1alpha2.VirtualMachineOperation, error) { + vmops, err := vmState.VMOPs(ctx) + if err != nil { + return nil, err + } + + if len(vmops) == 0 { + return nil, nil + } + + slices.SortFunc(vmops, func(a, b *v1alpha2.VirtualMachineOperation) int { + return cmp.Compare(a.GetCreationTimestamp().UnixNano(), b.GetCreationTimestamp().UnixNano()) + }) + + vmops = slices.DeleteFunc(vmops, func(vmop *v1alpha2.VirtualMachineOperation) bool { + return !commonvmop.IsMigration(vmop) || commonvmop.IsFinished(vmop) + }) + + for _, vmop := range vmops { + if commonvmop.IsInProgressOrPending(vmop) { + return vmop, nil + } + } + + return nil, nil +} diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/state/state.go b/images/virtualization-artifact/pkg/controller/vm/internal/state/state.go index 6338d2e4eb..9abd0cbdd7 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/state/state.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/state/state.go @@ -19,7 +19,6 @@ package state import ( "context" "fmt" - "sync" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -61,26 +60,16 @@ func New(c client.Client, vm *reconciler.Resource[*virtv2.VirtualMachine, virtv2 return &state{client: c, vm: vm} } -type state struct { - client client.Client - mu sync.RWMutex - vm *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus] - pods *corev1.PodList - pod *corev1.Pod - vdByName map[string]*virtv2.VirtualDisk - viByName map[string]*virtv2.VirtualImage - cviByName map[string]*virtv2.ClusterVirtualImage - vmbdasByRef
map[virtv2.VMBDAObjectRef][]*virtv2.VirtualMachineBlockDeviceAttachment - ipAddress *virtv2.VirtualMachineIPAddress - vmmacs []*virtv2.VirtualMachineMACAddress - vmClass *virtv2.VirtualMachineClass - shared Shared -} - type Shared struct { ShutdownInfo powerstate.ShutdownInfo } +type state struct { + client client.Client + vm *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus] + shared Shared +} + func (s *state) Shared(fn func(s *Shared)) { fn(&s.shared) } @@ -106,15 +95,6 @@ func (s *state) KVVMI(ctx context.Context) (*virtv1.VirtualMachineInstance, erro } func (s *state) Pods(ctx context.Context) (*corev1.PodList, error) { - if s.vm == nil { - return nil, nil - } - if s.pods != nil { - return s.pods, nil - } - s.mu.Lock() - defer s.mu.Unlock() - podList := corev1.PodList{} err := s.client.List(ctx, &podList, &client.ListOptions{ Namespace: s.vm.Current().GetNamespace(), @@ -123,17 +103,10 @@ func (s *state) Pods(ctx context.Context) (*corev1.PodList, error) { if err != nil && !k8serrors.IsNotFound(err) { return nil, fmt.Errorf("unable to list virt-launcher Pod for KubeVirt VM %q: %w", s.vm.Current().GetName(), err) } - s.pods = &podList - return s.pods, nil + return &podList, nil } func (s *state) Pod(ctx context.Context) (*corev1.Pod, error) { - if s.vm == nil { - return nil, nil - } - if s.pod != nil { - return s.pod, nil - } pods, err := s.Pods(ctx) if err != nil { return nil, fmt.Errorf("failed to fetch pod for VirtualMachine %q: %w", s.vm.Current().GetName(), err) @@ -146,22 +119,10 @@ func (s *state) Pod(ctx context.Context) (*corev1.Pod, error) { if len(pods.Items) > 0 { pod = kvvmutil.GetVMPod(kvvmi, pods) } - s.mu.Lock() - defer s.mu.Unlock() - s.pod = pod return pod, nil } func (s *state) VirtualMachineBlockDeviceAttachments(ctx context.Context) (map[virtv2.VMBDAObjectRef][]*virtv2.VirtualMachineBlockDeviceAttachment, error) { - if s.vm == nil { - return nil, nil - } - if len(s.vmbdasByRef) > 0 { - return s.vmbdasByRef, nil - } - s.mu.Lock() - defer s.mu.Unlock() - var vmbdas virtv2.VirtualMachineBlockDeviceAttachmentList err := s.client.List(ctx, &vmbdas, &client.ListOptions{ Namespace: s.vm.Name().Namespace, @@ -184,7 +145,6 @@ func (s *state) VirtualMachineBlockDeviceAttachments(ctx context.Context) (map[v vmbdasByRef[key] = append(vmbdasByRef[key], &vmbda) } - s.vmbdasByRef = vmbdasByRef return vmbdasByRef, nil } @@ -209,14 +169,6 @@ func (s *state) ClusterVirtualImage(ctx context.Context, name string) (*virtv2.C } func (s *state) VirtualDisksByName(ctx context.Context) (map[string]*virtv2.VirtualDisk, error) { - if s.vm == nil { - return nil, nil - } - if len(s.vdByName) > 0 { - return s.vdByName, nil - } - s.mu.Lock() - defer s.mu.Unlock() vdByName := make(map[string]*virtv2.VirtualDisk) for _, bd := range s.vm.Current().Spec.BlockDeviceRefs { switch bd.Kind { @@ -236,19 +188,10 @@ func (s *state) VirtualDisksByName(ctx context.Context) (map[string]*virtv2.Virt continue } } - s.vdByName = vdByName return vdByName, nil } func (s *state) VirtualImagesByName(ctx context.Context) (map[string]*virtv2.VirtualImage, error) { - if s.vm == nil { - return nil, nil - } - if len(s.viByName) > 0 { - return s.viByName, nil - } - s.mu.Lock() - defer s.mu.Unlock() viByName := make(map[string]*virtv2.VirtualImage) for _, bd := range s.vm.Current().Spec.BlockDeviceRefs { switch bd.Kind { @@ -268,19 +211,10 @@ func (s *state) VirtualImagesByName(ctx context.Context) (map[string]*virtv2.Vir continue } } - s.viByName = viByName return viByName, nil } func (s *state) 
ClusterVirtualImagesByName(ctx context.Context) (map[string]*virtv2.ClusterVirtualImage, error) { - if s.vm == nil { - return nil, nil - } - if len(s.cviByName) > 0 { - return s.cviByName, nil - } - s.mu.Lock() - defer s.mu.Unlock() cviByName := make(map[string]*virtv2.ClusterVirtualImage) for _, bd := range s.vm.Current().Spec.BlockDeviceRefs { switch bd.Kind { @@ -300,21 +234,10 @@ func (s *state) ClusterVirtualImagesByName(ctx context.Context) (map[string]*vir continue } } - s.cviByName = cviByName return cviByName, nil } func (s *state) VirtualMachineMACAddresses(ctx context.Context) ([]*virtv2.VirtualMachineMACAddress, error) { - if s.vm == nil { - return nil, nil - } - - if s.vmmacs != nil { - return s.vmmacs, nil - } - s.mu.Lock() - defer s.mu.Unlock() - var vmmacs []*virtv2.VirtualMachineMACAddress for _, ns := range s.vm.Current().Spec.Networks { vmmacKey := types.NamespacedName{Name: ns.VirtualMachineMACAddressName, Namespace: s.vm.Current().GetNamespace()} @@ -340,21 +263,10 @@ func (s *state) VirtualMachineMACAddresses(ctx context.Context) ([]*virtv2.Virtu vmmacs = append(vmmacs, &vmmac) } - s.vmmacs = vmmacs - return s.vmmacs, nil + return vmmacs, nil } func (s *state) IPAddress(ctx context.Context) (*virtv2.VirtualMachineIPAddress, error) { - if s.vm == nil { - return nil, nil - } - - if s.ipAddress != nil { - return s.ipAddress, nil - } - s.mu.Lock() - defer s.mu.Unlock() - vmipName := s.vm.Current().Spec.VirtualMachineIPAddress if vmipName == "" { vmipList := &virtv2.VirtualMachineIPAddressList{} @@ -372,42 +284,30 @@ func (s *state) IPAddress(ctx context.Context) (*virtv2.VirtualMachineIPAddress, return nil, nil } - s.ipAddress = &vmipList.Items[0] - } else { - vmipKey := types.NamespacedName{Name: vmipName, Namespace: s.vm.Current().GetNamespace()} + return &vmipList.Items[0], nil + } - ipAddress, err := object.FetchObject(ctx, vmipKey, s.client, &virtv2.VirtualMachineIPAddress{}) - if err != nil { - return nil, fmt.Errorf("failed to fetch VirtualMachineIPAddress: %w", err) - } - s.ipAddress = ipAddress + vmipKey := types.NamespacedName{Name: vmipName, Namespace: s.vm.Current().GetNamespace()} + + ipAddress, err := object.FetchObject(ctx, vmipKey, s.client, &virtv2.VirtualMachineIPAddress{}) + if err != nil { + return nil, fmt.Errorf("failed to fetch VirtualMachineIPAddress: %w", err) } - return s.ipAddress, nil + return ipAddress, nil } func (s *state) Class(ctx context.Context) (*virtv2.VirtualMachineClass, error) { - if s.vm == nil { - return nil, nil - } - if s.vmClass != nil { - return s.vmClass, nil - } className := s.vm.Current().Spec.VirtualMachineClassName classKey := types.NamespacedName{Name: className} class, err := object.FetchObject(ctx, classKey, s.client, &virtv2.VirtualMachineClass{}) if err != nil { return nil, fmt.Errorf("failed to fetch VirtualMachineClass: %w", err) } - s.vmClass = class - return s.vmClass, nil + return class, nil } func (s *state) VMOPs(ctx context.Context) ([]*virtv2.VirtualMachineOperation, error) { - if s.vm == nil { - return nil, nil - } - vm := s.vm.Current() vmops := &virtv2.VirtualMachineOperationList{} err := s.client.List(ctx, vmops, client.InNamespace(vm.Namespace)) diff --git a/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/lifecycle.go b/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/lifecycle.go index b6f9440b1f..e9c0b2ada9 100644 --- a/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/lifecycle.go +++ 
b/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/lifecycle.go @@ -37,7 +37,6 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/livemigration" "github.com/deckhouse/virtualization-controller/pkg/logger" "github.com/deckhouse/virtualization/api/core/v1alpha2" - "github.com/deckhouse/virtualization/api/core/v1alpha2/vmbdacondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmopcondition" ) @@ -120,7 +119,7 @@ func (h LifecycleHandler) Handle(ctx context.Context, vmop *v1alpha2.VirtualMach // 3. Operation already in progress. Check if the operation is completed. // Synchronize conditions to the VMOP. - if isOperationInProgress(vmop) { + if commonvmop.IsOperationInProgress(vmop) { log.Debug("Operation in progress, check if VM is completed", "vm.phase", vm.Status.Phase, "vmop.phase", vmop.Status.Phase) return reconcile.Result{}, h.syncOperationComplete(ctx, vmop) } @@ -344,17 +343,15 @@ func (h LifecycleHandler) areAnyRWOHotplugDisks(ctx context.Context, vm *v1alpha return false, err } - var attached []*v1alpha2.VirtualMachineBlockDeviceAttachment + var vmbdas []*v1alpha2.VirtualMachineBlockDeviceAttachment for _, vmbda := range vmbdaList.Items { if vmbda.Spec.BlockDeviceRef.Kind != v1alpha2.VMBDAObjectRefKindVirtualDisk { continue } - if cond, _ := conditions.GetCondition(vmbdacondition.AttachedType, vmbda.Status.Conditions); cond.Status == metav1.ConditionTrue { - attached = append(attached, &vmbda) - } + vmbdas = append(vmbdas, &vmbda) } - for _, vmbda := range attached { + for _, vmbda := range vmbdas { vd := &v1alpha2.VirtualDisk{} err = h.client.Get(ctx, client.ObjectKey{Namespace: vmbda.Namespace, Name: vmbda.Spec.BlockDeviceRef.Name}, vd) if err != nil { @@ -491,12 +488,6 @@ func (h LifecycleHandler) recordEvent(ctx context.Context, vmop *v1alpha2.Virtua } } -func isOperationInProgress(vmop *v1alpha2.VirtualMachineOperation) bool { - sent, _ := conditions.GetCondition(vmopcondition.TypeSignalSent, vmop.Status.Conditions) - completed, _ := conditions.GetCondition(vmopcondition.TypeCompleted, vmop.Status.Conditions) - return sent.Status == metav1.ConditionTrue && completed.Status != metav1.ConditionTrue -} - var mapMigrationPhaseToReason = map[virtv1.VirtualMachineInstanceMigrationPhase]vmopcondition.ReasonCompleted{ virtv1.MigrationPhaseUnset: vmopcondition.ReasonMigrationPending, virtv1.MigrationPending: vmopcondition.ReasonMigrationPending, diff --git a/tests/e2e/e2e_test.go b/tests/e2e/e2e_test.go index c6fd27b44f..e310d150d7 100644 --- a/tests/e2e/e2e_test.go +++ b/tests/e2e/e2e_test.go @@ -165,10 +165,7 @@ var _ = SynchronizedBeforeSuite(func() { } if !config.IsReusable() { - err := Cleanup() - if err != nil { - Expect(err).NotTo(HaveOccurred()) - } + Expect(Cleanup()).To(Succeed()) } else { log.Println("Run test in REUSABLE mode") } diff --git a/tests/e2e/framework/client.go b/tests/e2e/framework/client.go index fbbc974aa4..2487e7fcd6 100644 --- a/tests/e2e/framework/client.go +++ b/tests/e2e/framework/client.go @@ -21,6 +21,9 @@ import ( "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" clientgoscheme "k8s.io/client-go/kubernetes/scheme" + _ "k8s.io/client-go/plugin/pkg/client/auth/exec" + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + _ "k8s.io/client-go/plugin/pkg/client/auth/oidc" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/deckhouse/virtualization/api/client/kubeclient" @@ -30,11 +33,6 @@ import ( 
"github.com/deckhouse/virtualization/tests/e2e/d8" gt "github.com/deckhouse/virtualization/tests/e2e/git" "github.com/deckhouse/virtualization/tests/e2e/kubectl" - - // register auth plugins - _ "k8s.io/client-go/plugin/pkg/client/auth/exec" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - _ "k8s.io/client-go/plugin/pkg/client/auth/oidc" ) var clients = Clients{} diff --git a/tests/e2e/object/vmbda.go b/tests/e2e/object/vmbda.go new file mode 100644 index 0000000000..b148caa7a6 --- /dev/null +++ b/tests/e2e/object/vmbda.go @@ -0,0 +1,33 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "github.com/deckhouse/virtualization-controller/pkg/builder/vmbda" + "github.com/deckhouse/virtualization/api/core/v1alpha2" +) + +func NewVMBDAFromDisk(name, vmName string, vd *v1alpha2.VirtualDisk, opts ...vmbda.Option) *v1alpha2.VirtualMachineBlockDeviceAttachment { + bda := vmbda.New( + vmbda.WithName(name), + vmbda.WithNamespace(vd.Namespace), + vmbda.WithVirtualMachineName(vmName), + vmbda.WithBlockDeviceRef(v1alpha2.VMBDAObjectRefKindVirtualDisk, vd.Name), + ) + vmbda.ApplyOptions(bda, opts...) + return bda +} diff --git a/tests/e2e/storage/volume_migration_local_disks.go b/tests/e2e/storage/volume_migration_local_disks.go index 5ba1558690..1cc23938c4 100644 --- a/tests/e2e/storage/volume_migration_local_disks.go +++ b/tests/e2e/storage/volume_migration_local_disks.go @@ -27,14 +27,17 @@ import ( . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" crclient "sigs.k8s.io/controller-runtime/pkg/client" vmopbuilder "github.com/deckhouse/virtualization-controller/pkg/builder/vmop" "github.com/deckhouse/virtualization-controller/pkg/common/patch" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vmopcondition" "github.com/deckhouse/virtualization/tests/e2e/framework" "github.com/deckhouse/virtualization/tests/e2e/object" "github.com/deckhouse/virtualization/tests/e2e/util" @@ -411,4 +414,52 @@ var _ = SIGDescribe("Volume migration with local disks", framework.CommonE2ETest untilVirtualDisksMigrationsFailed(f) }) }) + + It("should be failed with RWO VMBDA", func() { + ns := f.Namespace().Name + + vm, vds := localMigrationRootAndAdditionalBuild() + + By("Creating VM") + vm, err := f.VirtClient().VirtualMachines(ns).Create(context.Background(), vm, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + f.DeferDelete(vm) + + By("Creating VDs") + for _, vd := range vds { + _, err := f.VirtClient().VirtualDisks(ns).Create(context.Background(), vd, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + f.DeferDelete(vd) + } + + By("Creating RWO VD for VMBDA") + const vdVmbdaName = "vd-vmbda-rwo" + vdVmbda := object.NewBlankVD(vdVmbdaName, ns, &storageClass.Name, ptr.To(resource.MustParse("100Mi"))) + _, err = f.VirtClient().VirtualDisks(ns).Create(context.Background(), vdVmbda, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + f.DeferDelete(vdVmbda) + + By("Creating VMBDA") + const vmbdaName = "vd-vmbda-rwo" + vmbda := object.NewVMBDAFromDisk(vmbdaName, vm.Name, vdVmbda) + _, err = f.VirtClient().VirtualMachineBlockDeviceAttachments(ns).Create(context.Background(), vmbda, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + f.DeferDelete(vmbda) + + util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + + const vmopName = "local-disks-migration-with-rwo-vmbda" + util.MigrateVirtualMachine(vm, vmopbuilder.WithName(vmopName)) + + By("Waiting for migration failed") + Eventually(func(g Gomega) { + vmop, err := f.VirtClient().VirtualMachineOperations(ns).Get(context.Background(), vmopName, metav1.GetOptions{}) + g.Expect(err).NotTo(HaveOccurred()) + + g.Expect(vmop.Status.Phase).To(Equal(v1alpha2.VMOPPhaseFailed)) + completed, _ := conditions.GetCondition(vmopcondition.TypeCompleted, vmop.Status.Conditions) + g.Expect(completed.Status).To(Equal(metav1.ConditionFalse)) + g.Expect(completed.Reason).To(Equal(vmopcondition.ReasonHotplugDisksNotShared.String())) + }).WithTimeout(framework.MiddleTimeout).WithPolling(time.Second).Should(Succeed()) + }) }) diff --git a/tests/e2e/storage/volume_migration_storage_class_changed.go b/tests/e2e/storage/volume_migration_storage_class_changed.go index 0c61864942..9ad9a65ef7 100644 --- a/tests/e2e/storage/volume_migration_storage_class_changed.go +++ b/tests/e2e/storage/volume_migration_storage_class_changed.go @@ -146,7 +146,7 @@ var _ = SIGDescribe("Volume migration when storage class changed", framework.Com Expect(err).NotTo(HaveOccurred()) } - util.UntilVMMigrationSucceeded(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + 
util.UntilVMMigrationSucceeded(crclient.ObjectKeyFromObject(vm), framework.MaxTimeout) untilVirtualDisksMigrationsSucceeded(f) @@ -163,7 +163,7 @@ var _ = SIGDescribe("Volume migration when storage class changed", framework.Com }, Entry("when only root disk changed storage class", storageClassMigrationRootOnlyBuild, vdRootName), Entry("when root disk changed storage class and one local additional disk", storageClassMigrationRootAndLocalAdditionalBuild, vdRootName), - // Entry("when root disk changed storage class and one additional disk", storageClassMigrationRootAndAdditionalBuild, vdRootName, vdAdditionalName), // TODO: fixme + Entry("when root disk changed storage class and one additional disk", storageClassMigrationRootAndAdditionalBuild, vdRootName, vdAdditionalName), Entry("when only additional disk changed storage class", storageClassMigrationAdditionalOnlyBuild, vdAdditionalName), ) @@ -230,7 +230,7 @@ var _ = SIGDescribe("Volume migration when storage class changed", framework.Com }, Entry("when only root disk changed storage class", storageClassMigrationRootOnlyBuild, vdRootName), Entry("when root disk changed storage class and one local additional disk", storageClassMigrationRootAndLocalAdditionalBuild, vdRootName), - // Entry("when root disk changed storage class and one additional disk", storageClassMigrationRootAndAdditionalBuild, vdRootName, vdAdditionalName), // TODO:fixme + Entry("when root disk changed storage class and one additional disk", storageClassMigrationRootAndAdditionalBuild, vdRootName, vdAdditionalName), Entry("when only additional disk changed storage class", storageClassMigrationAdditionalOnlyBuild, vdAdditionalName), ) @@ -291,9 +291,9 @@ var _ = SIGDescribe("Volume migration when storage class changed", framework.Com } return fmt.Errorf("migration is not completed") - }).WithTimeout(framework.LongTimeout).WithPolling(time.Second).Should(Succeed()) + }).WithTimeout(framework.MaxTimeout).WithPolling(time.Second).Should(Succeed()) - util.UntilVMMigrationSucceeded(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + util.UntilVMMigrationSucceeded(crclient.ObjectKeyFromObject(vm), framework.MaxTimeout) untilVirtualDisksMigrationsSucceeded(f) @@ -307,48 +307,4 @@ var _ = SIGDescribe("Volume migration when storage class changed", framework.Com Expect(pvc.Status.Phase).To(Equal(corev1.ClaimBound)) } }) - - It("migrate to ImmediateStorageClass", func() { - ns := f.Namespace().Name - - vm, vds := storageClassMigrationRootAndAdditionalBuild() - - vm, err := f.VirtClient().VirtualMachines(ns).Create(context.Background(), vm, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - f.DeferDelete(vm) - - for _, vd := range vds { - _, err := f.VirtClient().VirtualDisks(ns).Create(context.Background(), vd, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - f.DeferDelete(vd) - } - - util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) - - vdForMigration, err := f.VirtClient().VirtualDisks(ns).Get(context.Background(), vdRootName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - immediateStorageClass := framework.GetConfig().StorageClass.ImmediateStorageClass.Name - Expect(immediateStorageClass).NotTo(BeNil()) - - By("Patch VD with new storage class") - patchBytes, err := patch.NewJSONPatch(patch.WithReplace("/spec/persistentVolumeClaim/storageClassName", immediateStorageClass)).Bytes() - Expect(err).NotTo(HaveOccurred()) - - _, err =
f.VirtClient().VirtualDisks(vdForMigration.GetNamespace()).Patch(context.Background(), vdForMigration.GetName(), types.JSONPatchType, patchBytes, metav1.PatchOptions{}) - Expect(err).NotTo(HaveOccurred()) - - util.UntilVMMigrationSucceeded(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) - - untilVirtualDisksMigrationsSucceeded(f) - - migratedVD, err := f.VirtClient().VirtualDisks(ns).Get(context.Background(), vdForMigration.GetName(), metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - pvc, err := f.KubeClient().CoreV1().PersistentVolumeClaims(ns).Get(context.Background(), migratedVD.Status.Target.PersistentVolumeClaim, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(pvc.Spec.StorageClassName).NotTo(BeNil()) - Expect(*pvc.Spec.StorageClassName).To(Equal(immediateStorageClass)) - Expect(pvc.Status.Phase).To(Equal(corev1.ClaimBound)) - }) }) diff --git a/tests/e2e/util/vm.go b/tests/e2e/util/vm.go index 45d696fd84..716bc68770 100644 --- a/tests/e2e/util/vm.go +++ b/tests/e2e/util/vm.go @@ -37,6 +37,7 @@ import ( func UntilVMAgentReady(key client.ObjectKey, timeout time.Duration) { GinkgoHelper() + By("Wait until VM agent is ready") Eventually(func() error { vm, err := framework.GetClients().VirtClient().VirtualMachines(key.Namespace).Get(context.Background(), key.Name, metav1.GetOptions{}) if err != nil {