diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
index c0262a4f..53717d26 100644
--- a/.github/workflows/test.yaml
+++ b/.github/workflows/test.yaml
@@ -54,7 +54,7 @@ jobs:
         uses: golangci/golangci-lint-action@v3
         with:
           args: --timeout=5m -v
-          version: v1.54.2
+          version: v1.55.2
 
   check-gen:
     runs-on: ubuntu-latest
diff --git a/api/v1alpha1/kubevirtmachine_types.go b/api/v1alpha1/kubevirtmachine_types.go
index 4cb44979..94e2bcf9 100644
--- a/api/v1alpha1/kubevirtmachine_types.go
+++ b/api/v1alpha1/kubevirtmachine_types.go
@@ -71,7 +71,7 @@ type VirtualMachineBootstrapCheckSpec struct {
 // KubevirtMachineStatus defines the observed state of KubevirtMachine.
 type KubevirtMachineStatus struct {
 	// Ready denotes that the machine is ready
-	// +optional
+	// +kubebuilder:default=false
 	Ready bool `json:"ready"`
 
 	// LoadBalancerConfigured denotes that the machine has been
@@ -134,6 +134,8 @@ type KubevirtMachineStatus struct {
 // +kubebuilder:object:root=true
 // +kubebuilder:storageversion
 // +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:printcolumn:name="Ready",type="boolean",JSONPath=".status.ready",description="Is machine ready"
 
 // KubevirtMachine is the Schema for the kubevirtmachines API.
 type KubevirtMachine struct {
diff --git a/clusterkubevirtadm/cmd/credentials/credentials.go b/clusterkubevirtadm/cmd/credentials/credentials.go
index ba76f576..3b03ed52 100644
--- a/clusterkubevirtadm/cmd/credentials/credentials.go
+++ b/clusterkubevirtadm/cmd/credentials/credentials.go
@@ -26,6 +26,8 @@ import (
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	k8sclient "k8s.io/client-go/kubernetes"
+	kubevirtcore "kubevirt.io/api/core"
+	cdicore "kubevirt.io/containerized-data-importer-api/pkg/apis/core"
 
 	"sigs.k8s.io/cluster-api-provider-kubevirt/clusterkubevirtadm/common"
 )
@@ -148,10 +150,15 @@ func generateRole(cmdCtx cmdContext) *rbacv1.Role {
 		},
 		Rules: []rbacv1.PolicyRule{
 			{
-				APIGroups: []string{"kubevirt.io"},
+				APIGroups: []string{kubevirtcore.GroupName},
 				Resources: []string{"virtualmachines", "virtualmachineinstances"},
 				Verbs:     []string{rbacv1.VerbAll},
 			},
+			{
+				APIGroups: []string{cdicore.GroupName},
+				Resources: []string{"datavolumes"},
+				Verbs:     []string{"get", "list", "watch"},
+			},
 			{
 				APIGroups: []string{""},
 				Resources: []string{"secrets", "services"},
diff --git a/clusterkubevirtadm/cmd/credentials/credentials_test.go b/clusterkubevirtadm/cmd/credentials/credentials_test.go
index d9c5d9e2..f9c2b213 100644
--- a/clusterkubevirtadm/cmd/credentials/credentials_test.go
+++ b/clusterkubevirtadm/cmd/credentials/credentials_test.go
@@ -2,7 +2,6 @@ package credentials
 
 import (
 	"context"
-
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
@@ -10,6 +9,8 @@ import (
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes/fake"
+	kubevirtcore "kubevirt.io/api/core"
+	cdicore "kubevirt.io/containerized-data-importer-api/pkg/apis/core"
 
 	"sigs.k8s.io/cluster-api-provider-kubevirt/clusterkubevirtadm/common"
 )
@@ -118,21 +119,18 @@ var _ = Describe("test credentials common function", func() {
 			Expect(roles.Items).To(HaveLen(1))
 			Expect(roles.Items[0].Name).Should(Equal(roleName))
 
-			Expect(roles.Items[0].Rules).Should(HaveLen(2))
-			Expect(roles.Items[0].Rules[0].APIGroups).Should(HaveLen(1))
-			Expect(roles.Items[0].Rules[0].APIGroups[0]).Should(Equal("kubevirt.io"))
-			Expect(roles.Items[0].Rules[0].Resources).Should(HaveLen(2))
-			Expect(roles.Items[0].Rules[0].Resources[0]).Should(Equal("virtualmachines"))
-			Expect(roles.Items[0].Rules[0].Resources[1]).Should(Equal("virtualmachineinstances"))
-			Expect(roles.Items[0].Rules[0].Verbs).Should(HaveLen(1))
-			Expect(roles.Items[0].Rules[0].Verbs[0]).Should(Equal(rbacv1.VerbAll))
-			Expect(roles.Items[0].Rules[1].APIGroups).Should(HaveLen(1))
-			Expect(roles.Items[0].Rules[1].APIGroups[0]).Should(Equal(""))
-			Expect(roles.Items[0].Rules[1].Resources).Should(HaveLen(2))
-			Expect(roles.Items[0].Rules[1].Resources[0]).Should(Equal("secrets"))
-			Expect(roles.Items[0].Rules[1].Resources[1]).Should(Equal("services"))
-			Expect(roles.Items[0].Rules[1].Verbs).Should(HaveLen(1))
-			Expect(roles.Items[0].Rules[1].Verbs[0]).Should(Equal(rbacv1.VerbAll))
+			Expect(roles.Items[0].Rules).Should(HaveLen(3))
+			Expect(roles.Items[0].Rules[0].APIGroups).Should(And(HaveLen(1), ContainElements(kubevirtcore.GroupName)))
+			Expect(roles.Items[0].Rules[0].Resources).Should(And(HaveLen(2), ContainElements("virtualmachines", "virtualmachineinstances")))
+			Expect(roles.Items[0].Rules[0].Verbs).Should(And(HaveLen(1), ContainElements(rbacv1.VerbAll)))
+
+			Expect(roles.Items[0].Rules[1].APIGroups).Should(And(HaveLen(1), ContainElements(cdicore.GroupName)))
+			Expect(roles.Items[0].Rules[1].Resources).Should(And(HaveLen(1), ContainElements("datavolumes")))
+			Expect(roles.Items[0].Rules[1].Verbs).Should(And(HaveLen(3), ContainElements("get", "list", "watch")))
+
+			Expect(roles.Items[0].Rules[2].APIGroups).Should(And(HaveLen(1), ContainElements("")))
+			Expect(roles.Items[0].Rules[2].Resources).Should(And(HaveLen(2), ContainElements("secrets", "services")))
+			Expect(roles.Items[0].Rules[2].Verbs).Should(And(HaveLen(1), ContainElements(rbacv1.VerbAll)))
 		})
 
 		It("create should return error if the Role is already exist", func() {
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_kubevirtmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_kubevirtmachines.yaml
index 9799cd78..a5050567 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_kubevirtmachines.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_kubevirtmachines.yaml
@@ -18,7 +18,15 @@ spec:
     singular: kubevirtmachine
   scope: Namespaced
   versions:
-  - name: v1alpha1
+  - additionalPrinterColumns:
+    - jsonPath: .metadata.creationTimestamp
+      name: Age
+      type: date
+    - description: Is machine ready
+      jsonPath: .status.ready
+      name: Ready
+      type: boolean
+    name: v1alpha1
     schema:
       openAPIV3Schema:
         description: KubevirtMachine is the Schema for the kubevirtmachines API.
@@ -4614,8 +4622,11 @@ spec:
                   Node of this KubevirtMachine
                 type: boolean
               ready:
+                default: false
                 description: Ready denotes that the machine is ready
                 type: boolean
+            required:
+            - ready
             type: object
         type: object
     served: true
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index 1a9f05d4..5935a816 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -45,6 +45,14 @@ rules:
   verbs:
   - delete
   - list
+- apiGroups:
+  - cdi.kubevirt.io
+  resources:
+  - datavolumes
+  verbs:
+  - get
+  - list
+  - watch
 - apiGroups:
   - cluster.x-k8s.io
   resources:
diff --git a/controllers/kubevirtmachine_controller.go b/controllers/kubevirtmachine_controller.go
index 300fe1f9..932bf762 100644
--- a/controllers/kubevirtmachine_controller.go
+++ b/controllers/kubevirtmachine_controller.go
@@ -22,8 +22,6 @@ import (
 	"regexp"
 	"time"
 
-	"sigs.k8s.io/controller-runtime/pkg/builder"
-
 	"github.com/pkg/errors"
 	"gopkg.in/yaml.v3"
 	corev1 "k8s.io/api/core/v1"
@@ -31,13 +29,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
-	infrav1 "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha1"
-	"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/context"
-	"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/infracluster"
-	"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/kubevirt"
-	kubevirthandler "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/kubevirt"
-	"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/ssh"
-	"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/workloadcluster"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	capierrors "sigs.k8s.io/cluster-api/errors"
 	"sigs.k8s.io/cluster-api/util"
@@ -46,10 +37,19 @@ import (
 	"sigs.k8s.io/cluster-api/util/patch"
 	"sigs.k8s.io/cluster-api/util/predicates"
 	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/builder"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 	"sigs.k8s.io/controller-runtime/pkg/handler"
+
+	infrav1 "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha1"
+	"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/context"
+	"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/infracluster"
+	"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/kubevirt"
+	kubevirthandler "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/kubevirt"
+	"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/ssh"
+	"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/workloadcluster"
 )
 
 // KubevirtMachineReconciler reconciles a KubevirtMachine object.
@@ -66,6 +66,7 @@ type KubevirtMachineReconciler struct {
 // +kubebuilder:rbac:groups="",resources=secrets;,verbs=get;list;watch;create;update;patch;delete
 // +kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachines;,verbs=get;create;update;patch;delete
 // +kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachineinstances;,verbs=get;delete
+// +kubebuilder:rbac:groups=cdi.kubevirt.io,resources=datavolumes;,verbs=get;list;watch
 
 // Reconcile handles KubevirtMachine events.
 func (r *KubevirtMachineReconciler) Reconcile(goctx gocontext.Context, req ctrl.Request) (_ ctrl.Result, rerr error) {
@@ -277,6 +278,9 @@ func (r *KubevirtMachineReconciler) reconcileNormal(ctx *context.MachineContext)
 		// Mark VMProvisionedCondition to indicate that the VM has successfully started
 		conditions.MarkTrue(ctx.KubevirtMachine, infrav1.VMProvisionedCondition)
 	} else {
+		reason, message := externalMachine.GetVMNotReadyReason()
+		conditions.MarkFalse(ctx.KubevirtMachine, infrav1.VMProvisionedCondition, reason, clusterv1.ConditionSeverityInfo, message)
+
 		// Waiting for VM to boot
 		ctx.KubevirtMachine.Status.Ready = false
 		ctx.Logger.Info("KubeVirt VM is not fully provisioned and running...")
@@ -476,7 +480,7 @@ func (r *KubevirtMachineReconciler) reconcileDelete(ctx *context.MachineContext)
 
 // SetupWithManager will add watches for this controller.
 func (r *KubevirtMachineReconciler) SetupWithManager(goctx gocontext.Context, mgr ctrl.Manager, options controller.Options) error {
-	clusterToKubevirtMachines, err := util.ClusterToObjectsMapper(mgr.GetClient(), &infrav1.KubevirtMachineList{}, mgr.GetScheme())
+	clusterToKubevirtMachines, err := util.ClusterToTypedObjectsMapper(mgr.GetClient(), &infrav1.KubevirtMachineList{}, mgr.GetScheme())
 	if err != nil {
 		return err
 	}
diff --git a/main.go b/main.go
index 52f041c5..4a2fe897 100644
--- a/main.go
+++ b/main.go
@@ -21,10 +21,6 @@ import (
 	"flag"
 	"math/rand"
 	"os"
-	"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/webhookhandler"
-	"sigs.k8s.io/controller-runtime/pkg/cache"
-	"sigs.k8s.io/controller-runtime/pkg/metrics/server"
-	"sigs.k8s.io/controller-runtime/pkg/webhook"
 	"time"
 
 	"github.com/spf13/pflag"
@@ -35,23 +31,27 @@ import (
 	"k8s.io/klog/v2"
 	"k8s.io/klog/v2/klogr"
 	kubevirtv1 "kubevirt.io/api/core/v1"
+	cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	"sigs.k8s.io/cluster-api/feature"
 	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/cache"
 	k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
 	"sigs.k8s.io/controller-runtime/pkg/healthz"
+	"sigs.k8s.io/controller-runtime/pkg/metrics/server"
+	"sigs.k8s.io/controller-runtime/pkg/webhook"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha1"
 	"sigs.k8s.io/cluster-api-provider-kubevirt/controllers"
 	"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/infracluster"
 	"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/kubevirt"
+	"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/webhookhandler"
 	"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/workloadcluster"
 	// +kubebuilder:scaffold:imports
 )
 
 var (
-	myscheme = runtime.NewScheme()
 	setupLog = ctrl.Log.WithName("setup")
 
 	//flags.
@@ -67,12 +67,24 @@ var (
 
 func init() {
 	klog.InitFlags(nil)
+}
 
-	_ = scheme.AddToScheme(myscheme)
-	_ = infrav1.AddToScheme(myscheme)
-	_ = clusterv1.AddToScheme(myscheme)
-	_ = kubevirtv1.AddToScheme(myscheme)
-	// +kubebuilder:scaffold:scheme
+func registerScheme() (*runtime.Scheme, error) {
+	myscheme := runtime.NewScheme()
+
+	for _, f := range []func(*runtime.Scheme) error{
+		scheme.AddToScheme,
+		infrav1.AddToScheme,
+		clusterv1.AddToScheme,
+		kubevirtv1.AddToScheme,
+		cdiv1.AddToScheme,
+		// +kubebuilder:scaffold:scheme
+	} {
+		if err := f(myscheme); err != nil {
+			return nil, err
+		}
+	}
+	return myscheme, nil
 }
 
 func initFlags(fs *pflag.FlagSet) {
@@ -106,6 +118,12 @@ func main() {
 
 	ctrl.SetLogger(klogr.New())
 
+	myscheme, err := registerScheme()
+	if err != nil {
+		setupLog.Error(err, "can't register scheme")
+		os.Exit(1)
+	}
+
 	var defaultNamespaces map[string]cache.Config
 	if watchNamespace != "" {
 		setupLog.Info("Watching cluster-api objects only in namespace for reconciliation", "namespace", watchNamespace)
diff --git a/pkg/kubevirt/machine.go b/pkg/kubevirt/machine.go
index 60fd1639..5499bdd0 100644
--- a/pkg/kubevirt/machine.go
+++ b/pkg/kubevirt/machine.go
@@ -19,6 +19,8 @@ package kubevirt
 import (
 	gocontext "context"
 	"fmt"
+	"time"
+
 	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -26,16 +28,16 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	kubedrain "k8s.io/kubectl/pkg/drain"
 	kubevirtv1 "kubevirt.io/api/core/v1"
-	"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/workloadcluster"
+	cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	"sigs.k8s.io/cluster-api/controllers/noderefutil"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
-	"time"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha1"
 	"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/context"
 	"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/ssh"
+	"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/workloadcluster"
 )
 
 const (
@@ -49,6 +51,7 @@ type Machine struct {
 	machineContext     *context.MachineContext
 	vmiInstance        *kubevirtv1.VirtualMachineInstance
 	vmInstance         *kubevirtv1.VirtualMachine
+	dataVolumes        []*cdiv1.DataVolume
 
 	sshKeys            *ssh.ClusterNodeSshKeys
 	getCommandExecutor func(string, *ssh.ClusterNodeSshKeys) ssh.VMCommandExecutor
@@ -63,6 +66,7 @@ func NewMachine(ctx *context.MachineContext, client client.Client, namespace str
 		vmiInstance:        nil,
 		vmInstance:         nil,
 		sshKeys:            sshKeys,
+		dataVolumes:        nil,
 		getCommandExecutor: ssh.NewVMCommandExecutor,
 	}
 
@@ -90,6 +94,20 @@ func NewMachine(ctx *context.MachineContext, client client.Client, namespace str
 		machine.vmInstance = vm
 	}
 
+	if machine.vmInstance != nil {
+		for _, dvTemp := range machine.vmInstance.Spec.DataVolumeTemplates {
+			dv := &cdiv1.DataVolume{}
+			err = client.Get(ctx.Context, types.NamespacedName{Name: dvTemp.ObjectMeta.Name, Namespace: namespace}, dv)
+			if err != nil {
+				if !apierrors.IsNotFound(err) {
+					return nil, err
+				}
+			} else {
+				machine.dataVolumes = append(machine.dataVolumes, dv)
+			}
+		}
+	}
+
 	return machine, nil
 }
 
@@ -219,6 +237,78 @@ func (m *Machine) IsReady() bool {
 	return m.hasReadyCondition()
 }
 
+const (
+	defaultCondReason  = "VMNotReady"
+	defaultCondMessage = "VM is not ready"
+)
+
+func (m *Machine) GetVMNotReadyReason() (reason string, message string) {
+	reason = defaultCondReason
+
+	if m.vmInstance == nil {
+		message = defaultCondMessage
+		return
+	}
+
+	message = fmt.Sprintf("%s: %s", defaultCondMessage, m.vmInstance.Status.PrintableStatus)
+
+	cond := m.getVMCondition(kubevirtv1.VirtualMachineConditionType(corev1.PodScheduled))
+	if cond != nil {
+		if cond.Status == corev1.ConditionTrue {
+			return
+		} else if cond.Status == corev1.ConditionFalse {
+			if cond.Reason == "Unschedulable" {
+				return "Unschedulable", cond.Message
+			}
+		}
+	}
+
+	for _, dv := range m.dataVolumes {
+		dvReason, dvMessage, foundDVReason := m.getDVNotProvisionedReason(dv)
+		if foundDVReason {
+			return dvReason, dvMessage
+		}
+	}
+
+	return
+}
+
+func (m *Machine) getDVNotProvisionedReason(dv *cdiv1.DataVolume) (string, string, bool) {
+	msg := fmt.Sprintf("DataVolume %s is not ready; Phase: %s", dv.Name, dv.Status.Phase)
+	switch dv.Status.Phase {
+	case cdiv1.Succeeded: // DV's OK, return default reason & message
+		return "", "", false
+	case cdiv1.Pending:
+		return "DVPending", msg, true
+	case cdiv1.Failed:
+		return "DVFailed", msg, true
+	default:
+		for _, dvCond := range dv.Status.Conditions {
+			if dvCond.Type == cdiv1.DataVolumeRunning {
+				if dvCond.Status == corev1.ConditionFalse {
+					msg = fmt.Sprintf("DataVolume %s import is not running: %s", dv.Name, dvCond.Message)
+				}
+				break
+			}
+		}
+		return "DVNotReady", msg, true
+	}
+}
+
+func (m *Machine) getVMCondition(t kubevirtv1.VirtualMachineConditionType) *kubevirtv1.VirtualMachineCondition {
+	if m.vmInstance == nil {
+		return nil
+	}
+
+	for _, cond := range m.vmInstance.Status.Conditions {
+		if cond.Type == t {
+			return cond.DeepCopy()
+		}
+	}
+
+	return nil
+}
+
 // SupportsCheckingIsBootstrapped checks if we have a method of checking
 // that this bootstrapper has completed.
 func (m *Machine) SupportsCheckingIsBootstrapped() bool {
diff --git a/pkg/kubevirt/machine_factory.go b/pkg/kubevirt/machine_factory.go
index f4ddc2bc..38180115 100644
--- a/pkg/kubevirt/machine_factory.go
+++ b/pkg/kubevirt/machine_factory.go
@@ -38,6 +38,9 @@ type MachineInterface interface {
 	IsTerminal() (bool, string, error)
 
 	DrainNodeIfNeeded(workloadcluster.WorkloadCluster) (time.Duration, error)
+
+	// GetVMNotReadyReason returns the reason and message for the condition, if the VM is not ready
+	GetVMNotReadyReason() (string, string)
 }
 
 // MachineFactory allows creating new instances of kubevirt.machine
diff --git a/pkg/kubevirt/machine_test.go b/pkg/kubevirt/machine_test.go
index 71f812f0..ddb67141 100644
--- a/pkg/kubevirt/machine_test.go
+++ b/pkg/kubevirt/machine_test.go
@@ -32,6 +32,7 @@ import (
 	k8sfake "k8s.io/client-go/kubernetes/fake"
 	k8stesting "k8s.io/client-go/testing"
 	kubevirtv1 "kubevirt.io/api/core/v1"
+	cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 	"sigs.k8s.io/controller-runtime/pkg/log/zap"
@@ -777,6 +778,187 @@ var _ = Describe("util functions", func() {
 	})
 })
 
+var _ = Describe("with dataVolumes", func() {
+	var machineContext *context.MachineContext
+	namespace := kubevirtMachine.Namespace
+	virtualMachineInstance := testing.NewVirtualMachineInstance(kubevirtMachine)
+	virtualMachine := testing.NewVirtualMachine(virtualMachineInstance)
+	dataVolume := &cdiv1.DataVolume{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "dv-name",
+			Namespace: namespace,
+		},
+	}
+
+	BeforeEach(func() {
+		kubevirtMachine.Spec.BootstrapCheckSpec = v1alpha1.VirtualMachineBootstrapCheckSpec{}
+
+		machineContext = &context.MachineContext{
+			Context:             gocontext.TODO(),
+			Cluster:             cluster,
+			KubevirtCluster:     kubevirtCluster,
+			Machine:             machine,
+			KubevirtMachine:     kubevirtMachine,
+			BootstrapDataSecret: bootstrapDataSecret,
+			Logger:              logger,
+		}
+
+		virtualMachine.Spec.DataVolumeTemplates = []kubevirtv1.DataVolumeTemplateSpec{
+			{
+				ObjectMeta: metav1.ObjectMeta{Name: "dv-name"},
+			},
+		}
+
+		if virtualMachine.Spec.Template == nil {
+			virtualMachine.Spec.Template = &kubevirtv1.VirtualMachineInstanceTemplateSpec{}
+		}
+		virtualMachine.Spec.Template.Spec.Volumes = []kubevirtv1.Volume{
+			{
+				Name: "dv-disk",
+				VolumeSource: kubevirtv1.VolumeSource{
+					DataVolume: &kubevirtv1.DataVolumeSource{
+						Name: "dv-name",
+					},
+				},
+			},
+		}
+
+		fakeVMCommandExecutor = FakeVMCommandExecutor{true}
+	})
+	JustBeforeEach(func() {
+		objects := []client.Object{
+			cluster,
+			kubevirtCluster,
+			machine,
+			kubevirtMachine,
+			virtualMachineInstance,
+			virtualMachine,
+			dataVolume,
+		}
+		fakeClient = fake.NewClientBuilder().WithScheme(testing.SetupScheme()).WithObjects(objects...).Build()
+	})
+
+	It("NewMachine should have all client, machineContext and vmiInstance NOT nil", func() {
+		externalMachine, err := defaultTestMachine(machineContext, namespace, fakeClient, fakeVMCommandExecutor, []byte(sshKey))
+		Expect(err).NotTo(HaveOccurred())
+		Expect(externalMachine.client).ToNot(BeNil())
+		Expect(externalMachine.machineContext).To(Equal(machineContext))
+		Expect(externalMachine.vmiInstance).ToNot(BeNil())
+		Expect(externalMachine.dataVolumes).To(HaveLen(1))
+		Expect(externalMachine.dataVolumes[0].Name).To(Equal(dataVolume.Name))
+	})
+})
+
+var _ = Describe("check GetVMNotReadyReason", func() {
+	DescribeTable("not-ready reason", func(vm *kubevirtv1.VirtualMachine, dv *cdiv1.DataVolume, expectedReason, expectedMsg string) {
+		m := Machine{
+			vmInstance: vm,
+		}
+
+		if dv != nil {
+			m.dataVolumes = []*cdiv1.DataVolume{dv}
+		}
+
+		reason, msg := m.GetVMNotReadyReason()
+		Expect(reason).To(Equal(expectedReason))
+		Expect(msg).To(ContainSubstring(expectedMsg))
+	},
+		Entry("no vm instance", nil, nil, defaultCondReason, defaultCondMessage),
+		Entry("no vm conditions", &kubevirtv1.VirtualMachine{}, nil, defaultCondReason, defaultCondMessage),
+		Entry("vm PodScheduled condition is true", &kubevirtv1.VirtualMachine{
+			Status: kubevirtv1.VirtualMachineStatus{
+				Conditions: []kubevirtv1.VirtualMachineCondition{
+					{
+						Type:   kubevirtv1.VirtualMachineConditionType(corev1.PodScheduled),
+						Status: corev1.ConditionTrue,
+					},
+				},
+			},
+		}, nil, defaultCondReason, defaultCondMessage),
+		Entry("vm PodScheduled condition is false, with unknown reason", &kubevirtv1.VirtualMachine{
+			Status: kubevirtv1.VirtualMachineStatus{
+				Conditions: []kubevirtv1.VirtualMachineCondition{
+					{
+						Type:   kubevirtv1.VirtualMachineConditionType(corev1.PodScheduled),
+						Status: corev1.ConditionFalse,
+						Reason: "somethingElse",
+					},
+				},
+			},
+		}, nil, defaultCondReason, defaultCondMessage),
+		Entry("vm PodScheduled condition is false, with 'Unschedulable' reason", &kubevirtv1.VirtualMachine{
+			Status: kubevirtv1.VirtualMachineStatus{
+				Conditions: []kubevirtv1.VirtualMachineCondition{
+					{
+						Type:    kubevirtv1.VirtualMachineConditionType(corev1.PodScheduled),
+						Status:  corev1.ConditionFalse,
+						Reason:  "Unschedulable",
+						Message: "test message",
+					},
+				},
+			},
+		}, nil, "Unschedulable", "test message"),
+		Entry("dv with Running condition; phase = Succeeded", &kubevirtv1.VirtualMachine{}, &cdiv1.DataVolume{
+			Status: cdiv1.DataVolumeStatus{
+				Phase: cdiv1.Succeeded,
+			},
+		}, defaultCondReason, defaultCondMessage),
+		Entry("dv with Running condition; phase = Pending", &kubevirtv1.VirtualMachine{}, &cdiv1.DataVolume{
+
+			Status: cdiv1.DataVolumeStatus{
+				Phase: cdiv1.Pending,
+			},
+		}, "DVPending", "is not ready; Phase: Pending"),
+		Entry("dv with Running condition; phase = Failed", &kubevirtv1.VirtualMachine{}, &cdiv1.DataVolume{
+			Status: cdiv1.DataVolumeStatus{
+				Phase: cdiv1.Failed,
+			},
+		}, "DVFailed", "is not ready; Phase: Failed"),
+		Entry("dv with Running condition; phase is something else; Running condition true", &kubevirtv1.VirtualMachine{}, &cdiv1.DataVolume{
+			Status: cdiv1.DataVolumeStatus{
+				Phase: cdiv1.ImportInProgress,
+				Conditions: []cdiv1.DataVolumeCondition{
+					{
+						Type:   cdiv1.DataVolumeRunning,
+						Status: corev1.ConditionTrue,
+					},
+				},
+			},
+		}, "DVNotReady", "is not ready; Phase: ImportInProgress"),
+		Entry("dv with Running condition; phase is something else; Running condition false; reason=Completed", &kubevirtv1.VirtualMachine{}, &cdiv1.DataVolume{
+			Status: cdiv1.DataVolumeStatus{
+				Phase: cdiv1.ImportInProgress,
+				Conditions: []cdiv1.DataVolumeCondition{
+					{
+						Type:   cdiv1.DataVolumeRunning,
+						Status: corev1.ConditionFalse,
+						Reason: "Completed",
+					},
+				},
+			},
+		}, "DVNotReady", "import is not running"),
+		Entry("dv with Running condition; phase is something else; Running condition false; reason!=Completed", &kubevirtv1.VirtualMachine{}, &cdiv1.DataVolume{
+			Status: cdiv1.DataVolumeStatus{
+				Phase: cdiv1.ImportInProgress,
+				Conditions: []cdiv1.DataVolumeCondition{
+					{
+						Type:    cdiv1.DataVolumeRunning,
+						Status:  corev1.ConditionFalse,
+						Reason:  "SomethingElse",
+						Message: "test message",
+					},
+				},
+			},
+		}, "DVNotReady", "test message"),
+		Entry("dv with Running condition; phase is something else; no Running condition", &kubevirtv1.VirtualMachine{}, &cdiv1.DataVolume{
+			Status: cdiv1.DataVolumeStatus{
+				Phase:      cdiv1.ImportInProgress,
+				Conditions: []cdiv1.DataVolumeCondition{},
+			},
+		}, "DVNotReady", "is not ready; Phase: ImportInProgress"),
+	)
+})
+
 func validateVMNotExist(expected *kubevirtv1.VirtualMachine, fakeClient client.Client, machineContext *context.MachineContext) {
 	vm := &kubevirtv1.VirtualMachine{}
 	key := client.ObjectKey{Name: expected.Name, Namespace: expected.Namespace}
diff --git a/pkg/kubevirt/mock/machine_factory_generated.go b/pkg/kubevirt/mock/machine_factory_generated.go
index 0652f0e5..870dc28d 100644
--- a/pkg/kubevirt/mock/machine_factory_generated.go
+++ b/pkg/kubevirt/mock/machine_factory_generated.go
@@ -10,6 +10,7 @@ import (
 	time "time"
 
 	gomock "github.com/golang/mock/gomock"
+
 	context0 "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/context"
 	kubevirt "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/kubevirt"
 	ssh "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/ssh"
@@ -148,6 +149,10 @@ func (m *MockMachineInterface) IsReady() bool {
 	return ret0
 }
 
+func (m *MockMachineInterface) GetVMNotReadyReason() (string, string) {
+	return "", ""
+}
+
 // IsReady indicates an expected call of IsReady.
 func (mr *MockMachineInterfaceMockRecorder) IsReady() *gomock.Call {
 	mr.mock.ctrl.T.Helper()
diff --git a/pkg/testing/common.go b/pkg/testing/common.go
index a561c6bc..1069716e 100644
--- a/pkg/testing/common.go
+++ b/pkg/testing/common.go
@@ -7,6 +7,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	kubevirtv1 "kubevirt.io/api/core/v1"
+	cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha1"
@@ -188,23 +189,19 @@ func NewBootstrapDataSecret(userData []byte) *corev1.Secret {
 // SetupScheme setups the scheme for a fake client.
 func SetupScheme() *runtime.Scheme {
 	s := runtime.NewScheme()
-	if err := clusterv1.AddToScheme(s); err != nil {
-		panic(err)
-	}
-	if err := infrav1.AddToScheme(s); err != nil {
-		panic(err)
-	}
-	if err := kubevirtv1.AddToScheme(s); err != nil {
-		panic(err)
-	}
-	if err := corev1.AddToScheme(s); err != nil {
-		panic(err)
-	}
-	if err := appsv1.AddToScheme(s); err != nil {
-		panic(err)
-	}
-	if err := rbacv1.AddToScheme(s); err != nil {
-		panic(err)
+	for _, f := range []func(*runtime.Scheme) error{
+		clusterv1.AddToScheme,
+		infrav1.AddToScheme,
+		kubevirtv1.AddToScheme,
+		cdiv1.AddToScheme,
+		corev1.AddToScheme,
+		appsv1.AddToScheme,
+		rbacv1.AddToScheme,
+	} {
+		if err := f(s); err != nil {
+			panic(err)
+		}
 	}
+
 	return s
 }