Reflect the VM statuses to the KubeVirtCluster status
If not all of the cluster's VMs are ready, the CAPK operator now sets the new
`AllMachinesAreReady` condition in the KubeVirtCluster CR to `False`.

Signed-off-by: Nahshon Unna-Tsameret <nunnatsa@redhat.com>
nunnatsa committed Dec 21, 2023
1 parent 35cc2f1 commit dc8c7bd
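A minimal sketch of the behavior described in the commit message, assuming the condition is set through the cluster-api `conditions` helpers that this commit already uses for `VMProvisionedCondition`; the condition constant, reason, severity, and helper name below are illustrative rather than the actual CAPK code:

```go
package example

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/conditions"

	infrav1 "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha1"
)

// Illustrative condition type; the commit message names it AllMachinesAreReady.
const allMachinesAreReadyCondition clusterv1.ConditionType = "AllMachinesAreReady"

// reflectVMStatuses is a hypothetical helper: it marks the condition False on
// the KubevirtCluster while some of the cluster's VMs are not yet ready.
func reflectVMStatuses(kvCluster *infrav1.KubevirtCluster, readyVMs, totalVMs int) {
	if readyVMs == totalVMs {
		conditions.MarkTrue(kvCluster, allMachinesAreReadyCondition)
		return
	}
	conditions.MarkFalse(kvCluster, allMachinesAreReadyCondition,
		"VMNotReady", clusterv1.ConditionSeverityWarning,
		"%d of %d virtual machines are ready", readyVMs, totalVMs)
}
```

Folding per-machine readiness into a single cluster-level condition lets tools that read the KubeVirtCluster status, such as `clusterctl describe`, report why a cluster is not yet ready.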
Showing 13 changed files with 384 additions and 59 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/test.yaml
@@ -54,7 +54,7 @@ jobs:
uses: golangci/golangci-lint-action@v3
with:
args: --timeout=5m -v
version: v1.54.2
version: v1.55.2

check-gen:
runs-on: ubuntu-latest
4 changes: 3 additions & 1 deletion api/v1alpha1/kubevirtmachine_types.go
@@ -71,7 +71,7 @@ type VirtualMachineBootstrapCheckSpec struct {
// KubevirtMachineStatus defines the observed state of KubevirtMachine.
type KubevirtMachineStatus struct {
// Ready denotes that the machine is ready
// +optional
// +kubebuilder:default=false
Ready bool `json:"ready"`

// LoadBalancerConfigured denotes that the machine has been
@@ -134,6 +134,8 @@ type KubevirtMachineStatus struct {
// +kubebuilder:object:root=true
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
// +kubebuilder:printcolumn:name="Ready",type="boolean",JSONPath=".status.ready",description="Is machine ready"

// KubevirtMachine is the Schema for the kubevirtmachines API.
type KubevirtMachine struct {
9 changes: 8 additions & 1 deletion clusterkubevirtadm/cmd/credentials/credentials.go
@@ -26,6 +26,8 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sclient "k8s.io/client-go/kubernetes"
kubevirtcore "kubevirt.io/api/core"
cdicore "kubevirt.io/containerized-data-importer-api/pkg/apis/core"

"sigs.k8s.io/cluster-api-provider-kubevirt/clusterkubevirtadm/common"
)
@@ -148,10 +150,15 @@ func generateRole(cmdCtx cmdContext) *rbacv1.Role {
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{"kubevirt.io"},
APIGroups: []string{kubevirtcore.GroupName},
Resources: []string{"virtualmachines", "virtualmachineinstances"},
Verbs: []string{rbacv1.VerbAll},
},
{
APIGroups: []string{cdicore.GroupName},
Resources: []string{"datavolumes"},
Verbs: []string{"get", "list", "watch"},
},
{
APIGroups: []string{""},
Resources: []string{"secrets", "services"},
30 changes: 14 additions & 16 deletions clusterkubevirtadm/cmd/credentials/credentials_test.go
@@ -2,14 +2,15 @@ package credentials

import (
"context"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
kubevirtcore "kubevirt.io/api/core"
cdicore "kubevirt.io/containerized-data-importer-api/pkg/apis/core"

"sigs.k8s.io/cluster-api-provider-kubevirt/clusterkubevirtadm/common"
)
@@ -118,21 +119,18 @@ var _ = Describe("test credentials common function", func() {
Expect(roles.Items).To(HaveLen(1))

Expect(roles.Items[0].Name).Should(Equal(roleName))
Expect(roles.Items[0].Rules).Should(HaveLen(2))
Expect(roles.Items[0].Rules[0].APIGroups).Should(HaveLen(1))
Expect(roles.Items[0].Rules[0].APIGroups[0]).Should(Equal("kubevirt.io"))
Expect(roles.Items[0].Rules[0].Resources).Should(HaveLen(2))
Expect(roles.Items[0].Rules[0].Resources[0]).Should(Equal("virtualmachines"))
Expect(roles.Items[0].Rules[0].Resources[1]).Should(Equal("virtualmachineinstances"))
Expect(roles.Items[0].Rules[0].Verbs).Should(HaveLen(1))
Expect(roles.Items[0].Rules[0].Verbs[0]).Should(Equal(rbacv1.VerbAll))
Expect(roles.Items[0].Rules[1].APIGroups).Should(HaveLen(1))
Expect(roles.Items[0].Rules[1].APIGroups[0]).Should(Equal(""))
Expect(roles.Items[0].Rules[1].Resources).Should(HaveLen(2))
Expect(roles.Items[0].Rules[1].Resources[0]).Should(Equal("secrets"))
Expect(roles.Items[0].Rules[1].Resources[1]).Should(Equal("services"))
Expect(roles.Items[0].Rules[1].Verbs).Should(HaveLen(1))
Expect(roles.Items[0].Rules[1].Verbs[0]).Should(Equal(rbacv1.VerbAll))
Expect(roles.Items[0].Rules).Should(HaveLen(3))
Expect(roles.Items[0].Rules[0].APIGroups).Should(And(HaveLen(1), ContainElements(kubevirtcore.GroupName)))
Expect(roles.Items[0].Rules[0].Resources).Should(And(HaveLen(2), ContainElements("virtualmachines", "virtualmachineinstances")))
Expect(roles.Items[0].Rules[0].Verbs).Should(And(HaveLen(1), ContainElements(rbacv1.VerbAll)))

Expect(roles.Items[0].Rules[1].APIGroups).Should(And(HaveLen(1), ContainElements(cdicore.GroupName)))
Expect(roles.Items[0].Rules[1].Resources).Should(And(HaveLen(1), ContainElements("datavolumes")))
Expect(roles.Items[0].Rules[1].Verbs).Should(And(HaveLen(3), ContainElements("get", "list", "watch")))

Expect(roles.Items[0].Rules[2].APIGroups).Should(And(HaveLen(1), ContainElements("")))
Expect(roles.Items[0].Rules[2].Resources).Should(And(HaveLen(2), ContainElements("secrets", "services")))
Expect(roles.Items[0].Rules[2].Verbs).Should(And(HaveLen(1), ContainElements(rbacv1.VerbAll)))
})

It("create should return error if the Role is already exist", func() {
@@ -18,7 +18,15 @@ spec:
singular: kubevirtmachine
scope: Namespaced
versions:
- name: v1alpha1
- additionalPrinterColumns:
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
- description: Is machine ready
jsonPath: .status.ready
name: Ready
type: boolean
name: v1alpha1
schema:
openAPIV3Schema:
description: KubevirtMachine is the Schema for the kubevirtmachines API.
@@ -4614,8 +4622,11 @@ spec:
Node of this KubevirtMachine
type: boolean
ready:
default: false
description: Ready denotes that the machine is ready
type: boolean
required:
- ready
type: object
type: object
served: true
8 changes: 8 additions & 0 deletions config/rbac/role.yaml
@@ -45,6 +45,14 @@ rules:
verbs:
- delete
- list
- apiGroups:
- cdi.kubevirt.io
resources:
- datavolumes
verbs:
- get
- list
- watch
- apiGroups:
- cluster.x-k8s.io
resources:
24 changes: 14 additions & 10 deletions controllers/kubevirtmachine_controller.go
@@ -22,22 +22,13 @@ import (
"regexp"
"time"

"sigs.k8s.io/controller-runtime/pkg/builder"

"github.com/pkg/errors"
"gopkg.in/yaml.v3"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
infrav1 "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha1"
"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/context"
"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/infracluster"
"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/kubevirt"
kubevirthandler "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/kubevirt"
"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/ssh"
"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/workloadcluster"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
capierrors "sigs.k8s.io/cluster-api/errors"
"sigs.k8s.io/cluster-api/util"
@@ -46,10 +37,19 @@ import (
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/cluster-api/util/predicates"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"

infrav1 "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha1"
"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/context"
"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/infracluster"
"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/kubevirt"
kubevirthandler "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/kubevirt"
"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/ssh"
"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/workloadcluster"
)

// KubevirtMachineReconciler reconciles a KubevirtMachine object.
@@ -66,6 +66,7 @@ type KubevirtMachineReconciler struct {
// +kubebuilder:rbac:groups="",resources=secrets;,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachines;,verbs=get;create;update;patch;delete
// +kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachineinstances;,verbs=get;delete
// +kubebuilder:rbac:groups=cdi.kubevirt.io,resources=datavolumes;,verbs=get;list;watch

// Reconcile handles KubevirtMachine events.
func (r *KubevirtMachineReconciler) Reconcile(goctx gocontext.Context, req ctrl.Request) (_ ctrl.Result, rerr error) {
@@ -277,6 +278,9 @@ func (r *KubevirtMachineReconciler) reconcileNormal(ctx *context.MachineContext)
// Mark VMProvisionedCondition to indicate that the VM has successfully started
conditions.MarkTrue(ctx.KubevirtMachine, infrav1.VMProvisionedCondition)
} else {
reason, message := externalMachine.GetVMNotReadyReason()
conditions.MarkFalse(ctx.KubevirtMachine, infrav1.VMProvisionedCondition, reason, clusterv1.ConditionSeverityInfo, message)

// Waiting for VM to boot
ctx.KubevirtMachine.Status.Ready = false
ctx.Logger.Info("KubeVirt VM is not fully provisioned and running...")
@@ -476,7 +480,7 @@

// SetupWithManager will add watches for this controller.
func (r *KubevirtMachineReconciler) SetupWithManager(goctx gocontext.Context, mgr ctrl.Manager, options controller.Options) error {
clusterToKubevirtMachines, err := util.ClusterToObjectsMapper(mgr.GetClient(), &infrav1.KubevirtMachineList{}, mgr.GetScheme())
clusterToKubevirtMachines, err := util.ClusterToTypedObjectsMapper(mgr.GetClient(), &infrav1.KubevirtMachineList{}, mgr.GetScheme())
if err != nil {
return err
}
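The hunk above starts surfacing why a VM is not ready via `externalMachine.GetVMNotReadyReason()`. A hypothetical, stand-alone sketch of that idea, assuming the reason and message are lifted from the VM's own `Ready` condition (the real implementation lives in `pkg/kubevirt` and is not part of this diff):

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	kubevirtv1 "kubevirt.io/api/core/v1"
)

// vmNotReadyReason returns a (reason, message) pair describing why the given
// VirtualMachine is not ready, based on its Ready condition.
func vmNotReadyReason(vm *kubevirtv1.VirtualMachine) (reason, message string) {
	for _, c := range vm.Status.Conditions {
		if c.Type == kubevirtv1.VirtualMachineReady && c.Status == corev1.ConditionFalse {
			return c.Reason, c.Message
		}
	}
	// Fallback when the VM has not reported a Ready condition yet.
	return "VMNotReady", "KubeVirt VM is not ready"
}
```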
38 changes: 28 additions & 10 deletions main.go
@@ -21,10 +21,6 @@ import (
"flag"
"math/rand"
"os"
"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/webhookhandler"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/metrics/server"
"sigs.k8s.io/controller-runtime/pkg/webhook"
"time"

"github.com/spf13/pflag"
@@ -35,23 +31,27 @@
"k8s.io/klog/v2"
"k8s.io/klog/v2/klogr"
kubevirtv1 "kubevirt.io/api/core/v1"
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/feature"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/metrics/server"
"sigs.k8s.io/controller-runtime/pkg/webhook"

infrav1 "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha1"
"sigs.k8s.io/cluster-api-provider-kubevirt/controllers"
"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/infracluster"
"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/kubevirt"
"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/webhookhandler"
"sigs.k8s.io/cluster-api-provider-kubevirt/pkg/workloadcluster"
// +kubebuilder:scaffold:imports
)

var (
myscheme = runtime.NewScheme()
setupLog = ctrl.Log.WithName("setup")

//flags.
@@ -67,12 +67,24 @@

func init() {
klog.InitFlags(nil)
}

_ = scheme.AddToScheme(myscheme)
_ = infrav1.AddToScheme(myscheme)
_ = clusterv1.AddToScheme(myscheme)
_ = kubevirtv1.AddToScheme(myscheme)
// +kubebuilder:scaffold:scheme
func registerScheme() (*runtime.Scheme, error) {
myscheme := runtime.NewScheme()

for _, f := range []func(*runtime.Scheme) error{
scheme.AddToScheme,
infrav1.AddToScheme,
clusterv1.AddToScheme,
kubevirtv1.AddToScheme,
cdiv1.AddToScheme,
// +kubebuilder:scaffold:scheme
} {
if err := f(myscheme); err != nil {
return nil, err
}
}
return myscheme, nil
}

func initFlags(fs *pflag.FlagSet) {
@@ -106,6 +118,12 @@ func main() {

ctrl.SetLogger(klogr.New())

myscheme, err := registerScheme()
if err != nil {
setupLog.Error(err, "can't register scheme")
os.Exit(1)
}

var defaultNamespaces map[string]cache.Config
if watchNamespace != "" {
setupLog.Info("Watching cluster-api objects only in namespace for reconciliation", "namespace", watchNamespace)
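The diff above is truncated before the manager is created. A minimal sketch, continuing the `main()` shown here, of how the scheme returned by `registerScheme()` and the `defaultNamespaces` map are typically wired into the controller-runtime manager; the metrics bind address and other option values are placeholders, not taken from this commit:

```go
	// Illustrative wiring only; the real main() configures additional options
	// (webhooks, leader election, health probes, ...) that are not shown here.
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Scheme:  myscheme,
		Cache:   cache.Options{DefaultNamespaces: defaultNamespaces},
		Metrics: server.Options{BindAddress: ":8080"},
	})
	if err != nil {
		setupLog.Error(err, "unable to create the manager")
		os.Exit(1)
	}
```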