From f10adb4d3b454ffe851eb002f0f1490f1932fbbb Mon Sep 17 00:00:00 2001
From: "Jose Rivera (Hose)"
Date: Mon, 17 Apr 2023 14:58:08 -0700
Subject: [PATCH] PWX-28826: Handle pre-flight check for DMthin (#1014)

* PWX-28826 Boilerplate

Signed-off-by: Harsh Desai

* more boilerplate

Signed-off-by: Harsh Desai

* PWX-28826: Pre-flight check for DMthin.

Signed-off-by: Jose Rivera

* PWX-28826: Add comments and move StorageNode cleanup.

Signed-off-by: Jose Rivera

* Passed checks should be Info events.

Signed-off-by: Jose Rivera

* Passed checks should be Info events. (#1010)

Signed-off-by: Jose Rivera

* Pwx 28826 (#1011)

* Pwx 28826 (#1012)

* PWX-28826: Update with the latest master changes. (#1013)

* Updating CSV to use 23.3.1 released image

* Update for 23.3.1 release

* Controller gen vendor

Signed-off-by: Piyush Nimbalkar

* PWX-29389 Add CRD for portworx diags collection

Signed-off-by: Piyush Nimbalkar

* PWX-29409: Ignore zones with no nodes (#1008)

In disaggregated mode, there could be zones in which no storage nodes
are present. Such a zone would make the maxSNPZ value 0. Changing the
behavior to ignore zones with 0 nodes in the maxSNPZ calculation.

Signed-off-by: Naveen Revanna

---------

Signed-off-by: Piyush Nimbalkar
Signed-off-by: Naveen Revanna
Co-authored-by: CNBU Jenkins
Co-authored-by: Jiafeng Liao
Co-authored-by: Piyush Nimbalkar
Co-authored-by: Naveen Revanna <83608369+nrevanna@users.noreply.github.com>

* Add PassPreFlight event tag and logging

Signed-off-by: Jose Rivera

* PWX-28826: Check status of portworx container in pre-flight pod and remove 'wait' code.

Signed-off-by: Jose Rivera

* PWX-28826: Fix unit test.

Signed-off-by: Jose Rivera

* PWX-28826: Fix unit test.

Signed-off-by: Jose Rivera

* PWX-28826: PR review changes and fix portworx_test.go UTs

Signed-off-by: Jose Rivera

* PWX-28826: fix gomock Validate calls. Also comment out the two tests
that don't work since Validate was removed from the controller.validate()
func. PWX-30373 filed to fix later.

Signed-off-by: Jose Rivera

* PWX-30373: Re-add the commented-out tests and add a K8s version check
failure to trigger the needed workflow.

Signed-off-by: Jose Rivera

* PWX-28826: Exit pre-check wait if running CBT namespace.

Signed-off-by: Jose Rivera

* PWX-28826: Add 5 min timeout to pre-flight status check.

Signed-off-by: Jose Rivera

* PWX-28826: Exit GetPreFlightStatus() with success if running CBT namespace.

Signed-off-by: Jose Rivera

* PWX-28826: Don't automatically enable dmthin via pre-flight check if running CBT namespace.

Signed-off-by: Jose Rivera

* PWX-30373: Revert UT and integration test hacks. Need to mock the functionality correctly.

Signed-off-by: Jose Rivera

* PWX-28826: Increase pre-flight daemonset ready wait to 10 mins.

Signed-off-by: Jose Rivera

* PWX-28826: Fix 'TestValidate' UT. Don't error if pre-flight daemonset exists.

Signed-off-by: Jose Rivera

* Only run preflight if AWS.
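Net effect (a sketch of the resulting behavior; the annotation key is
assumed to resolve to "portworx.io/misc-args", the value of
pxutil.AnnotationMiscArgs):

  * If the checks pass on every node, the operator appends "-T dmthin" to
    the StorageCluster misc-args annotation and emits:
      Normal   PassedPreFlight   Enabling DMthin
  * If any check fails, the default btrfs backend is kept and each failed
    check is reported as:
      Warning  FailedPreFlight   <type> pre-flight check failed: <reason>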
Signed-off-by: Jose Rivera

---------

Signed-off-by: Harsh Desai
Signed-off-by: Jose Rivera
Signed-off-by: Piyush Nimbalkar
Signed-off-by: Naveen Revanna
Co-authored-by: Harsh Desai
Co-authored-by: CNBU Jenkins
Co-authored-by: Jiafeng Liao
Co-authored-by: Piyush Nimbalkar
Co-authored-by: Naveen Revanna <83608369+nrevanna@users.noreply.github.com>
---
 .../component/securitycontextconstraints.go |   5 +-
 drivers/storage/portworx/portworx.go        |  63 ++-
 drivers/storage/portworx/portworx_test.go   |  74 +++-
 drivers/storage/portworx/preflight.go       | 417 ++++++++++++++++++
 drivers/storage/storage.go                  |   2 +-
 .../storagecluster/controller_test.go       | 145 +++---
 .../storagecluster/storagecluster.go        |  45 +-
 pkg/mock/storagedriver.mock.go              |   8 +-
 pkg/preflight/utils.go                      |   3 +-
 pkg/util/util.go                            |   4 +
 10 files changed, 683 insertions(+), 83 deletions(-)
 create mode 100644 drivers/storage/portworx/preflight.go

diff --git a/drivers/storage/portworx/component/securitycontextconstraints.go b/drivers/storage/portworx/component/securitycontextconstraints.go
index d1e0cee16..38a2ce7c5 100644
--- a/drivers/storage/portworx/component/securitycontextconstraints.go
+++ b/drivers/storage/portworx/component/securitycontextconstraints.go
@@ -3,6 +3,9 @@ package component
 import (
 	"context"
 	"fmt"
+	"reflect"
+	"strconv"
+
 	"github.com/hashicorp/go-version"
 	ocp_secv1 "github.com/openshift/api/security/v1"
 	"github.com/sirupsen/logrus"
@@ -12,9 +15,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/tools/record"
-	"reflect"
 	"sigs.k8s.io/controller-runtime/pkg/client"
-	"strconv"
 
 	"k8s.io/apimachinery/pkg/types"

diff --git a/drivers/storage/portworx/portworx.go b/drivers/storage/portworx/portworx.go
index 1f30bca70..15dc4378b 100644
--- a/drivers/storage/portworx/portworx.go
+++ b/drivers/storage/portworx/portworx.go
@@ -6,6 +6,7 @@ import (
 	"math"
 	"strconv"
 	"strings"
+	"time"
 
 	version "github.com/hashicorp/go-version"
 	"github.com/sirupsen/logrus"
@@ -88,7 +89,67 @@ func (p *portworx) Init(
 	return nil
 }
 
-func (p *portworx) Validate() error {
+func (p *portworx) Validate(cluster *corev1.StorageCluster) error {
+	podSpec, err := p.GetStoragePodSpec(cluster, "")
+	if err != nil {
+		return err
+	}
+
+	preFlighter := NewPreFlighter(cluster, p.k8sClient, podSpec)
+
+	// Start the pre-flight container. The pre-flight checks at this time are specific to enabling DMthin
+	err = preFlighter.RunPreFlight()
+	if err != nil {
+		if !errors.IsAlreadyExists(err) {
+			return err
+		}
+		logrus.Debugf("pre-flight: container already running...")
+	}
+
+	cnt := 0
+	// Wait for all the pre-flight pods to finish
+	for {
+		time.Sleep(3 * time.Second) // Pause before status check
+		completed, inProgress, total, err := preFlighter.GetPreFlightStatus()
+		if err != nil {
+			logrus.Errorf("pre-flight: error getting pre-flight status: %v", err)
+			return err
+		}
+		logrus.Infof("pre-flight: Completed [%v] In Progress [%v] Total [%v]", completed, inProgress, total)
+
+		if total != 0 && completed == total {
+			logrus.Infof("pre-flight: completed...")
+			break
+		}
+
+		// Add ten-minute timeout. If we move this check into the reconcile loop we will need a different way.
+		cnt++
+		if cnt == 200 { // 3s * 200 = 600s (10 mins)
+			err = fmt.Errorf("pre-flight: pre-flight status check timed out")
+			logrus.Errorf("%v", err)
+			return err
+		}
+	}
+
+	defer func() {
+		// Clean up the pre-flight pods
+		logrus.Infof("pre-flight: cleaning pre-flight ds...")
+		err = preFlighter.DeletePreFlight()
+		if err != nil {
+			logrus.Errorf("pre-flight: error deleting pre-flight: %v", err)
+		}
+	}()
+
+	// Process all the StorageNode.Status.Checks
+	if storageNodes, err := p.storageNodesList(cluster); err == nil {
+		err = preFlighter.ProcessPreFlightResults(p.recorder, storageNodes)
+		if err != nil {
+			logrus.Errorf("pre-flight: Error processing results: %v", err)
+		}
+	} else {
+		logrus.Errorf("pre-flight incomplete: Error getting storage node list: %v", err)
+	}
+
 	return nil
 }
 
 func (p *portworx) initializeComponents() {

diff --git a/drivers/storage/portworx/portworx_test.go b/drivers/storage/portworx/portworx_test.go
index 97544dd71..62ddb4845 100644
--- a/drivers/storage/portworx/portworx_test.go
+++ b/drivers/storage/portworx/portworx_test.go
@@ -86,9 +86,81 @@ func TestInit(t *testing.T) {
 
 func TestValidate(t *testing.T) {
 	driver := portworx{}
+	cluster := &corev1.StorageCluster{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "px-cluster",
+			Namespace: "kube-test",
+		},
+	}
+
+	storageNode := &corev1.StorageNode{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "node-1",
+			Namespace: cluster.Namespace,
+		},
+	}
+
+	labels := map[string]string{
+		"name": pxPreFlightDaemonSetName,
+	}
+
+	clusterRef := metav1.NewControllerRef(cluster, pxutil.StorageClusterKind())
+	preflightDS := &appsv1.DaemonSet{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:            pxPreFlightDaemonSetName,
+			Namespace:       cluster.Namespace,
+			Labels:          labels,
+			UID:             types.UID("preflight-ds-uid"),
+			OwnerReferences: []metav1.OwnerReference{*clusterRef},
+		},
+		Spec: appsv1.DaemonSetSpec{
+			Selector: &metav1.LabelSelector{
+				MatchLabels: labels,
+			},
+		},
+	}
+
+	k8sClient := testutil.FakeK8sClient(preflightDS)
+
+	preFlightPod1 := &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:            "preflight-1",
+			Namespace:       cluster.Namespace,
+			OwnerReferences: []metav1.OwnerReference{{UID: preflightDS.UID}},
+		},
+		Status: v1.PodStatus{
+			ContainerStatuses: []v1.ContainerStatus{
+				{
+					Name:  "portworx",
+					Ready: true,
+				},
+			},
+		},
+	}
+	err := k8sClient.Create(context.TODO(), preFlightPod1)
+	require.NoError(t, err)
+
+	preflightDS.Status.DesiredNumberScheduled = int32(1)
+	err = k8sClient.Status().Update(context.TODO(), preflightDS)
+	require.NoError(t, err)
+
+	recorder := record.NewFakeRecorder(100)
+	err = driver.Init(k8sClient, runtime.NewScheme(), recorder)
+	require.NoError(t, err)
+
+	err = driver.SetDefaultsOnStorageCluster(cluster)
+	require.NoError(t, err)
+
+	err = k8sClient.Create(context.TODO(), storageNode)
+	require.NoError(t, err)
 
-	err := driver.Validate()
+	err = driver.Validate(cluster)
 	require.NoError(t, err)
+	require.Contains(t, cluster.Annotations[pxutil.AnnotationMiscArgs], "-T dmthin")
+	require.NotEmpty(t, recorder.Events)
+	<-recorder.Events // Pop the first event, which is the default telemetry-enabled event
+	require.Contains(t, <-recorder.Events,
+		fmt.Sprintf("%v %v %s", v1.EventTypeNormal, util.PassPreFlight, "Enabling DMthin"))
 }
 
 func TestGetSelectorLabels(t *testing.T) {

diff --git a/drivers/storage/portworx/preflight.go b/drivers/storage/portworx/preflight.go
new file mode 100644
index 000000000..9cde1000c
--- /dev/null
+++ b/drivers/storage/portworx/preflight.go
@@ -0,0 +1,417 @@
+package portworx
+
+import (
+	"context"
"fmt" + "strings" + + "github.com/sirupsen/logrus" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/libopenstorage/operator/drivers/storage/portworx/component" + pxutil "github.com/libopenstorage/operator/drivers/storage/portworx/util" + corev1 "github.com/libopenstorage/operator/pkg/apis/core/v1" + "github.com/libopenstorage/operator/pkg/constants" + "github.com/libopenstorage/operator/pkg/util" + k8sutil "github.com/libopenstorage/operator/pkg/util/k8s" +) + +const ( + pxPreFlightClusterRoleName = "px-pre-flight" + pxPreFlightClusterRoleBindingName = "px-pre-flight" + pxPreFlightDaemonSetName = "px-pre-flight" + // PxPreFlightServiceAccountName name of portworx pre flight service account + PxPreFlightServiceAccountName = "px-pre-flight" +) + +// PreFlightPortworx provides a set of APIs to uninstall portworx +type PreFlightPortworx interface { + // RunPreFlight runs the pre-flight daemonset + RunPreFlight() error + // GetPreFlightStatus returns the status of the pre-flight daemonset + // returns the no. of completed, in progress and total pods + GetPreFlightStatus() (int32, int32, int32, error) + // GetPreFlightPods returns the pods of the pre-flight daemonset + GetPreFlightPods() ([]*v1.Pod, error) + // ProcessPreFlightResults process StorageNode status checks + ProcessPreFlightResults(recorder record.EventRecorder, storageNodes []*corev1.StorageNode) error + // DeletePreFlight deletes the pre-flight daemonset + DeletePreFlight() error +} + +type preFlightPortworx struct { + cluster *corev1.StorageCluster + k8sClient client.Client + podSpec v1.PodSpec +} + +// NewPreFlighter returns an implementation of PreFlightPortworx interface +func NewPreFlighter( + cluster *corev1.StorageCluster, + k8sClient client.Client, + podSpec v1.PodSpec) PreFlightPortworx { + return &preFlightPortworx{ + cluster: cluster, + k8sClient: k8sClient, + podSpec: podSpec, + } +} + +func getPreFlightPodsFromNamespace(k8sClient client.Client, namespace string) (*appsv1.DaemonSet, []*v1.Pod, error) { + ds := &appsv1.DaemonSet{} + err := k8sClient.Get( + context.TODO(), + types.NamespacedName{ + Name: pxPreFlightDaemonSetName, + Namespace: namespace, + }, + ds, + ) + if err != nil { + return ds, nil, err + } + + pods, err := k8sutil.GetDaemonSetPods(k8sClient, ds) + + return ds, pods, err +} + +// GetPreFlightPods returns the pods of the pre-flight daemonset +func (u *preFlightPortworx) GetPreFlightPods() ([]*v1.Pod, error) { + _, pods, err := getPreFlightPodsFromNamespace(u.k8sClient, u.cluster.Namespace) + return pods, err +} + +func (u *preFlightPortworx) GetPreFlightStatus() (int32, int32, int32, error) { + ds, pods, err := getPreFlightPodsFromNamespace(u.k8sClient, u.cluster.Namespace) + if err != nil { + return -1, -1, -1, err + } + totalPods := ds.Status.DesiredNumberScheduled + completedPods := 0 + for _, pod := range pods { + if len(pod.Status.ContainerStatuses) > 0 { + for _, containerStatus := range pod.Status.ContainerStatuses { + if containerStatus.Name == "portworx" && containerStatus.Ready { + completedPods++ + } + } + } + } + logrus.Infof("Pre-flight Status: Completed [%v] InProgress [%v] Total Pods [%v]", completedPods, totalPods-int32(completedPods), totalPods) + return int32(completedPods), totalPods - int32(completedPods), totalPods, nil +} + +func 
+func (u *preFlightPortworx) RunPreFlight() error {
+	ownerRef := metav1.NewControllerRef(u.cluster, pxutil.StorageClusterKind())
+
+	err := u.createServiceAccount(ownerRef)
+	if err != nil {
+		if errors.IsAlreadyExists(err) {
+			logrus.Infof("runPreFlight: ServiceAccount already exists, skipping...")
+		} else {
+			return err
+		}
+	}
+
+	err = u.createClusterRole()
+	if err != nil {
+		if errors.IsAlreadyExists(err) {
+			logrus.Infof("runPreFlight: ClusterRole already exists, skipping...")
+		} else {
+			return err
+		}
+	}
+
+	err = u.createClusterRoleBinding()
+	if err != nil {
+		if errors.IsAlreadyExists(err) {
+			logrus.Infof("runPreFlight: ClusterRoleBinding already exists, skipping...")
+		} else {
+			return err
+		}
+	}
+
+	// Create daemonset from podSpec
+	labels := map[string]string{
+		"name": pxPreFlightDaemonSetName,
+	}
+
+	preflightDS := &appsv1.DaemonSet{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:            pxPreFlightDaemonSetName,
+			Namespace:       u.cluster.Namespace,
+			Labels:          labels,
+			OwnerReferences: []metav1.OwnerReference{*ownerRef},
+		},
+		Spec: appsv1.DaemonSetSpec{
+			Selector: &metav1.LabelSelector{
+				MatchLabels: labels,
+			},
+			Template: v1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: labels,
+				},
+				Spec: u.podSpec,
+			},
+		},
+	}
+
+	/* TODO PWX-28826 Run DMThin pre-flight checks
+
+	   Operator starts a daemonset with oci-monitor pods in pre-flight mode (--preflight)
+	   Operator waits for all daemonset pods to be 1/1 (for this the oci-monitor in preflight mode must not exit).
+	   oci-monitor pod has to declare it's done
+	   Operator deletes the preflight daemonset
+	   Operator reads Checks in the StorageNode for each node
+	   If Checks in any of the nodes
+	     fail: Operator sets backend to btrfs
+	           // Log, raise event and do nothing. Default already at btrfs
+	     all success: Operator sets backend to dmthin
+	           // Append dmthin backend use MiscArgs()
+	           toUpdate.Annotations[pxutil.AnnotationMiscArgs] = " -T dmthin"
+
+	           u.cluster.Annotations[pxutil.AnnotationMiscArgs] = strings.TrimSpace(miscArgs)
+	*/
+
+	// Add --pre-flight check for DMthin
+	preflightDS.Spec.Template.Spec.Containers[0].Args = append([]string{"--pre-flight", "-T", "dmthin"},
+		preflightDS.Spec.Template.Spec.Containers[0].Args...)
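+	// Mirror the cluster's image pull secret and placement rules (node
+	// affinity, tolerations) onto the DaemonSet below, and run it under the
+	// dedicated px-pre-flight service account.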
+ + if u.cluster.Spec.ImagePullSecret != nil && *u.cluster.Spec.ImagePullSecret != "" { + preflightDS.Spec.Template.Spec.ImagePullSecrets = append( + []v1.LocalObjectReference{}, + v1.LocalObjectReference{ + Name: *u.cluster.Spec.ImagePullSecret, + }, + ) + } + + preflightDS.Spec.Template.Spec.ServiceAccountName = PxPreFlightServiceAccountName + + if u.cluster.Spec.Placement != nil { + if u.cluster.Spec.Placement.NodeAffinity != nil { + preflightDS.Spec.Template.Spec.Affinity = &v1.Affinity{ + NodeAffinity: u.cluster.Spec.Placement.NodeAffinity.DeepCopy(), + } + } + + if len(u.cluster.Spec.Placement.Tolerations) > 0 { + preflightDS.Spec.Template.Spec.Tolerations = make([]v1.Toleration, 0) + for _, toleration := range u.cluster.Spec.Placement.Tolerations { + preflightDS.Spec.Template.Spec.Tolerations = append( + preflightDS.Spec.Template.Spec.Tolerations, + *(toleration.DeepCopy()), + ) + } + } + } + + err = u.k8sClient.Create(context.TODO(), preflightDS) + if err != nil { + logrus.Errorf("RunPreFlight: error creating: %v", err) + } + + return err +} + +func (u *preFlightPortworx) ProcessPreFlightResults(recorder record.EventRecorder, storageNodes []*corev1.StorageNode) error { + logrus.Infof("pre-flight: process pre-flight results...") + + passed := true + for _, node := range storageNodes { + logrus.Infof("storageNode[%s]: %#v ", node.Name, node.Status.Checks) + if len(node.Status.Checks) > 1 { + for _, check := range node.Status.Checks { + if check.Type != "status" { + msg := fmt.Sprintf("%s pre-flight check ", check.Type) + if check.Success { + msg = msg + "passed: " + check.Reason + k8sutil.InfoEvent(recorder, u.cluster, util.PassPreFlight, msg) + } else { + msg = msg + "failed: " + check.Reason + k8sutil.WarningEvent(recorder, u.cluster, util.FailedPreFlight, msg) + } + } + } + passed = false + } + } + + if passed { + // Enable DMthin via misc args + u.cluster.Annotations[pxutil.AnnotationMiscArgs] = strings.TrimSpace(u.cluster.Annotations[pxutil.AnnotationMiscArgs] + " -T dmthin") + k8sutil.InfoEvent(recorder, u.cluster, util.PassPreFlight, "Enabling DMthin") + } + + return nil +} + +func (u *preFlightPortworx) DeletePreFlight() error { + ownerRef := metav1.NewControllerRef(u.cluster, pxutil.StorageClusterKind()) + if err := k8sutil.DeleteServiceAccount(u.k8sClient, PxPreFlightServiceAccountName, u.cluster.Namespace, *ownerRef); err != nil { + return err + } + if err := k8sutil.DeleteClusterRole(u.k8sClient, pxPreFlightClusterRoleName); err != nil { + return err + } + if err := k8sutil.DeleteClusterRoleBinding(u.k8sClient, pxPreFlightClusterRoleBindingName); err != nil { + return err + } + if err := k8sutil.DeleteDaemonSet(u.k8sClient, pxPreFlightDaemonSetName, u.cluster.Namespace, *ownerRef); err != nil { + return err + } + return nil +} + +func (u *preFlightPortworx) createServiceAccount( + ownerRef *metav1.OwnerReference, +) error { + return k8sutil.CreateOrUpdateServiceAccount( + u.k8sClient, + &v1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: PxPreFlightServiceAccountName, + Namespace: u.cluster.Namespace, + OwnerReferences: []metav1.OwnerReference{*ownerRef}, + }, + }, + ownerRef, + ) +} + +func (u *preFlightPortworx) createClusterRole() error { + return k8sutil.CreateOrUpdateClusterRole( + u.k8sClient, + &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: pxPreFlightClusterRoleName, + }, + + // Portworx roles taken from drivers/storage/portworx/component/portworx_basic.go + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: 
[]string{"secrets"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"nodes"}, + Verbs: []string{"get", "list", "watch", "update"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"pods"}, + Verbs: []string{"get", "list", "watch", "delete", "update"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"pods/exec"}, + Verbs: []string{"create"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"persistentvolumeclaims", "persistentvolumes"}, + Verbs: []string{"get", "list", "create", "delete", "update"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"configmaps"}, + Verbs: []string{"get", "list", "create", "update"}, + }, + { + APIGroups: []string{"apps"}, + Resources: []string{"deployments"}, + Verbs: []string{"get", "list", "create", "update", "delete"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"services"}, + Verbs: []string{"get", "list", "create", "update", "delete"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"endpoints"}, + Verbs: []string{"get", "list", "create", "update", "delete"}, + }, + { + APIGroups: []string{"portworx.io"}, + Resources: []string{"volumeplacementstrategies"}, + Verbs: []string{"get", "list"}, + }, + { + APIGroups: []string{"storage.k8s.io"}, + Resources: []string{"storageclasses", "csinodes"}, + Verbs: []string{"get", "list"}, + }, + { + APIGroups: []string{"storage.k8s.io"}, + Resources: []string{"volumeattachments"}, + Verbs: []string{"get", "list", "create", "delete", "update"}, + }, + { + APIGroups: []string{"stork.libopenstorage.org"}, + Resources: []string{"backuplocations"}, + Verbs: []string{"get", "list"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"events"}, + Verbs: []string{"create", "patch", "update"}, + }, + { + APIGroups: []string{"core.libopenstorage.org"}, + Resources: []string{"*"}, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{"security.openshift.io"}, + Resources: []string{"securitycontextconstraints"}, + ResourceNames: []string{component.PxSCCName}, + Verbs: []string{"use"}, + }, + { + APIGroups: []string{"policy"}, + Resources: []string{"podsecuritypolicies"}, + ResourceNames: []string{constants.PrivilegedPSPName}, + Verbs: []string{"use"}, + }, + { + APIGroups: []string{"certificates.k8s.io"}, + Resources: []string{"certificatesigningrequests"}, + Verbs: []string{"get", "list", "create", "watch", "delete", "update"}, + }, + }, + }, + ) +} + +func (u *preFlightPortworx) createClusterRoleBinding() error { + return k8sutil.CreateOrUpdateClusterRoleBinding( + u.k8sClient, + &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: pxPreFlightClusterRoleBindingName, + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: PxPreFlightServiceAccountName, + Namespace: u.cluster.Namespace, + }, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "ClusterRole", + Name: pxPreFlightClusterRoleName, + APIGroup: "rbac.authorization.k8s.io", + }, + }, + ) +} diff --git a/drivers/storage/storage.go b/drivers/storage/storage.go index 9363e3f0a..996b2f072 100644 --- a/drivers/storage/storage.go +++ b/drivers/storage/storage.go @@ -27,7 +27,7 @@ type Driver interface { // Init initializes the storage driver Init(client.Client, *runtime.Scheme, record.EventRecorder) error // Validate validates if the driver is correctly configured - Validate() error + Validate(cluster *corev1.StorageCluster) error // String returns the string name of the driver String() string // UpdateDriver updates the driver with 
the current cluster and node conditions. diff --git a/pkg/controller/storagecluster/controller_test.go b/pkg/controller/storagecluster/controller_test.go index 15bf6dfcd..7762e8f10 100644 --- a/pkg/controller/storagecluster/controller_test.go +++ b/pkg/controller/storagecluster/controller_test.go @@ -425,7 +425,7 @@ func TestWaitForMigrationApproval(t *testing.T) { kubernetesVersion: k8sVersion, } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return("mock-driver").AnyTimes() @@ -1016,7 +1016,7 @@ func TestReconcileWithDaemonSet(t *testing.T) { } daemonSetList := &appsv1.DaemonSetList{Items: []appsv1.DaemonSet{daemonSet}} - k8sVersion, _ := version.NewVersion(minSupportedK8sVersion) + k8sVersion, _ := version.NewVersion("1.19.0") k8sClient := testutil.FakeK8sClient(cluster, daemonSetList) driver := testutil.MockDriver(mockCtrl) controller := Controller{ @@ -1028,7 +1028,8 @@ func TestReconcileWithDaemonSet(t *testing.T) { driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return("mockDriverName").AnyTimes() - driver.EXPECT().Validate().Return(fmt.Errorf("daemonset is present")) + driver.EXPECT().GetStorageNodes(gomock.Any()).Return(nil, nil).AnyTimes() + driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() request := reconcile.Request{ NamespacedName: types.NamespacedName{ @@ -1091,7 +1092,7 @@ func TestFailureDuringStorkInstallation(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().PreInstall(gomock.Any()).Return(nil) driver.EXPECT().GetStorkDriverName().Return("mock", nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()) @@ -1139,7 +1140,7 @@ func TestFailureDuringDriverPreInstall(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().PreInstall(gomock.Any()).Return(fmt.Errorf("preinstall error")) driver.EXPECT().UpdateDriver(gomock.Any()).Return(nil) driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()) @@ -1185,7 +1186,7 @@ func TestStorageClusterFailedSyncObjectModified(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().UpdateDriver(gomock.Any()).Return(nil) driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).Return(fmt.Errorf(k8s.UpdateRevisionConflictErr)) driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() @@ -1231,7 +1232,7 @@ func TestStoragePodsShouldNotBeScheduledIfDisabled(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().PreInstall(gomock.Any()).Return(nil) driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -1391,7 +1392,7 @@ func TestStoragePodGetsScheduled(t *testing.T) { Spec: expectedPodSpec, } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() 
driver.EXPECT().PreInstall(gomock.Any()).Return(nil) driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -1505,7 +1506,7 @@ func TestStoragePodGetsScheduledK8s1_24(t *testing.T) { Spec: expectedPodSpec, } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().PreInstall(gomock.Any()).Return(nil) driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -1587,7 +1588,7 @@ func TestStorageNodeGetsCreated(t *testing.T) { clusterRef := metav1.NewControllerRef(cluster, controllerKind) storageLabels := map[string]string{"foo": "bar"} - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().PreInstall(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(storageLabels).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -1902,7 +1903,7 @@ func TestStoragePodGetsScheduledWithCustomNodeSpecs(t *testing.T) { *podTemplate1, *podTemplate2, *podTemplate3, } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().PreInstall(gomock.Any()).Return(nil) driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -2014,7 +2015,7 @@ func TestFailedStoragePodsGetRemoved(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -2117,7 +2118,7 @@ func TestExtraStoragePodsGetRemoved(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -2245,7 +2246,7 @@ func TestStoragePodsAreRemovedIfDisabled(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -2340,7 +2341,7 @@ func TestStoragePodFailureDueToNodeSelectorNotMatch(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().GetStoragePodSpec(gomock.Any(), gomock.Any()).Return(v1.PodSpec{}, nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()) driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() @@ -2437,7 +2438,7 @@ func TestStoragePodSchedulingWithTolerations(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().GetStoragePodSpec(gomock.Any(), gomock.Any()).Return(v1.PodSpec{}, nil).AnyTimes() 
driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).Times(3) driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() @@ -2578,7 +2579,7 @@ func TestFailureDuringPodTemplateCreation(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() driver.EXPECT().PreInstall(gomock.Any()).Return(nil) @@ -2612,7 +2613,7 @@ func TestFailureDuringPodTemplateCreation(t *testing.T) { require.Contains(t, eventMsg, fmt.Sprintf("%v %v", v1.EventTypeWarning, util.FailedSyncReason)) require.Contains(t, eventMsg, "pod template error for k8s-node-1") - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() // When pod template creation passes for some and fails for others driver.EXPECT().PreInstall(gomock.Any()).Return(nil) driver.EXPECT().UpdateDriver(gomock.Any()).Return(nil) @@ -2670,7 +2671,7 @@ func TestFailureDuringCreateDeletePods(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()) driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -2751,7 +2752,7 @@ func TestTimeoutFailureDuringCreatePods(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()) driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -2802,7 +2803,7 @@ func TestUpdateClusterStatusFromDriver(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()) driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -2864,7 +2865,7 @@ func TestUpdateClusterStatusErrorFromDriver(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()) driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -2920,7 +2921,7 @@ func TestFailedPreInstallFromDriver(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().UpdateDriver(gomock.Any()).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()) driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() @@ -2984,7 +2985,7 @@ func TestUpdateDriverWithInstanceInformation(t *testing.T) { }, } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().PreInstall(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -3121,7 +3122,7 @@ func 
TestGarbageCollection(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return("pxd").AnyTimes() @@ -3212,7 +3213,7 @@ func TestDeleteStorageClusterWithoutFinalizers(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -3294,7 +3295,7 @@ func TestDeleteStorageClusterWithFinalizers(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() // Empty delete condition should not remove finalizer driver.EXPECT().DeleteStorage(gomock.Any()).Return(nil, nil) driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() @@ -3458,7 +3459,7 @@ func TestDeleteStorageClusterShouldSetTelemetryCertOwnerRef(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() // Empty delete condition should not remove finalizer driver.EXPECT().DeleteStorage(gomock.Any()).Return(nil, nil) driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() @@ -3556,7 +3557,7 @@ func TestDeleteStorageClusterShouldDeleteStork(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return("pxd").AnyTimes() @@ -3689,7 +3690,7 @@ func TestDeleteStorageClusterShouldRemoveMigrationLabels(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() condition := &corev1.ClusterCondition{ Type: corev1.ClusterConditionTypeDelete, Status: corev1.ClusterConditionStatusCompleted, @@ -3765,7 +3766,7 @@ func TestRollingUpdateWithMinReadySeconds(t *testing.T) { } clusterRef := metav1.NewControllerRef(cluster, controllerKind) - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -3871,7 +3872,7 @@ func TestUpdateStorageClusterWithRollingUpdateStrategy(t *testing.T) { } clusterRef := metav1.NewControllerRef(cluster, controllerKind) - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -4029,7 +4030,7 @@ func TestUpdateStorageClusterBasedOnStorageNodeStatuses(t *testing.T) { storageNodes = append(storageNodes, createStorageNode("k8s-node-2", true)) storageNodes = append(storageNodes, createStorageNode("not-k8s-node", false)) 
- driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -4241,7 +4242,7 @@ func TestUpdateStorageClusterWithOpenshiftUpgrade(t *testing.T) { } clusterRef := metav1.NewControllerRef(cluster, controllerKind) - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -4378,7 +4379,7 @@ func TestUpdateStorageClusterShouldNotExceedMaxUnavailable(t *testing.T) { } clusterRef := metav1.NewControllerRef(cluster, controllerKind) - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -4588,7 +4589,7 @@ func TestUpdateStorageClusterWithPercentageMaxUnavailable(t *testing.T) { } clusterRef := metav1.NewControllerRef(cluster, controllerKind) - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -4751,7 +4752,7 @@ func TestUpdateStorageClusterWithInvalidMaxUnavailableValue(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()) driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -4817,7 +4818,7 @@ func TestUpdateStorageClusterWhenDriverReportsPodNotUpdated(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -4885,7 +4886,7 @@ func TestUpdateStorageClusterShouldRestartPodIfItDoesNotHaveAnyHash(t *testing.T nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -4947,7 +4948,7 @@ func TestUpdateStorageClusterImagePullSecret(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -5056,7 +5057,7 @@ func TestUpdateStorageClusterCustomImageRegistry(t *testing.T) { nodeInfoMap: 
make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -5163,7 +5164,7 @@ func TestUpdateStorageClusterKvdbSpec(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -5283,7 +5284,7 @@ func TestUpdateStorageClusterResourceRequirements(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -5399,7 +5400,7 @@ func TestUpdateStorageClusterCloudStorageSpec(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -5672,7 +5673,7 @@ func TestUpdateStorageClusterStorageSpec(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -5898,7 +5899,7 @@ func TestUpdateStorageClusterNetworkSpec(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -5991,7 +5992,7 @@ func TestUpdateStorageClusterEnvVariables(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -6086,7 +6087,7 @@ func TestUpdateStorageClusterRuntimeOptions(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -6178,7 +6179,7 @@ func TestUpdateStorageClusterVolumes(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + 
driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -6441,7 +6442,7 @@ func TestUpdateStorageClusterSecretsProvider(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -6532,7 +6533,7 @@ func TestUpdateStorageClusterStartPort(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -6626,7 +6627,7 @@ func TestUpdateStorageClusterCSISpec(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -6801,7 +6802,7 @@ func TestUpdateStorageClusterNodeSpec(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -7268,7 +7269,7 @@ func TestUpdateStorageClusterK8sNodeChanges(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -7363,7 +7364,7 @@ func TestUpdateStorageClusterShouldNotRestartPodsForSomeOptions(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -7531,7 +7532,7 @@ func TestUpdateStorageClusterShouldRestartPodIfItsHistoryHasInvalidSpec(t *testi nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -7614,7 +7615,7 @@ func TestUpdateStorageClusterSecurity(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() 
driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -7925,7 +7926,7 @@ func TestUpdateStorageCustomAnnotations(t *testing.T) { nodeInfoMap: make(map[string]*k8s.NodeInfo), } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -8125,7 +8126,7 @@ func TestUpdateClusterShouldDedupOlderRevisionsInHistory(t *testing.T) { } clusterRef := metav1.NewControllerRef(cluster, controllerKind) - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -8278,7 +8279,7 @@ func TestUpdateClusterShouldHandleHashCollisions(t *testing.T) { coreops.SetInstance(coreops.New(fakek8sclient.NewSimpleClientset())) operatorops.SetInstance(operatorops.New(fakeClient)) - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -8449,7 +8450,7 @@ func TestUpdateClusterShouldDedupRevisionsAnywhereInHistory(t *testing.T) { } clusterRef := metav1.NewControllerRef(cluster, controllerKind) - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -8597,7 +8598,7 @@ func TestHistoryCleanup(t *testing.T) { } clusterRef := metav1.NewControllerRef(cluster, controllerKind) - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -8977,7 +8978,7 @@ func TestDoesTelemetryMatch(t *testing.T) { kubernetesVersion: k8sVersion, } - driver.EXPECT().Validate().Return(nil).AnyTimes() + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes() driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes() driver.EXPECT().String().Return(driverName).AnyTimes() @@ -9076,12 +9077,17 @@ func TestEKSPreflightCheck(t *testing.T) { } k8sClient := testutil.FakeK8sClient(cluster) + driver := testutil.MockDriver(mockCtrl) + driver.EXPECT().Validate(gomock.Any()).Return(nil).AnyTimes() + controller := Controller{ client: k8sClient, + Driver: driver, } // TestCase: eks cloud permission check failed errMsg := "eks cloud permission check failed" + preflightOps.EXPECT().ProviderName().Return(string(cloudops.AWS)).AnyTimes() preflightOps.EXPECT().K8sDistributionName().Return("eks").AnyTimes() preflightOps.EXPECT().CheckCloudDrivePermission(cluster).Return(fmt.Errorf(errMsg)) @@ -9133,7 +9139,7 @@ func TestStorageClusterStateDuringValidation(t *testing.T) { 
 		},
 	}
 
-	k8sVersion, _ := version.NewVersion(minSupportedK8sVersion)
+	k8sVersion, _ := version.NewVersion("1.19.0")
 	k8sClient := testutil.FakeK8sClient(cluster)
 	recorder := record.NewFakeRecorder(10)
 	driver := testutil.MockDriver(mockCtrl)
@@ -9143,10 +9149,16 @@
 		recorder:          recorder,
 		kubernetesVersion: k8sVersion,
 	}
-	validationErr := fmt.Errorf("driver validation failed")
-	driver.EXPECT().Validate().Return(validationErr).AnyTimes()
+
+	validationErr := fmt.Errorf("minimum supported kubernetes version by the operator is 1.21.0")
+	driver.EXPECT().Validate(gomock.Any()).Return(validationErr).AnyTimes()
 	driver.EXPECT().String().Return("mock-driver").AnyTimes()
 	driver.EXPECT().GetSelectorLabels().Return(nil).AnyTimes()
+	driver.EXPECT().UpdateDriver(gomock.Any()).Return(nil).AnyTimes()
+	driver.EXPECT().SetDefaultsOnStorageCluster(gomock.Any()).AnyTimes()
+	driver.EXPECT().PreInstall(gomock.Any()).Return(nil).AnyTimes()
+	driver.EXPECT().GetStorageNodes(gomock.Any()).Return(nil, nil).AnyTimes()
+	driver.EXPECT().UpdateStorageClusterStatus(gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
 
 	request := reconcile.Request{
 		NamespacedName: types.NamespacedName{

diff --git a/pkg/controller/storagecluster/storagecluster.go b/pkg/controller/storagecluster/storagecluster.go
index 095a16107..6301843cc 100644
--- a/pkg/controller/storagecluster/storagecluster.go
+++ b/pkg/controller/storagecluster/storagecluster.go
@@ -300,10 +300,6 @@ func (c *Controller) validate(cluster *corev1.StorageCluster) error {
 	if err := c.validateCloudStorageLabelKey(cluster); err != nil {
 		return err
 	}
-	if err := c.Driver.Validate(); err != nil {
-		return err
-	}
-
 	return nil
 }
 
@@ -427,6 +423,7 @@ func (c *Controller) runPreflightCheck(cluster *corev1.StorageCluster) error {
 		return fmt.Errorf("please make sure your cluster meet all prerequisites and rerun preflight check")
 	}
 	// preflight passed or not required
+	logrus.Infof("preflight check not required")
 	return nil
 }
 
@@ -441,15 +438,51 @@ func (c *Controller) runPreflightCheck(cluster *corev1.StorageCluster) error {
 			logrus.WithError(err).Errorf("permission check for eks cloud drive failed")
 		}
 	}
-	// TODO: validate cloud permission for other providers as well
+	// Skip over driver Validate() if an error has already occurred
+	if err == nil {
+		// Create StorageNodes to return pre-flight checks used by c.Driver.Validate()
+		k8sNodeList := &v1.NodeList{}
+		err = c.client.List(context.TODO(), k8sNodeList)
+		if err == nil {
+			for _, node := range k8sNodeList.Items {
+				logrus.Infof("Create pre-flight storage node entry for node: %s", node.Name)
+				c.createStorageNode(cluster, node.Name)
+			}
+		} else {
+			logrus.WithError(err).Errorf("Failed to get cluster nodes")
+		}
+
+		// Run driver specific pre-flights
+		if err = c.Driver.Validate(toUpdate); err != nil {
+			logrus.WithError(err).Errorf("pre-flight validate failed")
+		}
+
+		// Delete StorageNodes created for c.Driver.Validate() checks.
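+		// These entries only carry the per-node pre-flight check results;
+		// the regular reconcile flow recreates the real StorageNode objects
+		// once the cluster is deployed.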
+		storageNodes := &corev1.StorageNodeList{}
+		serr := c.client.List(context.TODO(), storageNodes,
+			&client.ListOptions{Namespace: toUpdate.Namespace})
+		if serr == nil {
+			for _, storageNode := range storageNodes.Items {
+				logrus.Infof("Delete validate() storage node entry for node: %s", storageNode.Name)
+				serr = c.client.Delete(context.TODO(), storageNode.DeepCopy())
+				if serr != nil {
+					logrus.WithError(serr).Errorf("failed to delete storage node entry %s", storageNode.Name)
+				}
+			}
+		} else {
+			logrus.WithError(serr).Errorf("Failed to get StorageNodes used for validate.")
+		}
+	}
+
 	condition := &corev1.ClusterCondition{
 		Source: pxutil.PortworxComponentName,
 		Type:   corev1.ClusterConditionTypePreflight,
 	}
+
 	if err != nil {
-		logrus.Infof("storage cluster preflight check failed")
+		logrus.Errorf("storage cluster preflight check failed")
 		condition.Status = corev1.ClusterConditionStatusFailed
 		condition.Message = err.Error()
 	} else {

diff --git a/pkg/mock/storagedriver.mock.go b/pkg/mock/storagedriver.mock.go
index 3a6f45e51..c98939796 100644
--- a/pkg/mock/storagedriver.mock.go
+++ b/pkg/mock/storagedriver.mock.go
@@ -242,15 +242,15 @@ func (mr *MockDriverMockRecorder) UpdateStorageClusterStatus(arg0, arg1 interfac
 }
 
 // Validate mocks base method.
-func (m *MockDriver) Validate() error {
+func (m *MockDriver) Validate(arg0 *v1.StorageCluster) error {
 	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "Validate")
+	ret := m.ctrl.Call(m, "Validate", arg0)
 	ret0, _ := ret[0].(error)
 	return ret0
 }
 
 // Validate indicates an expected call of Validate.
-func (mr *MockDriverMockRecorder) Validate() *gomock.Call {
+func (mr *MockDriverMockRecorder) Validate(arg0 interface{}) *gomock.Call {
 	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Validate", reflect.TypeOf((*MockDriver)(nil).Validate))
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Validate", reflect.TypeOf((*MockDriver)(nil).Validate), arg0)
 }

diff --git a/pkg/preflight/utils.go b/pkg/preflight/utils.go
index 77455bf8d..72fe76c3d 100644
--- a/pkg/preflight/utils.go
+++ b/pkg/preflight/utils.go
@@ -11,8 +11,7 @@ func IsEKS() bool {
 
 // RequiresCheck returns whether a preflight check is needed based on the platform
 func RequiresCheck() bool {
-	// TODO: add other scenarios here
-	return IsEKS()
+	return Instance().ProviderName() == string(cloudops.AWS)
 }
 
 // RunningOnCloud checks whether portworx is running on cloud

diff --git a/pkg/util/util.go b/pkg/util/util.go
index 285cf1833..9a696297b 100644
--- a/pkg/util/util.go
+++ b/pkg/util/util.go
@@ -53,6 +53,10 @@ const (
 	TelemetryEnabledReason = "TelemetryEnabled"
 	// TelemetryDisabledReason is added to an event when the telemetry is disabled by operator
 	TelemetryDisabledReason = "TelemetryDisabled"
+	// FailedPreFlight is added to denote pre-flight failure.
+	FailedPreFlight = "FailedPreFlight"
+	// PassPreFlight is added to denote pre-flight passed.
+	PassPreFlight = "PassedPreFlight"
 	// MigrationDryRunCompletedReason is added to an event when dry run is completed
 	MigrationDryRunCompletedReason = "MigrationDryRunCompleted"