Add migration upgrade/downgrade test
wongma7 committed Jun 11, 2021
1 parent d2f9ba6 commit 05de825
Showing 4 changed files with 1,471 additions and 0 deletions.
279 changes: 279 additions & 0 deletions tests/e2e-upgrade/e2e_test.go
@@ -0,0 +1,279 @@
package e2e_upgrade

import (
    "context"
    "errors"
    "flag"
    "fmt"
    "math/rand"
    "path/filepath"
    "regexp"
    "strings"
    "testing"
    "time"

    "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/util/homedir"
    "k8s.io/kubernetes/test/e2e/framework"
    frameworkconfig "k8s.io/kubernetes/test/e2e/framework/config"
    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    "k8s.io/kubernetes/test/e2e/storage/utils"
)

var (
    kopsBinaryPath  = flag.String("binary", "kops", "path to the kops binary")
    kopsStateStore  = flag.String("state", "s3://k8s-kops-csi-e2e", "kops state store")
    kopsClusterName = flag.String("name", "", "kops cluster name")
)

// TestKopsMigration tests the configurations described here:
// https://github.com/kubernetes/community/blob/master/contributors/design-proposals/storage/csi-migration.md#upgradedowngradeskew-testing
func TestKopsMigration(t *testing.T) {
    RegisterFailHandler(ginkgo.Fail)
    ginkgo.RunSpecs(t, "TestKopsMigration")
}

func init() {
    rand.Seed(time.Now().UnixNano())

    testing.Init()
    frameworkconfig.CopyFlags(frameworkconfig.Flags, flag.CommandLine)
    framework.RegisterCommonFlags(flag.CommandLine)
    framework.RegisterClusterFlags(flag.CommandLine)

    flag.Parse()

    if home := homedir.HomeDir(); home != "" && framework.TestContext.KubeConfig == "" {
        framework.TestContext.KubeConfig = filepath.Join(home, ".kube", "config")
    }
    framework.AfterReadingAllFlags(&framework.TestContext)

    ginkgo.Describe("Kops", func() {
        var (
            k         *kops
            clientset *kubernetes.Clientset
            f         *framework.Framework
        )
        k = &kops{*kopsBinaryPath, *kopsStateStore, *kopsClusterName}
        var err error
        clientset, err = k.exportKubecfg(framework.TestContext.KubeConfig)
        if err != nil {
            panic(err)
        }
        f = framework.NewFramework("kops-migrate", framework.Options{}, clientset)

        ginkgo.It("should call csi plugin for all operations after migration toggled on", func() {
            pvc := toggleMigrationAndVerify(k, f, false, nil)
            toggleMigrationAndVerify(k, f, true, pvc)
        })

        ginkgo.It("should call in-tree plugin for all operations after migration toggled off", func() {
            pvc := toggleMigrationAndVerify(k, f, true, nil)
            toggleMigrationAndVerify(k, f, false, pvc)
        })

        ginkgo.It("should call in-tree plugin for attach & mount and csi plugin for provision after kube-controller-manager migration toggled on and kubelet migration toggled off", func() {
            // TODO
        })
    })
}

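// toggleMigrationAndVerify toggles CSI migration on or off for the
// kube-controller-manager and kubelet via kops, then creates a Pod with a
// fresh PVC and verifies that provisioning, attach, and mount were all
// handled by the expected plugin. If preTogglePVC is non-nil, it also
// verifies that a volume from before the toggle gets re-attached and
// re-mounted by the expected plugin. The fresh PVC is returned so a
// subsequent toggle can reuse it.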
func toggleMigrationAndVerify(k *kops, f *framework.Framework, migrationOn bool, preTogglePVC *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
    var err error
    var v *verifier
    if migrationOn {
        ginkgo.By("Toggling kube-controller-manager migration ON")
        err = k.toggleMigration("kubeControllerManager", migrationOn)
        Expect(err).NotTo(HaveOccurred())

        ginkgo.By("Toggling kubelet migration ON")
        err = k.toggleMigration("kubelet", migrationOn)
        Expect(err).NotTo(HaveOccurred())

        v = &verifier{
            name:        "csi",
            provisioner: "ebs.csi.aws.com",
            plugin:      "kubernetes.io/csi/ebs.csi.aws.com",
        }
    } else {
        ginkgo.By("Toggling kubelet migration OFF")
        err = k.toggleMigration("kubelet", migrationOn)
        Expect(err).NotTo(HaveOccurred())

        ginkgo.By("Toggling kube-controller-manager migration OFF")
        err = k.toggleMigration("kubeControllerManager", migrationOn)
        Expect(err).NotTo(HaveOccurred())

        v = &verifier{
            name:        "in-tree",
            provisioner: "kubernetes.io/aws-ebs",
            plugin:      "kubernetes.io/aws-ebs",
        }
    }

    clientset, err := k.exportKubecfg(framework.TestContext.KubeConfig)
    Expect(err).NotTo(HaveOccurred())
    f.ClientSet = clientset

    if preTogglePVC != nil {
        ginkgo.By("Creating post-toggle Pod using pre-toggle PVC")
        pod, _, preTogglePV, err := createPodPVC(f, preTogglePVC)
        Expect(err).NotTo(HaveOccurred())

        ginkgo.By(fmt.Sprintf("Verifying pre-toggle PV %q got re-attached by %s", preTogglePV.Name, v.name))
        err = v.verifyAttach(f, preTogglePV)
        Expect(err).NotTo(HaveOccurred())

        ginkgo.By(fmt.Sprintf("Verifying pre-toggle PV %q got re-mounted by %s", preTogglePV.Name, v.name))
        err = v.verifyMount(f, preTogglePV, pod.Spec.NodeName)
        Expect(err).NotTo(HaveOccurred())
    }

    ginkgo.By("Creating post-toggle Pod using post-toggle PVC")
    pod, pvc, pv, err := createPodPVC(f, nil)
    Expect(err).NotTo(HaveOccurred())

    ginkgo.By(fmt.Sprintf("Verifying post-toggle PV %q got provisioned by %s", pv.Name, v.name))
    err = v.verifyProvision(pv)
    Expect(err).NotTo(HaveOccurred())

    ginkgo.By(fmt.Sprintf("Verifying post-toggle PV %q got attached by %s", pv.Name, v.name))
    err = v.verifyAttach(f, pv)
    Expect(err).NotTo(HaveOccurred())

    ginkgo.By(fmt.Sprintf("Verifying post-toggle PV %q got mounted by %s", pv.Name, v.name))
    err = v.verifyMount(f, pv, pod.Spec.NodeName)
    Expect(err).NotTo(HaveOccurred())

    return pvc
}

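// createPodPVC creates a 1Gi PVC (unless an existing one is supplied) and a
// Pod that uses it, waits for the Pod to be running, and returns the
// refreshed Pod, PVC, and bound PV objects.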
func createPodPVC(f *framework.Framework, pvc *v1.PersistentVolumeClaim) (*v1.Pod, *v1.PersistentVolumeClaim, *v1.PersistentVolume, error) {
    clientset := f.ClientSet
    ns := f.Namespace.Name
    var err error

    if pvc == nil {
        pvc = &v1.PersistentVolumeClaim{
            ObjectMeta: metav1.ObjectMeta{
                GenerateName: f.BaseName,
            },
            Spec: v1.PersistentVolumeClaimSpec{
                AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
                Resources: v1.ResourceRequirements{
                    Requests: v1.ResourceList{
                        v1.ResourceStorage: resource.MustParse("1Gi"),
                    },
                },
            },
        }
        pvc, err = clientset.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc, metav1.CreateOptions{})
        if err != nil {
            return nil, nil, nil, err
        }
    }

    pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, false, "")
    pod, err = clientset.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
    if err != nil {
        return nil, nil, nil, err
    }

    err = e2epod.WaitForPodNameRunningInNamespace(clientset, pod.Name, ns)
    if err != nil {
        return nil, nil, nil, err
    }

    pod, err = clientset.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{})
    if err != nil {
        return nil, nil, nil, err
    }

    pvc, err = clientset.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
    if err != nil {
        return nil, nil, nil, err
    }

    pv, err := clientset.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
    if err != nil {
        return nil, nil, nil, err
    }

    return pod, pvc, pv, nil
}

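// verifier describes which plugin is expected to handle volume operations:
// provisioner is matched against the PV's provisioned-by annotation, and
// plugin is matched against kube-controller-manager and kubelet logs.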
type verifier struct {
    name        string
    provisioner string
    plugin      string
}

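// verifyProvision checks the PV's "pv.kubernetes.io/provisioned-by"
// annotation against the expected provisioner.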
func (v *verifier) verifyProvision(pv *v1.PersistentVolume) error {
    provisionedBy, ok := pv.Annotations["pv.kubernetes.io/provisioned-by"]
    if !ok {
        return errors.New("provisioned-by annotation missing")
    } else if provisionedBy != v.provisioner {
        return fmt.Errorf("provisioned-by annotation is %q but expected %q", provisionedBy, v.provisioner)
    }
    return nil
}

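// verifyAttach searches the kube-controller-manager logs for an
// AttachVolume.Attach entry that mentions both the expected plugin and the
// volume ID.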
func (v *verifier) verifyAttach(f *framework.Framework, pv *v1.PersistentVolume) error {
    segments := strings.Split(pv.Spec.AWSElasticBlockStore.VolumeID, "/")
    volumeID := segments[len(segments)-1]
    re := regexp.MustCompile(fmt.Sprintf("AttachVolume.Attach.*%s.*%s", v.plugin, volumeID))
    logs, err := kubeControllerManagerLogs(f.ClientSet)
    if err != nil {
        return err
    }
    match := re.FindString(logs)
    if match == "" {
        return fmt.Errorf("regexp %q not found", re)
    }
    return nil
}

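// verifyMount searches the kubelet journal on the Pod's node for a
// MountVolume.Mount entry that mentions both the expected plugin and the
// volume ID.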
func (v *verifier) verifyMount(f *framework.Framework, pv *v1.PersistentVolume, nodeName string) error {
    segments := strings.Split(pv.Spec.AWSElasticBlockStore.VolumeID, "/")
    volumeID := segments[len(segments)-1]
    re := regexp.MustCompile(fmt.Sprintf("MountVolume.Mount.*%s.*%s", v.plugin, volumeID))
    hostExec := utils.NewHostExec(f)
    node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
    if err != nil {
        return err
    }
    logs, err := hostExec.IssueCommandWithResult("journalctl -u kubelet", node)
    if err != nil {
        return err
    }
    match := re.FindString(logs)
    if match == "" {
        return fmt.Errorf("regexp %q not found", re)
    }
    return nil
}

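// podLogs returns the logs of the first kube-system Pod whose name starts
// with podNamePrefix.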
func podLogs(clientset kubernetes.Interface, podNamePrefix string) (string, error) {
    pods, err := clientset.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), metav1.ListOptions{})
    if err != nil {
        return "", err
    }
    for _, pod := range pods.Items {
        if strings.HasPrefix(pod.Name, podNamePrefix) {
            body, err := clientset.CoreV1().Pods(metav1.NamespaceSystem).GetLogs(pod.Name, &v1.PodLogOptions{}).Do(context.TODO()).Raw()
            if err != nil {
                return "", err
            }
            return string(body), nil
        }
    }
    return "", fmt.Errorf("%q pod not found", podNamePrefix)
}

func kubeControllerManagerLogs(clientset kubernetes.Interface) (string, error) {
    return podLogs(clientset, "kube-controller-manager")
}
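
Only two of the four changed files are shown here; the kops helper the test drives — constructed positionally with the binary path, state store, and cluster name, and exposing exportKubecfg and toggleMigration — presumably lives in one of the files not shown. A minimal sketch of what that helper could look like follows; the field names, the use of `kops export kubecfg --admin` via the KUBECONFIG environment variable, and the spec-editing strategy in toggleMigration are assumptions for illustration, not the committed implementation.

// kops.go (hypothetical sketch, not the committed file)
package e2e_upgrade

import (
    "fmt"
    "os"
    "os/exec"

    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
)

// kops wraps the kops CLI for a single cluster and state store.
type kops struct {
    binaryPath  string // path to the kops binary, e.g. "kops"
    stateStore  string // e.g. "s3://k8s-kops-csi-e2e"
    clusterName string
}

// exportKubecfg runs `kops export kubecfg --admin` with KUBECONFIG pointed
// at kubeconfigPath, then builds a clientset from the resulting file.
func (k *kops) exportKubecfg(kubeconfigPath string) (*kubernetes.Clientset, error) {
    cmd := exec.Command(k.binaryPath, "export", "kubecfg", "--admin",
        "--name", k.clusterName, "--state", k.stateStore)
    cmd.Env = append(os.Environ(), "KUBECONFIG="+kubeconfigPath)
    if out, err := cmd.CombinedOutput(); err != nil {
        return nil, fmt.Errorf("kops export kubecfg: %v: %s", err, out)
    }
    config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
    if err != nil {
        return nil, err
    }
    return kubernetes.NewForConfig(config)
}

// toggleMigration would set the CSIMigrationAWS feature gate for the given
// component ("kubelet" or "kubeControllerManager") in the cluster spec and
// roll the cluster so the change takes effect. The editing mechanism is an
// assumption: one route is `kops get cluster -o yaml`, setting
// spec.<component>.featureGates.CSIMigrationAWS, `kops replace -f -`, then
// `kops update cluster --yes` and `kops rolling-update cluster --yes`.
func (k *kops) toggleMigration(component string, on bool) error {
    return fmt.Errorf("sketch only: set %s featureGates.CSIMigrationAWS=%t and roll the cluster", component, on)
}

With the flags wired up as above, the suite would be invoked along the lines of `go test ./tests/e2e-upgrade -name <cluster-name> -state s3://<state-store>`.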
41 changes: 41 additions & 0 deletions tests/e2e-upgrade/go.mod
@@ -0,0 +1,41 @@
module github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e-upgrade

go 1.16

require (
    github.com/onsi/ginkgo v1.11.0
    github.com/onsi/gomega v1.7.0
    k8s.io/api v0.21.1
    k8s.io/apimachinery v0.21.1
    k8s.io/client-go v0.21.1
    k8s.io/kubernetes v0.0.0-00010101000000-000000000000
)

replace k8s.io/code-generator => k8s.io/code-generator v0.21.1

replace (
    k8s.io/api => k8s.io/api v0.21.1
    k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.21.1
    k8s.io/apimachinery => k8s.io/apimachinery v0.21.1
    k8s.io/apiserver => k8s.io/apiserver v0.21.1
    k8s.io/cli-runtime => k8s.io/cli-runtime v0.21.1
    k8s.io/client-go => k8s.io/client-go v0.21.1
    k8s.io/cloud-provider => k8s.io/cloud-provider v0.21.1
    k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.21.1
    k8s.io/component-base => k8s.io/component-base v0.21.1
    k8s.io/component-helpers => k8s.io/component-helpers v0.21.1
    k8s.io/controller-manager => k8s.io/controller-manager v0.21.1
    k8s.io/cri-api => k8s.io/cri-api v0.21.1
    k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.21.1
    k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.21.1
    k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.21.1
    k8s.io/kube-proxy => k8s.io/kube-proxy v0.21.1
    k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.21.1
    k8s.io/kubectl => k8s.io/kubectl v0.21.1
    k8s.io/kubelet => k8s.io/kubelet v0.21.1
    k8s.io/kubernetes => k8s.io/kubernetes v1.21.0
    k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.21.1
    k8s.io/metrics => k8s.io/metrics v0.21.1
    k8s.io/mount-utils => k8s.io/mount-utils v0.21.1
    k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.21.1
)
