Skip to content

Commit

Permalink
add integration tests
Browse files Browse the repository at this point in the history
  • Loading branch information
jmdeal committed Dec 6, 2023
1 parent dafe148 commit 740a1af
Showing 1 changed file with 186 additions and 78 deletions.
264 changes: 186 additions & 78 deletions test/suites/integration/storage_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,104 +16,212 @@ package integration_test

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	"github.com/samber/lo"

	"sigs.k8s.io/karpenter/pkg/test"
)

// This test requires the EBS CSI driver to be installed
var _ = Describe("Dynamic PVC", func() {
It("should run a pod with a dynamic persistent volume", func() {
// Ensure that the EBS driver is installed, or we can't run the test.
var ds appsv1.DaemonSet
if err := env.Client.Get(env.Context, client.ObjectKey{
Namespace: "kube-system",
Name: "ebs-csi-node",
}, &ds); err != nil {
if errors.IsNotFound(err) {
Skip(fmt.Sprintf("skipping dynamic PVC test due to missing EBS driver %s", err))
} else {
Fail(fmt.Sprintf("determining EBS driver status, %s", err))
}
}
storageClassName := "ebs-sc-test"
bindMode := storagev1.VolumeBindingWaitForFirstConsumer
sc := test.StorageClass(test.StorageClassOptions{
ObjectMeta: metav1.ObjectMeta{
Name: storageClassName,
},
Provisioner: aws.String("ebs.csi.aws.com"),
VolumeBindingMode: &bindMode,
var _ = Describe("Persistent Volumes", func() {
Context("Static", func() {
It("should run a pod with a pre-bound persistent volume (empty storage class)", func() {
pvc := test.PersistentVolumeClaim(test.PersistentVolumeClaimOptions{
VolumeName: "test-volume",
StorageClassName: lo.ToPtr(""),
})
pv := test.PersistentVolume(test.PersistentVolumeOptions{
ObjectMeta: metav1.ObjectMeta{
Name: pvc.Spec.VolumeName,
},
})
pod := test.Pod(test.PodOptions{
PersistentVolumeClaims: []string{pvc.Name},
})

env.ExpectCreated(nodeClass, nodePool, pv, pvc, pod)
env.EventuallyExpectHealthy(pod)
env.ExpectCreatedNodeCount("==", 1)
})

pvc := test.PersistentVolumeClaim(test.PersistentVolumeClaimOptions{
ObjectMeta: metav1.ObjectMeta{
Name: "ebs-claim",
},
StorageClassName: aws.String(storageClassName),
Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("5Gi")}},
It("should run a pod with a pre-bound persistent volume (non-existent storage class)", func() {
pvc := test.PersistentVolumeClaim(test.PersistentVolumeClaimOptions{
VolumeName: "test-volume",
StorageClassName: lo.ToPtr("non-existent-storage-class"),
})
pv := test.PersistentVolume(test.PersistentVolumeOptions{
ObjectMeta: metav1.ObjectMeta{
Name: pvc.Spec.VolumeName,
},
StorageClassName: "non-existent-storage-class",
})
pod := test.Pod(test.PodOptions{
PersistentVolumeClaims: []string{pvc.Name},
})
env.ExpectCreated(nodeClass, nodePool, pv, pvc, pod)
env.EventuallyExpectHealthy(pod)
env.ExpectCreatedNodeCount("==", 1)
})

pod := test.Pod(test.PodOptions{
PersistentVolumeClaims: []string{pvc.Name},
It("should run a pod with a pre-bound persistent volume while respecting topology constraints", func() {
subnets := env.GetSubnets(map[string]string{"karpenter.sh/discovery": env.ClusterName})
shuffledAZs := lo.Shuffle(lo.Keys(subnets))

pvc := test.PersistentVolumeClaim(test.PersistentVolumeClaimOptions{
StorageClassName: lo.ToPtr("non-existent-storage-class"),
})
pv := test.PersistentVolume(test.PersistentVolumeOptions{
ObjectMeta: metav1.ObjectMeta{
Name: pvc.Spec.VolumeName,
},
StorageClassName: "non-existent-storage-class",
Zones: []string{shuffledAZs[0]},
})
pod := test.Pod(test.PodOptions{
PersistentVolumeClaims: []string{pvc.Name},
})
env.ExpectCreated(nodeClass, nodePool, pv, pvc, pod)
env.EventuallyExpectHealthy(pod)
env.ExpectCreatedNodeCount("==", 1)
})

env.ExpectCreated(nodeClass, nodePool, sc, pvc, pod)
env.EventuallyExpectHealthy(pod)
env.ExpectCreatedNodeCount("==", 1)
env.ExpectDeleted(pod)
})
})

var _ = Describe("Static PVC", func() {
It("should run a pod with a static persistent volume", func() {
storageClassName := "nfs-test"
bindMode := storagev1.VolumeBindingWaitForFirstConsumer
sc := test.StorageClass(test.StorageClassOptions{
ObjectMeta: metav1.ObjectMeta{
Name: storageClassName,
},
VolumeBindingMode: &bindMode,
Context("Dynamic", func() {
var storageClass *storagev1.StorageClass
BeforeEach(func() {
// Ensure that the EBS driver is installed, or we can't run the test.
var ds appsv1.DaemonSet
if err := env.Client.Get(env.Context, client.ObjectKey{
Namespace: "kube-system",
Name: "ebs-csi-node",
}, &ds); err != nil {
if errors.IsNotFound(err) {
Skip(fmt.Sprintf("skipping dynamic PVC test due to missing EBS driver %s", err))
} else {
Fail(fmt.Sprintf("determining EBS driver status, %s", err))
}
}
storageClass = test.StorageClass(test.StorageClassOptions{
ObjectMeta: metav1.ObjectMeta{
Name: "test-storage-class",
},
Provisioner: aws.String("ebs.csi.aws.com"),
VolumeBindingMode: lo.ToPtr(storagev1.VolumeBindingWaitForFirstConsumer),
})
ExpectSetEBSDriverLimit(1)
})

pv := test.PersistentVolume(test.PersistentVolumeOptions{
ObjectMeta: metav1.ObjectMeta{Name: "nfs-test-volume"},
StorageClassName: "nfs-test",
AfterEach(func() {
ExpectRemoveEBSDriverLimit()
})

// the server here doesn't need to actually exist for the pod to start running
pv.Spec.NFS = &v1.NFSVolumeSource{
Server: "fake.server",
Path: "/some/path",
}
pv.Spec.CSI = nil

pvc := test.PersistentVolumeClaim(test.PersistentVolumeClaimOptions{
ObjectMeta: metav1.ObjectMeta{
Name: "nfs-claim",
},
StorageClassName: aws.String(storageClassName),
VolumeName: pv.Name,
Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("5Gi")}},
It("should run a pod with a dynamic persistent volume", func() {
pvc := test.PersistentVolumeClaim(test.PersistentVolumeClaimOptions{
StorageClassName: &storageClass.Name,
})
pod := test.Pod(test.PodOptions{
PersistentVolumeClaims: []string{pvc.Name},
})

env.ExpectCreated(nodeClass, nodePool, storageClass, pvc, pod)
env.EventuallyExpectHealthy(pod)
env.ExpectCreatedNodeCount("==", 1)
})

pod := test.Pod(test.PodOptions{
PersistentVolumeClaims: []string{pvc.Name},
It("should run a pod with a dynamic persistent volume while respecting allowed topologies", func() {
subnets := env.GetSubnets(map[string]string{"karpenter.sh/discovery": env.ClusterName})
shuffledAZs := lo.Shuffle(lo.Keys(subnets))

storageClass.AllowedTopologies = []v1.TopologySelectorTerm{{
MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{{
Key: "topology.ebs.csi.aws.com/zone",
Values: []string{shuffledAZs[0]},
}},
}}

pvc := test.PersistentVolumeClaim(test.PersistentVolumeClaimOptions{
StorageClassName: &storageClass.Name,
})
pod := test.Pod(test.PodOptions{
PersistentVolumeClaims: []string{pvc.Name},
})

env.ExpectCreated(nodeClass, nodePool, storageClass, pvc, pod)
env.EventuallyExpectHealthy(pod)
env.ExpectCreatedNodeCount("==", 1)
})
It("should run a pod with a dynamic persistent volume while respecting volume limits", func() {
count := 2
pvcs := lo.Times(count, func(_ int) *v1.PersistentVolumeClaim {
return test.PersistentVolumeClaim(test.PersistentVolumeClaimOptions{
StorageClassName: &storageClass.Name,
})
})
pods := lo.Map(pvcs, func(pvc *v1.PersistentVolumeClaim, _ int) *v1.Pod {
return test.Pod(test.PodOptions{
PersistentVolumeClaims: []string{pvc.Name},
})
})
env.ExpectCreated(nodeClass, nodePool, storageClass, pvcs[0], pods[0])
env.EventuallyExpectHealthy(pods[0])
env.ExpectCreatedNodeCount("==", 1)
env.ExpectCreated(pvcs[1], pods[1])
env.EventuallyExpectHealthy(pods[1])
env.ExpectCreatedNodeCount("==", 2)
})
It("should run a pod with a generic ephemeral volume", func() {
pod := test.Pod(test.PodOptions{
EphemeralVolumeTemplates: []test.EphemeralVolumeTemplateOptions{{
StorageClassName: &storageClass.Name,
}},
})

env.ExpectCreated(nodeClass, nodePool, storageClass, pod)
env.EventuallyExpectHealthy(pod)
env.ExpectCreatedNodeCount("==", 1)
})

env.ExpectCreated(nodeClass, nodePool, sc, pv, pvc, pod)
env.EventuallyExpectHealthy(pod)
env.ExpectCreatedNodeCount("==", 1)
env.ExpectDeleted(pod)
})
})

// ExpectSetEBSDriverLimit patches the ebs-csi-node DaemonSet so the ebs-plugin
// container advertises at most limit attachable volumes per node
// (--volume-attach-limit). Idempotent: any existing limit flag is replaced
// rather than duplicated, so repeated BeforeEach invocations are safe.
// Revert with ExpectRemoveEBSDriverLimit.
func ExpectSetEBSDriverLimit(limit int) {
	GinkgoHelper()
	ds := &appsv1.DaemonSet{}
	Expect(env.Client.Get(env.Context, client.ObjectKey{Namespace: "kube-system", Name: "ebs-csi-node"}, ds)).To(Succeed())
	stored := ds.DeepCopy()

	containers := ds.Spec.Template.Spec.Containers
	for i := range containers {
		if containers[i].Name != "ebs-plugin" {
			continue
		}
		// Drop any previously injected limit before appending the new one so the
		// args never accumulate conflicting --volume-attach-limit flags.
		args := containers[i].Args[:0]
		for _, arg := range containers[i].Args {
			if !strings.HasPrefix(arg, "--volume-attach-limit") {
				args = append(args, arg)
			}
		}
		containers[i].Args = append(args, fmt.Sprintf("--volume-attach-limit=%d", limit))
		break
	}
	Expect(env.Client.Patch(env.Context, ds, client.MergeFrom(stored))).To(Succeed())
}

// ExpectRemoveEBSDriverLimit reverts ExpectSetEBSDriverLimit by stripping every
// --volume-attach-limit argument from the ebs-plugin container of the
// ebs-csi-node DaemonSet and patching the result back to the cluster.
func ExpectRemoveEBSDriverLimit() {
	GinkgoHelper()
	ds := &appsv1.DaemonSet{}
	Expect(env.Client.Get(env.Context, client.ObjectKey{Namespace: "kube-system", Name: "ebs-csi-node"}, ds)).To(Succeed())
	// Keep a pristine copy so the patch only carries the argument change.
	stored := ds.DeepCopy()

	for i := range ds.Spec.Template.Spec.Containers {
		c := &ds.Spec.Template.Spec.Containers[i]
		if c.Name != "ebs-plugin" {
			continue
		}
		kept := make([]string, 0, len(c.Args))
		for _, arg := range c.Args {
			if !strings.Contains(arg, "--volume-attach-limit") {
				kept = append(kept, arg)
			}
		}
		c.Args = kept
		break
	}
	Expect(env.Client.Patch(env.Context, ds, client.MergeFrom(stored))).To(Succeed())
}

0 comments on commit 740a1af

Please sign in to comment.