cleanup dot imports and make test error checking more readable in test/e2e/scheduling #77714

Merged: 3 commits, May 15, 2019
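The change is mechanical and repeats the same pattern in every touched file: the dot imports of Ginkgo and Gomega are replaced with ordinary qualified imports, and bare Expect(err).To(HaveOccurred()) checks become framework.ExpectError(err). Below is a minimal sketch of the target style, not part of the diff itself; the package clause, test name, and pod lookups are invented for illustration, while the import form, gomega.Expect, and framework.ExpectError match what the diff uses.

package scheduling

import (
	"github.com/onsi/ginkgo" // was: . "github.com/onsi/ginkgo"
	"github.com/onsi/gomega" // was: . "github.com/onsi/gomega"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// Hypothetical spec showing the qualified style this PR converts to.
var _ = SIGDescribe("Example", func() {
	f := framework.NewDefaultFramework("example")

	ginkgo.It("uses qualified Ginkgo/Gomega identifiers", func() { // was: It(...)
		ginkgo.By("listing pods in the test namespace") // was: By(...)
		pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
		gomega.Expect(err).NotTo(gomega.HaveOccurred()) // was: Expect(err).NotTo(HaveOccurred())
		gomega.Expect(pods.Items).NotTo(gomega.BeNil())

		ginkgo.By("asking for a pod that cannot exist")
		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get("no-such-pod", metav1.GetOptions{})
		framework.ExpectError(err) // was: Expect(err).To(HaveOccurred())
	})
})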
1 change: 0 additions & 1 deletion hack/.golint_failures
@@ -605,7 +605,6 @@ test/e2e/common
test/e2e/framework
test/e2e/lifecycle/bootstrap
test/e2e/scalability
- test/e2e/scheduling
test/e2e/storage/drivers
test/e2e/storage/testsuites
test/e2e/storage/utils
46 changes: 24 additions & 22 deletions test/e2e/scheduling/equivalence_cache_predicates.go
@@ -31,8 +31,9 @@ import (
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
// ensure libs have a chance to initialize
_ "github.com/stretchr/testify/assert"
)

@@ -48,7 +49,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
var ns string
f := framework.NewDefaultFramework("equivalence-cache")

- BeforeEach(func() {
+ ginkgo.BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace.Name

@@ -61,7 +62,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
// cannot be run in parallel with any other test that touches Nodes or Pods.
// It is so because we need to have precise control on what's running in the cluster.
systemPods, err := framework.GetPodsInNamespace(cs, ns, map[string]string{})
- Expect(err).NotTo(HaveOccurred())
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
systemPodsNo = 0
for _, pod := range systemPods {
if !masterNodes.Has(pod.Spec.NodeName) && pod.DeletionTimestamp == nil {
@@ -70,7 +71,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
}

err = framework.WaitForPodsRunningReady(cs, api.NamespaceSystem, int32(systemPodsNo), int32(systemPodsNo), framework.PodReadyBeforeTimeout, map[string]string{})
- Expect(err).NotTo(HaveOccurred())
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())

for _, node := range nodeList.Items {
e2elog.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name)
@@ -83,16 +84,16 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
// When a replica pod (with HostPorts) is scheduled to a node, it will invalidate GeneralPredicates cache on this node,
// so that subsequent replica pods with same host port claim will be rejected.
// We enforce all replica pods bind to the same node so there will always be conflicts.
It("validates GeneralPredicates is properly invalidated when a pod is scheduled [Slow]", func() {
By("Launching a RC with two replica pods with HostPorts")
ginkgo.It("validates GeneralPredicates is properly invalidated when a pod is scheduled [Slow]", func() {
ginkgo.By("Launching a RC with two replica pods with HostPorts")
nodeName := getNodeThatCanRunPodWithoutToleration(f)
rcName := "host-port"

// bind all replicas to same node
nodeSelector := map[string]string{"kubernetes.io/hostname": nodeName}

By("One pod should be scheduled, the other should be rejected")
// CreateNodeSelectorPods creates RC with host port 4312
ginkgo.By("One pod should be scheduled, the other should be rejected")
// CreateNodeSelectorPods creates RC with host port 4321
WaitForSchedulerAfterAction(f, func() error {
err := CreateNodeSelectorPods(f, rcName, 2, nodeSelector, false)
return err
@@ -105,11 +106,11 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
// This test verifies that MatchInterPodAffinity works as expected.
// In equivalence cache, it does not handle inter pod affinity (anti-affinity) specially (unless node label changed),
// because current predicates algorithm will ensure newly scheduled pod does not break existing affinity in cluster.
It("validates pod affinity works properly when new replica pod is scheduled", func() {
ginkgo.It("validates pod affinity works properly when new replica pod is scheduled", func() {
// create a pod running with label {security: S1}, and choose this node
nodeName, _ := runAndKeepPodWithLabelAndGetNodeName(f)

By("Trying to apply a random label on the found node.")
ginkgo.By("Trying to apply a random label on the found node.")
// we need to use real failure domains, since scheduler only know them
k := "failure-domain.beta.kubernetes.io/zone"
v := "equivalence-e2e-test"
@@ -118,7 +119,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
// restore the node label
defer framework.AddOrUpdateLabelOnNode(cs, nodeName, k, oldValue)

By("Trying to schedule RC with Pod Affinity should success.")
ginkgo.By("Trying to schedule RC with Pod Affinity should success.")
framework.WaitForStableCluster(cs, masterNodes)
affinityRCName := "with-pod-affinity-" + string(uuid.NewUUID())
replica := 2
@@ -154,10 +155,10 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForControlledPodsRunning(cs, ns, affinityRCName, api.Kind("ReplicationController")))

By("Remove node failure domain label")
ginkgo.By("Remove node failure domain label")
framework.RemoveLabelOffNode(cs, nodeName, k)

By("Trying to schedule another equivalent Pod should fail due to node label has been removed.")
ginkgo.By("Trying to schedule another equivalent Pod should fail due to node label has been removed.")
// use scale to create another equivalent pod and wait for failure event
WaitForSchedulerAfterAction(f, func() error {
err := framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, affinityRCName, uint(replica+1), false)
@@ -168,17 +169,17 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
})

// This test verifies that MatchInterPodAffinity (anti-affinity) is respected as expected.
It("validates pod anti-affinity works properly when new replica pod is scheduled", func() {
By("Launching two pods on two distinct nodes to get two node names")
ginkgo.It("validates pod anti-affinity works properly when new replica pod is scheduled", func() {
ginkgo.By("Launching two pods on two distinct nodes to get two node names")
CreateHostPortPods(f, "host-port", 2, true)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, ns, "host-port")
podList, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{})
framework.ExpectNoError(err)
- Expect(len(podList.Items)).To(Equal(2))
+ gomega.Expect(len(podList.Items)).To(gomega.Equal(2))
nodeNames := []string{podList.Items[0].Spec.NodeName, podList.Items[1].Spec.NodeName}
- Expect(nodeNames[0]).ToNot(Equal(nodeNames[1]))
+ gomega.Expect(nodeNames[0]).ToNot(gomega.Equal(nodeNames[1]))

By("Applying a random label to both nodes.")
ginkgo.By("Applying a random label to both nodes.")
k := "e2e.inter-pod-affinity.kubernetes.io/zone"
v := "equivalence-e2etest"
for _, nodeName := range nodeNames {
@@ -187,15 +188,15 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
defer framework.RemoveLabelOffNode(cs, nodeName, k)
}

By("Trying to launch a pod with the service label on the selected nodes.")
ginkgo.By("Trying to launch a pod with the service label on the selected nodes.")
// run a pod with label {"service": "S1"} and expect it to be running
runPausePod(f, pausePodConfig{
Name: "with-label-" + string(uuid.NewUUID()),
Labels: map[string]string{"service": "S1"},
NodeSelector: map[string]string{k: v}, // only launch on our two nodes
})

By("Trying to launch RC with podAntiAffinity on these two nodes should be rejected.")
ginkgo.By("Trying to launch RC with podAntiAffinity on these two nodes should be rejected.")
labelRCName := "with-podantiaffinity-" + string(uuid.NewUUID())
replica := 2
labelsMap := map[string]string{
@@ -269,8 +270,9 @@ func getRCWithInterPodAffinityNodeSelector(name string, labelsMap map[string]str
}
}

+ // CreateNodeSelectorPods creates RC with host port 4321 and defines node selector
func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nodeSelector map[string]string, expectRunning bool) error {
- By(fmt.Sprintf("Running RC which reserves host port and defines node selector"))
+ ginkgo.By(fmt.Sprintf("Running RC which reserves host port and defines node selector"))

config := &testutils.RCConfig{
Client: f.ClientSet,
1 change: 1 addition & 0 deletions test/e2e/scheduling/framework.go
@@ -18,6 +18,7 @@ package scheduling

import "github.com/onsi/ginkgo"

+ // SIGDescribe annotates the test with the SIG label.
func SIGDescribe(text string, body func()) bool {
return ginkgo.Describe("[sig-scheduling] "+text, body)
}
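For context on how this helper is consumed (a hedged sketch that simply mirrors the limit_range.go spec below, adding nothing new to the PR): the string passed to SIGDescribe becomes part of the spec name, so containers registered through it show up in test reports under the "[sig-scheduling]" prefix.

// Registers a Ginkgo container named "[sig-scheduling] LimitRange".
var _ = SIGDescribe("LimitRange", func() {
	ginkgo.It("should create a LimitRange with defaults and ensure pod has those defaults applied.", func() {
		// test body elided
	})
})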
90 changes: 45 additions & 45 deletions test/e2e/scheduling/limit_range.go
@@ -30,8 +30,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)

const (
@@ -41,8 +41,8 @@
var _ = SIGDescribe("LimitRange", func() {
f := framework.NewDefaultFramework("limitrange")

It("should create a LimitRange with defaults and ensure pod has those defaults applied.", func() {
By("Creating a LimitRange")
ginkgo.It("should create a LimitRange with defaults and ensure pod has those defaults applied.", func() {
ginkgo.By("Creating a LimitRange")

min := getResourceList("50m", "100Mi", "100Gi")
max := getResourceList("500m", "500Mi", "500Gi")
@@ -54,24 +54,24 @@ var _ = SIGDescribe("LimitRange", func() {
defaultLimit, defaultRequest,
maxLimitRequestRatio)

By("Setting up watch")
ginkgo.By("Setting up watch")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": limitRange.Name}))
options := metav1.ListOptions{LabelSelector: selector.String()}
limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for limitRanges")
Expect(len(limitRanges.Items)).To(Equal(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for limitRanges")
gomega.Expect(len(limitRanges.Items)).To(gomega.Equal(0))
options = metav1.ListOptions{
LabelSelector: selector.String(),
ResourceVersion: limitRanges.ListMeta.ResourceVersion,
}
w, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Watch(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred(), "failed to set up watch")
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to set up watch")

By("Submitting a LimitRange")
ginkgo.By("Submitting a LimitRange")
limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(limitRange)
- Expect(err).NotTo(HaveOccurred())
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())

By("Verifying LimitRange creation was observed")
ginkgo.By("Verifying LimitRange creation was observed")
select {
case event, _ := <-w.ResultChan():
if event.Type != watch.Added {
@@ -81,39 +81,39 @@ var _ = SIGDescribe("LimitRange", func() {
framework.Failf("Timeout while waiting for LimitRange creation")
}

By("Fetching the LimitRange to ensure it has proper values")
ginkgo.By("Fetching the LimitRange to ensure it has proper values")
limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(limitRange.Name, metav1.GetOptions{})
- Expect(err).NotTo(HaveOccurred())
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
expected := v1.ResourceRequirements{Requests: defaultRequest, Limits: defaultLimit}
actual := v1.ResourceRequirements{Requests: limitRange.Spec.Limits[0].DefaultRequest, Limits: limitRange.Spec.Limits[0].Default}
err = equalResourceRequirement(expected, actual)
- Expect(err).NotTo(HaveOccurred())
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())

By("Creating a Pod with no resource requirements")
ginkgo.By("Creating a Pod with no resource requirements")
pod := f.NewTestPod("pod-no-resources", v1.ResourceList{}, v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
- Expect(err).NotTo(HaveOccurred())
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())

By("Ensuring Pod has resource requirements applied from LimitRange")
ginkgo.By("Ensuring Pod has resource requirements applied from LimitRange")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
- Expect(err).NotTo(HaveOccurred())
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
for i := range pod.Spec.Containers {
err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
if err != nil {
// Print the pod to help in debugging.
e2elog.Logf("Pod %+v does not have the expected requirements", pod)
- Expect(err).NotTo(HaveOccurred())
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
}

By("Creating a Pod with partial resource requirements")
ginkgo.By("Creating a Pod with partial resource requirements")
pod = f.NewTestPod("pod-partial-resources", getResourceList("", "150Mi", "150Gi"), getResourceList("300m", "", ""))
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
- Expect(err).NotTo(HaveOccurred())
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())

By("Ensuring Pod has merged resource requirements applied from LimitRange")
ginkgo.By("Ensuring Pod has merged resource requirements applied from LimitRange")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
- Expect(err).NotTo(HaveOccurred())
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
// This is an interesting case, so it's worth a comment
// If you specify a Limit, and no Request, the Limit will default to the Request
// This means that the LimitRange.DefaultRequest will ONLY take affect if a container.resources.limit is not supplied
@@ -123,49 +123,49 @@ var _ = SIGDescribe("LimitRange", func() {
if err != nil {
// Print the pod to help in debugging.
e2elog.Logf("Pod %+v does not have the expected requirements", pod)
- Expect(err).NotTo(HaveOccurred())
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
}

By("Failing to create a Pod with less than min resources")
ginkgo.By("Failing to create a Pod with less than min resources")
pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
- Expect(err).To(HaveOccurred())
+ framework.ExpectError(err)

By("Failing to create a Pod with more than max resources")
ginkgo.By("Failing to create a Pod with more than max resources")
pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
- Expect(err).To(HaveOccurred())
+ framework.ExpectError(err)

By("Updating a LimitRange")
ginkgo.By("Updating a LimitRange")
newMin := getResourceList("9m", "49Mi", "49Gi")
limitRange.Spec.Limits[0].Min = newMin
limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Update(limitRange)
- Expect(err).NotTo(HaveOccurred())
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())

By("Verifying LimitRange updating is effective")
Expect(wait.Poll(time.Second*2, time.Second*20, func() (bool, error) {
ginkgo.By("Verifying LimitRange updating is effective")
gomega.Expect(wait.Poll(time.Second*2, time.Second*20, func() (bool, error) {
limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(limitRange.Name, metav1.GetOptions{})
- Expect(err).NotTo(HaveOccurred())
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
return reflect.DeepEqual(limitRange.Spec.Limits[0].Min, newMin), nil
- })).NotTo(HaveOccurred())
+ })).NotTo(gomega.HaveOccurred())

By("Creating a Pod with less than former min resources")
ginkgo.By("Creating a Pod with less than former min resources")
pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
- Expect(err).NotTo(HaveOccurred())
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())

By("Failing to create a Pod with more than max resources")
ginkgo.By("Failing to create a Pod with more than max resources")
pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
- Expect(err).To(HaveOccurred())
+ framework.ExpectError(err)

By("Deleting a LimitRange")
ginkgo.By("Deleting a LimitRange")
err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Delete(limitRange.Name, metav1.NewDeleteOptions(30))
- Expect(err).NotTo(HaveOccurred())
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())

By("Verifying the LimitRange was deleted")
Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
ginkgo.By("Verifying the LimitRange was deleted")
gomega.Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": limitRange.Name}))
options := metav1.ListOptions{LabelSelector: selector.String()}
limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options)
@@ -190,12 +190,12 @@ var _ = SIGDescribe("LimitRange", func() {

return false, nil

})).NotTo(HaveOccurred(), "kubelet never observed the termination notice")
})).NotTo(gomega.HaveOccurred(), "kubelet never observed the termination notice")

By("Creating a Pod with more than former max resources")
ginkgo.By("Creating a Pod with more than former max resources")
pod = f.NewTestPod(podName+"2", getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
- Expect(err).NotTo(HaveOccurred())
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
})

})