Remove deleteTests from test; Rename test in ginkgo.It statement
BobyMCbobs committed Aug 5, 2019
1 parent edc6d60 · commit 03cb4bf
Showing 1 changed file with 41 additions and 51 deletions.
test/e2e/node/kubelet.go: 92 changes (41 additions & 51 deletions)
@@ -256,14 +256,6 @@ var _ = SIGDescribe("kubelet", func() {
             nodeLabels map[string]string
             resourceMonitor *framework.ResourceMonitor
         )
-        type DeleteTest struct {
-            podsPerNode int
-            timeout time.Duration
-        }
-
-        deleteTests := []DeleteTest{
-            {podsPerNode: 10, timeout: 1 * time.Minute},
-        }
 
         ginkgo.BeforeEach(func() {
             // Use node labels to restrict the pods to be assigned only to the
@@ -309,50 +301,48 @@ var _ = SIGDescribe("kubelet", func() {
             }
         })
 
-        for _, itArg := range deleteTests {
-            ginkgo.It("should let Kubelet delete x pods per node in n minutes", func() {
-                totalPods := itArg.podsPerNode * numNodes
-                ginkgo.By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
-                rcName := fmt.Sprintf("cleanup%d-%s", totalPods, string(uuid.NewUUID()))
-
-                err := framework.RunRC(testutils.RCConfig{
-                    Client: f.ClientSet,
-                    Name: rcName,
-                    Namespace: f.Namespace.Name,
-                    Image: imageutils.GetPauseImageName(),
-                    Replicas: totalPods,
-                    NodeSelector: nodeLabels,
-                })
-                framework.ExpectNoError(err)
-                // Perform a sanity check so that we know all desired pods are
-                // running on the nodes according to kubelet. The timeout is set to
-                // only 30 seconds here because framework.RunRC already waited for all pods to
-                // transition to the running status.
-                err = waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, totalPods, time.Second*30)
-                framework.ExpectNoError(err)
-                if resourceMonitor != nil {
-                    resourceMonitor.LogLatest()
-                }
-
-                ginkgo.By("Deleting the RC")
-                framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName)
-                // Check that the pods really are gone by querying /runningpods on the
-                // node. The /runningpods handler checks the container runtime (or its
-                // cache) and returns a list of running pods. Some possible causes of
-                // failures are:
-                // - kubelet deadlock
-                // - a bug in graceful termination (if it is enabled)
-                // - docker slow to delete pods (or resource problems causing slowness)
-                start := time.Now()
-                err = waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, 0, itArg.timeout)
-                framework.ExpectNoError(err)
-                e2elog.Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames),
-                    time.Since(start))
-                if resourceMonitor != nil {
-                    resourceMonitor.LogCPUSummary()
-                }
-            })
-        }
+        ginkgo.It("should let Kubelet delete 10 pods per node with in 1 minute", func() {
+            totalPods := 10 * numNodes
+            ginkgo.By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
+            rcName := fmt.Sprintf("cleanup%d-%s", totalPods, string(uuid.NewUUID()))
+
+            err := framework.RunRC(testutils.RCConfig{
+                Client: f.ClientSet,
+                Name: rcName,
+                Namespace: f.Namespace.Name,
+                Image: imageutils.GetPauseImageName(),
+                Replicas: totalPods,
+                NodeSelector: nodeLabels,
+            })
+            framework.ExpectNoError(err)
+            // Perform a sanity check so that we know all desired pods are
+            // running on the nodes according to kubelet. The timeout is set to
+            // only 30 seconds here because framework.RunRC already waited for all pods to
+            // transition to the running status.
+            err = waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, totalPods, time.Second*30)
+            framework.ExpectNoError(err)
+            if resourceMonitor != nil {
+                resourceMonitor.LogLatest()
+            }
+
+            ginkgo.By("Deleting the RC")
+            framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName)
+            // Check that the pods really are gone by querying /runningpods on the
+            // node. The /runningpods handler checks the container runtime (or its
+            // cache) and returns a list of running pods. Some possible causes of
+            // failures are:
+            // - kubelet deadlock
+            // - a bug in graceful termination (if it is enabled)
+            // - docker slow to delete pods (or resource problems causing slowness)
+            start := time.Now()
+            err = waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, 0, 1*time.Minute)
+            framework.ExpectNoError(err)
+            e2elog.Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames),
+                time.Since(start))
+            if resourceMonitor != nil {
+                resourceMonitor.LogCPUSummary()
+            }
+        })
 
     })
 
     // Test host cleanup when disrupting the volume environment.
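The structural change in this commit reduces to the pattern sketched below: a table-driven Ginkgo block whose table holds only one entry is collapsed into a single ginkgo.It with the pod count and timeout inlined. This is a minimal, self-contained sketch rather than code from the repository; the package name, the go test bootstrap, and the placeholder spec bodies are assumptions added only so the example compiles and runs on its own with Ginkgo v1 and Gomega.

package cleanup_sketch_test

import (
    "fmt"
    "testing"
    "time"

    "github.com/onsi/ginkgo"
    "github.com/onsi/gomega"
)

// TestCleanupSketch wires the specs below into `go test`.
func TestCleanupSketch(t *testing.T) {
    gomega.RegisterFailHandler(ginkgo.Fail)
    ginkgo.RunSpecs(t, "cleanup sketch")
}

// Before this commit: a table with a single entry drives the spec, so the
// pod count and timeout reach the test body through the loop variable.
var _ = ginkgo.Describe("table-driven form (removed)", func() {
    type deleteTest struct {
        podsPerNode int
        timeout     time.Duration
    }
    deleteTests := []deleteTest{
        {podsPerNode: 10, timeout: 1 * time.Minute},
    }
    for _, itArg := range deleteTests {
        itArg := itArg // capture the range variable for the closure
        ginkgo.It(fmt.Sprintf("should delete %d pods per node in %v", itArg.podsPerNode, itArg.timeout), func() {
            // placeholder: test body parameterised by itArg.podsPerNode and itArg.timeout
        })
    }
})

// After this commit: the single case is inlined and the values appear
// directly in the spec name and body.
var _ = ginkgo.Describe("inlined form (added)", func() {
    ginkgo.It("should delete 10 pods per node in 1 minute", func() {
        // placeholder: same body with 10 and 1*time.Minute hard-coded
    })
})

Inlining the single case keeps the spec name concrete in test reports; the trade-off, as the removed DeleteTest table shows, is that covering another pod count or timeout now requires editing the It itself rather than appending a table entry.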
