Merge pull request #81685 from oomichi/e2elog-framework-p-r
Use log functions of core framework on [r-u]
k8s-ci-robot committed Sep 27, 2019
2 parents 354a812 + 6499f93 commit d92a250
Showing 7 changed files with 180 additions and 188 deletions.
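
Every file in this diff follows the same mechanical pattern: code that already lives in package framework drops the aliased import of test/e2e/framework/log and calls the package-local Logf/Failf helpers directly, which in turn lets the //test/e2e/framework/log entry be removed from the BUILD deps. A minimal standalone sketch of the pattern is below; the stub Logf is illustrative only, standing in for the core framework helper, and the example call site mirrors one of the changed lines:

    package main

    import "fmt"

    // Logf stands in for the core framework's Logf helper. Inside package
    // framework the helper is called unqualified, which is exactly the change
    // applied to every call site in this commit.
    func Logf(format string, args ...interface{}) {
        fmt.Printf("INFO: "+format+"\n", args...)
    }

    func main() {
        // Before: e2elog.Logf("ReplicationController %s in namespace %s found.", name, namespace)
        // After:  Logf("ReplicationController %s in namespace %s found.", name, namespace)
        Logf("ReplicationController %s in namespace %s found.", "my-rc", "default")
    }

The net effect is purely organizational: the format strings and arguments are unchanged throughout, only the package qualifier on the call sites goes away.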
1 change: 0 additions & 1 deletion test/e2e/framework/BUILD
@@ -76,7 +76,6 @@ go_library(
 "//staging/src/k8s.io/component-base/version:go_default_library",
 "//test/e2e/framework/ginkgowrapper:go_default_library",
 "//test/e2e/framework/kubelet:go_default_library",
-"//test/e2e/framework/log:go_default_library",
 "//test/e2e/framework/metrics:go_default_library",
 "//test/e2e/framework/node:go_default_library",
 "//test/e2e/framework/pod:go_default_library",
23 changes: 11 additions & 12 deletions test/e2e/framework/rc_util.go
@@ -30,7 +30,6 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 scaleclient "k8s.io/client-go/scale"
 api "k8s.io/kubernetes/pkg/apis/core"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 testutils "k8s.io/kubernetes/test/utils"
 )
@@ -101,7 +100,7 @@ func UpdateReplicationControllerWithRetries(c clientset.Interface, namespace, na
 // Apply the update, then attempt to push it to the apiserver.
 applyUpdate(rc)
 if rc, err = c.CoreV1().ReplicationControllers(namespace).Update(rc); err == nil {
-e2elog.Logf("Updating replication controller %q", name)
+Logf("Updating replication controller %q", name)
 return true, nil
 }
 updateErr = err
@@ -147,10 +146,10 @@ func WaitForReplicationController(c clientset.Interface, namespace, name string,
 err := wait.PollImmediate(interval, timeout, func() (bool, error) {
 _, err := c.CoreV1().ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
 if err != nil {
-e2elog.Logf("Get ReplicationController %s in namespace %s failed (%v).", name, namespace, err)
+Logf("Get ReplicationController %s in namespace %s failed (%v).", name, namespace, err)
 return !exist, nil
 }
-e2elog.Logf("ReplicationController %s in namespace %s found.", name, namespace)
+Logf("ReplicationController %s in namespace %s found.", name, namespace)
 return exist, nil
 })
 if err != nil {
@@ -167,13 +166,13 @@ func WaitForReplicationControllerwithSelector(c clientset.Interface, namespace s
 rcs, err := c.CoreV1().ReplicationControllers(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
 switch {
 case len(rcs.Items) != 0:
-e2elog.Logf("ReplicationController with %s in namespace %s found.", selector.String(), namespace)
+Logf("ReplicationController with %s in namespace %s found.", selector.String(), namespace)
 return exist, nil
 case len(rcs.Items) == 0:
-e2elog.Logf("ReplicationController with %s in namespace %s disappeared.", selector.String(), namespace)
+Logf("ReplicationController with %s in namespace %s disappeared.", selector.String(), namespace)
 return !exist, nil
 default:
-e2elog.Logf("List ReplicationController with %s in namespace %s failed: %v", selector.String(), namespace, err)
+Logf("List ReplicationController with %s in namespace %s failed: %v", selector.String(), namespace, err)
 return false, nil
 }
 })
@@ -230,25 +229,25 @@ waitLoop:
 for _, podID := range pods {
 running := RunKubectlOrDie("get", "pods", podID, "-o", "template", getContainerStateTemplate, fmt.Sprintf("--namespace=%v", ns))
 if running != "true" {
-e2elog.Logf("%s is created but not running", podID)
+Logf("%s is created but not running", podID)
 continue waitLoop
 }

 currentImage := RunKubectlOrDie("get", "pods", podID, "-o", "template", getImageTemplate, fmt.Sprintf("--namespace=%v", ns))
 currentImage = trimDockerRegistry(currentImage)
 if currentImage != containerImage {
-e2elog.Logf("%s is created but running wrong image; expected: %s, actual: %s", podID, containerImage, currentImage)
+Logf("%s is created but running wrong image; expected: %s, actual: %s", podID, containerImage, currentImage)
 continue waitLoop
 }

 // Call the generic validator function here.
 // This might validate for example, that (1) getting a url works and (2) url is serving correct content.
 if err := validator(c, podID); err != nil {
-e2elog.Logf("%s is running right image but validator function failed: %v", podID, err)
+Logf("%s is running right image but validator function failed: %v", podID, err)
 continue waitLoop
 }

-e2elog.Logf("%s is verified up and running", podID)
+Logf("%s is verified up and running", podID)
 runningPods = append(runningPods, podID)
 }
 // If we reach here, then all our checks passed.
@@ -257,5 +256,5 @@
 }
 }
 // Reaching here means that one of more checks failed multiple times. Assuming its not a race condition, something is broken.
-e2elog.Failf("Timed out after %v seconds waiting for %s pods to reach valid state", PodStartTimeout.Seconds(), testname)
+Failf("Timed out after %v seconds waiting for %s pods to reach valid state", PodStartTimeout.Seconds(), testname)
 }
19 changes: 9 additions & 10 deletions test/e2e/framework/resource_usage_gatherer.go
@@ -32,7 +32,6 @@ import (
 utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 clientset "k8s.io/client-go/kubernetes"
 e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/system"
 )

@@ -180,13 +179,13 @@ func (w *resourceGatherWorker) singleProbe() {
 } else {
 nodeUsage, err := e2ekubelet.GetOneTimeResourceUsageOnNode(w.c, w.nodeName, w.probeDuration, func() []string { return w.containerIDs })
 if err != nil {
-e2elog.Logf("Error while reading data from %v: %v", w.nodeName, err)
+Logf("Error while reading data from %v: %v", w.nodeName, err)
 return
 }
 for k, v := range nodeUsage {
 data[k] = v
 if w.printVerboseLogs {
-e2elog.Logf("Get container %v usage on node %v. CPUUsageInCores: %v, MemoryUsageInBytes: %v, MemoryWorkingSetInBytes: %v", k, w.nodeName, v.CPUUsageInCores, v.MemoryUsageInBytes, v.MemoryWorkingSetInBytes)
+Logf("Get container %v usage on node %v. CPUUsageInCores: %v, MemoryUsageInBytes: %v, MemoryWorkingSetInBytes: %v", k, w.nodeName, v.CPUUsageInCores, v.MemoryUsageInBytes, v.MemoryWorkingSetInBytes)
 }
 }
 }
@@ -196,7 +195,7 @@ func (w *resourceGatherWorker) singleProbe() {
 func (w *resourceGatherWorker) gather(initialSleep time.Duration) {
 defer utilruntime.HandleCrash()
 defer w.wg.Done()
-defer e2elog.Logf("Closing worker for %v", w.nodeName)
+defer Logf("Closing worker for %v", w.nodeName)
 defer func() { w.finished = true }()
 select {
 case <-time.After(initialSleep):
@@ -273,7 +272,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
 if pods == nil {
 pods, err = c.CoreV1().Pods("kube-system").List(metav1.ListOptions{})
 if err != nil {
-e2elog.Logf("Error while listing Pods: %v", err)
+Logf("Error while listing Pods: %v", err)
 return nil, err
 }
 }
@@ -297,7 +296,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
 }
 nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
 if err != nil {
-e2elog.Logf("Error while listing Nodes: %v", err)
+Logf("Error while listing Nodes: %v", err)
 return nil, err
 }

@@ -345,27 +344,27 @@ func (g *ContainerResourceGatherer) StartGatheringData() {
 // specified resource constraints.
 func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int, constraints map[string]ResourceConstraint) (*ResourceUsageSummary, error) {
 close(g.stopCh)
-e2elog.Logf("Closed stop channel. Waiting for %v workers", len(g.workers))
+Logf("Closed stop channel. Waiting for %v workers", len(g.workers))
 finished := make(chan struct{})
 go func() {
 g.workerWg.Wait()
 finished <- struct{}{}
 }()
 select {
 case <-finished:
-e2elog.Logf("Waitgroup finished.")
+Logf("Waitgroup finished.")
 case <-time.After(2 * time.Minute):
 unfinished := make([]string, 0)
 for i := range g.workers {
 if !g.workers[i].finished {
 unfinished = append(unfinished, g.workers[i].nodeName)
 }
 }
-e2elog.Logf("Timed out while waiting for waitgroup, some workers failed to finish: %v", unfinished)
+Logf("Timed out while waiting for waitgroup, some workers failed to finish: %v", unfinished)
 }

 if len(percentiles) == 0 {
-e2elog.Logf("Warning! Empty percentile list for stopAndPrintData.")
+Logf("Warning! Empty percentile list for stopAndPrintData.")
 return &ResourceUsageSummary{}, fmt.Errorf("Failed to get any resource usage data")
 }
 data := make(map[int]e2ekubelet.ResourceUsagePerContainer)
8 changes: 3 additions & 5 deletions test/e2e/framework/size.go
@@ -19,8 +19,6 @@ package framework
 import (
 "fmt"
 "time"
-
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 )

 const (
@@ -53,14 +51,14 @@ func WaitForGroupSize(group string, size int32) error {
 for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
 currentSize, err := GroupSize(group)
 if err != nil {
-e2elog.Logf("Failed to get node instance group size: %v", err)
+Logf("Failed to get node instance group size: %v", err)
 continue
 }
 if currentSize != int(size) {
-e2elog.Logf("Waiting for node instance group size %d, current size %d", size, currentSize)
+Logf("Waiting for node instance group size %d, current size %d", size, currentSize)
 continue
 }
-e2elog.Logf("Node instance group has reached the desired size %d", size)
+Logf("Node instance group has reached the desired size %d", size)
 return nil
 }
 return fmt.Errorf("timeout waiting %v for node instance group size to be %d", timeout, size)
29 changes: 14 additions & 15 deletions test/e2e/framework/suites.go
@@ -27,7 +27,6 @@ import (
 v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/component-base/version"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -66,11 +65,11 @@ func SetupSuite() {
 v1.NamespaceNodeLease,
 })
 if err != nil {
-e2elog.Failf("Error deleting orphaned namespaces: %v", err)
+Failf("Error deleting orphaned namespaces: %v", err)
 }
 klog.Infof("Waiting for deletion of the following namespaces: %v", deleted)
 if err := WaitForNamespacesDeleted(c, deleted, NamespaceCleanupTimeout); err != nil {
-e2elog.Failf("Failed to delete orphaned namespaces %v: %v", deleted, err)
+Failf("Failed to delete orphaned namespaces %v: %v", deleted, err)
 }
 }

@@ -97,26 +96,26 @@ func SetupSuite() {
 // number equal to the number of allowed not-ready nodes).
 if err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(TestContext.MinStartupPods), int32(TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil {
 DumpAllNamespaceInfo(c, metav1.NamespaceSystem)
-LogFailedContainers(c, metav1.NamespaceSystem, e2elog.Logf)
+LogFailedContainers(c, metav1.NamespaceSystem, Logf)
 runKubernetesServiceTestContainer(c, metav1.NamespaceDefault)
-e2elog.Failf("Error waiting for all pods to be running and ready: %v", err)
+Failf("Error waiting for all pods to be running and ready: %v", err)
 }

 if err := WaitForDaemonSets(c, metav1.NamespaceSystem, int32(TestContext.AllowedNotReadyNodes), TestContext.SystemDaemonsetStartupTimeout); err != nil {
-e2elog.Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err)
+Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err)
 }

 // Log the version of the server and this client.
-e2elog.Logf("e2e test version: %s", version.Get().GitVersion)
+Logf("e2e test version: %s", version.Get().GitVersion)

 dc := c.DiscoveryClient

 serverVersion, serverErr := dc.ServerVersion()
 if serverErr != nil {
-e2elog.Logf("Unexpected server error retrieving version: %v", serverErr)
+Logf("Unexpected server error retrieving version: %v", serverErr)
 }
 if serverVersion != nil {
-e2elog.Logf("kube-apiserver version: %s", serverVersion.GitVersion)
+Logf("kube-apiserver version: %s", serverVersion.GitVersion)
 }

 if TestContext.NodeKiller.Enabled {
@@ -142,7 +141,7 @@ func SetupSuitePerGinkgoNode() {
 klog.Fatal("Error loading client: ", err)
 }
 TestContext.IPFamily = getDefaultClusterIPFamily(c)
-e2elog.Logf("Cluster IP family: %s", TestContext.IPFamily)
+Logf("Cluster IP family: %s", TestContext.IPFamily)
 }

 // CleanupSuite is the boilerplate that can be used after tests on ginkgo were run, on the SynchronizedAfterSuite step.
@@ -151,20 +150,20 @@ func SetupSuitePerGinkgoNode() {
 // and then the function that only runs on the first Ginkgo node.
 func CleanupSuite() {
 // Run on all Ginkgo nodes
-e2elog.Logf("Running AfterSuite actions on all nodes")
+Logf("Running AfterSuite actions on all nodes")
 RunCleanupActions()
 }

 // AfterSuiteActions are actions that are run on ginkgo's SynchronizedAfterSuite
 func AfterSuiteActions() {
 // Run only Ginkgo on node 1
-e2elog.Logf("Running AfterSuite actions on node 1")
+Logf("Running AfterSuite actions on node 1")
 if TestContext.ReportDir != "" {
 CoreDump(TestContext.ReportDir)
 }
 if TestContext.GatherSuiteMetricsAfterTest {
 if err := gatherTestSuiteMetrics(); err != nil {
-e2elog.Logf("Error gathering metrics: %v", err)
+Logf("Error gathering metrics: %v", err)
 }
 }
 if TestContext.NodeKiller.Enabled {
@@ -173,7 +172,7 @@ func AfterSuiteActions() {
 }

 func gatherTestSuiteMetrics() error {
-e2elog.Logf("Gathering metrics")
+Logf("Gathering metrics")
 c, err := LoadClientset()
 if err != nil {
 return fmt.Errorf("error loading client: %v", err)
@@ -198,7 +197,7 @@ func gatherTestSuiteMetrics() error {
 return fmt.Errorf("error writing to %q: %v", filePath, err)
 }
 } else {
-e2elog.Logf("\n\nTest Suite Metrics:\n%s\n", metricsJSON)
+Logf("\n\nTest Suite Metrics:\n%s\n", metricsJSON)
 }

 return nil
3 changes: 1 addition & 2 deletions test/e2e/framework/test_context.go
@@ -33,7 +33,6 @@ import (
 cliflag "k8s.io/component-base/cli/flag"
 "k8s.io/klog"
 kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 )

 const (
@@ -441,7 +440,7 @@ func AfterReadingAllFlags(t *TestContextType) {
 if TestContext.Provider == "" {
 // Some users of the e2e.test binary pass --provider=.
 // We need to support that, changing it would break those usages.
-e2elog.Logf("The --provider flag is not set. Continuing as if --provider=skeleton had been used.")
+Logf("The --provider flag is not set. Continuing as if --provider=skeleton had been used.")
 TestContext.Provider = "skeleton"
 }
