Skip to content

Commit

Permalink
Use log functions of core framework on [c-n]
Browse files Browse the repository at this point in the history
This makes the sub-packages of the e2e test framework use the log functions
of the core framework instead, to avoid circular dependencies.
  • Loading branch information
Kenichi Omichi committed Aug 21, 2019
1 parent 8cf05f5 commit f3f14f9
Show file tree
Hide file tree
Showing 9 changed files with 112 additions and 122 deletions.
13 changes: 6 additions & 7 deletions test/e2e/framework/create.go
Expand Up @@ -34,7 +34,6 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/cache"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
)
Expand Down Expand Up @@ -81,7 +80,7 @@ func visitManifests(cb func([]byte) error, files ...string) error {
for _, fileName := range files {
data, err := testfiles.Read(fileName)
if err != nil {
e2elog.Failf("reading manifest file: %v", err)
Failf("reading manifest file: %v", err)
}

// Split at the "---" separator before working on
Expand Down Expand Up @@ -117,7 +116,7 @@ func visitManifests(cb func([]byte) error, files ...string) error {
func (f *Framework) PatchItems(items ...interface{}) error {
for _, item := range items {
// Uncomment when debugging the loading and patching of items.
// e2elog.Logf("patching original content of %T:\n%s", item, PrettyPrint(item))
// Logf("patching original content of %T:\n%s", item, PrettyPrint(item))
if err := f.patchItemRecursively(item); err != nil {
return err
}
Expand Down Expand Up @@ -156,7 +155,7 @@ func (f *Framework) CreateItems(items ...interface{}) (func(), error) {
// to non-namespaced items.
for _, destructor := range destructors {
if err := destructor(); err != nil && !apierrs.IsNotFound(err) {
e2elog.Logf("deleting failed: %s", err)
Logf("deleting failed: %s", err)
}
}
}
Expand All @@ -169,12 +168,12 @@ func (f *Framework) CreateItems(items ...interface{}) (func(), error) {
description := DescribeItem(item)
// Uncomment this line to get a full dump of the entire item.
// description = fmt.Sprintf("%s:\n%s", description, PrettyPrint(item))
e2elog.Logf("creating %s", description)
Logf("creating %s", description)
for _, factory := range factories {
destructor, err := factory.Create(f, item)
if destructor != nil {
destructors = append(destructors, func() error {
e2elog.Logf("deleting %s", description)
Logf("deleting %s", description)
return destructor()
})
}
Expand Down Expand Up @@ -417,7 +416,7 @@ func (*clusterRoleFactory) Create(f *Framework, i interface{}) (func() error, er
return nil, errorItemNotSupported
}

e2elog.Logf("Define cluster role %v", item.GetName())
Logf("Define cluster role %v", item.GetName())
client := f.ClientSet.RbacV1().ClusterRoles()
if _, err := client.Create(item); err != nil {
return nil, errors.Wrap(err, "create ClusterRole")
Expand Down
5 changes: 2 additions & 3 deletions test/e2e/framework/exec_util.go
Expand Up @@ -27,7 +27,6 @@ import (
"k8s.io/client-go/kubernetes/scheme"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/remotecommand"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"

"github.com/onsi/gomega"
)
Expand All @@ -49,7 +48,7 @@ type ExecOptions struct {
// returning stdout, stderr and error. `options` allowed for
// additional parameters to be passed.
func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error) {
e2elog.Logf("ExecWithOptions %+v", options)
Logf("ExecWithOptions %+v", options)

config, err := LoadConfig()
ExpectNoError(err, "failed to load restclient config")
Expand Down Expand Up @@ -98,7 +97,7 @@ func (f *Framework) ExecCommandInContainerWithFullOutput(podName, containerName
// ExecCommandInContainer executes a command in the specified container.
func (f *Framework) ExecCommandInContainer(podName, containerName string, cmd ...string) string {
stdout, stderr, err := f.ExecCommandInContainerWithFullOutput(podName, containerName, cmd...)
e2elog.Logf("Exec stderr: %q", stderr)
Logf("Exec stderr: %q", stderr)
ExpectNoError(err,
"failed to execute command in pod %v, container %v: %v",
podName, containerName, err)
Expand Down
3 changes: 1 addition & 2 deletions test/e2e/framework/flake_reporting_util.go
Expand Up @@ -21,7 +21,6 @@ import (
"fmt"
"sync"

e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
)

Expand Down Expand Up @@ -60,7 +59,7 @@ func (f *FlakeReport) RecordFlakeIfError(err error, optionalDescription ...inter
if desc != "" {
msg = fmt.Sprintf("%v (Description: %v)", msg, desc)
}
e2elog.Logf(msg)
Logf(msg)
f.lock.Lock()
defer f.lock.Unlock()
f.Flakes = append(f.Flakes, msg)
Expand Down
63 changes: 31 additions & 32 deletions test/e2e/framework/framework.go
Expand Up @@ -46,7 +46,6 @@ import (
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
scaleclient "k8s.io/client-go/scale"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epsp "k8s.io/kubernetes/test/e2e/framework/psp"
Expand Down Expand Up @@ -225,7 +224,7 @@ func (f *Framework) BeforeEach() {
err = WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
ExpectNoError(err)
} else {
e2elog.Logf("Skipping waiting for service account")
Logf("Skipping waiting for service account")
}
f.UniqueName = f.Namespace.GetName()
} else {
Expand Down Expand Up @@ -253,7 +252,7 @@ func (f *Framework) BeforeEach() {
PrintVerboseLogs: false,
}, nil)
if err != nil {
e2elog.Logf("Error while creating NewResourceUsageGatherer: %v", err)
Logf("Error while creating NewResourceUsageGatherer: %v", err)
} else {
go f.gatherer.StartGatheringData()
}
Expand All @@ -274,13 +273,13 @@ func (f *Framework) BeforeEach() {
if gatherMetricsAfterTest && TestContext.IncludeClusterAutoscalerMetrics {
grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, !ProviderIs("kubemark"), false, false, false, TestContext.IncludeClusterAutoscalerMetrics)
if err != nil {
e2elog.Logf("Failed to create MetricsGrabber (skipping ClusterAutoscaler metrics gathering before test): %v", err)
Logf("Failed to create MetricsGrabber (skipping ClusterAutoscaler metrics gathering before test): %v", err)
} else {
f.clusterAutoscalerMetricsBeforeTest, err = grabber.Grab()
if err != nil {
e2elog.Logf("MetricsGrabber failed to grab CA metrics before test (skipping metrics gathering): %v", err)
Logf("MetricsGrabber failed to grab CA metrics before test (skipping metrics gathering): %v", err)
} else {
e2elog.Logf("Gathered ClusterAutoscaler metrics before test")
Logf("Gathered ClusterAutoscaler metrics before test")
}
}

Expand Down Expand Up @@ -311,15 +310,15 @@ func (f *Framework) AfterEach() {
if !apierrors.IsNotFound(err) {
nsDeletionErrors[ns.Name] = err
} else {
e2elog.Logf("Namespace %v was already deleted", ns.Name)
Logf("Namespace %v was already deleted", ns.Name)
}
}
}
} else {
if !TestContext.DeleteNamespace {
e2elog.Logf("Found DeleteNamespace=false, skipping namespace deletion!")
Logf("Found DeleteNamespace=false, skipping namespace deletion!")
} else {
e2elog.Logf("Found DeleteNamespaceOnFailure=false and current test failed, skipping namespace deletion!")
Logf("Found DeleteNamespaceOnFailure=false and current test failed, skipping namespace deletion!")
}
}

Expand All @@ -334,7 +333,7 @@ func (f *Framework) AfterEach() {
for namespaceKey, namespaceErr := range nsDeletionErrors {
messages = append(messages, fmt.Sprintf("Couldn't delete ns: %q: %s (%#v)", namespaceKey, namespaceErr, namespaceErr))
}
e2elog.Failf(strings.Join(messages, ","))
Failf(strings.Join(messages, ","))
}
}()

Expand Down Expand Up @@ -366,11 +365,11 @@ func (f *Framework) AfterEach() {
grabMetricsFromKubelets := TestContext.GatherMetricsAfterTest != "master" && !ProviderIs("kubemark")
grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, grabMetricsFromKubelets, true, true, true, TestContext.IncludeClusterAutoscalerMetrics)
if err != nil {
e2elog.Logf("Failed to create MetricsGrabber (skipping metrics gathering): %v", err)
Logf("Failed to create MetricsGrabber (skipping metrics gathering): %v", err)
} else {
received, err := grabber.Grab()
if err != nil {
e2elog.Logf("MetricsGrabber failed to grab some of the metrics: %v", err)
Logf("MetricsGrabber failed to grab some of the metrics: %v", err)
}
(*e2emetrics.ComponentCollection)(&received).ComputeClusterAutoscalerMetricsDelta(f.clusterAutoscalerMetricsBeforeTest)
f.TestSummaries = append(f.TestSummaries, (*e2emetrics.ComponentCollection)(&received))
Expand All @@ -391,7 +390,7 @@ func (f *Framework) AfterEach() {
// This is explicitly done at the very end of the test, to avoid
// e.g. not removing namespace in case of this failure.
if err := AllNodesReady(f.ClientSet, 3*time.Minute); err != nil {
e2elog.Failf("All nodes should be ready after test, %v", err)
Failf("All nodes should be ready after test, %v", err)
}
}

Expand Down Expand Up @@ -490,7 +489,7 @@ func (f *Framework) WriteFileViaContainer(podName, containerName string, path st
command := fmt.Sprintf("echo '%s' > '%s'", contents, path)
stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "/bin/sh", "-c", command)
if err != nil {
e2elog.Logf("error running kubectl exec to write file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
Logf("error running kubectl exec to write file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
}
return err
}
Expand All @@ -501,7 +500,7 @@ func (f *Framework) ReadFileViaContainer(podName, containerName string, path str

stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "cat", path)
if err != nil {
e2elog.Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
}
return string(stdout), err
}
Expand All @@ -512,7 +511,7 @@ func (f *Framework) CheckFileSizeViaContainer(podName, containerName, path strin

stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "ls", "-l", path)
if err != nil {
e2elog.Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
}
return string(stdout), err
}
Expand Down Expand Up @@ -549,7 +548,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
TargetPort: intstr.FromInt(contPort),
}}
}
e2elog.Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName)
Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName)
service, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(&v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "service-for-" + appName,
Expand All @@ -575,7 +574,7 @@ func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n
for i, node := range nodes.Items {
// one per node, but no more than maxCount.
if i <= maxCount {
e2elog.Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf(appName+"-pod-%v", i),
Expand Down Expand Up @@ -646,19 +645,19 @@ func (kc *KubeConfig) FindCluster(name string) *KubeCluster {
func kubectlExecWithRetry(namespace string, podName, containerName string, args ...string) ([]byte, []byte, error) {
for numRetries := 0; numRetries < maxKubectlExecRetries; numRetries++ {
if numRetries > 0 {
e2elog.Logf("Retrying kubectl exec (retry count=%v/%v)", numRetries+1, maxKubectlExecRetries)
Logf("Retrying kubectl exec (retry count=%v/%v)", numRetries+1, maxKubectlExecRetries)
}

stdOutBytes, stdErrBytes, err := kubectlExec(namespace, podName, containerName, args...)
if err != nil {
if strings.Contains(strings.ToLower(string(stdErrBytes)), "i/o timeout") {
// Retry on "i/o timeout" errors
e2elog.Logf("Warning: kubectl exec encountered i/o timeout.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
Logf("Warning: kubectl exec encountered i/o timeout.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
continue
}
if strings.Contains(strings.ToLower(string(stdErrBytes)), "container not found") {
// Retry on "container not found" errors
e2elog.Logf("Warning: kubectl exec encountered container not found.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
Logf("Warning: kubectl exec encountered container not found.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
time.Sleep(2 * time.Second)
continue
}
Expand All @@ -683,7 +682,7 @@ func kubectlExec(namespace string, podName, containerName string, args ...string
cmd := KubectlCmd(cmdArgs...)
cmd.Stdout, cmd.Stderr = &stdout, &stderr

e2elog.Logf("Running '%s %s'", cmd.Path, strings.Join(cmdArgs, " "))
Logf("Running '%s %s'", cmd.Path, strings.Join(cmdArgs, " "))
err := cmd.Run()
return stdout.Bytes(), stderr.Bytes(), err
}
Expand Down Expand Up @@ -790,7 +789,7 @@ func (p *PodStateVerification) filter(c clientset.Interface, namespace *v1.Names

ns := namespace.Name
pl, err := filterLabels(p.Selectors, c, ns) // Build an v1.PodList to operate against.
e2elog.Logf("Selector matched %v pods for %v", len(pl.Items), p.Selectors)
Logf("Selector matched %v pods for %v", len(pl.Items), p.Selectors)
if len(pl.Items) == 0 || err != nil {
return pl.Items, err
}
Expand All @@ -805,7 +804,7 @@ ReturnPodsSoFar:
}
passesVerify, err := passesVerifyFilter(pod, p.Verify)
if err != nil {
e2elog.Logf("Error detected on %v : %v !", pod.Name, err)
Logf("Error detected on %v : %v !", pod.Name, err)
break ReturnPodsSoFar
}
if passesVerify {
Expand All @@ -826,12 +825,12 @@ func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]v1

// Failure
if returnedErr != nil {
e2elog.Logf("Cutting polling short: We got an error from the pod filtering layer.")
Logf("Cutting polling short: We got an error from the pod filtering layer.")
// stop polling if the pod filtering returns an error. that should never happen.
// it indicates, for example, that the client is broken or something non-pod related.
return false, returnedErr
}
e2elog.Logf("Found %v / %v", len(pods), atLeast)
Logf("Found %v / %v", len(pods), atLeast)

// Success
if len(pods) >= atLeast {
Expand All @@ -840,15 +839,15 @@ func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]v1
// Keep trying...
return false, nil
})
e2elog.Logf("WaitFor completed with timeout %v. Pods found = %v out of %v", timeout, len(pods), atLeast)
Logf("WaitFor completed with timeout %v. Pods found = %v out of %v", timeout, len(pods), atLeast)
return pods, err
}

// WaitForOrFail provides a shorthand WaitFor with failure as an option if anything goes wrong.
func (cl *ClusterVerification) WaitForOrFail(atLeast int, timeout time.Duration) {
	// Delegate to WaitFor, then fail the test unless enough pods were found.
	pods, waitErr := cl.WaitFor(atLeast, timeout)
	if waitErr == nil && len(pods) >= atLeast {
		return
	}
	Failf("Verified %v of %v pods , error : %v", len(pods), atLeast, waitErr)
}

Expand All @@ -861,14 +860,14 @@ func (cl *ClusterVerification) ForEach(podFunc func(v1.Pod)) error {
pods, err := cl.podState.filter(cl.client, cl.namespace)
if err == nil {
if len(pods) == 0 {
e2elog.Failf("No pods matched the filter.")
Failf("No pods matched the filter.")
}
e2elog.Logf("ForEach: Found %v pods from the filter. Now looping through them.", len(pods))
Logf("ForEach: Found %v pods from the filter. Now looping through them.", len(pods))
for _, p := range pods {
podFunc(p)
}
} else {
e2elog.Logf("ForEach: Something went wrong when filtering pods to execute against: %v", err)
Logf("ForEach: Something went wrong when filtering pods to execute against: %v", err)
}

return err
Expand All @@ -880,7 +879,7 @@ func GetLogToFileFunc(file *os.File) func(format string, args ...interface{}) {
return func(format string, args ...interface{}) {
writer := bufio.NewWriter(file)
if _, err := fmt.Fprintf(writer, format, args...); err != nil {
e2elog.Logf("Failed to write file %v with test performance data: %v", file.Name(), err)
Logf("Failed to write file %v with test performance data: %v", file.Name(), err)
}
writer.Flush()
}
Expand Down
5 changes: 2 additions & 3 deletions test/e2e/framework/get-kubemark-resource-usage.go
Expand Up @@ -21,7 +21,6 @@ import (
"fmt"
"strings"

e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)

Expand All @@ -47,7 +46,7 @@ func GetKubemarkMasterComponentsResourceUsage() map[string]*KubemarkResourceUsag
// Get kubernetes component resource usage
sshResult, err := getMasterUsageByPrefix("kube")
if err != nil {
e2elog.Logf("Error when trying to SSH to master machine. Skipping probe. %v", err)
Logf("Error when trying to SSH to master machine. Skipping probe. %v", err)
return nil
}
scanner := bufio.NewScanner(strings.NewReader(sshResult))
Expand All @@ -65,7 +64,7 @@ func GetKubemarkMasterComponentsResourceUsage() map[string]*KubemarkResourceUsag
// Get etcd resource usage
sshResult, err = getMasterUsageByPrefix("bin/etcd")
if err != nil {
e2elog.Logf("Error when trying to SSH to master machine. Skipping probe")
Logf("Error when trying to SSH to master machine. Skipping probe")
return nil
}
scanner = bufio.NewScanner(strings.NewReader(sshResult))
Expand Down

0 comments on commit f3f14f9

Please sign in to comment.