
Use log functions of core framework on [d-i]

This makes the sub-packages of the e2e test framework use the log functions
of the core framework instead, avoiding circular dependencies.
oomichi committed Aug 22, 2019
1 parent 37651f1 commit 20f507fc6a6fd4fae0b72924c81df341511ea226
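For context, here is a minimal, hypothetical sketch (not part of the commit) of the call pattern the diff applies throughout: framework sub-packages such as test/e2e/framework/deployment log through framework.Logf from the core framework package rather than importing the separate test/e2e/framework/log sub-package, which could otherwise form an import cycle once the core framework needs these sub-packages. The file and helper names below are illustrative only.

// Illustrative sketch of the call pattern after this change; the helper
// below is hypothetical and not part of the commit.
package deployment

import (
	// Core framework import; replaces the former
	// e2elog "k8s.io/kubernetes/test/e2e/framework/log" import.
	"k8s.io/kubernetes/test/e2e/framework"
)

// logWaiting is a hypothetical helper showing the logging call pattern.
func logWaiting(name string) {
	// Same Logf signature as before, now provided by the core framework package.
	framework.Logf("Waiting for deployment %q to complete", name)
}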
@@ -20,7 +20,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/tools/watch:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
],
@@ -30,14 +30,14 @@ import (
clientset "k8s.io/client-go/kubernetes"
watchtools "k8s.io/client-go/tools/watch"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)

// UpdateDeploymentWithRetries updates the specified deployment with retries.
func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*appsv1.Deployment, error) {
-return testutils.UpdateDeploymentWithRetries(c, namespace, name, applyUpdate, e2elog.Logf, poll, pollShortTimeout)
+return testutils.UpdateDeploymentWithRetries(c, namespace, name, applyUpdate, framework.Logf, poll, pollShortTimeout)
}

// CheckDeploymentRevisionAndImage checks if the input deployment's and its new replica set's revision and image are as expected.
@@ -67,7 +67,7 @@ func WatchRecreateDeployment(c clientset.Interface, d *appsv1.Deployment) error
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(d, c.AppsV1())
newRS, nerr := deploymentutil.GetNewReplicaSet(d, c.AppsV1())
if err == nil && nerr == nil {
e2elog.Logf("%+v", d)
framework.Logf("%+v", d)
logReplicaSetsOfDeployment(d, allOldRSs, newRS)
logPodsOfDeployment(c, d, append(allOldRSs, newRS))
}
@@ -128,7 +128,7 @@ func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[
if err != nil {
return nil, fmt.Errorf("deployment %q Create API error: %v", deploymentSpec.Name, err)
}
e2elog.Logf("Waiting deployment %q to complete", deploymentSpec.Name)
framework.Logf("Waiting deployment %q to complete", deploymentSpec.Name)
err = WaitForDeploymentComplete(client, deployment)
if err != nil {
return nil, fmt.Errorf("deployment %q failed to complete: %v", deploymentSpec.Name, err)
@@ -19,14 +19,14 @@ package deployment
import (
appsv1 "k8s.io/api/apps/v1"
clientset "k8s.io/client-go/kubernetes"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
)

func logReplicaSetsOfDeployment(deployment *appsv1.Deployment, allOldRSs []*appsv1.ReplicaSet, newRS *appsv1.ReplicaSet) {
-testutils.LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, e2elog.Logf)
+testutils.LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, framework.Logf)
}

func logPodsOfDeployment(c clientset.Interface, deployment *appsv1.Deployment, rsList []*appsv1.ReplicaSet) {
-testutils.LogPodsOfDeployment(c, deployment, rsList, e2elog.Logf)
+testutils.LogPodsOfDeployment(c, deployment, rsList, framework.Logf)
}
@@ -25,7 +25,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
)

@@ -43,27 +43,27 @@ func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string,

// WaitForDeploymentWithCondition waits for the specified deployment condition.
func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType appsv1.DeploymentConditionType) error {
-return testutils.WaitForDeploymentWithCondition(c, ns, deploymentName, reason, condType, e2elog.Logf, poll, pollLongTimeout)
+return testutils.WaitForDeploymentWithCondition(c, ns, deploymentName, reason, condType, framework.Logf, poll, pollLongTimeout)
}

// WaitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image.
// Note that deployment revision and its new RS revision should be updated shortly most of the time, but an overwhelmed RS controller
// may result in taking longer to relabel a RS.
func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName string, revision, image string) error {
-return testutils.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, revision, image, e2elog.Logf, poll, pollLongTimeout)
+return testutils.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, revision, image, framework.Logf, poll, pollLongTimeout)
}

// WaitForDeploymentComplete waits for the deployment to complete, and don't check if rolling update strategy is broken.
// Rolling update strategy is used only during a rolling update, and can be violated in other situations,
// such as shortly after a scaling event or the deployment is just created.
func WaitForDeploymentComplete(c clientset.Interface, d *appsv1.Deployment) error {
-return testutils.WaitForDeploymentComplete(c, d, e2elog.Logf, poll, pollLongTimeout)
+return testutils.WaitForDeploymentComplete(c, d, framework.Logf, poll, pollLongTimeout)
}

// WaitForDeploymentCompleteAndCheckRolling waits for the deployment to complete, and check rolling update strategy isn't broken at any times.
// Rolling update strategy should not be broken during a rolling update.
func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *appsv1.Deployment) error {
-return testutils.WaitForDeploymentCompleteAndCheckRolling(c, d, e2elog.Logf, poll, pollLongTimeout)
+return testutils.WaitForDeploymentCompleteAndCheckRolling(c, d, framework.Logf, poll, pollLongTimeout)
}

// WaitForDeploymentUpdatedReplicasGTE waits for given deployment to be observed by the controller and has at least a number of updatedReplicas
@@ -10,7 +10,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
],
)
@@ -32,7 +32,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework"
)

// ServiceStartTimeout is how long to wait for a service endpoint to be resolvable.
@@ -103,7 +103,7 @@ func ValidateEndpointsPorts(c clientset.Interface, namespace, serviceName string
for start := time.Now(); time.Since(start) < ServiceStartTimeout; time.Sleep(1 * time.Second) {
ep, err := c.CoreV1().Endpoints(namespace).Get(serviceName, metav1.GetOptions{})
if err != nil {
e2elog.Logf("Get endpoints failed (%v elapsed, ignoring for 5s): %v", time.Since(start), err)
framework.Logf("Get endpoints failed (%v elapsed, ignoring for 5s): %v", time.Since(start), err)
continue
}
portsByPodUID := GetContainerPortsByPodUID(ep)
@@ -116,21 +116,21 @@ func ValidateEndpointsPorts(c clientset.Interface, namespace, serviceName string
if err != nil {
return err
}
e2elog.Logf("successfully validated that service %s in namespace %s exposes endpoints %v (%v elapsed)",
framework.Logf("successfully validated that service %s in namespace %s exposes endpoints %v (%v elapsed)",
serviceName, namespace, expectedEndpoints, time.Since(start))
return nil
}
if i%5 == 0 {
e2elog.Logf("Unexpected endpoints: found %v, expected %v (%v elapsed, will retry)", portsByPodUID, expectedEndpoints, time.Since(start))
framework.Logf("Unexpected endpoints: found %v, expected %v (%v elapsed, will retry)", portsByPodUID, expectedEndpoints, time.Since(start))
}
i++
}
if pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}); err == nil {
for _, pod := range pods.Items {
e2elog.Logf("Pod %s\t%s\t%s\t%s", pod.Namespace, pod.Name, pod.Spec.NodeName, pod.DeletionTimestamp)
framework.Logf("Pod %s\t%s\t%s\t%s", pod.Namespace, pod.Name, pod.Spec.NodeName, pod.DeletionTimestamp)
}
} else {
e2elog.Logf("Can't list pod debug info: %v", err)
framework.Logf("Can't list pod debug info: %v", err)
}
return fmt.Errorf("Timed out waiting for service %s in namespace %s to expose endpoints %v (%v elapsed)", serviceName, namespace, expectedEndpoints, ServiceStartTimeout)
}
@@ -18,7 +18,6 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/service:go_default_library",
"//test/e2e/framework/testfiles:go_default_library",
@@ -49,7 +49,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
@@ -137,12 +136,12 @@ type E2ELogger struct{}

// Infof outputs log.
func (l *E2ELogger) Infof(format string, args ...interface{}) {
-e2elog.Logf(format, args...)
+framework.Logf(format, args...)
}

// Errorf outputs log.
func (l *E2ELogger) Errorf(format string, args ...interface{}) {
-e2elog.Logf(format, args...)
+framework.Logf(format, args...)
}

// ConformanceTests contains a closure with an entry and exit log line.
@@ -337,7 +336,7 @@ func BuildInsecureClient(timeout time.Duration) *http.Client {
// Ingress, it's updated.
func createTLSSecret(kubeClient clientset.Interface, namespace, secretName string, hosts ...string) (host string, rootCA, privKey []byte, err error) {
host = strings.Join(hosts, ",")
e2elog.Logf("Generating RSA cert for host %v", host)
framework.Logf("Generating RSA cert for host %v", host)
cert, key, err := GenerateRSACerts(host, true)
if err != nil {
return
@@ -354,11 +353,11 @@ func createTLSSecret(kubeClient clientset.Interface, namespace, secretName strin
var s *v1.Secret
if s, err = kubeClient.CoreV1().Secrets(namespace).Get(secretName, metav1.GetOptions{}); err == nil {
// TODO: Retry the update. We don't really expect anything to conflict though.
e2elog.Logf("Updating secret %v in ns %v with hosts %v", secret.Name, namespace, host)
framework.Logf("Updating secret %v in ns %v with hosts %v", secret.Name, namespace, host)
s.Data = secret.Data
_, err = kubeClient.CoreV1().Secrets(namespace).Update(s)
} else {
e2elog.Logf("Creating secret %v in ns %v with hosts %v", secret.Name, namespace, host)
framework.Logf("Creating secret %v in ns %v with hosts %v", secret.Name, namespace, host)
_, err = kubeClient.CoreV1().Secrets(namespace).Create(secret)
}
return host, cert, key, err
@@ -474,7 +473,7 @@ func (j *TestJig) Update(update func(ing *networkingv1beta1.Ingress)) {
for i := 0; i < 3; i++ {
j.Ingress, err = j.Client.NetworkingV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{})
if err != nil {
e2elog.Failf("failed to get ingress %s/%s: %v", ns, name, err)
framework.Failf("failed to get ingress %s/%s: %v", ns, name, err)
}
update(j.Ingress)
j.Ingress, err = j.runUpdate(j.Ingress)
@@ -483,10 +482,10 @@ func (j *TestJig) Update(update func(ing *networkingv1beta1.Ingress)) {
return
}
if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) {
e2elog.Failf("failed to update ingress %s/%s: %v", ns, name, err)
framework.Failf("failed to update ingress %s/%s: %v", ns, name, err)
}
}
e2elog.Failf("too many retries updating ingress %s/%s", ns, name)
framework.Failf("too many retries updating ingress %s/%s", ns, name)
}

// AddHTTPS updates the ingress to add this secret for these hosts.
@@ -544,7 +543,7 @@ func (j *TestJig) GetRootCA(secretName string) (rootCA []byte) {
var ok bool
rootCA, ok = j.RootCAs[secretName]
if !ok {
e2elog.Failf("Failed to retrieve rootCAs, no recorded secret by name %v", secretName)
framework.Failf("Failed to retrieve rootCAs, no recorded secret by name %v", secretName)
}
return
}
@@ -676,7 +675,7 @@ func (j *TestJig) pollIngressWithCert(ing *networkingv1beta1.Ingress, address st
// WaitForIngress returns when it gets the first 200 response
func (j *TestJig) WaitForIngress(waitForNodePort bool) {
if err := j.WaitForGivenIngressWithTimeout(j.Ingress, waitForNodePort, e2eservice.LoadBalancerPollTimeout); err != nil {
e2elog.Failf("error in waiting for ingress to get an address: %s", err)
framework.Failf("error in waiting for ingress to get an address: %s", err)
}
}

@@ -689,7 +688,7 @@ func (j *TestJig) WaitForIngressToStable() {
}
return true, nil
}); err != nil {
e2elog.Failf("error in waiting for ingress to stablize: %v", err)
framework.Failf("error in waiting for ingress to stablize: %v", err)
}
}

@@ -735,7 +734,7 @@ func (j *TestJig) VerifyURL(route, host string, iterations int, interval time.Du
for i := 0; i < iterations; i++ {
b, err := framework.SimpleGET(httpClient, route, host)
if err != nil {
-e2elog.Logf(b)
+framework.Logf(b)
return err
}
j.Logger.Infof("Verified %v with host %v %d times, sleeping for %v", route, host, i, interval)
@@ -815,7 +814,7 @@ func (j *TestJig) GetDistinctResponseFromIngress() (sets.String, error) {
// Wait for the loadbalancer IP.
address, err := j.WaitForIngressAddress(j.Client, j.Ingress.Namespace, j.Ingress.Name, e2eservice.LoadBalancerPollTimeout)
if err != nil {
e2elog.Failf("Ingress failed to acquire an IP address within %v", e2eservice.LoadBalancerPollTimeout)
framework.Failf("Ingress failed to acquire an IP address within %v", e2eservice.LoadBalancerPollTimeout)
}
responses := sets.NewString()
timeoutClient := &http.Client{Timeout: IngressReqTimeout}
@@ -846,25 +845,25 @@ func (cont *NginxIngressController) Init() {
read := func(file string) string {
return string(testfiles.ReadOrDie(filepath.Join(IngressManifestPath, "nginx", file)))
}
e2elog.Logf("initializing nginx ingress controller")
framework.Logf("initializing nginx ingress controller")
framework.RunKubectlOrDieInput(read("rc.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", cont.Ns))

rc, err := cont.Client.CoreV1().ReplicationControllers(cont.Ns).Get("nginx-ingress-controller", metav1.GetOptions{})
framework.ExpectNoError(err)
cont.rc = rc

e2elog.Logf("waiting for pods with label %v", rc.Spec.Selector)
framework.Logf("waiting for pods with label %v", rc.Spec.Selector)
sel := labels.SelectorFromSet(labels.Set(rc.Spec.Selector))
framework.ExpectNoError(testutils.WaitForPodsWithLabelRunning(cont.Client, cont.Ns, sel))
pods, err := cont.Client.CoreV1().Pods(cont.Ns).List(metav1.ListOptions{LabelSelector: sel.String()})
framework.ExpectNoError(err)
if len(pods.Items) == 0 {
e2elog.Failf("Failed to find nginx ingress controller pods with selector %v", sel)
framework.Failf("Failed to find nginx ingress controller pods with selector %v", sel)
}
cont.pod = &pods.Items[0]
cont.externalIP, err = framework.GetHostExternalAddress(cont.Client, cont.pod)
framework.ExpectNoError(err)
e2elog.Logf("ingress controller running in pod %v on ip %v", cont.pod.Name, cont.externalIP)
framework.Logf("ingress controller running in pod %v on ip %v", cont.pod.Name, cont.externalIP)
}

func generateBacksideHTTPSIngressSpec(ns string) *networkingv1beta1.Ingress {
