Remove e2e-image-puller
A long time ago, we added image prepulling as a workaround for the overwhelming
amount of flake caused by pulling images during the tests. This functionality
has been broken for a while now: when we switched to a COS image, mounting the
`docker` binary into `busybox` stopped working. So we just have dead code that
we should clean up.

Change-Id: I538171a5c1d9361eee7f9e0a99655b88b1721e3e
dims committed Sep 4, 2018
1 parent f3b98a0 commit ae0dde5
Showing 14 changed files with 8 additions and 215 deletions.
1 change: 0 additions & 1 deletion build/lib/release.sh
@@ -415,7 +415,6 @@ function kube::release::package_kube_manifests_tarball() {
   cp "${src_dir}/kube-controller-manager.manifest" "${dst_dir}"
   cp "${src_dir}/kube-addon-manager.yaml" "${dst_dir}"
   cp "${src_dir}/glbc.manifest" "${dst_dir}"
-  cp "${src_dir}/e2e-image-puller.manifest" "${dst_dir}/"
   cp "${src_dir}/etcd-empty-dir-cleanup.yaml" "${dst_dir}/"
   local internal_manifest
   for internal_manifest in $(ls "${src_dir}" | grep "^internal-*"); do
4 changes: 0 additions & 4 deletions cluster/gce/config-test.sh
@@ -386,10 +386,6 @@ HAIRPIN_MODE="${HAIRPIN_MODE:-hairpin-veth}" # promiscuous-bridge, hairpin-veth,
 # Optional: if set to true, kube-up will configure the cluster to run e2e tests.
 E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}
 
-# Optional: if set to true, a image puller is deployed. Only for use in e2e clusters.
-# TODO: Pipe this through GKE e2e clusters once we know it helps.
-PREPULL_E2E_IMAGES="${PREPULL_E2E_IMAGES:-true}"
-
 # Evict pods whenever compute resource availability on the nodes gets below a threshold.
 EVICTION_HARD="${EVICTION_HARD:-memory.available<250Mi,nodefs.available<10%,nodefs.inodesFree<5%}"
 
11 changes: 0 additions & 11 deletions cluster/gce/gci/configure-helper.sh
@@ -2526,14 +2526,6 @@ function setup-node-termination-handler-manifest {
   fi
 }
 
-# Starts an image-puller - used in test clusters.
-function start-image-puller {
-  echo "Start image-puller"
-  local -r e2e_image_puller_manifest="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/e2e-image-puller.manifest"
-  update-container-runtime "${e2e_image_puller_manifest}"
-  cp "${e2e_image_puller_manifest}" /etc/kubernetes/manifests/
-}
-
 # Setups manifests for ingress controller and gce-specific policies for service controller.
 function start-lb-controller {
   setup-addon-manifests "addons" "loadbalancing"
@@ -2745,9 +2737,6 @@ function main() {
   if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]]; then
     start-kube-proxy
   fi
-  if [[ "${PREPULL_E2E_IMAGES:-}" == "true" ]]; then
-    start-image-puller
-  fi
   if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
     start-node-problem-detector
   fi
1 change: 0 additions & 1 deletion cluster/gce/manifests/BUILD
@@ -15,7 +15,6 @@ filegroup(
     srcs = [
         "abac-authz-policy.jsonl",
         "cluster-autoscaler.manifest",
-        "e2e-image-puller.manifest",
         "etcd.manifest",
         "etcd-empty-dir-cleanup.yaml",
        "glbc.manifest",
117 changes: 0 additions & 117 deletions cluster/gce/manifests/e2e-image-puller.manifest

This file was deleted.
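For context, the deleted file defined the prepuller as a per-node static pod: a busybox container that bind-mounted the node's docker binary and socket via hostPath volumes and looped over a list of seeded images with `docker pull`. The sketch below only illustrates that pattern; it is not the deleted 117-line manifest, and the image names, paths, and trailing sleep are placeholders. The COS breakage mentioned in the commit message fits this design: the host's docker binary is dynamically linked against libraries that are not present inside the busybox root filesystem, so executing it from inside the container stopped working.

# Illustrative sketch of the removed e2e-image-puller pattern
# (placeholders throughout -- not the deleted manifest itself).
apiVersion: v1
kind: Pod
metadata:
  name: e2e-image-puller
  namespace: kube-system
  labels:
    name: e2e-image-puller   # the label framework.ImagePullerLabels matched
spec:
  containers:
  - name: image-puller
    image: busybox
    command:
    - /bin/sh
    - -c
    # Folded to one line: pull each seeded image with the host's docker
    # binary, then idle so the static pod stays "running".
    - >
      for i in registry.example.com/pause:latest registry.example.com/nginx:latest; do
      /usr/bin/docker pull "$i"; done; sleep 36000
    volumeMounts:
    - name: docker-binary
      mountPath: /usr/bin/docker      # host binary; this mount is what broke on COS
      readOnly: true
    - name: docker-socket
      mountPath: /var/run/docker.sock
  volumes:
  - name: docker-binary
    hostPath:
      path: /usr/bin/docker
  - name: docker-socket
    hostPath:
      path: /var/run/docker.sock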

1 change: 0 additions & 1 deletion cluster/gce/util.sh
@@ -864,7 +864,6 @@ KUBELET_CERT: $(yaml-quote ${KUBELET_CERT_BASE64:-})
 KUBELET_KEY: $(yaml-quote ${KUBELET_KEY_BASE64:-})
 NETWORK_PROVIDER: $(yaml-quote ${NETWORK_PROVIDER:-})
 NETWORK_POLICY_PROVIDER: $(yaml-quote ${NETWORK_POLICY_PROVIDER:-})
-PREPULL_E2E_IMAGES: $(yaml-quote ${PREPULL_E2E_IMAGES:-})
 HAIRPIN_MODE: $(yaml-quote ${HAIRPIN_MODE:-})
 E2E_STORAGE_TEST_ENVIRONMENT: $(yaml-quote ${E2E_STORAGE_TEST_ENVIRONMENT:-})
 KUBE_DOCKER_REGISTRY: $(yaml-quote ${KUBE_DOCKER_REGISTRY:-})
3 changes: 1 addition & 2 deletions test/e2e/apps/network_partition.go
@@ -106,12 +106,11 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
   f := framework.NewDefaultFramework("network-partition")
   var c clientset.Interface
   var ns string
-  ignoreLabels := framework.ImagePullerLabels
 
   BeforeEach(func() {
     c = f.ClientSet
     ns = f.Namespace.Name
-    _, err := framework.GetPodsInNamespace(c, ns, ignoreLabels)
+    _, err := framework.GetPodsInNamespace(c, ns, map[string]string{})
     Expect(err).NotTo(HaveOccurred())
 
     // TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed.
32 changes: 1 addition & 31 deletions test/e2e/e2e.go
@@ -183,7 +183,7 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
   // #41007. To avoid those pods preventing the whole test runs (and just
   // wasting the whole run), we allow for some not-ready pods (with the
   // number equal to the number of allowed not-ready nodes).
-  if err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, framework.ImagePullerLabels); err != nil {
+  if err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil {
     framework.DumpAllNamespaceInfo(c, metav1.NamespaceSystem)
     framework.LogFailedContainers(c, metav1.NamespaceSystem, framework.Logf)
     runKubernetesServiceTestContainer(c, metav1.NamespaceDefault)
@@ -194,36 +194,6 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
     framework.Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err)
   }
 
-  if err := framework.WaitForPodsSuccess(c, metav1.NamespaceSystem, framework.ImagePullerLabels, framework.ImagePrePullingTimeout); err != nil {
-    // There is no guarantee that the image pulling will succeed in 3 minutes
-    // and we don't even run the image puller on all platforms (including GKE).
-    // We wait for it so we get an indication of failures in the logs, and to
-    // maximize benefit of image pre-pulling.
-    framework.Logf("WARNING: Image pulling pods failed to enter success in %v: %v", framework.ImagePrePullingTimeout, err)
-  }
-
-  // Dump the output of the nethealth containers only once per run
-  if framework.TestContext.DumpLogsOnFailure {
-    logFunc := framework.Logf
-    if framework.TestContext.ReportDir != "" {
-      filePath := path.Join(framework.TestContext.ReportDir, "nethealth.txt")
-      file, err := os.Create(filePath)
-      if err != nil {
-        framework.Logf("Failed to create a file with network health data %v: %v\nPrinting to stdout", filePath, err)
-      } else {
-        defer file.Close()
-        if err = file.Chmod(0644); err != nil {
-          framework.Logf("Failed to chmod to 644 of %v: %v", filePath, err)
-        }
-        logFunc = framework.GetLogToFileFunc(file)
-        framework.Logf("Dumping network health container logs from all nodes to file %v", filePath)
-      }
-    } else {
-      framework.Logf("Dumping network health container logs from all nodes...")
-    }
-    framework.LogContainersInPodsWithLabels(c, metav1.NamespaceSystem, framework.ImagePullerLabels, "nethealth", logFunc)
-  }
-
   // Log the version of the server and this client.
   framework.Logf("e2e test version: %s", version.Get().GitVersion)
20 changes: 0 additions & 20 deletions test/e2e/framework/framework.go
@@ -21,7 +21,6 @@ import (
   "bytes"
   "fmt"
   "os"
-  "path"
   "strings"
   "sync"
   "time"
@@ -340,25 +339,6 @@ func (f *Framework) AfterEach() {
     if !f.SkipNamespaceCreation {
       DumpAllNamespaceInfo(f.ClientSet, f.Namespace.Name)
     }
-
-    logFunc := Logf
-    if TestContext.ReportDir != "" {
-      filePath := path.Join(TestContext.ReportDir, "image-puller.txt")
-      file, err := os.Create(filePath)
-      if err != nil {
-        By(fmt.Sprintf("Failed to create a file with image-puller data %v: %v\nPrinting to stdout", filePath, err))
-      } else {
-        By(fmt.Sprintf("Dumping a list of prepulled images on each node to file %v", filePath))
-        defer file.Close()
-        if err = file.Chmod(0644); err != nil {
-          Logf("Failed to chmod to 644 of %v: %v", filePath, err)
-        }
-        logFunc = GetLogToFileFunc(file)
-      }
-    } else {
-      By("Dumping a list of prepulled images on each node...")
-    }
-    LogContainersInPodsWithLabels(f.ClientSet, metav1.NamespaceSystem, ImagePullerLabels, "image-puller", logFunc)
   }
 
   if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" && f.gatherer != nil {
11 changes: 1 addition & 10 deletions test/e2e/framework/util.go
@@ -201,19 +201,10 @@ const (
 
   // ssh port
   sshPort = "22"
-
-  // ImagePrePullingTimeout is the time we wait for the e2e-image-puller
-  // static pods to pull the list of seeded images. If they don't pull
-  // images within this time we simply log their output and carry on
-  // with the tests.
-  ImagePrePullingTimeout = 5 * time.Minute
 )
 
 var (
   BusyBoxImage = imageutils.GetE2EImage(imageutils.BusyBox)
-  // Label allocated to the image puller static pod that runs on each node
-  // before e2es.
-  ImagePullerLabels = map[string]string{"name": "e2e-image-puller"}
 
   // For parsing Kubectl version for version-skewed testing.
   gitVersionRegexp = regexp.MustCompile("GitVersion:\"(v.+?)\"")
@@ -634,7 +625,7 @@ func WaitForPodsSuccess(c clientset.Interface, ns string, successPodLabels map[s
 //
 // If ignoreLabels is not empty, pods matching this selector are ignored.
 func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedNotReadyPods int32, timeout time.Duration, ignoreLabels map[string]string) error {
-  ignoreSelector := labels.SelectorFromSet(ignoreLabels)
+  ignoreSelector := labels.SelectorFromSet(map[string]string{})
   start := time.Now()
   Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
     timeout, minPods, ns)
7 changes: 2 additions & 5 deletions test/e2e/lifecycle/resize_nodes.go
@@ -45,13 +45,12 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
   var systemPodsNo int32
   var c clientset.Interface
   var ns string
-  ignoreLabels := framework.ImagePullerLabels
   var group string
 
   BeforeEach(func() {
     c = f.ClientSet
     ns = f.Namespace.Name
-    systemPods, err := framework.GetPodsInNamespace(c, ns, ignoreLabels)
+    systemPods, err := framework.GetPodsInNamespace(c, ns, map[string]string{})
     Expect(err).NotTo(HaveOccurred())
     systemPodsNo = int32(len(systemPods))
     if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
@@ -104,10 +103,8 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
     // Many e2e tests assume that the cluster is fully healthy before they start. Wait until
     // the cluster is restored to health.
     By("waiting for system pods to successfully restart")
-    err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, ignoreLabels)
+    err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{})
     Expect(err).NotTo(HaveOccurred())
-    By("waiting for image prepulling pods to complete")
-    framework.WaitForPodsSuccess(c, metav1.NamespaceSystem, framework.ImagePullerLabels, framework.ImagePrePullingTimeout)
   })
 
   It("should be able to delete nodes", func() {
7 changes: 0 additions & 7 deletions test/e2e/node/kubelet_perf.go
@@ -21,7 +21,6 @@ import (
   "strings"
   "time"
 
-  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
   "k8s.io/apimachinery/pkg/util/sets"
   "k8s.io/apimachinery/pkg/util/uuid"
   clientset "k8s.io/client-go/kubernetes"
@@ -199,12 +198,6 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() {
   var rm *framework.ResourceMonitor
 
   BeforeEach(func() {
-    // Wait until image prepull pod has completed so that they wouldn't
-    // affect the runtime cpu usage. Fail the test if prepulling cannot
-    // finish in time.
-    if err := framework.WaitForPodsSuccess(f.ClientSet, metav1.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingLongTimeout); err != nil {
-      framework.Failf("Image puller didn't complete in %v, not running resource usage test since the metrics might be adultrated", imagePrePullingLongTimeout)
-    }
     nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
     nodeNames = sets.NewString()
     for _, node := range nodes.Items {
5 changes: 2 additions & 3 deletions test/e2e/scheduling/equivalence_cache_predicates.go
@@ -46,7 +46,6 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
   var systemPodsNo int
   var ns string
   f := framework.NewDefaultFramework("equivalence-cache")
-  ignoreLabels := framework.ImagePullerLabels
 
   BeforeEach(func() {
     cs = f.ClientSet
@@ -60,7 +59,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
     // Every test case in this suite assumes that cluster add-on pods stay stable and
     // cannot be run in parallel with any other test that touches Nodes or Pods.
     // It is so because we need to have precise control on what's running in the cluster.
-    systemPods, err := framework.GetPodsInNamespace(cs, ns, ignoreLabels)
+    systemPods, err := framework.GetPodsInNamespace(cs, ns, map[string]string{})
     Expect(err).NotTo(HaveOccurred())
     systemPodsNo = 0
     for _, pod := range systemPods {
@@ -69,7 +68,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
       }
     }
 
-    err = framework.WaitForPodsRunningReady(cs, api.NamespaceSystem, int32(systemPodsNo), int32(systemPodsNo), framework.PodReadyBeforeTimeout, ignoreLabels)
+    err = framework.WaitForPodsRunningReady(cs, api.NamespaceSystem, int32(systemPodsNo), int32(systemPodsNo), framework.PodReadyBeforeTimeout, map[string]string{})
     Expect(err).NotTo(HaveOccurred())
 
     for _, node := range nodeList.Items {
3 changes: 1 addition & 2 deletions test/e2e/scheduling/priorities.go
@@ -63,7 +63,6 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
   var systemPodsNo int
   var ns string
   f := framework.NewDefaultFramework("sched-priority")
-  ignoreLabels := framework.ImagePullerLabels
 
   AfterEach(func() {
   })
@@ -78,7 +77,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
 
     err := framework.CheckTestingNSDeletedExcept(cs, ns)
     framework.ExpectNoError(err)
-    err = framework.WaitForPodsRunningReady(cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout, ignoreLabels)
+    err = framework.WaitForPodsRunningReady(cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout, map[string]string{})
     Expect(err).NotTo(HaveOccurred())
   })
