Update e2e tests to list only schedulable nodes, to make them work with master Node registered. #18509

Merged: 1 commit, Dec 17, 2015
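Most hunks below replace a raw c.Nodes().List(api.ListOptions{}) plus an error check with a single call to ListSchedulableNodesOrDie. The helper itself is not part of this diff excerpt; here is a minimal sketch of what it presumably looks like, assuming it lives next to expectNoError in test/e2e/util.go and reuses the spec.unschedulable field selector that appears inline in the restart.go hunk further down. The name and signature are taken from the call sites; the body and error message are assumptions:

    // Sketch, not the merged implementation: lists only Nodes the scheduler
    // may place pods on, so e2e tests keep working when the master is
    // registered as an (unschedulable) Node.
    func ListSchedulableNodesOrDie(c *client.Client) *api.NodeList {
        nodes, err := c.Nodes().List(api.ListOptions{FieldSelector: fields.Set{
            "spec.unschedulable": "false",
        }.AsSelector()})
        expectNoError(err, "failed to list schedulable nodes") // message assumed
        return nodes
    }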
1 change: 1 addition & 0 deletions test/e2e/cadvisor.go
@@ -47,6 +47,7 @@ var _ = Describe("Cadvisor", func() {
 })

 func CheckCadvisorHealthOnAllNodes(c *client.Client, timeout time.Duration) {
+    // It should be OK to list unschedulable Nodes here.
     By("getting list of nodes")
     nodeList, err := c.Nodes().List(api.ListOptions{})
     expectNoError(err)
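Not every call site switches to the helper. Tests that deliberately touch all nodes, masters included (this cadvisor health check, kubectl describe, the kubelet resource monitor, and the monitoring helpers below), keep the unfiltered List and gain only a comment recording that listing unschedulable Nodes there is intentional.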
3 changes: 1 addition & 2 deletions test/e2e/cluster_size_autoscaling.go
@@ -41,8 +41,7 @@ var _ = Describe("[Autoscaling] [Skipped]", func() {
     BeforeEach(func() {
         SkipUnlessProviderIs("gce")

-        nodes, err := f.Client.Nodes().List(api.ListOptions{})
-        expectNoError(err)
+        nodes := ListSchedulableNodesOrDie(f.Client)
         nodeCount = len(nodes.Items)
         Expect(nodeCount).NotTo(BeZero())
         cpu := nodes.Items[0].Status.Capacity[api.ResourceCPU]
7 changes: 1 addition & 6 deletions test/e2e/cluster_upgrade.go
@@ -30,8 +30,6 @@ import (

     "k8s.io/kubernetes/pkg/api"
     client "k8s.io/kubernetes/pkg/client/unversioned"
-    "k8s.io/kubernetes/pkg/fields"
-    "k8s.io/kubernetes/pkg/labels"
     "k8s.io/kubernetes/pkg/util"
     "k8s.io/kubernetes/pkg/util/wait"

@@ -354,10 +352,7 @@ func testNodeUpgrade(f *Framework, nUp func(f *Framework, n int, v string) error
 }

 func checkNodesVersions(c *client.Client, want string) error {
-    l, err := listNodes(c, labels.Everything(), fields.Everything())
-    if err != nil {
-        return fmt.Errorf("checkNodesVersions() failed to list nodes: %v", err)
-    }
+    l := ListSchedulableNodesOrDie(c)
     for _, n := range l.Items {
         // We do prefix trimming and then matching because:
         // want looks like: 0.19.3-815-g50e67d4
14 changes: 3 additions & 11 deletions test/e2e/daemon_set.go
@@ -160,8 +160,7 @@ var _ = Describe("Daemon set", func() {
         Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")

         By("Change label of node, check that daemon pod is launched.")
-        nodeClient := c.Nodes()
-        nodeList, err := nodeClient.List(api.ListOptions{})
+        nodeList := ListSchedulableNodesOrDie(f.Client)
         Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
         newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
         Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
@@ -196,11 +195,7 @@ func separateDaemonSetNodeLabels(labels map[string]string) (map[string]string, m
 }

 func clearDaemonSetNodeLabels(c *client.Client) error {
-    nodeClient := c.Nodes()
-    nodeList, err := nodeClient.List(api.ListOptions{})
-    if err != nil {
-        return err
-    }
+    nodeList := ListSchedulableNodesOrDie(c)
     for _, node := range nodeList.Items {
         _, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{})
         if err != nil {
@@ -282,10 +277,7 @@ func checkDaemonPodOnNodes(f *Framework, selector map[string]string, nodeNames [

 func checkRunningOnAllNodes(f *Framework, selector map[string]string) func() (bool, error) {
     return func() (bool, error) {
-        nodeList, err := f.Client.Nodes().List(api.ListOptions{})
-        if err != nil {
-            return false, nil
-        }
+        nodeList := ListSchedulableNodesOrDie(f.Client)
         nodeNames := make([]string, 0)
         for _, node := range nodeList.Items {
             nodeNames = append(nodeNames, node.Name)
3 changes: 1 addition & 2 deletions test/e2e/density.go
@@ -158,8 +158,7 @@ var _ = Describe("Density [Skipped]", func() {
         ns = framework.Namespace.Name
         var err error

-        nodes, err := c.Nodes().List(api.ListOptions{})
-        expectNoError(err)
+        nodes := ListSchedulableNodesOrDie(c)
         nodeCount = len(nodes.Items)
         Expect(nodeCount).NotTo(BeZero())

5 changes: 1 addition & 4 deletions test/e2e/es_cluster_logging.go
@@ -181,10 +181,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
     }

     // Obtain a list of nodes so we can place one synthetic logger on each node.
-    nodes, err := f.Client.Nodes().List(api.ListOptions{})
-    if err != nil {
-        Failf("Failed to list nodes: %v", err)
-    }
+    nodes := ListSchedulableNodesOrDie(f.Client)
     nodeCount := len(nodes.Items)
     if nodeCount == 0 {
         Failf("Failed to find any nodes")
22 changes: 11 additions & 11 deletions test/e2e/example_k8petstore.go
@@ -18,17 +18,18 @@ package e2e

 import (
     "fmt"
-    . "github.com/onsi/ginkgo"
-    . "github.com/onsi/gomega"
-    "k8s.io/kubernetes/pkg/api"
-    client "k8s.io/kubernetes/pkg/client/unversioned"
     "log"
     "os"
     "os/exec"
     "path/filepath"
     "strconv"
     "syscall"
     "time"
+
+    client "k8s.io/kubernetes/pkg/client/unversioned"
+
+    . "github.com/onsi/ginkgo"
+    . "github.com/onsi/gomega"
 )
@@ -151,17 +152,16 @@ T:

 var _ = Describe("[Example] Pet Store [Skipped]", func() {

-    // The number of minions dictates total number of generators/transaction expectations.
-    var minionCount int
+    // The number of nodes dictates total number of generators/transaction expectations.
+    var nodeCount int
     f := NewFramework("petstore")

     It(fmt.Sprintf("should scale to persist a nominal number ( %v ) of transactions in %v seconds", k8bpsSmokeTestTransactions, k8bpsSmokeTestTimeout), func() {
-        minions, err := f.Client.Nodes().List(api.ListOptions{})
-        Expect(err).NotTo(HaveOccurred())
-        minionCount = len(minions.Items)
+        nodes := ListSchedulableNodesOrDie(f.Client)
+        nodeCount = len(nodes.Items)

-        loadGenerators := minionCount
-        restServers := minionCount
+        loadGenerators := nodeCount
+        restServers := nodeCount
         fmt.Printf("load generators / rest servers [ %v / %v ] ", loadGenerators, restServers)
         runK8petstore(restServers, loadGenerators, f.Client, f.Namespace.Name, k8bpsSmokeTestTransactions, k8bpsSmokeTestTimeout)
     })
1 change: 1 addition & 0 deletions test/e2e/kubectl.go
@@ -529,6 +529,7 @@ var _ = Describe("Kubectl client", func() {
         checkOutput(output, requiredStrings)

         // Node
+        // It should be OK to list unschedulable Nodes here.
         nodes, err := c.Nodes().List(api.ListOptions{})
         Expect(err).NotTo(HaveOccurred())
         node := nodes.Items[0]
4 changes: 1 addition & 3 deletions test/e2e/kubelet.go
@@ -21,7 +21,6 @@ import (
     "strings"
     "time"

-    "k8s.io/kubernetes/pkg/api"
     client "k8s.io/kubernetes/pkg/client/unversioned"
     "k8s.io/kubernetes/pkg/util"
     "k8s.io/kubernetes/pkg/util/sets"
@@ -94,8 +93,7 @@ var _ = Describe("kubelet", func() {
     var resourceMonitor *resourceMonitor

     BeforeEach(func() {
-        nodes, err := framework.Client.Nodes().List(api.ListOptions{})
-        expectNoError(err)
+        nodes := ListSchedulableNodesOrDie(framework.Client)
         numNodes = len(nodes.Items)
         nodeNames = sets.NewString()
         for _, node := range nodes.Items {
1 change: 1 addition & 0 deletions test/e2e/kubelet_perf.go
@@ -144,6 +144,7 @@ var _ = Describe("Kubelet", func() {
     var rm *resourceMonitor

     BeforeEach(func() {
+        // It should be OK to list unschedulable Nodes here.
         nodes, err := framework.Client.Nodes().List(api.ListOptions{})
         expectNoError(err)
         nodeNames = sets.NewString()
1 change: 1 addition & 0 deletions test/e2e/kubelet_stats.go
@@ -685,6 +685,7 @@ func newResourceMonitor(c *client.Client, containerNames []string, pollingInterv
 }

 func (r *resourceMonitor) Start() {
+    // It should be OK to monitor unschedulable Nodes
     nodes, err := r.client.Nodes().List(api.ListOptions{})
     if err != nil {
         Failf("resourceMonitor: unable to get list of nodes: %v", err)
6 changes: 2 additions & 4 deletions test/e2e/kubeproxy.go
@@ -429,8 +429,7 @@ func (config *KubeProxyTestConfig) setup() {
     }

     By("Getting node addresses")
-    nodeList, err := config.f.Client.Nodes().List(api.ListOptions{})
-    Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to get node list: %v", err))
+    nodeList := ListSchedulableNodesOrDie(config.f.Client)
     config.externalAddrs = NodeAddresses(nodeList, api.NodeExternalIP)
     if len(config.externalAddrs) < 2 {
         // fall back to legacy IPs
@@ -468,8 +467,7 @@ func (config *KubeProxyTestConfig) cleanup() {
 }

 func (config *KubeProxyTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod {
-    nodes, err := config.f.Client.Nodes().List(api.ListOptions{})
-    Expect(err).NotTo(HaveOccurred())
+    nodes := ListSchedulableNodesOrDie(config.f.Client)

     // create pods, one for each node
     createdPods := make([]*api.Pod, 0, len(nodes.Items))
4 changes: 1 addition & 3 deletions test/e2e/latency.go
@@ -67,10 +67,8 @@ var _ = Describe("Latency [Skipped]", func() {
     BeforeEach(func() {
         c = framework.Client
         ns = framework.Namespace.Name
-        var err error

-        nodes, err := c.Nodes().List(api.ListOptions{})
-        expectNoError(err)
+        nodes := ListSchedulableNodesOrDie(framework.Client)
         nodeCount = len(nodes.Items)
         Expect(nodeCount).NotTo(BeZero())

5 changes: 2 additions & 3 deletions test/e2e/load.go
@@ -72,15 +72,14 @@ var _ = Describe("Load capacity [Skipped]", func() {
     BeforeEach(func() {
         c = framework.Client
         ns = framework.Namespace.Name
-        nodes, err := c.Nodes().List(api.ListOptions{})
-        expectNoError(err)
+        nodes := ListSchedulableNodesOrDie(c)
         nodeCount = len(nodes.Items)
         Expect(nodeCount).NotTo(BeZero())

         // Terminating a namespace (deleting the remaining objects from it - which
         // generally means events) can affect the current run. Thus we wait for all
         // terminating namespace to be finally deleted before starting this test.
-        err = checkTestingNSDeletedExcept(c, ns)
+        err := checkTestingNSDeletedExcept(c, ns)
         expectNoError(err)

         expectNoError(resetMetrics(c))
3 changes: 1 addition & 2 deletions test/e2e/mesos.go
@@ -64,8 +64,7 @@ var _ = Describe("Mesos", func() {
         client := framework.Client
         expectNoError(allNodesReady(client, util.ForeverTestTimeout), "all nodes ready")

-        nodelist, err := client.Nodes().List(api.ListOptions{})
-        expectNoError(err, "nodes fetched from apiserver")
+        nodelist := ListSchedulableNodesOrDie(framework.Client)

         const ns = "static-pods"
         numpods := len(nodelist.Items)
1 change: 1 addition & 0 deletions test/e2e/monitor_resources.go
@@ -82,6 +82,7 @@ var _ = Describe("Resource usage of system containers", func() {

     It("should not exceed expected amount.", func() {
         By("Getting ResourceConsumption on all nodes")
+        // It should be OK to list unschedulable Nodes here.
         nodeList, err := c.Nodes().List(api.ListOptions{})
         expectNoError(err)

1 change: 1 addition & 0 deletions test/e2e/monitoring.go
@@ -123,6 +123,7 @@ func expectedServicesExist(c *client.Client) error {
 }

 func getAllNodesInCluster(c *client.Client) ([]string, error) {
+    // It should be OK to list unschedulable Nodes here.
     nodeList, err := c.Nodes().List(api.ListOptions{})
     if err != nil {
         return nil, err
5 changes: 1 addition & 4 deletions test/e2e/networking.go
@@ -136,10 +136,7 @@ var _ = Describe("Networking", func() {

         By("Creating a webserver (pending) pod on each node")

-        nodes, err := f.Client.Nodes().List(api.ListOptions{})
-        if err != nil {
-            Failf("Failed to list nodes: %v", err)
-        }
+        nodes := ListSchedulableNodesOrDie(f.Client)
         // previous tests may have cause failures of some nodes. Let's skip
         // 'Not Ready' nodes, just in case (there is no need to fail the test).
         filterNodes(nodes, func(node api.Node) bool {
10 changes: 3 additions & 7 deletions test/e2e/nodeoutofdisk.go
@@ -26,7 +26,6 @@ import (
     "k8s.io/kubernetes/pkg/api/resource"
     client "k8s.io/kubernetes/pkg/client/unversioned"
     "k8s.io/kubernetes/pkg/fields"
-    "k8s.io/kubernetes/pkg/labels"
     "k8s.io/kubernetes/pkg/util/wait"

     . "github.com/onsi/ginkgo"
@@ -73,8 +72,7 @@ var _ = Describe("NodeOutOfDisk", func() {
         framework.beforeEach()
         c = framework.Client

-        nodelist, err := listNodes(c, labels.Everything(), fields.Everything())
-        expectNoError(err, "Error retrieving nodes")
+        nodelist := ListSchedulableNodesOrDie(c)
         Expect(len(nodelist.Items)).To(BeNumerically(">", 1))

         unfilledNodeName = nodelist.Items[0].Name
@@ -86,8 +84,7 @@ var _ = Describe("NodeOutOfDisk", func() {
     AfterEach(func() {
         defer framework.afterEach()

-        nodelist, err := listNodes(c, labels.Everything(), fields.Everything())
-        expectNoError(err, "Error retrieving nodes")
+        nodelist := ListSchedulableNodesOrDie(c)
         Expect(len(nodelist.Items)).ToNot(BeZero())
         for _, node := range nodelist.Items {
             if unfilledNodeName == node.Name || recoveredNodeName == node.Name {
@@ -150,8 +147,7 @@ var _ = Describe("NodeOutOfDisk", func() {
         }
     })

-    nodelist, err := listNodes(c, labels.Everything(), fields.Everything())
-    expectNoError(err, "Error retrieving nodes")
+    nodelist := ListSchedulableNodesOrDie(c)
     Expect(len(nodelist.Items)).To(BeNumerically(">", 1))

     nodeToRecover := nodelist.Items[1]
4 changes: 1 addition & 3 deletions test/e2e/pd.go
@@ -52,9 +52,7 @@ var _ = Describe("Pod Disks", func() {
         SkipUnlessNodeCountIsAtLeast(2)

         podClient = framework.Client.Pods(framework.Namespace.Name)
-
-        nodes, err := framework.Client.Nodes().List(api.ListOptions{})
-        expectNoError(err, "Failed to list nodes for e2e cluster.")
+        nodes := ListSchedulableNodesOrDie(framework.Client)

         Expect(len(nodes.Items)).To(BeNumerically(">=", 2), "Requires at least 2 nodes")

6 changes: 2 additions & 4 deletions test/e2e/proxy.go
@@ -250,10 +250,8 @@ func truncate(b []byte, maxLen int) []byte {
 }

 func pickNode(c *client.Client) (string, error) {
-    nodes, err := c.Nodes().List(api.ListOptions{})
-    if err != nil {
-        return "", err
-    }
+    // TODO: investigate why it doesn't work on master Node.
+    nodes := ListSchedulableNodesOrDie(c)
     if len(nodes.Items) == 0 {
         return "", fmt.Errorf("no nodes exist, can't test node proxy")
     }
5 changes: 1 addition & 4 deletions test/e2e/reboot.go
@@ -115,10 +115,7 @@ var _ = Describe("Reboot", func() {

 func testReboot(c *client.Client, rebootCmd string) {
     // Get all nodes, and kick off the test on each.
-    nodelist, err := listNodes(c, labels.Everything(), fields.Everything())
-    if err != nil {
-        Failf("Error getting nodes: %v", err)
-    }
+    nodelist := ListSchedulableNodesOrDie(c)
     result := make([]bool, len(nodelist.Items))
     wg := sync.WaitGroup{}
     wg.Add(len(nodelist.Items))
6 changes: 3 additions & 3 deletions test/e2e/restart.go
@@ -161,11 +161,11 @@ func checkNodesReady(c *client.Client, nt time.Duration, expect int) ([]string,
     var errLast error
     start := time.Now()
     found := wait.Poll(poll, nt, func() (bool, error) {
-        // Even though listNodes(...) has its own retries, a rolling-update
-        // (GCE/GKE implementation of restart) can complete before the apiserver
+        // A rolling-update (GCE/GKE implementation of restart) can complete before the apiserver
         // knows about all of the nodes. Thus, we retry the list nodes call
         // until we get the expected number of nodes.
-        nodeList, errLast = listNodes(c, labels.Everything(), fields.Everything())
+        nodeList, errLast = c.Nodes().List(api.ListOptions{
+            FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector()})
         if errLast != nil {
             return false, nil
         }
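restart.go is the one caller that inlines the spec.unschedulable selector instead of adopting the helper: checkNodesReady runs the List inside wait.Poll and deliberately swallows transient errors (return false, nil) until the expected node count appears, so a helper that aborts the test on the first failed call would defeat the retry loop.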
4 changes: 1 addition & 3 deletions test/e2e/scheduler_predicates.go
@@ -194,9 +194,7 @@ var _ = Describe("SchedulerPredicates", func() {
     BeforeEach(func() {
         c = framework.Client
         ns = framework.Namespace.Name
-        var err error
-        nodeList, err = c.Nodes().List(api.ListOptions{})
-        expectNoError(err)
+        nodeList = ListSchedulableNodesOrDie(c)
     })

     // This test verifies that max-pods flag works as advertised. It assumes that cluster add-on pods stay stable
5 changes: 1 addition & 4 deletions test/e2e/service.go
@@ -1102,10 +1102,7 @@ func collectAddresses(nodes *api.NodeList, addressType api.NodeAddressType) []st
 }

 func getNodePublicIps(c *client.Client) ([]string, error) {
-    nodes, err := c.Nodes().List(api.ListOptions{})
-    if err != nil {
-        return nil, err
-    }
+    nodes := ListSchedulableNodesOrDie(c)

     ips := collectAddresses(nodes, api.NodeExternalIP)
     if len(ips) == 0 {