Remove special-case handling of master label nodes in e2e
liggitt committed Apr 16, 2019
1 parent 440d086 commit a7119c2
Showing 24 changed files with 44 additions and 41 deletions.
16 changes: 8 additions & 8 deletions pkg/controller/service/service_controller.go
@@ -24,7 +24,7 @@ import (

"reflect"

- "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"

@@ -61,13 +61,13 @@ const (
clientRetryCount = 5
clientRetryInterval = 5 * time.Second

- // LabelNodeRoleMaster specifies that a node is a master
- // It's copied over to kubeadm until it's merged in core: https://github.com/kubernetes/kubernetes/pull/39112
- LabelNodeRoleMaster = "node-role.kubernetes.io/master"
+ // labelNodeRoleMaster specifies that a node is a master
+ // TODO: remove this special-case handling, see https://issue.k8s.io/65618
+ labelNodeRoleMaster = "node-role.kubernetes.io/master"

- // LabelNodeRoleExcludeBalancer specifies that the node should be
+ // labelNodeRoleExcludeBalancer specifies that the node should be
// exclude from load balancers created by a cloud provider.
- LabelNodeRoleExcludeBalancer = "alpha.service-controller.kubernetes.io/exclude-balancer"
+ labelNodeRoleExcludeBalancer = "alpha.service-controller.kubernetes.io/exclude-balancer"
)

type cachedService struct {

@@ -595,12 +595,12 @@ func getNodeConditionPredicate() corelisters.NodeConditionPredicate {

// As of 1.6, we will taint the master, but not necessarily mark it unschedulable.
// Recognize nodes labeled as master, and filter them also, as we were doing previously.
- if _, hasMasterRoleLabel := node.Labels[LabelNodeRoleMaster]; hasMasterRoleLabel {
+ if _, hasMasterRoleLabel := node.Labels[labelNodeRoleMaster]; hasMasterRoleLabel {
return false
}

if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.ServiceNodeExclusion) {
- if _, hasExcludeBalancerLabel := node.Labels[LabelNodeRoleExcludeBalancer]; hasExcludeBalancerLabel {
+ if _, hasExcludeBalancerLabel := node.Labels[labelNodeRoleExcludeBalancer]; hasExcludeBalancerLabel {
return false
}
}
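
For context on the constants renamed above: the exclude-balancer label is meant to be set on Node objects by cluster operators or provisioning tooling, and the service controller only honors it when the ServiceNodeExclusion feature gate is enabled. Only the presence of the key is checked, so any value works. A minimal client-go sketch of applying it is below; it is illustrative only and not part of this commit, the node name "node-1" and the kubeconfig path are placeholder assumptions, and the Patch call uses the context-free signature of client-go releases from this era (newer releases also take a context.Context).

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/types"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        // Build a clientset from the default kubeconfig location (placeholder setup).
        cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
        if err != nil {
            panic(err)
        }
        client := kubernetes.NewForConfigOrDie(cfg)

        // Strategic merge patch that only adds the exclude-balancer label;
        // the controller checks for the key, so the value is arbitrary.
        patch := []byte(`{"metadata":{"labels":{"alpha.service-controller.kubernetes.io/exclude-balancer":"true"}}}`)
        if _, err := client.CoreV1().Nodes().Patch("node-1", types.StrategicMergePatchType, patch); err != nil {
            panic(err)
        }
        fmt.Println("node-1 will be skipped when building cloud load balancer target pools")
    }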
2 changes: 1 addition & 1 deletion test/e2e/autoscaling/cluster_size_autoscaling.go
@@ -376,7 +376,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
// We wait for nodes to become schedulable to make sure the new nodes
// will be returned by getPoolNodes below.
- framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, resizeTimeout))
+ framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableSelector, resizeTimeout))
klog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).")

By("Getting memory available on new nodes, so we can account for it when creating RC")
2 changes: 1 addition & 1 deletion test/e2e/e2e.go
@@ -101,7 +101,7 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
// In large clusters we may get to this point but still have a bunch
// of nodes without Routes created. Since this would make a node
// unschedulable, we need to wait until all of them are schedulable.
- framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
+ framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableSelector, framework.TestContext.NodeSchedulableTimeout))

// If NumNodes is not specified then auto-detect how many are scheduleable and not tainted
if framework.TestContext.CloudConfig.NumNodes == framework.DefaultNumNodes {
1 change: 0 additions & 1 deletion test/e2e/framework/BUILD
@@ -46,7 +46,6 @@ go_library(
"//pkg/controller:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
"//pkg/controller/nodelifecycle:go_default_library",
- "//pkg/controller/service:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/apis/config:go_default_library",
"//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
4 changes: 2 additions & 2 deletions test/e2e/framework/networking_utils.go
@@ -572,7 +572,7 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
config.setupCore(selector)

ginkgo.By("Getting node addresses")
- ExpectNoError(WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
+ ExpectNoError(WaitForAllNodesSchedulable(config.f.ClientSet, TestContext.NodeSchedulableSelector, 10*time.Minute))
nodeList := GetReadySchedulableNodesOrDie(config.f.ClientSet)
config.ExternalAddrs = NodeAddresses(nodeList, v1.NodeExternalIP)

@@ -626,7 +626,7 @@ func shuffleNodes(nodes []v1.Node) []v1.Node {
}

func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector map[string]string) []*v1.Pod {
- ExpectNoError(WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
+ ExpectNoError(WaitForAllNodesSchedulable(config.f.ClientSet, TestContext.NodeSchedulableSelector, 10*time.Minute))
nodeList := GetReadySchedulableNodesOrDie(config.f.ClientSet)

// To make this test work reasonably fast in large clusters,
7 changes: 5 additions & 2 deletions test/e2e/framework/test_context.go
@@ -131,7 +131,9 @@ type TestContextType struct {
IncludeClusterAutoscalerMetrics bool
// Currently supported values are 'hr' for human-readable and 'json'. It's a comma separated list.
OutputPrintType string
- // NodeSchedulableTimeout is the timeout for waiting for all nodes to be schedulable.
+ // NodeSchedulableSelector is the label selector of the nodes we should wait to be scheduleable. If empty, wait for all nodes.
+ NodeSchedulableSelector string
+ // NodeSchedulableTimeout is the timeout for waiting for all nodes matching NodeSchedulableSelector to be schedulable.
NodeSchedulableTimeout time.Duration
// SystemDaemonsetStartupTimeout is the timeout for waiting for all system daemonsets to be ready.
SystemDaemonsetStartupTimeout time.Duration

@@ -313,7 +315,8 @@ func RegisterClusterFlags() {
flag.StringVar(&cloudConfig.ConfigFile, "cloud-config-file", "", "Cloud config file. Only required if provider is azure.")
flag.IntVar(&TestContext.MinStartupPods, "minStartupPods", 0, "The number of pods which we need to see in 'Running' state with a 'Ready' condition of true, before we try running tests. This is useful in any cluster which needs some base pod-based services running before it can be used.")
flag.DurationVar(&TestContext.SystemPodsStartupTimeout, "system-pods-startup-timeout", 10*time.Minute, "Timeout for waiting for all system pods to be running before starting tests.")
- flag.DurationVar(&TestContext.NodeSchedulableTimeout, "node-schedulable-timeout", 30*time.Minute, "Timeout for waiting for all nodes to be schedulable.")
+ flag.DurationVar(&TestContext.NodeSchedulableTimeout, "node-schedulable-timeout", 30*time.Minute, "Timeout for waiting for nodes to be schedulable.")
+ flag.StringVar(&TestContext.NodeSchedulableSelector, "node-schedulable-selector", TestContext.NodeSchedulableSelector, "Label selector for nodes to wait to be scheduleable. If empty, wait for all nodes.")
flag.DurationVar(&TestContext.SystemDaemonsetStartupTimeout, "system-daemonsets-startup-timeout", 5*time.Minute, "Timeout for waiting for all system daemonsets to be ready.")
flag.StringVar(&TestContext.EtcdUpgradeStorage, "etcd-upgrade-storage", "", "The storage version to upgrade to (either 'etcdv2' or 'etcdv3') if doing an etcd upgrade test.")
flag.StringVar(&TestContext.EtcdUpgradeVersion, "etcd-upgrade-version", "", "The etcd binary version to upgrade to (e.g., '3.0.14', '2.3.7') if doing an etcd upgrade test.")
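
The new --node-schedulable-selector flag is what stands in for the special case removed from util.go below: clusters that leave masters schedulable in spec but taint them with node-role.kubernetes.io/master NoSchedule (the kops pattern described in the deleted comment) can pass a selector that excludes the master label. A short sketch of the selector semantics, assuming the example value "!node-role.kubernetes.io/master" (this commit does not wire up any default):

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/labels"
    )

    func main() {
        // Example value for --node-schedulable-selector; "!key" matches objects
        // that do not carry the label at all.
        sel, err := labels.Parse("!node-role.kubernetes.io/master")
        if err != nil {
            panic(err)
        }

        master := labels.Set{"node-role.kubernetes.io/master": ""}
        worker := labels.Set{"kubernetes.io/hostname": "worker-1"}

        fmt.Println(sel.Matches(master)) // false: master-labeled nodes are not waited on
        fmt.Println(sel.Matches(worker)) // true: all other nodes still are
    }

The same string is handed to WaitForAllNodesSchedulable and ends up in the LabelSelector field of the node List call (see the util.go hunk below), so the filtering happens server-side rather than as a hard-coded client-side skip.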
19 changes: 10 additions & 9 deletions test/e2e/framework/util.go
@@ -83,7 +83,6 @@ import (
"k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/pkg/controller"
nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle"
- "k8s.io/kubernetes/pkg/controller/service"
"k8s.io/kubernetes/pkg/features"
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
"k8s.io/kubernetes/pkg/kubelet/util/format"

@@ -2727,8 +2726,15 @@ func GetReadyNodesIncludingTaintedOrDie(c clientset.Interface) (nodes *v1.NodeLi

// WaitForAllNodesSchedulable waits up to timeout for all
// (but TestContext.AllowedNotReadyNodes) to become scheduable.
- func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error {
- Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, TestContext.AllowedNotReadyNodes)
+ func WaitForAllNodesSchedulable(c clientset.Interface, labelSelector string, timeout time.Duration) error {
+ match := "all nodes"
+ if len(labelSelector) > 0 {
+ match += fmt.Sprintf(" matching %q", labelSelector)
+ }
+ if TestContext.AllowedNotReadyNodes > 0 {
+ match += fmt.Sprintf(" (but %d)", TestContext.AllowedNotReadyNodes)
+ }
+ Logf("Waiting up to %v for %s to be schedulable", timeout, match)

var notSchedulable []*v1.Node
attempt := 0

@@ -2737,6 +2743,7 @@ func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) er
notSchedulable = nil
opts := metav1.ListOptions{
ResourceVersion: "0",
+ LabelSelector: labelSelector,
FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String(),
}
nodes, err := c.CoreV1().Nodes().List(opts)

@@ -2749,12 +2756,6 @@ func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) er
}
for i := range nodes.Items {
node := &nodes.Items[i]
- if _, hasMasterRoleLabel := node.ObjectMeta.Labels[service.LabelNodeRoleMaster]; hasMasterRoleLabel {
- // Kops clusters have masters with spec.unscheduable = false and
- // node-role.kubernetes.io/master NoSchedule taint.
- // Don't wait for them.
- continue
- }
if !isNodeSchedulable(node) || !isNodeUntainted(node) {
notSchedulable = append(notSchedulable, node)
}
2 changes: 1 addition & 1 deletion test/e2e/network/dns_scale_records.go
@@ -43,7 +43,7 @@ var _ = SIGDescribe("[Feature:PerformanceDNS][Serial]", func() {
f := framework.NewDefaultFramework("performancedns")

BeforeEach(func() {
- framework.ExpectNoError(framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout))
+ framework.ExpectNoError(framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableSelector, framework.TestContext.NodeSchedulableTimeout))
framework.WaitForAllNodesHealthy(f.ClientSet, time.Minute)

err := framework.CheckTestingNSDeletedExcept(f.ClientSet, f.Namespace.Name)
2 changes: 1 addition & 1 deletion test/e2e/storage/flexvolume_mounted_volume_resize.go
@@ -55,7 +55,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
framework.SkipUnlessSSHKeyPresent()
c = f.ClientSet
ns = f.Namespace.Name
- framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
+ framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableSelector, framework.TestContext.NodeSchedulableTimeout))

nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
if len(nodeList.Items) != 0 {
2 changes: 1 addition & 1 deletion test/e2e/storage/flexvolume_online_resize.go
@@ -55,7 +55,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa
framework.SkipUnlessSSHKeyPresent()
c = f.ClientSet
ns = f.Namespace.Name
- framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
+ framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableSelector, framework.TestContext.NodeSchedulableTimeout))

nodeList = framework.GetReadySchedulableNodesOrDie(f.ClientSet)
if len(nodeList.Items) == 0 {
2 changes: 1 addition & 1 deletion test/e2e/storage/mounted_volume_resize.go
@@ -54,7 +54,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
framework.SkipUnlessProviderIs("aws", "gce")
c = f.ClientSet
ns = f.Namespace.Name
- framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
+ framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableSelector, framework.TestContext.NodeSchedulableTimeout))

nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
if len(nodeList.Items) != 0 {
2 changes: 1 addition & 1 deletion test/e2e/storage/pd.go
@@ -443,7 +443,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {

func countReadyNodes(c clientset.Interface, hostName types.NodeName) int {
framework.WaitForNodeToBeReady(c, string(hostName), nodeStatusTimeout)
- framework.WaitForAllNodesSchedulable(c, nodeStatusTimeout)
+ framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableSelector, nodeStatusTimeout)
nodes := framework.GetReadySchedulableNodesOrDie(c)
return len(nodes.Items)
}
2 changes: 1 addition & 1 deletion test/e2e/storage/pv_protection.go
@@ -50,7 +50,7 @@ var _ = utils.SIGDescribe("PV Protection", func() {
BeforeEach(func() {
client = f.ClientSet
nameSpace = f.Namespace.Name
- framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
+ framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableSelector, framework.TestContext.NodeSchedulableTimeout))

// Enforce binding only within test space via selector labels
volLabel = labels.Set{framework.VolumeSelectorKey: nameSpace}
2 changes: 1 addition & 1 deletion test/e2e/storage/pvc_protection.go
@@ -44,7 +44,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
BeforeEach(func() {
client = f.ClientSet
nameSpace = f.Namespace.Name
- framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
+ framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableSelector, framework.TestContext.NodeSchedulableTimeout))

By("Creating a PVC")
suffix := "pvc-protection"
2 changes: 1 addition & 1 deletion test/e2e/storage/volume_expand.go
@@ -54,7 +54,7 @@ var _ = utils.SIGDescribe("Volume expand", func() {
framework.SkipUnlessProviderIs("aws", "gce")
c = f.ClientSet
ns = f.Namespace.Name
- framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
+ framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableSelector, framework.TestContext.NodeSchedulableTimeout))
test := testsuites.StorageClassTest{
Name: "default",
ClaimSize: "2Gi",
2 changes: 1 addition & 1 deletion test/e2e/storage/volume_limits.go
@@ -33,7 +33,7 @@ var _ = utils.SIGDescribe("Volume limits", func() {
BeforeEach(func() {
framework.SkipUnlessProviderIs("aws", "gce", "gke")
c = f.ClientSet
- framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
+ framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableSelector, framework.TestContext.NodeSchedulableTimeout))
})

It("should verify that all nodes have volume limits", func() {
2 changes: 1 addition & 1 deletion test/e2e/storage/vsphere/pv_reclaimpolicy.go
@@ -44,7 +44,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
- framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
+ framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableSelector, framework.TestContext.NodeSchedulableTimeout))
})

utils.SIGDescribe("persistentvolumereclaim:vsphere", func() {
2 changes: 1 addition & 1 deletion test/e2e/storage/vsphere/pvc_label_selector.go
@@ -63,7 +63,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
ns = f.Namespace.Name
Bootstrap(f)
nodeInfo = GetReadySchedulableRandomNodeInfo()
- framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
+ framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableSelector, framework.TestContext.NodeSchedulableTimeout))
ssdlabels = make(map[string]string)
ssdlabels["volume-type"] = "ssd"
vvollabels = make(map[string]string)
2 changes: 1 addition & 1 deletion test/e2e/storage/vsphere/vsphere_volume_master_restart.go
@@ -60,7 +60,7 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
- framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
+ framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableSelector, framework.TestContext.NodeSchedulableTimeout))

nodes := framework.GetReadySchedulableNodesOrDie(client)
numNodes = len(nodes.Items)
2 changes: 1 addition & 1 deletion test/e2e/storage/vsphere/vsphere_volume_node_delete.go
@@ -43,7 +43,7 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive]
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
- framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
+ framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableSelector, framework.TestContext.NodeSchedulableTimeout))
framework.ExpectNoError(err)
workingDir = os.Getenv("VSPHERE_WORKING_DIR")
Expect(workingDir).NotTo(BeEmpty())
2 changes: 1 addition & 1 deletion test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go
@@ -52,7 +52,7 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
- framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
+ framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableSelector, framework.TestContext.NodeSchedulableTimeout))
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
Expect(len(nodeList.Items) > 1).To(BeTrue(), "At least 2 nodes are required for this test")
2 changes: 1 addition & 1 deletion test/e2e/storage/vsphere/vsphere_volume_placement.go
@@ -52,7 +52,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
Bootstrap(f)
c = f.ClientSet
ns = f.Namespace.Name
- framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
+ framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableSelector, framework.TestContext.NodeSchedulableTimeout))
if !isNodeLabeled {
node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel = testSetupVolumePlacement(c, ns)
isNodeLabeled = true
2 changes: 1 addition & 1 deletion test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go
@@ -74,7 +74,7 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
- framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
+ framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableSelector, framework.TestContext.NodeSchedulableTimeout))

nodes := framework.GetReadySchedulableNodesOrDie(client)
numNodes := len(nodes.Items)
2 changes: 1 addition & 1 deletion test/e2e_node/gpu_device_plugin.go
@@ -93,7 +93,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi

By("Restarting Kubelet and creating another pod")
restartKubelet()
- framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout)
+ framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableSelector, framework.TestContext.NodeSchedulableTimeout)
Eventually(func() bool {
return gpu.NumberOfNVIDIAGPUs(getLocalNode(f)) > 0
}, 5*time.Minute, framework.Poll).Should(BeTrue())
