
Move node related methods to framework/node package #78282

Merged (1 commit) on Jun 18, 2019
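The change is a mechanical refactor: node helpers move out of the catch-all framework package into test/e2e/framework/node (imported as e2enode), and the Node/Nodes part of each helper name is dropped, e.g. framework.FilterNodes becomes e2enode.Filter. Below is a minimal sketch of a call site after the move; the helper names and signatures follow the usage visible in the diff, while the surrounding package and function are illustrative only.

```go
// Minimal sketch of a call site after this PR. Helper names and signatures
// follow the usage shown in the diff below; the package and function here
// are illustrative, not part of the PR.
package example

import (
	"time"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"

	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

func checkNodes(c clientset.Interface, node *v1.Node) error {
	// framework.NumberOfRegisteredNodes -> e2enode.TotalRegistered
	numNodes, err := e2enode.TotalRegistered(c)
	if err != nil {
		return err
	}
	// framework.IsNodeConditionSetAsExpected -> e2enode.IsConditionSetAsExpected
	_ = e2enode.IsConditionSetAsExpected(node, v1.NodeReady, true)
	// framework.GetNodeExternalIP -> e2enode.GetExternalIP
	if _, err := e2enode.GetExternalIP(node); err != nil {
		return err
	}
	// framework.WaitForReadyNodes keeps its name; only the package changes.
	return e2enode.WaitForReadyNodes(c, numNodes, 15*time.Minute)
}
```

The rest of the diff is the same one-line substitution applied across the e2e suites, plus the new //test/e2e/framework/node:go_default_library dependency in each affected BUILD file.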
1 change: 1 addition & 0 deletions test/e2e/apps/BUILD
@@ -65,6 +65,7 @@ go_library(
"//test/e2e/framework/deployment:go_default_library",
"//test/e2e/framework/job:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/replicaset:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
31 changes: 16 additions & 15 deletions test/e2e/apps/network_partition.go
@@ -38,6 +38,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
jobutil "k8s.io/kubernetes/test/e2e/framework/job"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/utils"

@@ -59,7 +60,7 @@ func expectNodeReadiness(isReady bool, newNode chan *v1.Node) {
for !expected && !timeout {
select {
case n := <-newNode:
- if framework.IsNodeConditionSetAsExpected(n, v1.NodeReady, isReady) {
+ if e2enode.IsConditionSetAsExpected(n, v1.NodeReady, isReady) {
expected = true
} else {
e2elog.Logf("Observed node ready status is NOT %v as expected", isReady)
@@ -142,8 +143,8 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
nodeOpts := metav1.ListOptions{}
nodes, err := c.CoreV1().Nodes().List(nodeOpts)
framework.ExpectNoError(err)
- framework.FilterNodes(nodes, func(node v1.Node) bool {
- if !framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) {
+ e2enode.Filter(nodes, func(node v1.Node) bool {
+ if !e2enode.IsConditionSetAsExpected(&node, v1.NodeReady, true) {
return false
}
podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
@@ -199,7 +200,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
go controller.Run(stopCh)

ginkgo.By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
- host, err := framework.GetNodeExternalIP(&node)
+ host, err := e2enode.GetExternalIP(&node)
framework.ExpectNoError(err)
masterAddresses := framework.GetAllMasterAddresses(c)
defer func() {
@@ -240,7 +241,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
name := "my-hostname-net"
common.NewSVCByName(c, ns, name)
- numNodes, err := framework.NumberOfRegisteredNodes(f.ClientSet)
+ numNodes, err := e2enode.TotalRegistered(f.ClientSet)
framework.ExpectNoError(err)
replicas := int32(numNodes)
common.NewRCByName(c, ns, name, replicas, nil)
@@ -274,7 +275,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
})

e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
- if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
+ if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
}

@@ -307,7 +308,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
gracePeriod := int64(30)

common.NewSVCByName(c, ns, name)
- numNodes, err := framework.NumberOfRegisteredNodes(f.ClientSet)
+ numNodes, err := e2enode.TotalRegistered(f.ClientSet)
framework.ExpectNoError(err)
replicas := int32(numNodes)
common.NewRCByName(c, ns, name, replicas, &gracePeriod)
@@ -341,7 +342,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
})

e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
- if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
+ if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
}
})
@@ -382,9 +383,9 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {

pst := framework.NewStatefulSetTester(c)

- nn, err := framework.NumberOfRegisteredNodes(f.ClientSet)
+ nn, err := e2enode.TotalRegistered(f.ClientSet)
framework.ExpectNoError(err)
- nodes, err := framework.CheckNodesReady(f.ClientSet, nn, framework.NodeReadyInitialTimeout)
+ nodes, err := e2enode.CheckReady(f.ClientSet, nn, framework.NodeReadyInitialTimeout)
framework.ExpectNoError(err)
common.RestartNodes(f.ClientSet, nodes)

@@ -414,7 +415,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
})

e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
- if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
+ if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
}

@@ -462,7 +463,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
})

e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
- if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
+ if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
}
})
@@ -485,8 +486,8 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
ginkgo.By("choose a node - we will block all network traffic on this node")
var podOpts metav1.ListOptions
nodes := framework.GetReadySchedulableNodesOrDie(c)
- framework.FilterNodes(nodes, func(node v1.Node) bool {
- if !framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) {
+ e2enode.Filter(nodes, func(node v1.Node) bool {
+ if !e2enode.IsConditionSetAsExpected(&node, v1.NodeReady, true) {
return false
}
podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
@@ -581,7 +582,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
go controller.Run(stopCh)

ginkgo.By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
- host, err := framework.GetNodeExternalIP(&node)
+ host, err := e2enode.GetExternalIP(&node)
framework.ExpectNoError(err)
masterAddresses := framework.GetAllMasterAddresses(c)
defer func() {
1 change: 1 addition & 0 deletions test/e2e/auth/BUILD
@@ -57,6 +57,7 @@ go_library(
"//test/e2e/framework/deployment:go_default_library",
"//test/e2e/framework/job:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
5 changes: 3 additions & 2 deletions test/e2e/auth/node_authn.go
@@ -27,6 +27,7 @@ import (

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {
@@ -42,9 +43,9 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {
gomega.Expect(len(nodeList.Items)).NotTo(gomega.BeZero())

pickedNode := nodeList.Items[0]
- nodeIPs = framework.GetNodeAddresses(&pickedNode, v1.NodeExternalIP)
+ nodeIPs = e2enode.GetAddresses(&pickedNode, v1.NodeExternalIP)
// The pods running in the cluster can see the internal addresses.
- nodeIPs = append(nodeIPs, framework.GetNodeAddresses(&pickedNode, v1.NodeInternalIP)...)
+ nodeIPs = append(nodeIPs, e2enode.GetAddresses(&pickedNode, v1.NodeInternalIP)...)

// make sure ServiceAccount admission controller is enabled, so secret generation on SA creation works
saName := "default"
1 change: 1 addition & 0 deletions test/e2e/autoscaling/BUILD
@@ -41,6 +41,7 @@ go_library(
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/instrumentation/monitoring:go_default_library",
"//test/e2e/scheduling:go_default_library",
3 changes: 2 additions & 1 deletion test/e2e/autoscaling/autoscaling_timer.go
@@ -24,6 +24,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
@@ -73,7 +74,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
if len(nodeGroupName) > 0 {
// Scale down back to only 'nodesNum' nodes, as expected at the start of the test.
framework.ExpectNoError(framework.ResizeGroup(nodeGroupName, nodesNum))
- framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, nodesNum, 15*time.Minute))
+ framework.ExpectNoError(e2enode.WaitForReadyNodes(f.ClientSet, nodesNum, 15*time.Minute))
}
})

15 changes: 8 additions & 7 deletions test/e2e/autoscaling/cluster_autoscaler_scalability.go
@@ -30,6 +30,7 @@ import (
"k8s.io/apimachinery/pkg/util/strategicpatch"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"

@@ -87,7 +88,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
}
}

- framework.ExpectNoError(framework.WaitForReadyNodes(c, sum, scaleUpTimeout))
+ framework.ExpectNoError(e2enode.WaitForReadyNodes(c, sum, scaleUpTimeout))

nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodeCount = len(nodes.Items)
@@ -112,7 +113,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
ginkgo.AfterEach(func() {
ginkgo.By(fmt.Sprintf("Restoring initial size of the cluster"))
setMigSizes(originalSizes)
- framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount, scaleDownTimeout))
+ framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount, scaleDownTimeout))
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err)
s := time.Now()
@@ -214,7 +215,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
anyKey(originalSizes): totalNodes,
}
setMigSizes(newSizes)
- framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout))
+ framework.ExpectNoError(e2enode.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout))

// run replicas
rcConfig := reserveMemoryRCConfig(f, "some-pod", replicas, replicas*perNodeReservation, largeScaleUpTimeout)
@@ -248,7 +249,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
}
setMigSizes(newSizes)

- framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout))
+ framework.ExpectNoError(e2enode.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout))

// annotate all nodes with no-scale-down
ScaleDownDisabledKey := "cluster-autoscaler.kubernetes.io/scale-down-disabled"
@@ -302,7 +303,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
anyKey(originalSizes): totalNodes,
}
setMigSizes(newSizes)
- framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout))
+ framework.ExpectNoError(e2enode.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout))
divider := int(float64(totalNodes) * 0.7)
fullNodesCount := divider
underutilizedNodesCount := totalNodes - fullNodesCount
@@ -348,7 +349,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, podsConfig.Name)

// Ensure that no new nodes have been added so far.
- gomega.Expect(framework.NumberOfReadyNodes(f.ClientSet)).To(gomega.Equal(nodeCount))
+ gomega.Expect(e2enode.TotalReady(f.ClientSet)).To(gomega.Equal(nodeCount))

// Start a number of schedulable pods to ensure CA reacts.
additionalNodes := maxNodes - nodeCount
@@ -385,7 +386,7 @@ func simpleScaleUpTestWithTolerance(f *framework.Framework, config *scaleUpTestC
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= minExpectedNodeCount }, scaleUpTimeout))
} else {
- framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, config.expectedResult.nodes, scaleUpTimeout))
+ framework.ExpectNoError(e2enode.WaitForReadyNodes(f.ClientSet, config.expectedResult.nodes, scaleUpTimeout))
}
klog.Infof("cluster is increased")
if tolerateMissingPodCount > 0 {
31 changes: 16 additions & 15 deletions test/e2e/autoscaling/cluster_size_autoscaling.go
@@ -44,6 +44,7 @@ import (
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
"k8s.io/kubernetes/test/e2e/scheduling"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -108,7 +109,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
sum += size
}
// Give instances time to spin up
- framework.ExpectNoError(framework.WaitForReadyNodes(c, sum, scaleUpTimeout))
+ framework.ExpectNoError(e2enode.WaitForReadyNodes(c, sum, scaleUpTimeout))

nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodeCount = len(nodes.Items)
@@ -142,7 +143,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
for _, size := range originalSizes {
expectedNodes += size
}
- framework.ExpectNoError(framework.WaitForReadyNodes(c, expectedNodes, scaleDownTimeout))
+ framework.ExpectNoError(e2enode.WaitForReadyNodes(c, expectedNodes, scaleDownTimeout))
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err)

@@ -373,7 +374,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName)
- framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
+ framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
// We wait for nodes to become schedulable to make sure the new nodes
// will be returned by getPoolNodes below.
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, resizeTimeout))
@@ -407,7 +408,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName)
- framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
+ framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2))
framework.ExpectNoError(disableAutoscaler(extraPoolName, 1, 2))
})
@@ -437,7 +438,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "extra-pod")

framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
- framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
+ framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
})

ginkgo.It("should increase cluster size if pod requesting EmptyDir volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func() {
@@ -458,7 +459,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "extra-pod")

framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
- framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
+ framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
})

ginkgo.It("should increase cluster size if pod requesting volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func() {
@@ -530,7 +531,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}()

framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
- framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
+ framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
})

ginkgo.It("should add node to the particular mig [Feature:ClusterSizeAutoscalingScaleUp]", func() {
@@ -641,7 +642,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName)
- framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
+ framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2))
defer disableAutoscaler(extraPoolName, 1, 2)

@@ -655,7 +656,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
// reseting all the timers in scale down code. Adding 5 extra minutes to workaround
// this issue.
// TODO: Remove the extra time when GKE restart is fixed.
- framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes+1, scaleUpTimeout+5*time.Minute))
+ framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+extraNodes+1, scaleUpTimeout+5*time.Minute))
})

simpleScaleDownTest := func(unready int) {
Expand Down Expand Up @@ -766,7 +767,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}
}
framework.ExpectNoError(framework.ResizeGroup(minMig, int32(0)))
- framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount-minSize, resizeTimeout))
+ framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount-minSize, resizeTimeout))
}

ginkgo.By("Make remaining nodes unschedulable")
@@ -812,7 +813,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName)
- framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
+ framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
framework.ExpectNoError(enableAutoscaler(extraPoolName, 0, 1))
defer disableAutoscaler(extraPoolName, 0, 1)

@@ -845,7 +846,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}
}
framework.ExpectNoError(framework.ResizeGroup(minMig, int32(1)))
- framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount-minSize+1, resizeTimeout))
+ framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount-minSize+1, resizeTimeout))
ngNodes, err := framework.GetGroupNodes(minMig)
framework.ExpectNoError(err)
gomega.Expect(len(ngNodes) == 1).To(gomega.BeTrue())
@@ -926,7 +927,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}
testFunction()
// Give nodes time to recover from network failure
- framework.ExpectNoError(framework.WaitForReadyNodes(c, len(nodes.Items), nodesRecoverTimeout))
+ framework.ExpectNoError(e2enode.WaitForReadyNodes(c, len(nodes.Items), nodesRecoverTimeout))
})

ginkgo.It("shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() {
@@ -1339,8 +1340,8 @@ func WaitForClusterSizeFuncWithUnready(c clientset.Interface, sizeFunc func(int)
numNodes := len(nodes.Items)

// Filter out not-ready nodes.
- framework.FilterNodes(nodes, func(node v1.Node) bool {
- return framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
+ e2enode.Filter(nodes, func(node v1.Node) bool {
+ return e2enode.IsConditionSetAsExpected(&node, v1.NodeReady, true)
})
numReady := len(nodes.Items)
