Use clientset in GetReadySchedulableNodesOrDie #35122

Merged: 1 commit, Oct 19, 2016
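
This PR migrates the e2e node-listing helpers from the legacy *client.Client to the generated clientset.Interface, which reaches the same resources through its Core() API group; every other hunk below is a call site catching up with that signature change. A minimal sketch of the two shapes, using the import paths from the diff (the wrapper package name is illustrative):

package e2esketch

import (
	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	client "k8s.io/kubernetes/pkg/client/unversioned"
)

// Before: the legacy client exposes resource accessors directly.
func listNodesLegacy(c *client.Client) (*api.NodeList, error) {
	return c.Nodes().List(api.ListOptions{})
}

// After: the generated clientset routes the same call through Core().
func listNodes(cs clientset.Interface) (*api.NodeList, error) {
	return cs.Core().Nodes().List(api.ListOptions{})
}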
2 changes: 1 addition & 1 deletion test/e2e/cluster_size_autoscaling.go
@@ -60,7 +60,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
c = f.Client
framework.SkipUnlessProviderIs("gce", "gke")

- nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
+ nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodeCount = len(nodes.Items)
Expect(nodeCount).NotTo(BeZero())
cpu := nodes.Items[0].Status.Capacity[api.ResourceCPU]
15 changes: 8 additions & 7 deletions test/e2e/cluster_upgrade.go
@@ -22,6 +22,7 @@ import (
"strings"

"k8s.io/kubernetes/pkg/api"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/chaosmonkey"
@@ -59,7 +60,7 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
v, err := realVersion(framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.NodeUpgrade(f, v, framework.TestContext.UpgradeImage))
- framework.ExpectNoError(checkNodesVersions(f.Client, v))
+ framework.ExpectNoError(checkNodesVersions(f.ClientSet, v))
})
cm.Register(func(sem *chaosmonkey.Semaphore) {
// Close over f.
@@ -73,7 +74,7 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
v, err := realVersion(framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.NodeUpgrade(f, v, framework.TestContext.UpgradeImage))
- framework.ExpectNoError(checkNodesVersions(f.Client, v))
+ framework.ExpectNoError(checkNodesVersions(f.ClientSet, v))
})
cm.Register(func(sem *chaosmonkey.Semaphore) {
// Close over f.
@@ -91,7 +92,7 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
framework.ExpectNoError(framework.MasterUpgrade(v))
framework.ExpectNoError(checkMasterVersion(f.Client, v))
framework.ExpectNoError(framework.NodeUpgrade(f, v, framework.TestContext.UpgradeImage))
- framework.ExpectNoError(checkNodesVersions(f.Client, v))
+ framework.ExpectNoError(checkNodesVersions(f.ClientSet, v))
})
cm.Register(func(sem *chaosmonkey.Semaphore) {
// Close over f.
@@ -107,7 +108,7 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
framework.ExpectNoError(framework.MasterUpgrade(v))
framework.ExpectNoError(checkMasterVersion(f.Client, v))
framework.ExpectNoError(framework.NodeUpgrade(f, v, framework.TestContext.UpgradeImage))
- framework.ExpectNoError(checkNodesVersions(f.Client, v))
+ framework.ExpectNoError(checkNodesVersions(f.ClientSet, v))
})
cm.Register(func(sem *chaosmonkey.Semaphore) {
// Close over f.
@@ -146,7 +147,7 @@ func testService(f *framework.Framework, sem *chaosmonkey.Semaphore, testDuringD
// Setup
serviceName := "service-test"

- jig := NewServiceTestJig(f.Client, serviceName)
+ jig := NewServiceTestJig(f.Client, f.ClientSet, serviceName)
// nodeIP := pickNodeIP(jig.Client) // for later

By("creating a TCP service " + serviceName + " with type=LoadBalancer in namespace " + f.Namespace.Name)
@@ -209,8 +210,8 @@ func checkMasterVersion(c *client.Client, want string) error {
return nil
}

- func checkNodesVersions(c *client.Client, want string) error {
- l := framework.GetReadySchedulableNodesOrDie(c)
+ func checkNodesVersions(cs clientset.Interface, want string) error {
+ l := framework.GetReadySchedulableNodesOrDie(cs)
for _, n := range l.Items {
// We do prefix trimming and then matching because:
// want looks like: 0.19.3-815-g50e67d4
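
Reassembled from the hunk above, the reshaped helper simply threads the clientset through to the framework. The loop body is truncated in this view, so the trimming logic, the KubeletVersion field, and the error message below are hedged reconstructions rather than the exact upstream code (strings is imported by this file; fmt is assumed):

func checkNodesVersions(cs clientset.Interface, want string) error {
	l := framework.GetReadySchedulableNodesOrDie(cs)
	for _, n := range l.Items {
		// We do prefix trimming and then matching because:
		// want looks like: 0.19.3-815-g50e67d4
		kv := strings.TrimPrefix(n.Status.NodeInfo.KubeletVersion, "v")
		if !strings.HasPrefix(kv, want) {
			return fmt.Errorf("node %s has kubelet version %q, want prefix %q", n.Name, kv, want)
		}
	}
	return nil
}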
2 changes: 1 addition & 1 deletion test/e2e/daemon_restart.go
@@ -300,7 +300,7 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() {

It("Kubelet should not restart containers across restart", func() {

- nodeIPs, err := getNodePublicIps(f.Client)
+ nodeIPs, err := getNodePublicIps(f.ClientSet)
framework.ExpectNoError(err)
preRestarts, badNodes := getContainerRestarts(f.Client, ns, labelSelector)
if preRestarts != 0 {
13 changes: 7 additions & 6 deletions test/e2e/daemon_set.go
@@ -27,6 +27,7 @@ import (
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/apis/extensions"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/labels"
@@ -68,7 +69,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
} else {
framework.Logf("unable to dump pods: %v", err)
}
- err := clearDaemonSetNodeLabels(f.Client)
+ err := clearDaemonSetNodeLabels(f.Client, f.ClientSet)
Expect(err).NotTo(HaveOccurred())
})

@@ -83,7 +84,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
BeforeEach(func() {
ns = f.Namespace.Name
c = f.Client
- err := clearDaemonSetNodeLabels(c)
+ err := clearDaemonSetNodeLabels(c, f.ClientSet)
Expect(err).NotTo(HaveOccurred())
})

@@ -180,7 +181,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")

By("Change label of node, check that daemon pod is launched.")
- nodeList := framework.GetReadySchedulableNodesOrDie(f.Client)
+ nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
@@ -248,7 +249,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")

By("Change label of node, check that daemon pod is launched.")
- nodeList := framework.GetReadySchedulableNodesOrDie(f.Client)
+ nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
@@ -284,8 +285,8 @@ func separateDaemonSetNodeLabels(labels map[string]string) (map[string]string, m
return daemonSetLabels, otherLabels
}

- func clearDaemonSetNodeLabels(c *client.Client) error {
- nodeList := framework.GetReadySchedulableNodesOrDie(c)
+ func clearDaemonSetNodeLabels(c *client.Client, cs clientset.Interface) error {
+ nodeList := framework.GetReadySchedulableNodesOrDie(cs)
for _, node := range nodeList.Items {
_, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{})
if err != nil {
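
This hunk is the clearest example of the transitional pattern used throughout the PR: a helper that still needs the legacy client for un-migrated calls (setDaemonSetNodeLabels here) grows a clientset parameter instead of switching wholesale. Put back together from the diff, the helper reads:

func clearDaemonSetNodeLabels(c *client.Client, cs clientset.Interface) error {
	// Node listing has already moved to the clientset...
	nodeList := framework.GetReadySchedulableNodesOrDie(cs)
	for _, node := range nodeList.Items {
		// ...while label updates still go through the legacy client.
		_, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{})
		if err != nil {
			return err
		}
	}
	return nil
}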
2 changes: 1 addition & 1 deletion test/e2e/empty_dir_wrapper.go
@@ -309,7 +309,7 @@ func makeConfigMapVolumes(configMapNames []string) (volumes []api.Volume, volume

func testNoWrappedVolumeRace(f *framework.Framework, volumes []api.Volume, volumeMounts []api.VolumeMount, podCount int32) {
rcName := wrappedVolumeRaceRCNamePrefix + string(uuid.NewUUID())
- nodeList := framework.GetReadySchedulableNodesOrDie(f.Client)
+ nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
targetNode := nodeList.Items[0]

2 changes: 1 addition & 1 deletion test/e2e/example_k8petstore.go
@@ -165,7 +165,7 @@ var _ = framework.KubeDescribe("Pet Store [Feature:Example]", func() {
f := framework.NewDefaultFramework("petstore")

It(fmt.Sprintf("should scale to persist a nominal number ( %v ) of transactions in %v seconds", k8bpsSmokeTestFinalTransactions, k8bpsSmokeTestTimeout), func() {
- nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
+ nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodeCount = len(nodes.Items)

loadGenerators := nodeCount
4 changes: 2 additions & 2 deletions test/e2e/framework/framework.go
@@ -261,7 +261,7 @@ func (f *Framework) BeforeEach() {
f.logsSizeWaitGroup = sync.WaitGroup{}
f.logsSizeWaitGroup.Add(1)
f.logsSizeCloseChannel = make(chan bool)
- f.logsSizeVerifier = NewLogsVerifier(f.Client, f.logsSizeCloseChannel)
+ f.logsSizeVerifier = NewLogsVerifier(f.Client, f.ClientSet, f.logsSizeCloseChannel)
go func() {
f.logsSizeVerifier.Run()
f.logsSizeWaitGroup.Done()
@@ -659,7 +659,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str

// CreatePodsPerNodeForSimpleApp Creates pods w/ labels. Useful for tests which make a bunch of pods w/o any networking.
func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n api.Node) api.PodSpec, maxCount int) map[string]string {
- nodes := GetReadySchedulableNodesOrDie(f.Client)
+ nodes := GetReadySchedulableNodesOrDie(f.ClientSet)
labels := map[string]string{
"app": appName + "-pod",
}
7 changes: 5 additions & 2 deletions test/e2e/framework/log_size_monitoring.go
@@ -25,6 +25,7 @@ import (
"text/tabwriter"
"time"

clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
client "k8s.io/kubernetes/pkg/client/unversioned"
)

@@ -65,6 +66,7 @@ type LogSizeGatherer struct {
// It oversees a <workersNo> workers which do the gathering.
type LogsSizeVerifier struct {
client *client.Client
+ clientset clientset.Interface
stopChannel chan bool
// data stores LogSizeData groupped per IP and log_path
data *LogsSizeData
@@ -142,8 +144,8 @@ func (d *LogsSizeData) AddNewData(ip, path string, timestamp time.Time, size int
}

// NewLogsVerifier creates a new LogsSizeVerifier which will stop when stopChannel is closed
- func NewLogsVerifier(c *client.Client, stopChannel chan bool) *LogsSizeVerifier {
- nodeAddresses, err := NodeSSHHosts(c)
+ func NewLogsVerifier(c *client.Client, cs clientset.Interface, stopChannel chan bool) *LogsSizeVerifier {
+ nodeAddresses, err := NodeSSHHosts(cs)
ExpectNoError(err)
masterAddress := GetMasterHost() + ":22"

@@ -152,6 +154,7 @@ func NewLogsVerifier(c *client.Client, stopChannel chan bool) *LogsSizeVerifier

verifier := &LogsSizeVerifier{
client: c,
+ clientset: cs,
stopChannel: stopChannel,
data: prepareData(masterAddress, nodeAddresses),
masterAddress: masterAddress,
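
With the verifier now carrying both clients, the wiring shown earlier in framework.BeforeEach follows a plain start/stop pattern. A condensed usage sketch; the WaitGroup bookkeeping mirrors the framework code, and the assumption that closing the channel is what stops Run comes from the constructor's doc comment:

stop := make(chan bool)
verifier := NewLogsVerifier(f.Client, f.ClientSet, stop)

var wg sync.WaitGroup
wg.Add(1)
go func() {
	verifier.Run() // gathers log sizes over SSH until stop is closed
	wg.Done()
}()

// ... run the test ...

close(stop)
wg.Wait()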
4 changes: 2 additions & 2 deletions test/e2e/framework/networking_utils.go
@@ -432,7 +432,7 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {

By("Getting node addresses")
ExpectNoError(WaitForAllNodesSchedulable(config.f.Client))
- nodeList := GetReadySchedulableNodesOrDie(config.f.Client)
+ nodeList := GetReadySchedulableNodesOrDie(config.f.ClientSet)
config.ExternalAddrs = NodeAddresses(nodeList, api.NodeExternalIP)
if len(config.ExternalAddrs) < 2 {
// fall back to legacy IPs
@@ -483,7 +483,7 @@ func shuffleNodes(nodes []api.Node) []api.Node {

func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod {
ExpectNoError(WaitForAllNodesSchedulable(config.f.Client))
- nodeList := GetReadySchedulableNodesOrDie(config.f.Client)
+ nodeList := GetReadySchedulableNodesOrDie(config.f.ClientSet)

// To make this test work reasonably fast in large clusters,
// we limit the number of NetProxyPods to no more than 100 ones
8 changes: 4 additions & 4 deletions test/e2e/framework/util.go
@@ -2336,11 +2336,11 @@ func getNodeEvents(c *client.Client, nodeName string) []api.Event {
}

// waitListSchedulableNodesOrDie is a wrapper around listing nodes supporting retries.
- func waitListSchedulableNodesOrDie(c *client.Client) *api.NodeList {
+ func waitListSchedulableNodesOrDie(c clientset.Interface) *api.NodeList {
var nodes *api.NodeList
var err error
if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
- nodes, err = c.Nodes().List(api.ListOptions{FieldSelector: fields.Set{
+ nodes, err = c.Core().Nodes().List(api.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector()})
return err == nil, nil
@@ -2365,7 +2365,7 @@ func isNodeSchedulable(node *api.Node) bool {
// 1) Needs to be schedulable.
// 2) Needs to be ready.
// If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely.
- func GetReadySchedulableNodesOrDie(c *client.Client) (nodes *api.NodeList) {
+ func GetReadySchedulableNodesOrDie(c clientset.Interface) (nodes *api.NodeList) {
nodes = waitListSchedulableNodesOrDie(c)
// previous tests may have cause failures of some nodes. Let's skip
// 'Not Ready' nodes, just in case (there is no need to fail the test).
@@ -3254,7 +3254,7 @@ func NodeAddresses(nodelist *api.NodeList, addrType api.NodeAddressType) []strin
// NodeSSHHosts returns SSH-able host names for all schedulable nodes - this excludes master node.
// It returns an error if it can't find an external IP for every node, though it still returns all
// hosts that it found in that case.
- func NodeSSHHosts(c *client.Client) ([]string, error) {
+ func NodeSSHHosts(c clientset.Interface) ([]string, error) {
nodelist := waitListSchedulableNodesOrDie(c)

// TODO(roberthbailey): Use the "preferred" address for the node, once such a thing is defined (#2462).
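
The hunk above is the heart of the PR: once waitListSchedulableNodesOrDie accepts clientset.Interface, everything layered on top of it (GetReadySchedulableNodesOrDie, NodeSSHHosts) migrates by changing only a signature. Put back together, the polling helper looks roughly like this; the diff truncates the failure branch, so the Failf call is an assumption in keeping with the OrDie name:

func waitListSchedulableNodesOrDie(c clientset.Interface) *api.NodeList {
	var nodes *api.NodeList
	var err error
	if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
		// Retry the list until it succeeds or the timeout expires.
		nodes, err = c.Core().Nodes().List(api.ListOptions{FieldSelector: fields.Set{
			"spec.unschedulable": "false",
		}.AsSelector()})
		return err == nil, nil
	}) != nil {
		Failf("Timed out while listing schedulable nodes: %v", err) // assumed failure path
	}
	return nodes
}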
2 changes: 1 addition & 1 deletion test/e2e/gke_node_pools.go
@@ -98,7 +98,7 @@ func testCreateDeleteNodePool(f *framework.Framework, poolName string) {
// label with the given node pool name.
func nodesWithPoolLabel(f *framework.Framework, poolName string) int {
nodeCount := 0
- nodeList := framework.GetReadySchedulableNodesOrDie(f.Client)
+ nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
for _, node := range nodeList.Items {
if poolLabel := node.Labels["cloud.google.com/gke-nodepool"]; poolLabel == poolName {
nodeCount++
2 changes: 1 addition & 1 deletion test/e2e/kubelet.go
@@ -137,7 +137,7 @@ var _ = framework.KubeDescribe("kubelet", func() {

BeforeEach(func() {
c = f.Client
- nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
+ nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
numNodes = len(nodes.Items)
nodeNames = sets.NewString()
// If there are a lot of nodes, we don't want to use all of them
2 changes: 1 addition & 1 deletion test/e2e/kubelet_perf.go
@@ -203,7 +203,7 @@ var _ = framework.KubeDescribe("Kubelet [Serial] [Slow]", func() {
if err := framework.WaitForPodsSuccess(f.Client, api.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingLongTimeout); err != nil {
framework.Failf("Image puller didn't complete in %v, not running resource usage test since the metrics might be adulterated", imagePrePullingLongTimeout)
}
- nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
+ nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodeNames = sets.NewString()
for _, node := range nodes.Items {
nodeNames.Insert(node.Name)
2 changes: 1 addition & 1 deletion test/e2e/load.go
@@ -107,7 +107,7 @@ var _ = framework.KubeDescribe("Load capacity", func() {
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c))

ns = f.Namespace.Name
- nodes := framework.GetReadySchedulableNodesOrDie(c)
+ nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodeCount = len(nodes.Items)
Expect(nodeCount).NotTo(BeZero())

2 changes: 1 addition & 1 deletion test/e2e/logging_soak.go
@@ -85,7 +85,7 @@ var _ = framework.KubeDescribe("Logging soak [Performance] [Slow] [Disruptive]",
// was produced in each and every pod at least once. The final arg is the timeout for the test to verify all the pods got logs.
func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname string, timeout time.Duration) {

- nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
+ nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
totalPods := len(nodes.Items)
Expect(totalPods).NotTo(Equal(0))

2 changes: 1 addition & 1 deletion test/e2e/mesos.go
@@ -65,7 +65,7 @@ var _ = framework.KubeDescribe("Mesos", func() {
client := f.Client
framework.ExpectNoError(framework.AllNodesReady(client, wait.ForeverTestTimeout), "all nodes ready")

- nodelist := framework.GetReadySchedulableNodesOrDie(f.Client)
+ nodelist := framework.GetReadySchedulableNodesOrDie(f.ClientSet)

const ns = "static-pods"
numpods := int32(len(nodelist.Items))
2 changes: 1 addition & 1 deletion test/e2e/metrics_grabber_test.go
@@ -49,7 +49,7 @@ var _ = framework.KubeDescribe("MetricsGrabber", func() {

It("should grab all metrics from a Kubelet.", func() {
By("Proxying to Node through the API server")
- nodes := framework.GetReadySchedulableNodesOrDie(c)
+ nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(nodes.Items).NotTo(BeEmpty())
response, err := grabber.GrabFromKubelet(nodes.Items[0].Name)
framework.ExpectNoError(err)
4 changes: 2 additions & 2 deletions test/e2e/networking_perf.go
@@ -49,7 +49,7 @@ var _ = framework.KubeDescribe("Networking IPerf [Experimental] [Slow] [Feature:
maxBandwidthBits := gceBandwidthBitsEstimate

It(fmt.Sprintf("should transfer ~ 1GB onto the service endpoint %v servers (maximum of %v clients)", numServer, numClient), func() {
- nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
+ nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
totalPods := len(nodes.Items)
// for a single service, we expect to divide bandwidth between the network. Very crude estimate.
expectedBandwidth := int(float64(maxBandwidthBits) / float64(totalPods))
@@ -110,7 +110,7 @@ var _ = framework.KubeDescribe("Networking IPerf [Experimental] [Slow] [Feature:

// Calculate expected number of clients based on total nodes.
expectedCli := func() int {
- nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
+ nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
return int(math.Min(float64(len(nodes.Items)), float64(numClient)))
}()

9 changes: 6 additions & 3 deletions test/e2e/nodeoutofdisk.go
@@ -24,6 +24,7 @@ import (
cadvisorapi "github.com/google/cadvisor/info/v1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/util/wait"
@@ -67,13 +68,15 @@ const (
// Flaky issue #20015. We have no clear path for how to test this functionality in a non-flaky way.
var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() {
var c *client.Client
+ var cs clientset.Interface
var unfilledNodeName, recoveredNodeName string
f := framework.NewDefaultFramework("node-outofdisk")

BeforeEach(func() {
c = f.Client
+ cs = f.ClientSet

- nodelist := framework.GetReadySchedulableNodesOrDie(c)
+ nodelist := framework.GetReadySchedulableNodesOrDie(cs)

// Skip this test on small clusters. No need to fail since it is not a use
// case that any cluster of small size needs to support.
@@ -87,7 +90,7 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu

AfterEach(func() {

- nodelist := framework.GetReadySchedulableNodesOrDie(c)
+ nodelist := framework.GetReadySchedulableNodesOrDie(cs)
Expect(len(nodelist.Items)).ToNot(BeZero())
for _, node := range nodelist.Items {
if unfilledNodeName == node.Name || recoveredNodeName == node.Name {
@@ -150,7 +153,7 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu
}
})

- nodelist := framework.GetReadySchedulableNodesOrDie(c)
+ nodelist := framework.GetReadySchedulableNodesOrDie(cs)
Expect(len(nodelist.Items)).To(BeNumerically(">", 1))

nodeToRecover := nodelist.Items[1]
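
Suites like this one that touch nodes from several blocks cache both handles once in BeforeEach. A skeletal Ginkgo sketch of the pattern; the AllNodesReady call is an assumed stand-in for the file's real setup, and the spec bodies are elided:

var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() {
	var c *client.Client
	var cs clientset.Interface
	f := framework.NewDefaultFramework("node-outofdisk")

	BeforeEach(func() {
		c = f.Client
		cs = f.ClientSet
		framework.ExpectNoError(framework.AllNodesReady(c, wait.ForeverTestTimeout))
	})

	AfterEach(func() {
		// Node listing goes through the clientset handle from here on.
		nodelist := framework.GetReadySchedulableNodesOrDie(cs)
		Expect(len(nodelist.Items)).ToNot(BeZero())
	})
})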
2 changes: 1 addition & 1 deletion test/e2e/pd.go
@@ -65,7 +65,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {

podClient = f.Client.Pods(f.Namespace.Name)
nodeClient = f.Client.Nodes()
- nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
+ nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)

Expect(len(nodes.Items)).To(BeNumerically(">=", 2), "Requires at least 2 nodes")

2 changes: 1 addition & 1 deletion test/e2e/petset.go
@@ -285,7 +285,7 @@ var _ = framework.KubeDescribe("Pet set recreate [Slow] [Feature:PetSet]", func(

It("should recreate evicted petset", func() {
By("looking for a node to schedule pet set and pod")
- nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
+ nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
node := nodes.Items[0]

By("creating pod with conflicting port in namespace " + f.Namespace.Name)