Remove unused variables (only assigned to) from test code.
This is revealed by the go/types package, which is stricter than
the Go compiler about unused variables. See also: golang/go#8560
Ryan Hitchman committed Feb 27, 2018
1 parent b79fe10 commit e04b91f
Showing 13 changed files with 10 additions and 38 deletions.
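The pattern removed throughout these files is sketched below. This is an illustrative reconstruction, not code from the repository: fakeController, fakeCloud, and the three-value signature of newController are hypothetical stand-ins for the real ServiceController, fakecloud.FakeCloud, and test helpers, of which only the call site is visible in the diff. Under the behavior described in the commit message and golang/go#8560, gc treats the assignment as a use of the variable, while go/types flags it as unused.

package demo

// Illustrative stand-ins for the real ServiceController and fakecloud.FakeCloud
// types used in the tests; the third return value is also a guess.
type fakeController struct{}
type fakeCloud struct{}

func newController() (*fakeController, *fakeCloud, bool) {
	return &fakeController{}, &fakeCloud{}, false
}

func example() {
	var controller *fakeController
	var cloud *fakeCloud // only ever assigned to, never read

	// gc accepts this: the assignment counts as a use of cloud, so no
	// "declared but not used" error is reported. go/types is stricter and
	// flags cloud as unused (see golang/go#8560).
	controller, cloud, _ = newController()

	// The fix applied throughout this commit: drop the unused declaration and
	// discard the value with the blank identifier instead, e.g.
	//   controller, _, _ = newController()
	_ = controller // controller is read by the surrounding test code in the real files
}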
10 changes: 4 additions & 6 deletions pkg/controller/service/service_controller_test.go
@@ -308,7 +308,6 @@ func TestGetNodeConditionPredicate(t *testing.T) {
func TestProcessServiceUpdate(t *testing.T) {

var controller *ServiceController
var cloud *fakecloud.FakeCloud

//A pair of old and new loadbalancer IP address
oldLBIP := "192.168.1.1"
@@ -327,7 +326,7 @@ func TestProcessServiceUpdate(t *testing.T) {
svc: defaultExternalService(),
updateFn: func(svc *v1.Service) *v1.Service {

controller, cloud, _ = newController()
controller, _, _ = newController()
controller.cache.getOrCreate("validKey")
return svc

@@ -398,7 +397,6 @@ func TestProcessServiceUpdate(t *testing.T) {
func TestSyncService(t *testing.T) {

var controller *ServiceController
var cloud *fakecloud.FakeCloud

testCases := []struct {
testName string
@@ -410,7 +408,7 @@ func TestSyncService(t *testing.T) {
testName: "if an invalid service name is synced",
key: "invalid/key/string",
updateFn: func() {
controller, cloud, _ = newController()
controller, _, _ = newController()

},
expectedFn: func(e error) error {
@@ -429,7 +427,7 @@ func TestSyncService(t *testing.T) {
testName: "if an invalid service is synced",
key: "somethingelse",
updateFn: func() {
controller, cloud, _ = newController()
controller, _, _ = newController()
srv := controller.cache.getOrCreate("external-balancer")
srv.state = defaultExternalService()
},
@@ -443,7 +441,7 @@ func TestSyncService(t *testing.T) {
key: "external-balancer",
updateFn: func() {
testSvc := defaultExternalService()
controller, cloud, _ = newController()
controller, _, _ = newController()
controller.enqueueService(testSvc)
svc := controller.cache.getOrCreate("external-balancer")
svc.state = testSvc
4 changes: 0 additions & 4 deletions staging/src/k8s.io/apiserver/pkg/server/config_test.go
@@ -45,9 +45,7 @@ func TestNewWithDelegate(t *testing.T) {
t.Fatal("unable to create fake client set")
}

delegateHealthzCalled := false
delegateConfig.HealthzChecks = append(delegateConfig.HealthzChecks, healthz.NamedCheck("delegate-health", func(r *http.Request) error {
delegateHealthzCalled = true
return fmt.Errorf("delegate failed healthcheck")
}))

@@ -74,9 +72,7 @@ func TestNewWithDelegate(t *testing.T) {
wrappingConfig.LoopbackClientConfig = &rest.Config{}
wrappingConfig.SwaggerConfig = DefaultSwaggerConfig()

wrappingHealthzCalled := false
wrappingConfig.HealthzChecks = append(wrappingConfig.HealthzChecks, healthz.NamedCheck("wrapping-health", func(r *http.Request) error {
wrappingHealthzCalled = true
return fmt.Errorf("wrapping failed healthcheck")
}))

7 changes: 1 addition & 6 deletions test/e2e/apps/network_partition.go
@@ -104,25 +104,20 @@ func newPodOnNode(c clientset.Interface, namespace, podName, nodeName string) er

var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
f := framework.NewDefaultFramework("network-partition")
var systemPodsNo int32
var c clientset.Interface
var ns string
ignoreLabels := framework.ImagePullerLabels
var group string

BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
systemPods, err := framework.GetPodsInNamespace(c, ns, ignoreLabels)
_, err := framework.GetPodsInNamespace(c, ns, ignoreLabels)
Expect(err).NotTo(HaveOccurred())
systemPodsNo = int32(len(systemPods))

// TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed.
framework.SkipUnlessProviderIs("gke", "aws")
if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
} else {
group = framework.TestContext.CloudConfig.NodeInstanceGroup
}
})

1 change: 0 additions & 1 deletion test/e2e/instrumentation/monitoring/BUILD
@@ -41,7 +41,6 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
"//vendor/k8s.io/metrics/pkg/client/custom_metrics:go_default_library",
],
)
@@ -32,7 +32,6 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/client-go/discovery"
kubeaggrcs "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/test/e2e/framework"
customclient "k8s.io/metrics/pkg/client/custom_metrics"
)
@@ -50,13 +49,11 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {

f := framework.NewDefaultFramework("stackdriver-monitoring")
var kubeClient clientset.Interface
var kubeAggrClient kubeaggrcs.Interface
var customMetricsClient customclient.CustomMetricsClient
var discoveryClient *discovery.DiscoveryClient

It("should run Custom Metrics - Stackdriver Adapter [Feature:StackdriverCustomMetrics]", func() {
kubeClient = f.ClientSet
kubeAggrClient = f.AggregatorClient
config, err := framework.LoadConfig()
if err != nil {
framework.Failf("Failed to load config: %s", err)
3 changes: 0 additions & 3 deletions test/e2e/network/network_tiers.go
@@ -26,7 +26,6 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/cloudprovider"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
@@ -40,14 +39,12 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
f := framework.NewDefaultFramework("services")

var cs clientset.Interface
var internalClientset internalclientset.Interface
serviceLBNames := []string{}

BeforeEach(func() {
// This test suite requires the GCE environment.
framework.SkipUnlessProviderIs("gce")
cs = f.ClientSet
internalClientset = f.InternalClientset
})

AfterEach(func() {
3 changes: 1 addition & 2 deletions test/e2e/node/kubelet.go
@@ -372,7 +372,6 @@ var _ = SIGDescribe("kubelet", func() {
var (
nfsServerPod *v1.Pod
nfsIP string
NFSconfig framework.VolumeTestConfig
pod *v1.Pod // client pod
)

@@ -390,7 +389,7 @@ var _ = SIGDescribe("kubelet", func() {

BeforeEach(func() {
framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
NFSconfig, nfsServerPod, nfsIP = framework.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
_, nfsServerPod, nfsIP = framework.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
})

AfterEach(func() {
1 change: 0 additions & 1 deletion test/e2e/scalability/BUILD
@@ -31,7 +31,6 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
4 changes: 1 addition & 3 deletions test/e2e/scalability/density.go
@@ -32,7 +32,6 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
utiluuid "k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
@@ -349,7 +348,6 @@ var _ = SIGDescribe("Density", func() {
var nodeCpuCapacity int64
var nodeMemCapacity int64
var nodes *v1.NodeList
var masters sets.String

testCaseBaseName := "density"
missingMeasurements := 0
@@ -417,7 +415,7 @@ var _ = SIGDescribe("Density", func() {
ns = f.Namespace.Name
testPhaseDurations = timer.NewTestPhaseTimer()

masters, nodes = framework.GetMasterAndWorkerNodesOrDie(c)
_, nodes = framework.GetMasterAndWorkerNodesOrDie(c)
nodeCount = len(nodes.Items)
Expect(nodeCount).NotTo(BeZero())

4 changes: 1 addition & 3 deletions test/e2e/scheduling/priorities.go
@@ -29,7 +29,6 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
@@ -62,7 +61,6 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
var nodeList *v1.NodeList
var systemPodsNo int
var ns string
var masterNodes sets.String
f := framework.NewDefaultFramework("sched-priority")
ignoreLabels := framework.ImagePullerLabels

@@ -75,7 +73,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
nodeList = &v1.NodeList{}

framework.WaitForAllNodesHealthy(cs, time.Minute)
masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs)
_, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs)

err := framework.CheckTestingNSDeletedExcept(cs, ns)
framework.ExpectNoError(err)
2 changes: 0 additions & 2 deletions test/e2e/scheduling/taints_test.go
@@ -147,14 +147,12 @@ const (
// - lack of eviction of short-tolerating pod after taint removal.
var _ = SIGDescribe("NoExecuteTaintManager [Serial]", func() {
var cs clientset.Interface
var nodeList *v1.NodeList
var ns string
f := framework.NewDefaultFramework("taint-control")

BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace.Name
nodeList = &v1.NodeList{}

framework.WaitForAllNodesHealthy(cs, time.Minute)

2 changes: 0 additions & 2 deletions test/e2e/storage/csi_volumes.go
@@ -182,7 +182,6 @@ var _ = utils.SIGDescribe("CSI Volumes [Feature:CSI]", func() {
ns *v1.Namespace
node v1.Node
config framework.VolumeTestConfig
suffix string
)

BeforeEach(func() {
@@ -197,7 +196,6 @@ var _ = utils.SIGDescribe("CSI Volumes [Feature:CSI]", func() {
ServerNodeName: node.Name,
WaitForCompletion: true,
}
suffix = ns.Name
})

// Create one of these for each of the drivers to be tested
4 changes: 2 additions & 2 deletions test/e2e_node/cpu_manager_test.go
@@ -202,7 +202,7 @@ func enableCPUManagerInKubelet(f *framework.Framework) (oldCfg *kubeletconfig.Ku
}

func runCPUManagerTests(f *framework.Framework) {
var cpuCap, cpuAlloc, cpuRes int64
var cpuCap, cpuAlloc int64
var oldCfg *kubeletconfig.KubeletConfiguration
var cpuListString, expAllowedCPUsListRegex string
var cpuList []int
@@ -213,7 +213,7 @@ func runCPUManagerTests(f *framework.Framework) {
var pod, pod1, pod2 *v1.Pod

It("should assign CPUs as expected based on the Pod spec", func() {
cpuCap, cpuAlloc, cpuRes = getLocalNodeCPUDetails(f)
cpuCap, cpuAlloc, _ = getLocalNodeCPUDetails(f)

// Skip CPU Manager tests altogether if the CPU capacity < 2.
if cpuCap < 2 {