fix staticcheck failures for test/e2e and test/integration #95281

Merged
merged 1 commit into from Feb 9, 2021
1 change: 0 additions & 1 deletion hack/.staticcheck_failures
@@ -1,7 +1,6 @@
cluster/images/etcd/migrate
pkg/controller/replicaset
pkg/kubelet/dockershim
test/e2e/autoscaling
test/integration/garbagecollector
test/integration/scheduler_perf
vendor/k8s.io/apimachinery/pkg/api/apitesting/roundtrip
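hack/.staticcheck_failures is the repository's allowlist of packages that are still permitted to fail staticcheck; deleting an entry means the staticcheck verification now requires that package to pass cleanly, which the code changes below make possible.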
1 change: 1 addition & 0 deletions test/e2e/autoscaling/BUILD
@@ -54,6 +54,7 @@ go_library(
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/golang.org/x/oauth2/google:go_default_library",
"//vendor/google.golang.org/api/monitoring/v3:go_default_library",
"//vendor/google.golang.org/api/option:go_default_library",
"//vendor/k8s.io/klog/v2:go_default_library",
],
)
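The Bazel BUILD file lists the vendored packages this Go target imports, so the new //vendor/google.golang.org/api/option dependency mirrors the option import added to the Go source in the next hunk (BUILD files in the repository at this time were normally regenerated with hack/update-bazel.sh rather than edited by hand).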
@@ -22,6 +22,7 @@ import (
"time"

gcm "google.golang.org/api/monitoring/v3"
"google.golang.org/api/option"
appsv1 "k8s.io/api/apps/v1"
as "k8s.io/api/autoscaling/v2beta1"
v1 "k8s.io/api/core/v1"
@@ -233,6 +234,9 @@ func (tc *CustomMetricTestCase) Run() {

ctx := context.Background()
client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope)
if err != nil {
framework.Failf("Failed to initialize gcm default client, %v", err)
}

// Hack for running tests locally, needed to authenticate in Stackdriver
// If this is your use case, create application default credentials:
@@ -247,7 +251,7 @@ func (tc *CustomMetricTestCase) Run() {
client := oauth2.NewClient(oauth2.NoContext, ts)
*/

gcmService, err := gcm.New(client)
gcmService, err := gcm.NewService(ctx, option.WithHTTPClient(client))
if err != nil {
framework.Failf("Failed to create gcm service, %v", err)
}
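Pulled out of the test framework, the constructor migration this hunk performs looks roughly like the sketch below: the generated gcm.New(client) constructor is deprecated in favor of gcm.NewService, which takes a context plus client options such as option.WithHTTPClient. The newGCMService helper name is ours for illustration, not part of the PR.

```go
package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	gcm "google.golang.org/api/monitoring/v3"
	"google.golang.org/api/option"
)

// newGCMService builds a Cloud Monitoring client the way the updated test does:
// an OAuth2-authenticated HTTP client from application default credentials,
// handed to NewService via option.WithHTTPClient.
func newGCMService(ctx context.Context) (*gcm.Service, error) {
	client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope)
	if err != nil {
		return nil, err
	}
	// Old, deprecated form: gcm.New(client)
	return gcm.NewService(ctx, option.WithHTTPClient(client))
}

func main() {
	svc, err := newGCMService(context.Background())
	if err != nil {
		log.Fatalf("failed to create gcm service: %v", err)
	}
	_ = svc // use svc.Projects.TimeSeries, etc.
}
```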
30 changes: 14 additions & 16 deletions test/e2e/autoscaling/horizontal_pod_autoscaling.go
@@ -29,7 +29,6 @@ import (
// These tests don't seem to be running properly in parallel: issue: #20338.
//
var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: CPU)", func() {
var rc *e2eautoscaling.ResourceConsumer
f := framework.NewDefaultFramework("horizontal-pod-autoscaling")

titleUp := "Should scale from 1 pod to 3 pods and from 3 to 5"
@@ -38,31 +37,31 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
SIGDescribe("[Serial] [Slow] Deployment", func() {
// CPU tests via deployments
ginkgo.It(titleUp, func() {
scaleUp("test-deployment", e2eautoscaling.KindDeployment, false, rc, f)
scaleUp("test-deployment", e2eautoscaling.KindDeployment, false, f)
})
ginkgo.It(titleDown, func() {
scaleDown("test-deployment", e2eautoscaling.KindDeployment, false, rc, f)
scaleDown("test-deployment", e2eautoscaling.KindDeployment, false, f)
})
})

SIGDescribe("[Serial] [Slow] ReplicaSet", func() {
// CPU tests via ReplicaSets
ginkgo.It(titleUp, func() {
scaleUp("rs", e2eautoscaling.KindReplicaSet, false, rc, f)
scaleUp("rs", e2eautoscaling.KindReplicaSet, false, f)
})
ginkgo.It(titleDown, func() {
scaleDown("rs", e2eautoscaling.KindReplicaSet, false, rc, f)
scaleDown("rs", e2eautoscaling.KindReplicaSet, false, f)
})
})

// These tests take ~20 minutes each.
SIGDescribe("[Serial] [Slow] ReplicationController", func() {
// CPU tests via replication controllers
ginkgo.It(titleUp+" and verify decision stability", func() {
scaleUp("rc", e2eautoscaling.KindRC, true, rc, f)
scaleUp("rc", e2eautoscaling.KindRC, true, f)
})
ginkgo.It(titleDown+" and verify decision stability", func() {
scaleDown("rc", e2eautoscaling.KindRC, true, rc, f)
scaleDown("rc", e2eautoscaling.KindRC, true, f)
})
})

@@ -77,7 +76,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
maxPods: 2,
firstScale: 2,
}
scaleTest.run("rc-light", e2eautoscaling.KindRC, rc, f)
scaleTest.run("rc-light", e2eautoscaling.KindRC, f)
})
ginkgo.It("Should scale from 2 pods to 1 pod [Slow]", func() {
scaleTest := &HPAScaleTest{
@@ -89,7 +88,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
maxPods: 2,
firstScale: 1,
}
scaleTest.run("rc-light", e2eautoscaling.KindRC, rc, f)
scaleTest.run("rc-light", e2eautoscaling.KindRC, f)
})
})
})
@@ -106,17 +105,16 @@ type HPAScaleTest struct {
firstScaleStasis time.Duration
cpuBurst int
secondScale int32
secondScaleStasis time.Duration
}

// run is a method which runs an HPA lifecycle, from a starting state, to an expected
// The initial state is defined by the initPods parameter.
// The first state change is due to the CPU being consumed initially, which HPA responds to by changing pod counts.
// The second state change (optional) is due to the CPU burst parameter, which HPA again responds to.
// TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes.
func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, rc *e2eautoscaling.ResourceConsumer, f *framework.Framework) {
func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, f *framework.Framework) {
const timeToWait = 15 * time.Minute
rc = e2eautoscaling.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.ScalesGetter)
rc := e2eautoscaling.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.ScalesGetter)
defer rc.CleanUp()
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
defer e2eautoscaling.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
@@ -131,7 +129,7 @@ func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, rc
}
}

func scaleUp(name string, kind schema.GroupVersionKind, checkStability bool, rc *e2eautoscaling.ResourceConsumer, f *framework.Framework) {
func scaleUp(name string, kind schema.GroupVersionKind, checkStability bool, f *framework.Framework) {
stasis := 0 * time.Minute
if checkStability {
stasis = 10 * time.Minute
@@ -148,10 +146,10 @@ func scaleUp(name string, kind schema.GroupVersionKind, checkStability bool, rc
cpuBurst: 700,
secondScale: 5,
}
scaleTest.run(name, kind, rc, f)
scaleTest.run(name, kind, f)
}

func scaleDown(name string, kind schema.GroupVersionKind, checkStability bool, rc *e2eautoscaling.ResourceConsumer, f *framework.Framework) {
func scaleDown(name string, kind schema.GroupVersionKind, checkStability bool, f *framework.Framework) {
stasis := 0 * time.Minute
if checkStability {
stasis = 10 * time.Minute
@@ -168,5 +166,5 @@ func scaleDown(name string, kind schema.GroupVersionKind, checkStability bool, r
cpuBurst: 10,
secondScale: 1,
}
scaleTest.run(name, kind, rc, f)
scaleTest.run(name, kind, f)
}
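The dropped rc parameter is what staticcheck flagged in this file: every caller passed a value that run, scaleUp, and scaleDown immediately overwrote, so the argument was never read (likely SA4009, argument overwritten before first use). A minimal, self-contained sketch of the before/after shape, using stub types rather than the real e2e framework:

```go
package main

import "fmt"

// ResourceConsumer is a stand-in for e2eautoscaling.ResourceConsumer in this sketch.
type ResourceConsumer struct{ name string }

func NewDynamicResourceConsumer(name string) *ResourceConsumer {
	return &ResourceConsumer{name: name}
}

func (rc *ResourceConsumer) CleanUp() { fmt.Println("cleaned up", rc.name) }

// runBefore mirrors the old signature: the rc argument is overwritten before
// it is ever read, which staticcheck reports.
func runBefore(name string, rc *ResourceConsumer) {
	rc = NewDynamicResourceConsumer(name)
	defer rc.CleanUp()
}

// runAfter mirrors the merged code: the parameter is gone and rc is a local.
func runAfter(name string) {
	rc := NewDynamicResourceConsumer(name)
	defer rc.CleanUp()
}

func main() {
	runBefore("rc-old", nil)
	runAfter("rc-new")
}
```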
37 changes: 22 additions & 15 deletions test/integration/garbagecollector/garbage_collector_test.go
@@ -366,17 +366,16 @@ func testCrossNamespaceReferences(t *testing.T, watchCache bool) {
}
invalidOwnerReferences = append(invalidOwnerReferences, metav1.OwnerReference{Name: "invalid", UID: parent.UID, APIVersion: "v1", Kind: "Pod", Controller: pointer.BoolPtr(false)})

invalidUIDs := []types.UID{}
for i := 0; i < workers; i++ {
invalidChildType1, err := clientSet.CoreV1().ConfigMaps(namespaceA).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{GenerateName: "invalid-child-", OwnerReferences: invalidOwnerReferences}}, metav1.CreateOptions{})
_, err := clientSet.CoreV1().ConfigMaps(namespaceA).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{GenerateName: "invalid-child-", OwnerReferences: invalidOwnerReferences}}, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
invalidChildType2, err := clientSet.CoreV1().Secrets(namespaceA).Create(context.TODO(), &v1.Secret{ObjectMeta: metav1.ObjectMeta{GenerateName: "invalid-child-a-", OwnerReferences: invalidOwnerReferences}}, metav1.CreateOptions{})
_, err = clientSet.CoreV1().Secrets(namespaceA).Create(context.TODO(), &v1.Secret{ObjectMeta: metav1.ObjectMeta{GenerateName: "invalid-child-a-", OwnerReferences: invalidOwnerReferences}}, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
invalidChildType3, err := clientSet.CoreV1().Secrets(namespaceA).Create(context.TODO(), &v1.Secret{
_, err = clientSet.CoreV1().Secrets(namespaceA).Create(context.TODO(), &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"single-bad-reference": "true"},
GenerateName: "invalid-child-b-",
@@ -386,7 +385,6 @@ func testCrossNamespaceReferences(t *testing.T, watchCache bool) {
if err != nil {
t.Fatal(err)
}
invalidUIDs = append(invalidUIDs, invalidChildType1.UID, invalidChildType2.UID, invalidChildType3.UID)
}

// start GC with existing objects in place to simulate controller-manager restart
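The invalidUIDs slice removed above was only appended to and never read back, which staticcheck also reports (likely SA4010, the result of append is never used), so the fix discards the created objects' return values instead of collecting their UIDs.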
@@ -572,7 +570,7 @@ func TestCreateWithNonExistentOwner(t *testing.T) {
}
}

func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet clientset.Interface, nameSuffix, namespace string, initialFinalizers []string, options metav1.DeleteOptions, wg *sync.WaitGroup, rcUIDs chan types.UID) {
func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet clientset.Interface, nameSuffix, namespace string, initialFinalizers []string, options metav1.DeleteOptions, wg *sync.WaitGroup, rcUIDs chan types.UID, errs chan string) {
defer wg.Done()
rcClient := clientSet.CoreV1().ReplicationControllers(namespace)
podClient := clientSet.CoreV1().Pods(namespace)
@@ -582,7 +580,8 @@ func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet
rc.ObjectMeta.Finalizers = initialFinalizers
rc, err := rcClient.Create(context.TODO(), rc, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Failed to create replication controller: %v", err)
errs <- fmt.Sprintf("Failed to create replication controller: %v", err)
return
}
rcUIDs <- rc.ObjectMeta.UID
// create pods.
@@ -592,7 +591,8 @@ func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet
pod := newPod(podName, namespace, []metav1.OwnerReference{{UID: rc.ObjectMeta.UID, Name: rc.ObjectMeta.Name}})
createdPod, err := podClient.Create(context.TODO(), pod, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Failed to create Pod: %v", err)
errs <- fmt.Sprintf("Failed to create Pod: %v", err)
return
}
podUIDs = append(podUIDs, createdPod.ObjectMeta.UID)
}
@@ -624,12 +624,14 @@ func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet
return true, nil
})
if err != nil {
t.Fatalf("failed to observe the expected pods in the GC graph for rc %s", rcName)
errs <- fmt.Sprintf("failed to observe the expected pods in the GC graph for rc %s", rcName)
return
}
}
// delete the rc
if err := rcClient.Delete(context.TODO(), rc.ObjectMeta.Name, options); err != nil {
t.Fatalf("failed to delete replication controller: %v", err)
errs <- fmt.Sprintf("failed to delete replication controller: %v", err)
return
}
}
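setupRCsPods runs in its own goroutine (see TestStressingCascadingDeletion below), and t.Fatalf may only be called from the goroutine running the test function, so failures are now reported over the new errs channel and the test itself fails after wg.Wait(); a stand-alone sketch of this pattern follows the diff.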

@@ -672,19 +674,24 @@ func TestStressingCascadingDeletion(t *testing.T) {
var wg sync.WaitGroup
wg.Add(collections * 5)
rcUIDs := make(chan types.UID, collections*5)
errs := make(chan string, 5)
for i := 0; i < collections; i++ {
// rc is created with empty finalizers, deleted with nil delete options, pods will remain.
go setupRCsPods(t, gc, clientSet, "collection1-"+strconv.Itoa(i), ns.Name, []string{}, metav1.DeleteOptions{}, &wg, rcUIDs)
go setupRCsPods(t, gc, clientSet, "collection1-"+strconv.Itoa(i), ns.Name, []string{}, metav1.DeleteOptions{}, &wg, rcUIDs, errs)
// rc is created with the orphan finalizer, deleted with nil options, pods will remain.
go setupRCsPods(t, gc, clientSet, "collection2-"+strconv.Itoa(i), ns.Name, []string{metav1.FinalizerOrphanDependents}, metav1.DeleteOptions{}, &wg, rcUIDs)
go setupRCsPods(t, gc, clientSet, "collection2-"+strconv.Itoa(i), ns.Name, []string{metav1.FinalizerOrphanDependents}, metav1.DeleteOptions{}, &wg, rcUIDs, errs)
// rc is created with the orphan finalizer, deleted with DeleteOptions.OrphanDependents=false, pods will be deleted.
go setupRCsPods(t, gc, clientSet, "collection3-"+strconv.Itoa(i), ns.Name, []string{metav1.FinalizerOrphanDependents}, getNonOrphanOptions(), &wg, rcUIDs)
go setupRCsPods(t, gc, clientSet, "collection3-"+strconv.Itoa(i), ns.Name, []string{metav1.FinalizerOrphanDependents}, getNonOrphanOptions(), &wg, rcUIDs, errs)
// rc is created with empty finalizers, deleted with DeleteOptions.OrphanDependents=true, pods will remain.
go setupRCsPods(t, gc, clientSet, "collection4-"+strconv.Itoa(i), ns.Name, []string{}, getOrphanOptions(), &wg, rcUIDs)
go setupRCsPods(t, gc, clientSet, "collection4-"+strconv.Itoa(i), ns.Name, []string{}, getOrphanOptions(), &wg, rcUIDs, errs)
// rc is created with empty finalizers, deleted with DeleteOptions.PropagationPolicy=Orphan, pods will remain.
go setupRCsPods(t, gc, clientSet, "collection5-"+strconv.Itoa(i), ns.Name, []string{}, getPropagateOrphanOptions(), &wg, rcUIDs)
go setupRCsPods(t, gc, clientSet, "collection5-"+strconv.Itoa(i), ns.Name, []string{}, getPropagateOrphanOptions(), &wg, rcUIDs, errs)
}
wg.Wait()
close(errs)
for errString := range errs {
t.Fatalf(errString)
Review comment (Member): it's a bit strange that we're not showing all the errors here, but I agree this is what the intent of the original code looks to be... fail on the first err and it's a race to see which one arrives first
}
t.Logf("all pods are created, all replications controllers are created then deleted")
// wait for the RCs and Pods to reach the expected numbers.
if err := wait.Poll(1*time.Second, 300*time.Second, func() (bool, error) {
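A minimal, self-contained sketch of the error-channel pattern the garbage collector test adopts above, assuming hypothetical worker and TestWorkersReportErrors names: workers never call t.Fatal themselves, they send failures over a buffered channel, and the test goroutine drains the channel and fails after all workers finish.

```go
package stress

import (
	"fmt"
	"sync"
	"testing"
)

// worker stands in for setupRCsPods: t.Fatal and t.FailNow are only valid from
// the goroutine running the test function, so failures are reported over errs.
func worker(id int, wg *sync.WaitGroup, errs chan<- string) {
	defer wg.Done()
	if id == 3 { // pretend this worker hits an API error
		errs <- fmt.Sprintf("worker %d: failed to create replication controller", id)
		return
	}
	// ... remaining per-worker setup would go here ...
}

func TestWorkersReportErrors(t *testing.T) {
	const workers = 5
	var wg sync.WaitGroup
	wg.Add(workers)
	// Buffered so a failing worker never blocks waiting for the test to read.
	errs := make(chan string, workers)
	for i := 0; i < workers; i++ {
		go worker(i, &wg, errs)
	}
	wg.Wait()
	close(errs)
	// Fail on the first reported error, matching the intent of the original
	// t.Fatalf calls noted by the reviewer above.
	for e := range errs {
		t.Fatal(e)
	}
}
```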