From 4f1a8e5e38d084422a6c146e3f428e10e5e10355 Mon Sep 17 00:00:00 2001 From: Caleb Woodbine Date: Mon, 6 Jan 2020 02:04:05 +0000 Subject: [PATCH 01/92] Add: test to ensure that an event can be fetched, patched, deleted, and listed --- test/e2e/framework/events/events.go | 206 ++++++++++++++++++++++++++++ 1 file changed, 206 insertions(+) diff --git a/test/e2e/framework/events/events.go b/test/e2e/framework/events/events.go index 10612c0a9373..5165dd3eee27 100644 --- a/test/e2e/framework/events/events.go +++ b/test/e2e/framework/events/events.go @@ -18,6 +18,7 @@ package events import ( "context" + "encoding/json" "fmt" "strings" "time" @@ -25,8 +26,213 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + "k8s.io/kubernetes/test/e2e/framework" + + "github.com/onsi/ginkgo" + "k8s.io/apimachinery/pkg/types" ) +// Action is a function to be performed by the system. +type Action func() error + +var _ = ginkgo.Describe("[sig-api-machinery] Events", func() { + f := framework.NewDefaultFramework("events") + + ginkgo.It("should ensure that an event can be fetched, patched, deleted, and listed", func() { + eventTestName := "event-test" + + ginkgo.By("creating a test event") + // create a test event in test namespace + _, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).Create(&v1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: eventTestName, + Labels: map[string]string{ + "testevent-constant": "true", + }, + }, + Message: "This is a test event", + Reason: "Test", + Type: "Normal", + Count: 1, + InvolvedObject: v1.ObjectReference{ + Namespace: f.Namespace.Name, + }, + }) + framework.ExpectNoError(err, "failed to create test event") + + ginkgo.By("listing all events in all namespaces") + // get a list of Events in all namespaces to ensure endpoint coverage + eventsList, err := f.ClientSet.CoreV1().Events("").List(metav1.ListOptions{ + LabelSelector: "testevent-constant=true", + }) + framework.ExpectNoError(err, "failed list all events") + + foundCreatedEvent := false + var eventCreatedName string + for _, val := range eventsList.Items { + if val.ObjectMeta.Name == eventTestName && val.ObjectMeta.Namespace == f.Namespace.Name { + foundCreatedEvent = true + eventCreatedName = val.ObjectMeta.Name + break + } + } + framework.ExpectEqual(foundCreatedEvent, true, "unable to find the test event") + + ginkgo.By("patching the test event") + // patch the event's message + eventPatchMessage := "This is a test event - patched" + eventPatch, err := json.Marshal(map[string]interface{}{ + "message": eventPatchMessage, + }) + framework.ExpectNoError(err, "failed to marshal the patch JSON payload") + + _, err = f.ClientSet.CoreV1().Events(f.Namespace.Name).Patch(eventTestName, types.StrategicMergePatchType, []byte(eventPatch)) + framework.ExpectNoError(err, "failed to patch the test event") + + ginkgo.By("fetching the test event") + // get event by name + event, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).Get(eventCreatedName, metav1.GetOptions{}) + framework.ExpectNoError(err, "failed to fetch the test event") + framework.ExpectEqual(event.Message, eventPatchMessage, "test event message does not match patch message") + + ginkgo.By("deleting the test event") + // delete original event + err = f.ClientSet.CoreV1().Events(f.Namespace.Name).Delete(eventCreatedName, &metav1.DeleteOptions{}) + framework.ExpectNoError(err, "failed to delete the test event") + + ginkgo.By("listing all events in all 
namespaces") + // get a list of Events list namespace + eventsList, err = f.ClientSet.CoreV1().Events("").List(metav1.ListOptions{ + LabelSelector: "testevent-constant=true", + }) + framework.ExpectNoError(err, "fail to list all events") + foundCreatedEvent = false + for _, val := range eventsList.Items { + if val.ObjectMeta.Name == eventTestName && val.ObjectMeta.Namespace == f.Namespace.Name { + foundCreatedEvent = true + break + } + } + framework.ExpectEqual(foundCreatedEvent, false, "failed to find test event") + }) +}) + +// ObserveNodeUpdateAfterAction returns true if a node update matching the predicate was emitted +// from the system after performing the supplied action. +func ObserveNodeUpdateAfterAction(c clientset.Interface, nodeName string, nodePredicate func(*v1.Node) bool, action Action) (bool, error) { + observedMatchingNode := false + nodeSelector := fields.OneTermEqualSelector("metadata.name", nodeName) + informerStartedChan := make(chan struct{}) + var informerStartedGuard sync.Once + + _, controller := cache.NewInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + options.FieldSelector = nodeSelector.String() + ls, err := c.CoreV1().Nodes().List(options) + return ls, err + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + // Signal parent goroutine that watching has begun. + defer informerStartedGuard.Do(func() { close(informerStartedChan) }) + options.FieldSelector = nodeSelector.String() + w, err := c.CoreV1().Nodes().Watch(options) + return w, err + }, + }, + &v1.Node{}, + 0, + cache.ResourceEventHandlerFuncs{ + UpdateFunc: func(oldObj, newObj interface{}) { + n, ok := newObj.(*v1.Node) + framework.ExpectEqual(ok, true) + if nodePredicate(n) { + observedMatchingNode = true + } + }, + }, + ) + + // Start the informer and block this goroutine waiting for the started signal. + informerStopChan := make(chan struct{}) + defer func() { close(informerStopChan) }() + go controller.Run(informerStopChan) + <-informerStartedChan + + // Invoke the action function. + err := action() + if err != nil { + return false, err + } + + // Poll whether the informer has found a matching node update with a timeout. + // Wait up 2 minutes polling every second. + timeout := 2 * time.Minute + interval := 1 * time.Second + err = wait.Poll(interval, timeout, func() (bool, error) { + return observedMatchingNode, nil + }) + return err == nil, err +} + +// ObserveEventAfterAction returns true if an event matching the predicate was emitted +// from the system after performing the supplied action. +func ObserveEventAfterAction(c clientset.Interface, ns string, eventPredicate func(*v1.Event) bool, action Action) (bool, error) { + observedMatchingEvent := false + informerStartedChan := make(chan struct{}) + var informerStartedGuard sync.Once + + // Create an informer to list/watch events from the test framework namespace. + _, controller := cache.NewInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + ls, err := c.CoreV1().Events(ns).List(options) + return ls, err + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + // Signal parent goroutine that watching has begun. 
+ defer informerStartedGuard.Do(func() { close(informerStartedChan) }) + w, err := c.CoreV1().Events(ns).Watch(options) + return w, err + }, + }, + &v1.Event{}, + 0, + cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + e, ok := obj.(*v1.Event) + ginkgo.By(fmt.Sprintf("Considering event: \nType = [%s], Name = [%s], Reason = [%s], Message = [%s]", e.Type, e.Name, e.Reason, e.Message)) + framework.ExpectEqual(ok, true) + if eventPredicate(e) { + observedMatchingEvent = true + } + }, + }, + ) + + // Start the informer and block this goroutine waiting for the started signal. + informerStopChan := make(chan struct{}) + defer func() { close(informerStopChan) }() + go controller.Run(informerStopChan) + <-informerStartedChan + + // Invoke the action function. + err := action() + if err != nil { + return false, err + } + + // Poll whether the informer has found a matching event with a timeout. + // Wait up 2 minutes polling every second. + timeout := 2 * time.Minute + interval := 1 * time.Second + err = wait.Poll(interval, timeout, func() (bool, error) { + return observedMatchingEvent, nil + }) + return err == nil, err +} + // WaitTimeoutForEvent waits the given timeout duration for an event to occur. func WaitTimeoutForEvent(c clientset.Interface, namespace, eventSelector, msg string, timeout time.Duration) error { interval := 2 * time.Second From 9a8e1a1462bd99e0163934030f96ea0d018a52c1 Mon Sep 17 00:00:00 2001 From: Caleb Woodbine Date: Tue, 28 Jan 2020 19:14:17 +0000 Subject: [PATCH 02/92] Fix: types.go in BUILD file --- test/e2e/framework/events/BUILD | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/e2e/framework/events/BUILD b/test/e2e/framework/events/BUILD index ebac16a84d2a..ffbe4ee3c44d 100644 --- a/test/e2e/framework/events/BUILD +++ b/test/e2e/framework/events/BUILD @@ -7,6 +7,9 @@ go_library( visibility = ["//visibility:public"], deps = [ "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", ], From 54f9654799ffd3a25bc4f068c7b18eb1dde5dd49 Mon Sep 17 00:00:00 2001 From: Caleb Woodbine Date: Wed, 26 Feb 2020 16:10:25 +1300 Subject: [PATCH 03/92] Fix build failure; Remove unrelated code --- test/e2e/framework/events/BUILD | 5 +- test/e2e/framework/events/events.go | 131 ++-------------------------- 2 files changed, 11 insertions(+), 125 deletions(-) diff --git a/test/e2e/framework/events/BUILD b/test/e2e/framework/events/BUILD index ffbe4ee3c44d..671c711dcacc 100644 --- a/test/e2e/framework/events/BUILD +++ b/test/e2e/framework/events/BUILD @@ -7,11 +7,12 @@ go_library( visibility = ["//visibility:public"], deps = [ "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/client-go/tools/cache:go_default_library", + "//test/e2e/framework:go_default_library", + "//vendor/github.com/onsi/ginkgo:go_default_library", ], ) diff --git 
a/test/e2e/framework/events/events.go b/test/e2e/framework/events/events.go index 5165dd3eee27..4b060d3fffee 100644 --- a/test/e2e/framework/events/events.go +++ b/test/e2e/framework/events/events.go @@ -23,10 +23,10 @@ import ( "strings" "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/test/e2e/framework" "github.com/onsi/ginkgo" @@ -44,7 +44,7 @@ var _ = ginkgo.Describe("[sig-api-machinery] Events", func() { ginkgo.By("creating a test event") // create a test event in test namespace - _, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).Create(&v1.Event{ + _, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).Create(context.TODO(), &v1.Event{ ObjectMeta: metav1.ObjectMeta{ Name: eventTestName, Labels: map[string]string{ @@ -58,12 +58,12 @@ var _ = ginkgo.Describe("[sig-api-machinery] Events", func() { InvolvedObject: v1.ObjectReference{ Namespace: f.Namespace.Name, }, - }) + }, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create test event") ginkgo.By("listing all events in all namespaces") // get a list of Events in all namespaces to ensure endpoint coverage - eventsList, err := f.ClientSet.CoreV1().Events("").List(metav1.ListOptions{ + eventsList, err := f.ClientSet.CoreV1().Events("").List(context.TODO(), metav1.ListOptions{ LabelSelector: "testevent-constant=true", }) framework.ExpectNoError(err, "failed list all events") @@ -87,23 +87,23 @@ var _ = ginkgo.Describe("[sig-api-machinery] Events", func() { }) framework.ExpectNoError(err, "failed to marshal the patch JSON payload") - _, err = f.ClientSet.CoreV1().Events(f.Namespace.Name).Patch(eventTestName, types.StrategicMergePatchType, []byte(eventPatch)) + _, err = f.ClientSet.CoreV1().Events(f.Namespace.Name).Patch(context.TODO(), eventTestName, types.StrategicMergePatchType, []byte(eventPatch), metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to patch the test event") ginkgo.By("fetching the test event") // get event by name - event, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).Get(eventCreatedName, metav1.GetOptions{}) + event, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).Get(context.TODO(), eventCreatedName, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to fetch the test event") framework.ExpectEqual(event.Message, eventPatchMessage, "test event message does not match patch message") ginkgo.By("deleting the test event") // delete original event - err = f.ClientSet.CoreV1().Events(f.Namespace.Name).Delete(eventCreatedName, &metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().Events(f.Namespace.Name).Delete(context.TODO(), eventCreatedName, &metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete the test event") ginkgo.By("listing all events in all namespaces") // get a list of Events list namespace - eventsList, err = f.ClientSet.CoreV1().Events("").List(metav1.ListOptions{ + eventsList, err = f.ClientSet.CoreV1().Events("").List(context.TODO(), metav1.ListOptions{ LabelSelector: "testevent-constant=true", }) framework.ExpectNoError(err, "fail to list all events") @@ -118,121 +118,6 @@ var _ = ginkgo.Describe("[sig-api-machinery] Events", func() { }) }) -// ObserveNodeUpdateAfterAction returns true if a node update matching the predicate was emitted -// from the system after performing the supplied action. 
-func ObserveNodeUpdateAfterAction(c clientset.Interface, nodeName string, nodePredicate func(*v1.Node) bool, action Action) (bool, error) { - observedMatchingNode := false - nodeSelector := fields.OneTermEqualSelector("metadata.name", nodeName) - informerStartedChan := make(chan struct{}) - var informerStartedGuard sync.Once - - _, controller := cache.NewInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - options.FieldSelector = nodeSelector.String() - ls, err := c.CoreV1().Nodes().List(options) - return ls, err - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - // Signal parent goroutine that watching has begun. - defer informerStartedGuard.Do(func() { close(informerStartedChan) }) - options.FieldSelector = nodeSelector.String() - w, err := c.CoreV1().Nodes().Watch(options) - return w, err - }, - }, - &v1.Node{}, - 0, - cache.ResourceEventHandlerFuncs{ - UpdateFunc: func(oldObj, newObj interface{}) { - n, ok := newObj.(*v1.Node) - framework.ExpectEqual(ok, true) - if nodePredicate(n) { - observedMatchingNode = true - } - }, - }, - ) - - // Start the informer and block this goroutine waiting for the started signal. - informerStopChan := make(chan struct{}) - defer func() { close(informerStopChan) }() - go controller.Run(informerStopChan) - <-informerStartedChan - - // Invoke the action function. - err := action() - if err != nil { - return false, err - } - - // Poll whether the informer has found a matching node update with a timeout. - // Wait up 2 minutes polling every second. - timeout := 2 * time.Minute - interval := 1 * time.Second - err = wait.Poll(interval, timeout, func() (bool, error) { - return observedMatchingNode, nil - }) - return err == nil, err -} - -// ObserveEventAfterAction returns true if an event matching the predicate was emitted -// from the system after performing the supplied action. -func ObserveEventAfterAction(c clientset.Interface, ns string, eventPredicate func(*v1.Event) bool, action Action) (bool, error) { - observedMatchingEvent := false - informerStartedChan := make(chan struct{}) - var informerStartedGuard sync.Once - - // Create an informer to list/watch events from the test framework namespace. - _, controller := cache.NewInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - ls, err := c.CoreV1().Events(ns).List(options) - return ls, err - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - // Signal parent goroutine that watching has begun. - defer informerStartedGuard.Do(func() { close(informerStartedChan) }) - w, err := c.CoreV1().Events(ns).Watch(options) - return w, err - }, - }, - &v1.Event{}, - 0, - cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - e, ok := obj.(*v1.Event) - ginkgo.By(fmt.Sprintf("Considering event: \nType = [%s], Name = [%s], Reason = [%s], Message = [%s]", e.Type, e.Name, e.Reason, e.Message)) - framework.ExpectEqual(ok, true) - if eventPredicate(e) { - observedMatchingEvent = true - } - }, - }, - ) - - // Start the informer and block this goroutine waiting for the started signal. - informerStopChan := make(chan struct{}) - defer func() { close(informerStopChan) }() - go controller.Run(informerStopChan) - <-informerStartedChan - - // Invoke the action function. - err := action() - if err != nil { - return false, err - } - - // Poll whether the informer has found a matching event with a timeout. - // Wait up 2 minutes polling every second. 
- timeout := 2 * time.Minute - interval := 1 * time.Second - err = wait.Poll(interval, timeout, func() (bool, error) { - return observedMatchingEvent, nil - }) - return err == nil, err -} - // WaitTimeoutForEvent waits the given timeout duration for an event to occur. func WaitTimeoutForEvent(c clientset.Interface, namespace, eventSelector, msg string, timeout time.Duration) error { interval := 2 * time.Second From be7332e91761c40b0e2b2c3c0e781ecc3243c63e Mon Sep 17 00:00:00 2001 From: Caleb Woodbine Date: Wed, 26 Feb 2020 16:54:20 +1300 Subject: [PATCH 04/92] Fix BUILD --- test/e2e/framework/events/BUILD | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/framework/events/BUILD b/test/e2e/framework/events/BUILD index 671c711dcacc..e7f503cfe61b 100644 --- a/test/e2e/framework/events/BUILD +++ b/test/e2e/framework/events/BUILD @@ -6,11 +6,11 @@ go_library( importpath = "k8s.io/kubernetes/test/e2e/framework/events", visibility = ["//visibility:public"], deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//test/e2e/framework:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", ], From 6b8b08c8ab2766adf3498a668ad2649e11ba91c7 Mon Sep 17 00:00:00 2001 From: Caleb Woodbine Date: Thu, 27 Feb 2020 09:53:35 +1300 Subject: [PATCH 05/92] Create ReplicationController lifecycle test --- test/e2e/apps/rc.go | 170 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 170 insertions(+) diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go index c5b3246ed7b6..c465e39b7814 100644 --- a/test/e2e/apps/rc.go +++ b/test/e2e/apps/rc.go @@ -17,6 +17,7 @@ limitations under the License. 
package apps import ( + "encoding/json" "context" "fmt" "time" @@ -34,6 +35,10 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" imageutils "k8s.io/kubernetes/test/utils/image" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/runtime/schema" + autoscalingv1 "k8s.io/api/autoscaling/v1" + "k8s.io/client-go/dynamic" "github.com/onsi/ginkgo" ) @@ -41,6 +46,14 @@ import ( var _ = SIGDescribe("ReplicationController", func() { f := framework.NewDefaultFramework("replication-controller") + var ns string + var dc dynamic.Interface + + ginkgo.BeforeEach(func() { + ns = f.Namespace.Name + dc = f.DynamicClient + }) + /* Release : v1.9 Testname: Replication Controller, run basic image @@ -84,6 +97,163 @@ var _ = SIGDescribe("ReplicationController", func() { framework.ConformanceIt("should release no longer matching pods", func() { testRCReleaseControlledNotMatching(f) }) + + ginkgo.It("should test the lifecycle of a ReplicationController", func() { + testRcName := "rc-test" + testRcNamespace := ns + testRcInitialReplicaCount := int32(1) + testRcMaxReplicaCount := int32(2) + rcResource := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "replicationcontrollers"} + + rcTest := v1.ReplicationController{ + ObjectMeta: metav1.ObjectMeta{ + Name: testRcName, + Labels: map[string]string{"test-rc-static": "true"}, + }, + Spec: v1.ReplicationControllerSpec{ + Replicas: &testRcInitialReplicaCount, + Selector: map[string]string{"test-rc-static": "true"}, + Template: &v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: testRcName, + Labels: map[string]string{"test-rc-static": "true"}, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{{ + Name: testRcName, + Image: "nginx", + }}, + }, + }, + }, + } + + // Create a ReplicationController + _, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Create(context.TODO(), &rcTest, metav1.CreateOptions{}) + framework.ExpectNoError(err, "Failed to create ReplicationController") + + // setup a watch for the RC + rcWatch, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Watch(context.TODO(), metav1.ListOptions{LabelSelector: "test-rc-static=true"}) + framework.ExpectNoError(err, "Failed to setup watch on newly created ReplicationController") + + rcWatchChan := rcWatch.ResultChan() + + for event := range rcWatchChan { + rc, ok := event.Object.(*v1.ReplicationController) + framework.ExpectEqual(ok, true, "Unable to convert type of ReplicationController watch event") + if rc.Status.Replicas == testRcInitialReplicaCount && rc.Status.ReadyReplicas == testRcInitialReplicaCount { + break + } + } + + rcLabelPatchPayload, err := json.Marshal(v1.ReplicationController{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"test-rc": "patched"}, + }, + }) + framework.ExpectNoError(err, "failed to marshal json of replicationcontroller label patch") + // Patch the ReplicationController + _, err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(context.TODO(), testRcName, types.StrategicMergePatchType, []byte(rcLabelPatchPayload), metav1.PatchOptions{}) + framework.ExpectNoError(err, "Failed to patch ReplicationController") + + rcStatusPatchPayload, err := json.Marshal(map[string]interface{}{ + "status": map[string]interface{}{ + "readyReplicas": 0, + "availableReplicas": 0, + }, + }) + framework.ExpectNoError(err, "Failed to marshal JSON of ReplicationController label patch") + + // Patch the ReplicationController's status 
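+ // The trailing "status" argument is the subresource: client-go sends this PATCH to .../replicationcontrollers/<name>/status instead of the main resource.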
+ rcStatus, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(context.TODO(), testRcName, types.StrategicMergePatchType, []byte(rcStatusPatchPayload), metav1.PatchOptions{}, "status") + framework.ExpectNoError(err, "Failed to patch ReplicationControllerStatus") + framework.ExpectEqual(rcStatus.Status.ReadyReplicas, int32(0), "ReplicationControllerStatus's readyReplicas does not equal 0") + + rcStatusUnstructured, err := dc.Resource(rcResource).Namespace(testRcNamespace).Get(testRcName, metav1.GetOptions{}, "status") + framework.ExpectNoError(err, "Failed to fetch ReplicationControllerStatus") + + rcStatusUjson, err := json.Marshal(rcStatusUnstructured) + framework.ExpectNoError(err, "Failed to marshal json of replicationcontroller label patch") + json.Unmarshal(rcStatusUjson, &rcStatus) + framework.ExpectEqual(rcStatus.Status.Replicas, testRcInitialReplicaCount, "ReplicationController ReplicaSet count does not match initial Replica count") + + rcScalePatchPayload, err := json.Marshal(autoscalingv1.Scale{ + Spec: autoscalingv1.ScaleSpec{ + Replicas: 2, + }, + }) + framework.ExpectNoError(err, "Failed to marshal json of replicationcontroller label patch") + + // Patch the ReplicationController's scale + rcScale, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(context.TODO(), testRcName, types.StrategicMergePatchType, []byte(rcScalePatchPayload), metav1.PatchOptions{}, "scale") + framework.ExpectNoError(err, "Failed to patch ReplicationControllerScale") + + for event := range rcWatchChan { + rc, ok := event.Object.(*v1.ReplicationController) + framework.ExpectEqual(ok, true, "Unable to convert type of ReplicationController watch event") + if rc.Status.Replicas == testRcMaxReplicaCount && rc.Status.ReadyReplicas == testRcMaxReplicaCount { + rcScale = rc + break + } + } + framework.ExpectEqual(rcScale.Status.Replicas, testRcMaxReplicaCount, "ReplicationController ReplicasSet Scale does not match the expected scale") + + // Get the ReplicationController + rc, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Get(context.TODO(), testRcName, metav1.GetOptions{}) + framework.ExpectNoError(err, "Failed to fetch ReplicationController") + framework.ExpectEqual(rc.ObjectMeta.Labels["test-rc"], "patched", "ReplicationController is missing a label from earlier patch") + + rcStatusUpdatePayload := rc + rcStatusUpdatePayload.Status.AvailableReplicas = 1 + rcStatusUpdatePayload.Status.ReadyReplicas = 1 + + // Replace the ReplicationController's status + rcStatus, err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).UpdateStatus(context.TODO(), rcStatusUpdatePayload, metav1.UpdateOptions{}) + framework.ExpectNoError(err, "Failed to update ReplicationControllerStatus") + framework.ExpectEqual(rcStatus.Status.ReadyReplicas, int32(1), "ReplicationControllerStatus readyReplicas does not equal 1") + + for event := range rcWatchChan { + rc, ok := event.Object.(*v1.ReplicationController) + framework.ExpectEqual(ok, true, "Unable to convert type of ReplicationController watch event") + if rc.Status.Replicas == testRcMaxReplicaCount && rc.Status.ReadyReplicas == testRcMaxReplicaCount { + break + } + } + + rcs, err := f.ClientSet.CoreV1().ReplicationControllers("").List(context.TODO(), metav1.ListOptions{LabelSelector: "test-rc-static=true"}) + framework.ExpectNoError(err, "Failed to list ReplicationController") + framework.ExpectEqual(len(rcs.Items) > 0, true) + + foundRc := false + for _, rcItem := range rcs.Items { + if 
rcItem.ObjectMeta.Name == testRcName && + rcItem.ObjectMeta.Namespace == testRcNamespace && + rcItem.ObjectMeta.Labels["test-rc-static"] == "true" && + rcItem.ObjectMeta.Labels["test-rc"] == "patched" && + rcItem.Status.Replicas == testRcMaxReplicaCount && + rcItem.Status.ReadyReplicas == testRcMaxReplicaCount { + foundRc = true + } + } + framework.ExpectEqual(foundRc, true) + + // Delete ReplicationController + err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).DeleteCollection(context.TODO(), &metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test-rc-static=true"}) + framework.ExpectNoError(err, "Failed to delete ReplicationControllers") + + for event := range rcWatchChan { + rc, ok := event.Object.(*v1.ReplicationController) + framework.ExpectEqual(ok, true, "Unable to convert type of ReplicationController watch event") + if rc.ObjectMeta.DeletionTimestamp != nil { + break + } + } + time.Sleep(1 * time.Second) + + // Get the ReplicationController to check that it's deleted + _, err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Get(context.TODO(), testRcName, metav1.GetOptions{}) + framework.ExpectError(err, "Failed to delete ReplicationController") + }) }) func newRC(rsName string, replicas int32, rcPodLabels map[string]string, imageName string, image string, args []string) *v1.ReplicationController { From 2622a6caceca8c1b9a258c8c8743fb83c4ded7b1 Mon Sep 17 00:00:00 2001 From: Caleb Woodbine Date: Thu, 27 Feb 2020 14:48:39 +1300 Subject: [PATCH 06/92] Fix formatting and BUILD --- test/e2e/apps/BUILD | 1 + test/e2e/apps/rc.go | 18 +++++++++--------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/test/e2e/apps/BUILD b/test/e2e/apps/BUILD index ba8a93c495e6..50686bd25798 100644 --- a/test/e2e/apps/BUILD +++ b/test/e2e/apps/BUILD @@ -38,6 +38,7 @@ go_library( "//pkg/master/ports:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", + "//staging/src/k8s.io/api/autoscaling/v1:go_default_library", "//staging/src/k8s.io/api/batch/v1:go_default_library", "//staging/src/k8s.io/api/batch/v1beta1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go index c465e39b7814..4d96def684d4 100644 --- a/test/e2e/apps/rc.go +++ b/test/e2e/apps/rc.go @@ -17,28 +17,28 @@ limitations under the License. 
package apps import ( - "encoding/json" "context" + "encoding/json" "fmt" "time" + autoscalingv1 "k8s.io/api/autoscaling/v1" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/dynamic" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/pkg/controller/replication" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" imageutils "k8s.io/kubernetes/test/utils/image" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/runtime/schema" - autoscalingv1 "k8s.io/api/autoscaling/v1" - "k8s.io/client-go/dynamic" "github.com/onsi/ginkgo" ) @@ -107,7 +107,7 @@ var _ = SIGDescribe("ReplicationController", func() { rcTest := v1.ReplicationController{ ObjectMeta: metav1.ObjectMeta{ - Name: testRcName, + Name: testRcName, Labels: map[string]string{"test-rc-static": "true"}, }, Spec: v1.ReplicationControllerSpec{ @@ -115,12 +115,12 @@ var _ = SIGDescribe("ReplicationController", func() { Selector: map[string]string{"test-rc-static": "true"}, Template: &v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Name: testRcName, + Name: testRcName, Labels: map[string]string{"test-rc-static": "true"}, }, Spec: v1.PodSpec{ Containers: []v1.Container{{ - Name: testRcName, + Name: testRcName, Image: "nginx", }}, }, @@ -158,7 +158,7 @@ var _ = SIGDescribe("ReplicationController", func() { rcStatusPatchPayload, err := json.Marshal(map[string]interface{}{ "status": map[string]interface{}{ - "readyReplicas": 0, + "readyReplicas": 0, "availableReplicas": 0, }, }) From e5e303786880da3ec24e66161ed344660e8a7f6c Mon Sep 17 00:00:00 2001 From: Caleb Woodbine Date: Mon, 2 Mar 2020 13:46:41 +1300 Subject: [PATCH 07/92] Use max Replica count instead of hardcoded value --- test/e2e/apps/rc.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go index 4d96def684d4..88e43243a4b9 100644 --- a/test/e2e/apps/rc.go +++ b/test/e2e/apps/rc.go @@ -179,7 +179,7 @@ var _ = SIGDescribe("ReplicationController", func() { rcScalePatchPayload, err := json.Marshal(autoscalingv1.Scale{ Spec: autoscalingv1.ScaleSpec{ - Replicas: 2, + Replicas: testRcMaxReplicaCount, }, }) framework.ExpectNoError(err, "Failed to marshal json of replicationcontroller label patch") From 5239ae44b6848598691013f30fb9bb40be2b69ff Mon Sep 17 00:00:00 2001 From: Caleb Woodbine Date: Mon, 2 Mar 2020 15:42:23 +1300 Subject: [PATCH 08/92] Update delay time before deletion recheck --- test/e2e/apps/rc.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go index 88e43243a4b9..2d70af236618 100644 --- a/test/e2e/apps/rc.go +++ b/test/e2e/apps/rc.go @@ -248,7 +248,7 @@ var _ = SIGDescribe("ReplicationController", func() { break } } - time.Sleep(1 * time.Second) + time.Sleep(10 * time.Second) // Get the ReplicationController to check that it's deleted _, err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Get(context.TODO(), testRcName, metav1.GetOptions{}) From 3b1e2249b076b7d5d3b319add0eb8128390133b8 Mon Sep 17 00:00:00 2001 From: Caleb Woodbine Date: Wed, 4 Mar 2020 09:19:06 +1300 Subject: [PATCH 09/92] Add ginkgo.By statements, TimeoutSeconds to 
the ReplicationController watch --- test/e2e/apps/rc.go | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go index 2d70af236618..abbb0abcef56 100644 --- a/test/e2e/apps/rc.go +++ b/test/e2e/apps/rc.go @@ -128,16 +128,19 @@ var _ = SIGDescribe("ReplicationController", func() { }, } + ginkgo.By("creating a ReplicationController") // Create a ReplicationController _, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Create(context.TODO(), &rcTest, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create ReplicationController") // setup a watch for the RC - rcWatch, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Watch(context.TODO(), metav1.ListOptions{LabelSelector: "test-rc-static=true"}) + rcWatchTimeoutSeconds := int64(60) + rcWatch, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Watch(context.TODO(), metav1.ListOptions{LabelSelector: "test-rc-static=true", TimeoutSeconds: &rcWatchTimeoutSeconds}) framework.ExpectNoError(err, "Failed to setup watch on newly created ReplicationController") rcWatchChan := rcWatch.ResultChan() + ginkgo.By("waiting for available Replicas") for event := range rcWatchChan { rc, ok := event.Object.(*v1.ReplicationController) framework.ExpectEqual(ok, true, "Unable to convert type of ReplicationController watch event") @@ -153,6 +156,7 @@ var _ = SIGDescribe("ReplicationController", func() { }) framework.ExpectNoError(err, "failed to marshal json of replicationcontroller label patch") // Patch the ReplicationController + ginkgo.By("patching ReplicationController") _, err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(context.TODO(), testRcName, types.StrategicMergePatchType, []byte(rcLabelPatchPayload), metav1.PatchOptions{}) framework.ExpectNoError(err, "Failed to patch ReplicationController") @@ -165,10 +169,12 @@ var _ = SIGDescribe("ReplicationController", func() { framework.ExpectNoError(err, "Failed to marshal JSON of ReplicationController label patch") // Patch the ReplicationController's status + ginkgo.By("patching ReplicationController status") rcStatus, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(context.TODO(), testRcName, types.StrategicMergePatchType, []byte(rcStatusPatchPayload), metav1.PatchOptions{}, "status") framework.ExpectNoError(err, "Failed to patch ReplicationControllerStatus") framework.ExpectEqual(rcStatus.Status.ReadyReplicas, int32(0), "ReplicationControllerStatus's readyReplicas does not equal 0") + ginkgo.By("fetching ReplicationController status") rcStatusUnstructured, err := dc.Resource(rcResource).Namespace(testRcNamespace).Get(testRcName, metav1.GetOptions{}, "status") framework.ExpectNoError(err, "Failed to fetch ReplicationControllerStatus") @@ -185,6 +191,7 @@ var _ = SIGDescribe("ReplicationController", func() { framework.ExpectNoError(err, "Failed to marshal json of replicationcontroller label patch") // Patch the ReplicationController's scale + ginkgo.By("patching ReplicationController scale") rcScale, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(context.TODO(), testRcName, types.StrategicMergePatchType, []byte(rcScalePatchPayload), metav1.PatchOptions{}, "scale") framework.ExpectNoError(err, "Failed to patch ReplicationControllerScale") @@ -199,6 +206,7 @@ var _ = SIGDescribe("ReplicationController", func() { framework.ExpectEqual(rcScale.Status.Replicas, testRcMaxReplicaCount, 
"ReplicationController ReplicasSet Scale does not match the expected scale") // Get the ReplicationController + ginkgo.By("fetching ReplicationController; ensuring that it's patched") rc, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Get(context.TODO(), testRcName, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to fetch ReplicationController") framework.ExpectEqual(rc.ObjectMeta.Labels["test-rc"], "patched", "ReplicationController is missing a label from earlier patch") @@ -208,10 +216,12 @@ var _ = SIGDescribe("ReplicationController", func() { rcStatusUpdatePayload.Status.ReadyReplicas = 1 // Replace the ReplicationController's status + ginkgo.By("updating ReplicationController status") rcStatus, err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).UpdateStatus(context.TODO(), rcStatusUpdatePayload, metav1.UpdateOptions{}) framework.ExpectNoError(err, "Failed to update ReplicationControllerStatus") framework.ExpectEqual(rcStatus.Status.ReadyReplicas, int32(1), "ReplicationControllerStatus readyReplicas does not equal 1") + ginkgo.By(fmt.Sprintf("waiting for ReplicationController readyReplicas to be equal to %v", testRcMaxReplicaCount)) for event := range rcWatchChan { rc, ok := event.Object.(*v1.ReplicationController) framework.ExpectEqual(ok, true, "Unable to convert type of ReplicationController watch event") @@ -220,10 +230,12 @@ var _ = SIGDescribe("ReplicationController", func() { } } + ginkgo.By("listing all ReplicationControllers") rcs, err := f.ClientSet.CoreV1().ReplicationControllers("").List(context.TODO(), metav1.ListOptions{LabelSelector: "test-rc-static=true"}) framework.ExpectNoError(err, "Failed to list ReplicationController") framework.ExpectEqual(len(rcs.Items) > 0, true) + ginkgo.By("checking that ReplicationController has expected values") foundRc := false for _, rcItem := range rcs.Items { if rcItem.ObjectMeta.Name == testRcName && @@ -238,9 +250,11 @@ var _ = SIGDescribe("ReplicationController", func() { framework.ExpectEqual(foundRc, true) // Delete ReplicationController + ginkgo.By("deleting ReplicationControllers by collection") err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).DeleteCollection(context.TODO(), &metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test-rc-static=true"}) framework.ExpectNoError(err, "Failed to delete ReplicationControllers") + ginkgo.By("waiting for ReplicationController is have a DeletionTimestamp") for event := range rcWatchChan { rc, ok := event.Object.(*v1.ReplicationController) framework.ExpectEqual(ok, true, "Unable to convert type of ReplicationController watch event") @@ -248,9 +262,11 @@ var _ = SIGDescribe("ReplicationController", func() { break } } + ginkgo.By("waiting for 10 seconds to ensure that it's deleted") time.Sleep(10 * time.Second) // Get the ReplicationController to check that it's deleted + ginkgo.By("fetching the ReplicationController to ensure that it's deleted") _, err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Get(context.TODO(), testRcName, metav1.GetOptions{}) framework.ExpectError(err, "Failed to delete ReplicationController") }) From 054a886bf960fa5523714ea29aa224956d0756d0 Mon Sep 17 00:00:00 2001 From: Caleb Woodbine Date: Wed, 4 Mar 2020 11:14:03 +1300 Subject: [PATCH 10/92] Remove sleep and final fetch of ReplicationController --- test/e2e/apps/rc.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go index abbb0abcef56..f1c8a16edc45 100644 --- 
a/test/e2e/apps/rc.go +++ b/test/e2e/apps/rc.go @@ -262,13 +262,6 @@ var _ = SIGDescribe("ReplicationController", func() { break } } - ginkgo.By("waiting for 10 seconds to ensure that it's deleted") - time.Sleep(10 * time.Second) - - // Get the ReplicationController to check that it's deleted - ginkgo.By("fetching the ReplicationController to ensure that it's deleted") - _, err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Get(context.TODO(), testRcName, metav1.GetOptions{}) - framework.ExpectError(err, "Failed to delete ReplicationController") }) }) From c715fec47d2a3450d20fd6c61a0435750b286f87 Mon Sep 17 00:00:00 2001 From: Caleb Woodbine Date: Wed, 4 Mar 2020 13:08:59 +1300 Subject: [PATCH 11/92] Update ReplicationController event watch check --- test/e2e/apps/rc.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go index f1c8a16edc45..fa2f1ba23554 100644 --- a/test/e2e/apps/rc.go +++ b/test/e2e/apps/rc.go @@ -256,9 +256,7 @@ var _ = SIGDescribe("ReplicationController", func() { ginkgo.By("waiting for ReplicationController is have a DeletionTimestamp") for event := range rcWatchChan { - rc, ok := event.Object.(*v1.ReplicationController) - framework.ExpectEqual(ok, true, "Unable to convert type of ReplicationController watch event") - if rc.ObjectMeta.DeletionTimestamp != nil { + if event.Type == "DELETED" { break } } From 3fa4acb9b7030f5364fc68e4876572cf42239ab5 Mon Sep 17 00:00:00 2001 From: Caleb Woodbine Date: Wed, 4 Mar 2020 14:55:11 +1300 Subject: [PATCH 12/92] Ensure current Replica check is from ReplicationController watch --- test/e2e/apps/rc.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go index fa2f1ba23554..a7ebadd97f5b 100644 --- a/test/e2e/apps/rc.go +++ b/test/e2e/apps/rc.go @@ -192,18 +192,20 @@ var _ = SIGDescribe("ReplicationController", func() { // Patch the ReplicationController's scale ginkgo.By("patching ReplicationController scale") - rcScale, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(context.TODO(), testRcName, types.StrategicMergePatchType, []byte(rcScalePatchPayload), metav1.PatchOptions{}, "scale") + _, err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(context.TODO(), testRcName, types.StrategicMergePatchType, []byte(rcScalePatchPayload), metav1.PatchOptions{}, "scale") framework.ExpectNoError(err, "Failed to patch ReplicationControllerScale") + var rcFromWatch *v1.ReplicationController + ginkgo.By("waiting for ReplicationController's scale to be the max amount") for event := range rcWatchChan { rc, ok := event.Object.(*v1.ReplicationController) framework.ExpectEqual(ok, true, "Unable to convert type of ReplicationController watch event") if rc.Status.Replicas == testRcMaxReplicaCount && rc.Status.ReadyReplicas == testRcMaxReplicaCount { - rcScale = rc + rcFromWatch = rc break } } - framework.ExpectEqual(rcScale.Status.Replicas, testRcMaxReplicaCount, "ReplicationController ReplicasSet Scale does not match the expected scale") + framework.ExpectEqual(rcFromWatch.Status.Replicas, testRcMaxReplicaCount, "ReplicationController ReplicasSet Scale does not match the expected scale") // Get the ReplicationController ginkgo.By("fetching ReplicationController; ensuring that it's patched") From ca3542273cb59e751a3058ce65dc08fe17599c50 Mon Sep 17 00:00:00 2001 From: Caleb Woodbine Date: Wed, 4 Mar 2020 15:44:46 +1300 Subject: [PATCH 13/92] Update Replica count check, undo 
capitalization for framework.Expect* statements --- test/e2e/apps/rc.go | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go index a7ebadd97f5b..6674797c06bf 100644 --- a/test/e2e/apps/rc.go +++ b/test/e2e/apps/rc.go @@ -197,20 +197,23 @@ var _ = SIGDescribe("ReplicationController", func() { var rcFromWatch *v1.ReplicationController ginkgo.By("waiting for ReplicationController's scale to be the max amount") + foundRcWithMaxScale := false for event := range rcWatchChan { rc, ok := event.Object.(*v1.ReplicationController) framework.ExpectEqual(ok, true, "Unable to convert type of ReplicationController watch event") - if rc.Status.Replicas == testRcMaxReplicaCount && rc.Status.ReadyReplicas == testRcMaxReplicaCount { + if rc.ObjectMeta.Name == testRcName && rc.ObjectMeta.Namespace == testRcNamespace && rc.Status.Replicas == testRcMaxReplicaCount && rc.Status.ReadyReplicas == testRcMaxReplicaCount { + foundRcWithMaxScale = true rcFromWatch = rc break } } + framework.ExpectEqual(foundRcWithMaxScale, true, "failed to locate a ReplicationController with max scale") framework.ExpectEqual(rcFromWatch.Status.Replicas, testRcMaxReplicaCount, "ReplicationController ReplicasSet Scale does not match the expected scale") // Get the ReplicationController ginkgo.By("fetching ReplicationController; ensuring that it's patched") rc, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Get(context.TODO(), testRcName, metav1.GetOptions{}) - framework.ExpectNoError(err, "Failed to fetch ReplicationController") + framework.ExpectNoError(err, "failed to fetch ReplicationController") framework.ExpectEqual(rc.ObjectMeta.Labels["test-rc"], "patched", "ReplicationController is missing a label from earlier patch") rcStatusUpdatePayload := rc @@ -220,13 +223,13 @@ var _ = SIGDescribe("ReplicationController", func() { // Replace the ReplicationController's status ginkgo.By("updating ReplicationController status") rcStatus, err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).UpdateStatus(context.TODO(), rcStatusUpdatePayload, metav1.UpdateOptions{}) - framework.ExpectNoError(err, "Failed to update ReplicationControllerStatus") + framework.ExpectNoError(err, "failed to update ReplicationControllerStatus") framework.ExpectEqual(rcStatus.Status.ReadyReplicas, int32(1), "ReplicationControllerStatus readyReplicas does not equal 1") ginkgo.By(fmt.Sprintf("waiting for ReplicationController readyReplicas to be equal to %v", testRcMaxReplicaCount)) for event := range rcWatchChan { rc, ok := event.Object.(*v1.ReplicationController) - framework.ExpectEqual(ok, true, "Unable to convert type of ReplicationController watch event") + framework.ExpectEqual(ok, true, "unable to convert type of ReplicationController watch event") if rc.Status.Replicas == testRcMaxReplicaCount && rc.Status.ReadyReplicas == testRcMaxReplicaCount { break } @@ -234,7 +237,7 @@ var _ = SIGDescribe("ReplicationController", func() { ginkgo.By("listing all ReplicationControllers") rcs, err := f.ClientSet.CoreV1().ReplicationControllers("").List(context.TODO(), metav1.ListOptions{LabelSelector: "test-rc-static=true"}) - framework.ExpectNoError(err, "Failed to list ReplicationController") + framework.ExpectNoError(err, "failed to list ReplicationController") framework.ExpectEqual(len(rcs.Items) > 0, true) ginkgo.By("checking that ReplicationController has expected values") From 957ab9afafde1a438372a2fe2345fe442c770619 Mon Sep 17 00:00:00 2001 From: Caleb Woodbine Date: 
Thu, 5 Mar 2020 09:55:38 +1300 Subject: [PATCH 14/92] Adjust RC watch timeout to 180, update progress log statement --- test/e2e/apps/rc.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go index 6674797c06bf..1211583505ad 100644 --- a/test/e2e/apps/rc.go +++ b/test/e2e/apps/rc.go @@ -134,7 +134,7 @@ var _ = SIGDescribe("ReplicationController", func() { framework.ExpectNoError(err, "Failed to create ReplicationController") // setup a watch for the RC - rcWatchTimeoutSeconds := int64(60) + rcWatchTimeoutSeconds := int64(180) rcWatch, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Watch(context.TODO(), metav1.ListOptions{LabelSelector: "test-rc-static=true", TimeoutSeconds: &rcWatchTimeoutSeconds}) framework.ExpectNoError(err, "Failed to setup watch on newly created ReplicationController") @@ -259,7 +259,7 @@ var _ = SIGDescribe("ReplicationController", func() { err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).DeleteCollection(context.TODO(), &metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test-rc-static=true"}) framework.ExpectNoError(err, "Failed to delete ReplicationControllers") - ginkgo.By("waiting for ReplicationController is have a DeletionTimestamp") + ginkgo.By("waiting for ReplicationController to have a DELETED event") for event := range rcWatchChan { if event.Type == "DELETED" { break From 6b215055497803127633d3f9d2e3241980a8f9bf Mon Sep 17 00:00:00 2001 From: Aaron Crickenberger Date: Mon, 9 Mar 2020 18:04:51 -0700 Subject: [PATCH 15/92] Replace raw usages of tar in build/lib/release.sh They broke the ability to run make quick-release on darwin. Use ${TAR} instead so that gtar can be used if available. --- build/lib/release.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build/lib/release.sh b/build/lib/release.sh index 6d23dc203398..2a52b04e3d58 100644 --- a/build/lib/release.sh +++ b/build/lib/release.sh @@ -462,7 +462,7 @@ function kube::release::package_kube_manifests_tarball() { cp "${KUBE_ROOT}/cluster/gce/gci/health-monitor.sh" "${dst_dir}/health-monitor.sh" # Merge GCE-specific addons with general purpose addons. for d in cluster/addons cluster/gce/addons; do - find "${KUBE_ROOT}/${d}" \( \( -name \*.yaml -o -name \*.yaml.in -o -name \*.json \) -a ! \( -name \*demo\* \) \) -print0 | tar c --transform "s|${KUBE_ROOT#/*}/${d}||" --null -T - | "${TAR}" x -C "${dst_dir}" + find "${KUBE_ROOT}/${d}" \( \( -name \*.yaml -o -name \*.yaml.in -o -name \*.json \) -a ! \( -name \*demo\* \) \) -print0 | "${TAR}" c --transform "s|${KUBE_ROOT#/*}/${d}||" --null -T - | "${TAR}" x -C "${dst_dir}" done kube::release::clean_cruft @@ -528,7 +528,7 @@ function kube::release::package_test_tarballs() { # the portable test tarball. 
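# (Using "${TAR}" rather than raw tar below lets GNU tar, e.g. gtar on darwin, be picked up when available, per the commit message above.)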
mkdir -p "${release_stage}/test/images" cp -fR "${KUBE_ROOT}/test/images" "${release_stage}/test/" - tar c "${KUBE_TEST_PORTABLE[@]}" | tar x -C "${release_stage}" + "${TAR}" c "${KUBE_TEST_PORTABLE[@]}" | "${TAR}" x -C "${release_stage}" kube::release::clean_cruft From acf38c5f8af22fc59b1a3c889717074cd470c6ec Mon Sep 17 00:00:00 2001 From: Tim Allclair Date: Tue, 17 Mar 2020 13:58:34 -0700 Subject: [PATCH 16/92] Move PSP tests behind a feature tag --- test/e2e/auth/pod_security_policy.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test/e2e/auth/pod_security_policy.go b/test/e2e/auth/pod_security_policy.go index 264a7d258cc0..d19fbaee1429 100644 --- a/test/e2e/auth/pod_security_policy.go +++ b/test/e2e/auth/pod_security_policy.go @@ -44,7 +44,7 @@ import ( const nobodyUser = int64(65534) -var _ = SIGDescribe("PodSecurityPolicy", func() { +var _ = SIGDescribe("PodSecurityPolicy [Feature:PodSecurityPolicy]", func() { f := framework.NewDefaultFramework("podsecuritypolicy") f.SkipPrivilegedPSPBinding = true @@ -54,7 +54,8 @@ var _ = SIGDescribe("PodSecurityPolicy", func() { var ns string // Test namespace, for convenience ginkgo.BeforeEach(func() { if !framework.IsPodSecurityPolicyEnabled(f.ClientSet) { - e2eskipper.Skipf("PodSecurityPolicy not enabled") + framework.Failf("PodSecurityPolicy not enabled") + return } if !auth.IsRBACEnabled(f.ClientSet.RbacV1()) { e2eskipper.Skipf("RBAC not enabled") From 7499d80fd83c66b1ca7e85c55fa914982844249f Mon Sep 17 00:00:00 2001 From: toyoda Date: Wed, 18 Mar 2020 16:30:56 +0900 Subject: [PATCH 17/92] Use ExpectNotEqual in test/e2e/network/ --- hack/verify-test-code.sh | 23 +++++++++++++++++++++++ test/e2e/network/service.go | 2 +- 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/hack/verify-test-code.sh b/hack/verify-test-code.sh index de3db419efdd..2590eee9b8cb 100755 --- a/hack/verify-test-code.sh +++ b/hack/verify-test-code.sh @@ -46,6 +46,15 @@ do fi done +errors_expect_no_equal=() +for file in "${all_e2e_files[@]}" +do + if grep -E "Expect\(.*\)\.(NotTo|ToNot)\((gomega\.Equal|Equal)" "${file}" > /dev/null + then + errors_expect_no_equal+=( "${file}" ) + fi +done + errors_expect_equal=() for file in "${all_e2e_files[@]}" do @@ -83,6 +92,20 @@ if [ ${#errors_expect_error[@]} -ne 0 ]; then exit 1 fi +if [ ${#errors_expect_no_equal[@]} -ne 0 ]; then + { + echo "Errors:" + for err in "${errors_expect_no_equal[@]}"; do + echo "$err" + done + echo + echo 'The above files need to use framework.ExpectNotEqual(foo, bar) instead of ' + echo 'Expect(foo).NotTo(Equal(bar)) or gomega.Expect(foo).NotTo(gomega.Equal(bar))' + echo + } >&2 + exit 1 +fi + if [ ${#errors_expect_equal[@]} -ne 0 ]; then { echo "Errors:" diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index ff9fd5fc1144..dbcdc210e617 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -935,7 +935,7 @@ var _ = SIGDescribe("Services", func() { pausePods, err := cs.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector.String()}) framework.ExpectNoError(err, "Error in listing pods associated with pause pod deployments") - gomega.Expect(pausePods.Items[0].Spec.NodeName).ToNot(gomega.Equal(pausePods.Items[1].Spec.NodeName)) + framework.ExpectNotEqual(pausePods.Items[0].Spec.NodeName, pausePods.Items[1].Spec.NodeName) serviceAddress := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort)) From 6cad278a8ccf7ab83e2603cd47b15c8a72cdd6f1 Mon Sep 17 00:00:00 2001 From: drfish Date: Wed, 18 Mar 
2020 15:57:37 +0800 Subject: [PATCH 18/92] Remove dependency for benchmark integration tests from e2e fw --- test/integration/benchmark/jsonify/BUILD | 5 +--- test/integration/benchmark/jsonify/main.go | 35 +++++++++++++++++++--- 2 files changed, 32 insertions(+), 8 deletions(-) diff --git a/test/integration/benchmark/jsonify/BUILD b/test/integration/benchmark/jsonify/BUILD index 4fe25d5fd164..20b1b64638ed 100644 --- a/test/integration/benchmark/jsonify/BUILD +++ b/test/integration/benchmark/jsonify/BUILD @@ -5,10 +5,7 @@ go_library( srcs = ["main.go"], importpath = "k8s.io/kubernetes/test/integration/benchmark/jsonify", visibility = ["//visibility:private"], - deps = [ - "//test/e2e/perftype:go_default_library", - "//vendor/golang.org/x/tools/benchmark/parse:go_default_library", - ], + deps = ["//vendor/golang.org/x/tools/benchmark/parse:go_default_library"], ) go_binary( diff --git a/test/integration/benchmark/jsonify/main.go b/test/integration/benchmark/jsonify/main.go index 49476a5c7264..3614e9548568 100644 --- a/test/integration/benchmark/jsonify/main.go +++ b/test/integration/benchmark/jsonify/main.go @@ -24,9 +24,36 @@ import ( "os" benchparse "golang.org/x/tools/benchmark/parse" - "k8s.io/kubernetes/test/e2e/perftype" ) +// TODO(random-liu): Replace this with prometheus' data model. + +// The following performance data structures are generalized and well-formatted. +// They can be pretty printed in json format and be analyzed by other performance +// analyzing tools, such as Perfdash (k8s.io/contrib/perfdash). + +// DataItem is the data point. +type DataItem struct { + // Data is a map from bucket to real data point (e.g. "Perc90" -> 23.5). Notice + // that all data items with the same label combination should have the same buckets. + Data map[string]float64 `json:"data"` + // Unit is the data unit. Notice that all data items with the same label combination + // should have the same unit. + Unit string `json:"unit"` + // Labels is the labels of the data item. + Labels map[string]string `json:"labels,omitempty"` +} + +// PerfData contains all data items generated in current test. +type PerfData struct { + // Version is the version of the metrics. The metrics consumer could use the version + // to detect metrics version change and decide what version to support. + Version string `json:"version"` + DataItems []DataItem `json:"dataItems"` + // Labels is the labels of the dataset. 
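+	// For example, a set-wide label that applies to every DataItem, such as the test name; jsonify itself currently leaves this unset.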
+ Labels map[string]string `json:"labels,omitempty"` +} + func main() { err := run() if err != nil { @@ -42,7 +69,7 @@ func run() error { if err != nil { return err } - data := perftype.PerfData{Version: "v1"} + data := PerfData{Version: "v1"} for _, benchMarks := range benchmarkSet { for _, benchMark := range benchMarks { data.DataItems = appendIfMeasured(data.DataItems, benchMark, benchparse.NsPerOp, "time", "μs", benchMark.NsPerOp/1000.0) @@ -63,11 +90,11 @@ func run() error { return ioutil.WriteFile(os.Args[1], formatted.Bytes(), 0664) } -func appendIfMeasured(items []perftype.DataItem, benchmark *benchparse.Benchmark, metricType int, metricName string, unit string, value float64) []perftype.DataItem { +func appendIfMeasured(items []DataItem, benchmark *benchparse.Benchmark, metricType int, metricName string, unit string, value float64) []DataItem { if metricType != 0 && (benchmark.Measured&metricType) == 0 { return items } - return append(items, perftype.DataItem{ + return append(items, DataItem{ Unit: unit, Labels: map[string]string{ "benchmark": benchmark.Name, From 5de3c64ad0e9199bb08846a7aa00e8b2499b5083 Mon Sep 17 00:00:00 2001 From: yaseenhamdulay <31653044+yaseenhamdulay@users.noreply.github.com> Date: Thu, 5 Mar 2020 16:12:47 +0000 Subject: [PATCH 19/92] Create etcd user in cloud-init master.yaml rather than in configure-helper.sh An etcd unix user is currently created in configure-helper.sh if it does not exist on the master. cloud-init is the only supported mechanism to add users on COS VMs. If an attempt is made to add a key using OS Login or the instance metadata mechanism, the google_accounts_daemon will race with useradd and potentially attempt to use the same UID. This will lock out any attempt to SSH into the VM. We therefore migrate to using cloud-init to create this user and prevent this issue from occurring. --- cluster/gce/gci/configure-helper.sh | 3 --- cluster/gce/gci/master.yaml | 5 +++++ 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh index 2ac0d1760715..aac725f0ddfc 100644 --- a/cluster/gce/gci/configure-helper.sh +++ b/cluster/gce/gci/configure-helper.sh @@ -445,9 +445,6 @@ function mount-master-pd { mkdir -p "${mount_point}/srv/sshproxy" ln -s -f "${mount_point}/srv/sshproxy" /etc/srv/sshproxy - if ! id etcd &>/dev/null; then - useradd -s /sbin/nologin -d /var/etcd etcd - fi chown -R etcd "${mount_point}/var/etcd" chgrp -R etcd "${mount_point}/var/etcd" } diff --git a/cluster/gce/gci/master.yaml b/cluster/gce/gci/master.yaml index fd0a88d081d1..70c5ce6716a2 100644 --- a/cluster/gce/gci/master.yaml +++ b/cluster/gce/gci/master.yaml @@ -1,5 +1,10 @@ #cloud-config +users: +- name: etcd + homedir: /var/etcd + lock_passwd: true + write_files: - path: /etc/systemd/system/kube-master-installation.service permissions: 0644 From 380bf20cc11f356824df4b11ba25b07260def45b Mon Sep 17 00:00:00 2001 From: zzxwill Date: Mon, 23 Mar 2020 19:40:43 +0800 Subject: [PATCH 20/92] Explain a little bit on how to prepare Dockerfile before building kube-build image --- build/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build/README.md b/build/README.md index 81fc95ad58a1..686813917037 100644 --- a/build/README.md +++ b/build/README.md @@ -34,7 +34,7 @@ The following scripts are found in the [`build/`](.) directory. Note that all sc ## Basic Flow -The scripts directly under [`build/`](.) are used to build and test. 
-The scripts directly under [`build/`](.) are used to build and test. They will ensure that the `kube-build` Docker image is built (based on [`build/build-image/Dockerfile`](build-image/Dockerfile)) and then execute the appropriate command in that container. These scripts will both ensure that the right data is cached from run to run for incremental builds and will copy the results back out of the container.
+The scripts directly under [`build/`](.) are used to build and test. They will ensure that the `kube-build` Docker image is built (based on [`build/build-image/Dockerfile`](build-image/Dockerfile) and after the base image's `KUBE_BUILD_IMAGE_CROSS_TAG` in the Dockerfile is replaced with one of the actual tags of the base image, like `v1.13.9-2`)) and then execute the appropriate command in that container. These scripts will both ensure that the right data is cached from run to run for incremental builds and will copy the results back out of the container.
 
 The `kube-build` container image is built by first creating a "context" directory in `_output/images/build-image`. It is done there instead of at the root of the Kubernetes repo to minimize the amount of data we need to package up when building the image.

From 9ad065d8040bd19af97ec49a248e4428e0ef2152 Mon Sep 17 00:00:00 2001
From: Zhou Peng
Date: Tue, 24 Mar 2020 09:00:30 +0800
Subject: [PATCH 21/92] [apimachinery]: cleanup deprecated const
 StatusTooManyRequests

`make all` still passes.

Signed-off-by: Zhou Peng
---
 staging/src/k8s.io/apimachinery/pkg/api/errors/errors.go | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/staging/src/k8s.io/apimachinery/pkg/api/errors/errors.go b/staging/src/k8s.io/apimachinery/pkg/api/errors/errors.go
index 363b8152b0cf..d3927d817382 100644
--- a/staging/src/k8s.io/apimachinery/pkg/api/errors/errors.go
+++ b/staging/src/k8s.io/apimachinery/pkg/api/errors/errors.go
@@ -30,14 +30,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/validation/field"
 )
 
-const (
-	// StatusTooManyRequests means the server experienced too many requests within a
-	// given window and that the client must wait to perform the action again.
-	// DEPRECATED: please use http.StatusTooManyRequests, this will be removed in
-	// the future version.
-	StatusTooManyRequests = http.StatusTooManyRequests
-)
-
 // StatusError is an error intended for consumption by a REST API server; it can also be
 // reconstructed by clients from a REST response. Public to allow easy type switches.
 type StatusError struct {

From ed2697743cee52953fbfdac36fb0f3067106f7ab Mon Sep 17 00:00:00 2001
From: zzxwill
Date: Tue, 24 Mar 2020 10:46:26 +0800
Subject: [PATCH 22/92] remove duplicate closing parenthesis
---
 build/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/build/README.md b/build/README.md
index 686813917037..196c0478d26e 100644
--- a/build/README.md
+++ b/build/README.md
@@ -34,7 +34,7 @@ The following scripts are found in the [`build/`](.) directory. Note that all sc
 
 ## Basic Flow
 
-The scripts directly under [`build/`](.) are used to build and test. They will ensure that the `kube-build` Docker image is built (based on [`build/build-image/Dockerfile`](build-image/Dockerfile) and after the base image's `KUBE_BUILD_IMAGE_CROSS_TAG` in the Dockerfile is replaced with one of the actual tags of the base image, like `v1.13.9-2`)) and then execute the appropriate command in that container. These scripts will both ensure that the right data is cached from run to run for incremental builds and will copy the results back out of the container.
+The scripts directly under [`build/`](.) are used to build and test. They will ensure that the `kube-build` Docker image is built (based on [`build/build-image/Dockerfile`](build-image/Dockerfile) and after the base image's `KUBE_BUILD_IMAGE_CROSS_TAG` in the Dockerfile is replaced with one of the actual tags of the base image, like `v1.13.9-2`) and then execute the appropriate command in that container. These scripts will both ensure that the right data is cached from run to run for incremental builds and will copy the results back out of the container.
 
 The `kube-build` container image is built by first creating a "context" directory in `_output/images/build-image`. It is done there instead of at the root of the Kubernetes repo to minimize the amount of data we need to package up when building the image.

From 04d06d2f236d205c7482e3234cadcdb30ebd7d17 Mon Sep 17 00:00:00 2001
From: zhouya0
Date: Tue, 24 Mar 2020 14:23:26 +0800
Subject: [PATCH 23/92] Remove prometheus references from etcd version monitor
---
 cluster/images/etcd-version-monitor/BUILD                | 2 +-
 .../images/etcd-version-monitor/etcd-version-monitor.go  | 6 ++----
 .../k8s.io/component-base/metrics/testutil/metrics.go    | 9 +++++++++
 3 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/cluster/images/etcd-version-monitor/BUILD b/cluster/images/etcd-version-monitor/BUILD
index cd7ace3cc3e9..36a59d66af85 100644
--- a/cluster/images/etcd-version-monitor/BUILD
+++ b/cluster/images/etcd-version-monitor/BUILD
@@ -17,9 +17,9 @@ go_library(
     importpath = "k8s.io/kubernetes/cluster/images/etcd-version-monitor",
     deps = [
         "//staging/src/k8s.io/component-base/metrics:go_default_library",
+        "//staging/src/k8s.io/component-base/metrics/testutil:go_default_library",
         "//vendor/github.com/gogo/protobuf/proto:go_default_library",
         "//vendor/github.com/prometheus/client_model/go:go_default_library",
-        "//vendor/github.com/prometheus/common/expfmt:go_default_library",
         "//vendor/github.com/spf13/pflag:go_default_library",
         "//vendor/k8s.io/klog:go_default_library",
     ],

diff --git a/cluster/images/etcd-version-monitor/etcd-version-monitor.go b/cluster/images/etcd-version-monitor/etcd-version-monitor.go
index 411d2f136efb..3126ff2515da 100644
--- a/cluster/images/etcd-version-monitor/etcd-version-monitor.go
+++ b/cluster/images/etcd-version-monitor/etcd-version-monitor.go
@@ -27,10 +27,10 @@ import (
 
 	"github.com/gogo/protobuf/proto"
 	dto "github.com/prometheus/client_model/go"
-	"github.com/prometheus/common/expfmt"
 	"github.com/spf13/pflag"
 
 	"k8s.io/component-base/metrics"
+	"k8s.io/component-base/metrics/testutil"
 	"k8s.io/klog"
 )
 
@@ -271,9 +271,7 @@ func scrapeMetrics() (map[string]*dto.MetricFamily, error) {
 	}
 	defer resp.Body.Close()
 
-	// Parse the metrics in text format to a MetricFamily struct.
-	var textParser expfmt.TextParser
-	return textParser.TextToMetricFamilies(resp.Body)
+	return testutil.TextToMetricFamilies(resp.Body)
 }
 
 func renameMetric(mf *dto.MetricFamily, name string) {

diff --git a/staging/src/k8s.io/component-base/metrics/testutil/metrics.go b/staging/src/k8s.io/component-base/metrics/testutil/metrics.go
index cf39cb7ab567..2af9c01058e6 100644
--- a/staging/src/k8s.io/component-base/metrics/testutil/metrics.go
+++ b/staging/src/k8s.io/component-base/metrics/testutil/metrics.go
@@ -92,6 +92,15 @@ func ParseMetrics(data string, output *Metrics) error {
 	}
 }
 
+// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
+// format and creates MetricFamily proto messages. 
It returns the MetricFamily +// proto messages in a map where the metric names are the keys, along with any +// error encountered. +func TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { + var textParser expfmt.TextParser + return textParser.TextToMetricFamilies(in) +} + // ExtractMetricSamples parses the prometheus metric samples from the input string. func ExtractMetricSamples(metricsBlob string) ([]*model.Sample, error) { dec := expfmt.NewDecoder(strings.NewReader(metricsBlob), expfmt.FmtText) From e5d6536ade058ce5df6afc83a2329f23fab64860 Mon Sep 17 00:00:00 2001 From: Ed Bartosh Date: Mon, 14 Oct 2019 15:43:51 +0300 Subject: [PATCH 24/92] add UpgradePlan to the kubeadm.output API group --- cmd/kubeadm/app/apis/output/register.go | 1 + cmd/kubeadm/app/apis/output/types.go | 17 +++++ .../app/apis/output/v1alpha1/register.go | 1 + cmd/kubeadm/app/apis/output/v1alpha1/types.go | 17 +++++ .../v1alpha1/zz_generated.conversion.go | 64 +++++++++++++++++++ .../output/v1alpha1/zz_generated.deepcopy.go | 46 +++++++++++++ .../app/apis/output/zz_generated.deepcopy.go | 46 +++++++++++++ 7 files changed, 192 insertions(+) diff --git a/cmd/kubeadm/app/apis/output/register.go b/cmd/kubeadm/app/apis/output/register.go index ada0599efee8..d7b7fcdd8e9f 100644 --- a/cmd/kubeadm/app/apis/output/register.go +++ b/cmd/kubeadm/app/apis/output/register.go @@ -48,6 +48,7 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &BootstrapToken{}, &Images{}, + &UpgradePlan{}, ) return nil } diff --git a/cmd/kubeadm/app/apis/output/types.go b/cmd/kubeadm/app/apis/output/types.go index 033836303d03..a71ddae5f801 100644 --- a/cmd/kubeadm/app/apis/output/types.go +++ b/cmd/kubeadm/app/apis/output/types.go @@ -40,3 +40,20 @@ type Images struct { Images []string } + +// ComponentUpgradePlan represents information about upgrade plan for one component +type ComponentUpgradePlan struct { + Name string + CurrentVersion string + NewVersion string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// UpgradePlan represents information about upgrade plan for the output +// produced by 'kubeadm upgrade plan' +type UpgradePlan struct { + metav1.TypeMeta + + Components []ComponentUpgradePlan +} diff --git a/cmd/kubeadm/app/apis/output/v1alpha1/register.go b/cmd/kubeadm/app/apis/output/v1alpha1/register.go index 0d30c22278e2..a11268592105 100644 --- a/cmd/kubeadm/app/apis/output/v1alpha1/register.go +++ b/cmd/kubeadm/app/apis/output/v1alpha1/register.go @@ -60,6 +60,7 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &BootstrapToken{}, &Images{}, + &UpgradePlan{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/cmd/kubeadm/app/apis/output/v1alpha1/types.go b/cmd/kubeadm/app/apis/output/v1alpha1/types.go index d20e14026e92..1ea18ae455e4 100644 --- a/cmd/kubeadm/app/apis/output/v1alpha1/types.go +++ b/cmd/kubeadm/app/apis/output/v1alpha1/types.go @@ -40,3 +40,20 @@ type Images struct { Images []string `json:"images"` } + +// ComponentUpgradePlan represents information about upgrade plan for one component +type ComponentUpgradePlan struct { + Name string `json:"name"` + CurrentVersion string `json:"currentVersion"` + NewVersion string `json:"newVersion"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// UpgradePlan represents information about upgrade plan for the output +// produced by 'kubeadm upgrade plan' +type UpgradePlan struct { 
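+	// Illustration only (hypothetical versions): once populated, a marshalled
+	// UpgradePlan is expected to look roughly like
+	//   {"kind":"UpgradePlan","apiVersion":"output.kubeadm.k8s.io/v1alpha1",
+	//    "components":[{"name":"kube-apiserver","currentVersion":"v1.17.4","newVersion":"v1.18.0"}]}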
+ metav1.TypeMeta + + Components []ComponentUpgradePlan `json:"components"` +} diff --git a/cmd/kubeadm/app/apis/output/v1alpha1/zz_generated.conversion.go b/cmd/kubeadm/app/apis/output/v1alpha1/zz_generated.conversion.go index 512f3dbde913..c675fde12d04 100644 --- a/cmd/kubeadm/app/apis/output/v1alpha1/zz_generated.conversion.go +++ b/cmd/kubeadm/app/apis/output/v1alpha1/zz_generated.conversion.go @@ -45,6 +45,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*ComponentUpgradePlan)(nil), (*output.ComponentUpgradePlan)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ComponentUpgradePlan_To_output_ComponentUpgradePlan(a.(*ComponentUpgradePlan), b.(*output.ComponentUpgradePlan), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*output.ComponentUpgradePlan)(nil), (*ComponentUpgradePlan)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_output_ComponentUpgradePlan_To_v1alpha1_ComponentUpgradePlan(a.(*output.ComponentUpgradePlan), b.(*ComponentUpgradePlan), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*Images)(nil), (*output.Images)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_Images_To_output_Images(a.(*Images), b.(*output.Images), scope) }); err != nil { @@ -55,6 +65,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*UpgradePlan)(nil), (*output.UpgradePlan)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_UpgradePlan_To_output_UpgradePlan(a.(*UpgradePlan), b.(*output.UpgradePlan), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*output.UpgradePlan)(nil), (*UpgradePlan)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_output_UpgradePlan_To_v1alpha1_UpgradePlan(a.(*output.UpgradePlan), b.(*UpgradePlan), scope) + }); err != nil { + return err + } return nil } @@ -78,6 +98,30 @@ func Convert_output_BootstrapToken_To_v1alpha1_BootstrapToken(in *output.Bootstr return autoConvert_output_BootstrapToken_To_v1alpha1_BootstrapToken(in, out, s) } +func autoConvert_v1alpha1_ComponentUpgradePlan_To_output_ComponentUpgradePlan(in *ComponentUpgradePlan, out *output.ComponentUpgradePlan, s conversion.Scope) error { + out.Name = in.Name + out.CurrentVersion = in.CurrentVersion + out.NewVersion = in.NewVersion + return nil +} + +// Convert_v1alpha1_ComponentUpgradePlan_To_output_ComponentUpgradePlan is an autogenerated conversion function. +func Convert_v1alpha1_ComponentUpgradePlan_To_output_ComponentUpgradePlan(in *ComponentUpgradePlan, out *output.ComponentUpgradePlan, s conversion.Scope) error { + return autoConvert_v1alpha1_ComponentUpgradePlan_To_output_ComponentUpgradePlan(in, out, s) +} + +func autoConvert_output_ComponentUpgradePlan_To_v1alpha1_ComponentUpgradePlan(in *output.ComponentUpgradePlan, out *ComponentUpgradePlan, s conversion.Scope) error { + out.Name = in.Name + out.CurrentVersion = in.CurrentVersion + out.NewVersion = in.NewVersion + return nil +} + +// Convert_output_ComponentUpgradePlan_To_v1alpha1_ComponentUpgradePlan is an autogenerated conversion function. 
+func Convert_output_ComponentUpgradePlan_To_v1alpha1_ComponentUpgradePlan(in *output.ComponentUpgradePlan, out *ComponentUpgradePlan, s conversion.Scope) error { + return autoConvert_output_ComponentUpgradePlan_To_v1alpha1_ComponentUpgradePlan(in, out, s) +} + func autoConvert_v1alpha1_Images_To_output_Images(in *Images, out *output.Images, s conversion.Scope) error { out.Images = *(*[]string)(unsafe.Pointer(&in.Images)) return nil @@ -97,3 +141,23 @@ func autoConvert_output_Images_To_v1alpha1_Images(in *output.Images, out *Images func Convert_output_Images_To_v1alpha1_Images(in *output.Images, out *Images, s conversion.Scope) error { return autoConvert_output_Images_To_v1alpha1_Images(in, out, s) } + +func autoConvert_v1alpha1_UpgradePlan_To_output_UpgradePlan(in *UpgradePlan, out *output.UpgradePlan, s conversion.Scope) error { + out.Components = *(*[]output.ComponentUpgradePlan)(unsafe.Pointer(&in.Components)) + return nil +} + +// Convert_v1alpha1_UpgradePlan_To_output_UpgradePlan is an autogenerated conversion function. +func Convert_v1alpha1_UpgradePlan_To_output_UpgradePlan(in *UpgradePlan, out *output.UpgradePlan, s conversion.Scope) error { + return autoConvert_v1alpha1_UpgradePlan_To_output_UpgradePlan(in, out, s) +} + +func autoConvert_output_UpgradePlan_To_v1alpha1_UpgradePlan(in *output.UpgradePlan, out *UpgradePlan, s conversion.Scope) error { + out.Components = *(*[]ComponentUpgradePlan)(unsafe.Pointer(&in.Components)) + return nil +} + +// Convert_output_UpgradePlan_To_v1alpha1_UpgradePlan is an autogenerated conversion function. +func Convert_output_UpgradePlan_To_v1alpha1_UpgradePlan(in *output.UpgradePlan, out *UpgradePlan, s conversion.Scope) error { + return autoConvert_output_UpgradePlan_To_v1alpha1_UpgradePlan(in, out, s) +} diff --git a/cmd/kubeadm/app/apis/output/v1alpha1/zz_generated.deepcopy.go b/cmd/kubeadm/app/apis/output/v1alpha1/zz_generated.deepcopy.go index 418114c7a9aa..b388605a3db3 100644 --- a/cmd/kubeadm/app/apis/output/v1alpha1/zz_generated.deepcopy.go +++ b/cmd/kubeadm/app/apis/output/v1alpha1/zz_generated.deepcopy.go @@ -50,6 +50,22 @@ func (in *BootstrapToken) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentUpgradePlan) DeepCopyInto(out *ComponentUpgradePlan) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentUpgradePlan. +func (in *ComponentUpgradePlan) DeepCopy() *ComponentUpgradePlan { + if in == nil { + return nil + } + out := new(ComponentUpgradePlan) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Images) DeepCopyInto(out *Images) { *out = *in @@ -79,3 +95,33 @@ func (in *Images) DeepCopyObject() runtime.Object { } return nil } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpgradePlan) DeepCopyInto(out *UpgradePlan) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Components != nil { + in, out := &in.Components, &out.Components + *out = make([]ComponentUpgradePlan, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpgradePlan. 
+func (in *UpgradePlan) DeepCopy() *UpgradePlan { + if in == nil { + return nil + } + out := new(UpgradePlan) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UpgradePlan) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/cmd/kubeadm/app/apis/output/zz_generated.deepcopy.go b/cmd/kubeadm/app/apis/output/zz_generated.deepcopy.go index fb19e22ed7ca..f4a57a4859b4 100644 --- a/cmd/kubeadm/app/apis/output/zz_generated.deepcopy.go +++ b/cmd/kubeadm/app/apis/output/zz_generated.deepcopy.go @@ -50,6 +50,22 @@ func (in *BootstrapToken) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentUpgradePlan) DeepCopyInto(out *ComponentUpgradePlan) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentUpgradePlan. +func (in *ComponentUpgradePlan) DeepCopy() *ComponentUpgradePlan { + if in == nil { + return nil + } + out := new(ComponentUpgradePlan) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Images) DeepCopyInto(out *Images) { *out = *in @@ -79,3 +95,33 @@ func (in *Images) DeepCopyObject() runtime.Object { } return nil } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpgradePlan) DeepCopyInto(out *UpgradePlan) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Components != nil { + in, out := &in.Components, &out.Components + *out = make([]ComponentUpgradePlan, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpgradePlan. +func (in *UpgradePlan) DeepCopy() *UpgradePlan { + if in == nil { + return nil + } + out := new(UpgradePlan) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *UpgradePlan) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} From 0eac66d647f3e0559305cb9dfb4d246192c8012d Mon Sep 17 00:00:00 2001 From: Ed Bartosh Date: Wed, 4 Mar 2020 15:04:22 +0200 Subject: [PATCH 25/92] kubeadm: refactor printAvailableUpgrades Split printAvailableUpgrades into 2 functions: - genUpgradePlan that handles business logic - printUpgradePlan that outputs upgrade plan --- cmd/kubeadm/app/cmd/upgrade/BUILD | 1 + cmd/kubeadm/app/cmd/upgrade/plan.go | 225 +++++++++++++---------- cmd/kubeadm/app/cmd/upgrade/plan_test.go | 204 ++++++++++---------- cmd/kubeadm/app/constants/constants.go | 6 + 4 files changed, 231 insertions(+), 205 deletions(-) diff --git a/cmd/kubeadm/app/cmd/upgrade/BUILD b/cmd/kubeadm/app/cmd/upgrade/BUILD index eab8937bad07..958b345a9370 100644 --- a/cmd/kubeadm/app/cmd/upgrade/BUILD +++ b/cmd/kubeadm/app/cmd/upgrade/BUILD @@ -15,6 +15,7 @@ go_library( deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/apis/kubeadm/validation:go_default_library", + "//cmd/kubeadm/app/apis/output/v1alpha1:go_default_library", "//cmd/kubeadm/app/cmd/options:go_default_library", "//cmd/kubeadm/app/cmd/phases/upgrade/node:go_default_library", "//cmd/kubeadm/app/cmd/phases/workflow:go_default_library", diff --git a/cmd/kubeadm/app/cmd/upgrade/plan.go b/cmd/kubeadm/app/cmd/upgrade/plan.go index a4cb2be73fdb..a07d8646a7d9 100644 --- a/cmd/kubeadm/app/cmd/upgrade/plan.go +++ b/cmd/kubeadm/app/cmd/upgrade/plan.go @@ -29,6 +29,8 @@ import ( "k8s.io/apimachinery/pkg/util/version" "k8s.io/klog" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" + outputapiv1alpha1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/output/v1alpha1" + "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade" etcdutil "k8s.io/kubernetes/cmd/kubeadm/app/util/etcd" ) @@ -97,131 +99,154 @@ func runPlan(flags *planFlags, userVersion string) error { return errors.Wrap(err, "[upgrade/versions] FATAL") } - // Tell the user which upgrades are available - printAvailableUpgrades(availUpgrades, os.Stdout, isExternalEtcd) + // No upgrades available + if len(availUpgrades) == 0 { + klog.V(1).Infoln("[upgrade/plan] Awesome, you're up-to-date! Enjoy!") + return nil + } + + // Generate and print upgrade plans + for _, up := range availUpgrades { + plan, unstableVersionFlag, err := genUpgradePlan(&up, isExternalEtcd) + if err != nil { + return err + } + + printUpgradePlan(&up, plan, unstableVersionFlag, isExternalEtcd, os.Stdout) + } return nil } -// printAvailableUpgrades prints a UX-friendly overview of what versions are available to upgrade to -// TODO look into columnize or some other formatter when time permits instead of using the tabwriter -func printAvailableUpgrades(upgrades []upgrade.Upgrade, w io.Writer, isExternalEtcd bool) { +// newComponentUpgradePlan helper creates outputapiv1alpha1.ComponentUpgradePlan object +func newComponentUpgradePlan(name, currentVersion, newVersion string) outputapiv1alpha1.ComponentUpgradePlan { + return outputapiv1alpha1.ComponentUpgradePlan{ + Name: name, + CurrentVersion: currentVersion, + NewVersion: newVersion, + } +} - // Return quickly if no upgrades can be made - if len(upgrades) == 0 { - fmt.Fprintln(w, "Awesome, you're up-to-date! 
Enjoy!") - return +// TODO There is currently no way to cleanly output upgrades that involve adding, removing, or changing components +// https://github.com/kubernetes/kubeadm/issues/810 was created to track addressing this. +func appendDNSComponent(components []outputapiv1alpha1.ComponentUpgradePlan, up *upgrade.Upgrade, DNSType kubeadmapi.DNSAddOnType, name string) []outputapiv1alpha1.ComponentUpgradePlan { + beforeVersion, afterVersion := "", "" + if up.Before.DNSType == DNSType { + beforeVersion = up.Before.DNSVersion + } + if up.After.DNSType == DNSType { + afterVersion = up.After.DNSVersion } - // The tab writer writes to the "real" writer w - tabw := tabwriter.NewWriter(w, 10, 4, 3, ' ', 0) - // Loop through the upgrade possibilities and output text to the command line - for _, upgrade := range upgrades { + if beforeVersion != "" || afterVersion != "" { + components = append(components, newComponentUpgradePlan(name, beforeVersion, afterVersion)) + } + return components +} - newK8sVersion, err := version.ParseSemantic(upgrade.After.KubeVersion) - if err != nil { - fmt.Fprintf(w, "Unable to parse normalized version %q as a semantic version\n", upgrade.After.KubeVersion) - continue - } +// genUpgradePlan generates output-friendly upgrade plan out of upgrade.Upgrade structure +func genUpgradePlan(up *upgrade.Upgrade, isExternalEtcd bool) (*outputapiv1alpha1.UpgradePlan, string, error) { + newK8sVersion, err := version.ParseSemantic(up.After.KubeVersion) + if err != nil { + return nil, "", errors.Wrapf(err, "Unable to parse normalized version %q as a semantic version", up.After.KubeVersion) + } - UnstableVersionFlag := "" - if len(newK8sVersion.PreRelease()) != 0 { - if strings.HasPrefix(newK8sVersion.PreRelease(), "rc") { - UnstableVersionFlag = " --allow-release-candidate-upgrades" - } else { - UnstableVersionFlag = " --allow-experimental-upgrades" - } + unstableVersionFlag := "" + if len(newK8sVersion.PreRelease()) != 0 { + if strings.HasPrefix(newK8sVersion.PreRelease(), "rc") { + unstableVersionFlag = " --allow-release-candidate-upgrades" + } else { + unstableVersionFlag = " --allow-experimental-upgrades" } + } - if isExternalEtcd && upgrade.CanUpgradeEtcd() { - fmt.Fprintln(w, "External components that should be upgraded manually before you upgrade the control plane with 'kubeadm upgrade apply':") - fmt.Fprintln(tabw, "COMPONENT\tCURRENT\tAVAILABLE") - fmt.Fprintf(tabw, "Etcd\t%s\t%s\n", upgrade.Before.EtcdVersion, upgrade.After.EtcdVersion) + components := []outputapiv1alpha1.ComponentUpgradePlan{} - // We should flush the writer here at this stage; as the columns will now be of the right size, adjusted to the above content - tabw.Flush() - fmt.Fprintln(w, "") - } + if isExternalEtcd && up.CanUpgradeEtcd() { + components = append(components, newComponentUpgradePlan(constants.Etcd, up.Before.EtcdVersion, up.After.EtcdVersion)) + } - if upgrade.CanUpgradeKubelets() { - fmt.Fprintln(w, "Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply':") - fmt.Fprintln(tabw, "COMPONENT\tCURRENT\tAVAILABLE") - firstPrinted := false - - // The map is of the form :. Here all the keys are put into a slice and sorted - // in order to always get the right order. 
+		for _, oldVersion := range sortedSliceFromStringIntMap(up.Before.KubeletVersions) {
+			nodeCount := up.Before.KubeletVersions[oldVersion]
+			components = append(components, newComponentUpgradePlan(constants.Kubelet, fmt.Sprintf("%d x %s", nodeCount, oldVersion), up.After.KubeVersion))
 		}
+	}
 
-		fmt.Fprintf(w, "Upgrade to the latest %s:\n", upgrade.Description)
-		fmt.Fprintln(w, "")
-		fmt.Fprintln(tabw, "COMPONENT\tCURRENT\tAVAILABLE")
-		fmt.Fprintf(tabw, "API Server\t%s\t%s\n", upgrade.Before.KubeVersion, upgrade.After.KubeVersion)
-		fmt.Fprintf(tabw, "Controller Manager\t%s\t%s\n", upgrade.Before.KubeVersion, upgrade.After.KubeVersion)
-		fmt.Fprintf(tabw, "Scheduler\t%s\t%s\n", upgrade.Before.KubeVersion, upgrade.After.KubeVersion)
-		fmt.Fprintf(tabw, "Kube Proxy\t%s\t%s\n", upgrade.Before.KubeVersion, upgrade.After.KubeVersion)
-
-		// TODO There is currently no way to cleanly output upgrades that involve adding, removing, or changing components
-		// https://github.com/kubernetes/kubeadm/issues/810 was created to track addressing this. 
- printCoreDNS, printKubeDNS := false, false - coreDNSBeforeVersion, coreDNSAfterVersion, kubeDNSBeforeVersion, kubeDNSAfterVersion := "", "", "", "" - - switch upgrade.Before.DNSType { - case kubeadmapi.CoreDNS: - printCoreDNS = true - coreDNSBeforeVersion = upgrade.Before.DNSVersion - case kubeadmapi.KubeDNS: - printKubeDNS = true - kubeDNSBeforeVersion = upgrade.Before.DNSVersion - } + components = append(components, newComponentUpgradePlan(constants.KubeAPIServer, up.Before.KubeVersion, up.After.KubeVersion)) + components = append(components, newComponentUpgradePlan(constants.KubeControllerManager, up.Before.KubeVersion, up.After.KubeVersion)) + components = append(components, newComponentUpgradePlan(constants.KubeScheduler, up.Before.KubeVersion, up.After.KubeVersion)) + components = append(components, newComponentUpgradePlan(constants.KubeProxy, up.Before.KubeVersion, up.After.KubeVersion)) - switch upgrade.After.DNSType { - case kubeadmapi.CoreDNS: - printCoreDNS = true - coreDNSAfterVersion = upgrade.After.DNSVersion - case kubeadmapi.KubeDNS: - printKubeDNS = true - kubeDNSAfterVersion = upgrade.After.DNSVersion - } + components = appendDNSComponent(components, up, kubeadmapi.CoreDNS, constants.CoreDNS) + components = appendDNSComponent(components, up, kubeadmapi.KubeDNS, constants.KubeDNS) - if printCoreDNS { - fmt.Fprintf(tabw, "CoreDNS\t%s\t%s\n", coreDNSBeforeVersion, coreDNSAfterVersion) - } - if printKubeDNS { - fmt.Fprintf(tabw, "Kube DNS\t%s\t%s\n", kubeDNSBeforeVersion, kubeDNSAfterVersion) - } + if !isExternalEtcd { + components = append(components, newComponentUpgradePlan(constants.Etcd, up.Before.EtcdVersion, up.After.EtcdVersion)) + } - if !isExternalEtcd { - fmt.Fprintf(tabw, "Etcd\t%s\t%s\n", upgrade.Before.EtcdVersion, upgrade.After.EtcdVersion) - } + return &outputapiv1alpha1.UpgradePlan{Components: components}, unstableVersionFlag, nil +} + +// printUpgradePlan prints a UX-friendly overview of what versions are available to upgrade to +func printUpgradePlan(up *upgrade.Upgrade, plan *outputapiv1alpha1.UpgradePlan, unstableVersionFlag string, isExternalEtcd bool, w io.Writer) { + // The tab writer writes to the "real" writer w + tabw := tabwriter.NewWriter(w, 10, 4, 3, ' ', 0) - // The tabwriter should be flushed at this stage as we have now put in all the required content for this time. This is required for the tabs' size to be correct. 
+ // endOfTable helper function flashes table writer + endOfTable := func() { tabw.Flush() fmt.Fprintln(w, "") - fmt.Fprintln(w, "You can now apply the upgrade by executing the following command:") - fmt.Fprintln(w, "") - fmt.Fprintf(w, "\tkubeadm upgrade apply %s%s\n", upgrade.After.KubeVersion, UnstableVersionFlag) - fmt.Fprintln(w, "") + } - if upgrade.Before.KubeadmVersion != upgrade.After.KubeadmVersion { - fmt.Fprintf(w, "Note: Before you can perform this upgrade, you have to update kubeadm to %s.\n", upgrade.After.KubeadmVersion) - fmt.Fprintln(w, "") + printHeader := true + printManualUpgradeHeader := true + for _, component := range plan.Components { + if isExternalEtcd && component.Name == constants.Etcd { + fmt.Fprintln(w, "External components that should be upgraded manually before you upgrade the control plane with 'kubeadm upgrade apply':") + fmt.Fprintln(tabw, "COMPONENT\tCURRENT\tAVAILABLE") + fmt.Fprintf(tabw, "%s\t%s\t%s\n", component.Name, component.CurrentVersion, component.NewVersion) + // end of external components table + endOfTable() + } else if component.Name == constants.Kubelet { + if printManualUpgradeHeader { + fmt.Fprintln(w, "Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply':") + fmt.Fprintln(tabw, "COMPONENT\tCURRENT\tAVAILABLE") + fmt.Fprintf(tabw, "%s\t%s\t%s\n", component.Name, component.CurrentVersion, component.NewVersion) + printManualUpgradeHeader = false + } else { + fmt.Fprintf(tabw, "%s\t%s\t%s\n", "", component.CurrentVersion, component.NewVersion) + } + } else { + if printHeader { + // End of manual upgrades table + endOfTable() + + fmt.Fprintf(w, "Upgrade to the latest %s:\n", up.Description) + fmt.Fprintln(w, "") + fmt.Fprintln(tabw, "COMPONENT\tCURRENT\tAVAILABLE") + printHeader = false + } + fmt.Fprintf(tabw, "%s\t%s\t%s\n", component.Name, component.CurrentVersion, component.NewVersion) } + } + // End of control plane table + endOfTable() + + //fmt.Fprintln(w, "") + fmt.Fprintln(w, "You can now apply the upgrade by executing the following command:") + fmt.Fprintln(w, "") + fmt.Fprintf(w, "\tkubeadm upgrade apply %s%s\n", up.After.KubeVersion, unstableVersionFlag) + fmt.Fprintln(w, "") - fmt.Fprintln(w, "_____________________________________________________________________") + if up.Before.KubeadmVersion != up.After.KubeadmVersion { + fmt.Fprintf(w, "Note: Before you can perform this upgrade, you have to update kubeadm to %s.\n", up.After.KubeadmVersion) fmt.Fprintln(w, "") } + + fmt.Fprintln(w, "_____________________________________________________________________") + fmt.Fprintln(w, "") } // sortedSliceFromStringIntMap returns a slice of the keys in the map sorted alphabetically diff --git a/cmd/kubeadm/app/cmd/upgrade/plan_test.go b/cmd/kubeadm/app/cmd/upgrade/plan_test.go index 7f4c703607f9..560e6aa6120e 100644 --- a/cmd/kubeadm/app/cmd/upgrade/plan_test.go +++ b/cmd/kubeadm/app/cmd/upgrade/plan_test.go @@ -75,19 +75,6 @@ func TestPrintAvailableUpgrades(t *testing.T) { expectedBytes []byte externalEtcd bool }{ - { - name: "Up to date", - upgrades: []upgrade.Upgrade{}, - expectedBytes: []byte(`Awesome, you're up-to-date! Enjoy! -`), - }, - { - name: "Up to date external etcd", - externalEtcd: true, - upgrades: []upgrade.Upgrade{}, - expectedBytes: []byte(`Awesome, you're up-to-date! Enjoy! 
-`), - }, { name: "Patch version available", upgrades: []upgrade.Upgrade{ @@ -114,17 +101,17 @@ func TestPrintAvailableUpgrades(t *testing.T) { }, expectedBytes: []byte(`Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply': COMPONENT CURRENT AVAILABLE -Kubelet 1 x v1.8.1 v1.8.3 +kubelet 1 x v1.8.1 v1.8.3 Upgrade to the latest version in the v1.8 series: -COMPONENT CURRENT AVAILABLE -API Server v1.8.1 v1.8.3 -Controller Manager v1.8.1 v1.8.3 -Scheduler v1.8.1 v1.8.3 -Kube Proxy v1.8.1 v1.8.3 -Kube DNS 1.14.5 1.14.5 -Etcd 3.0.17 3.0.17 +COMPONENT CURRENT AVAILABLE +kube-apiserver v1.8.1 v1.8.3 +kube-controller-manager v1.8.1 v1.8.3 +kube-scheduler v1.8.1 v1.8.3 +kube-proxy v1.8.1 v1.8.3 +kube-dns 1.14.5 1.14.5 +etcd 3.0.17 3.0.17 You can now apply the upgrade by executing the following command: @@ -162,17 +149,17 @@ _____________________________________________________________________ }, expectedBytes: []byte(`Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply': COMPONENT CURRENT AVAILABLE -Kubelet 1 x v1.8.3 v1.9.0 +kubelet 1 x v1.8.3 v1.9.0 Upgrade to the latest stable version: -COMPONENT CURRENT AVAILABLE -API Server v1.8.3 v1.9.0 -Controller Manager v1.8.3 v1.9.0 -Scheduler v1.8.3 v1.9.0 -Kube Proxy v1.8.3 v1.9.0 -Kube DNS 1.14.5 1.14.13 -Etcd 3.0.17 3.1.12 +COMPONENT CURRENT AVAILABLE +kube-apiserver v1.8.3 v1.9.0 +kube-controller-manager v1.8.3 v1.9.0 +kube-scheduler v1.8.3 v1.9.0 +kube-proxy v1.8.3 v1.9.0 +kube-dns 1.14.5 1.14.13 +etcd 3.0.17 3.1.12 You can now apply the upgrade by executing the following command: @@ -228,17 +215,17 @@ _____________________________________________________________________ }, expectedBytes: []byte(`Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply': COMPONENT CURRENT AVAILABLE -Kubelet 1 x v1.8.3 v1.8.5 +kubelet 1 x v1.8.3 v1.8.5 Upgrade to the latest version in the v1.8 series: -COMPONENT CURRENT AVAILABLE -API Server v1.8.3 v1.8.5 -Controller Manager v1.8.3 v1.8.5 -Scheduler v1.8.3 v1.8.5 -Kube Proxy v1.8.3 v1.8.5 -Kube DNS 1.14.5 1.14.5 -Etcd 3.0.17 3.0.17 +COMPONENT CURRENT AVAILABLE +kube-apiserver v1.8.3 v1.8.5 +kube-controller-manager v1.8.3 v1.8.5 +kube-scheduler v1.8.3 v1.8.5 +kube-proxy v1.8.3 v1.8.5 +kube-dns 1.14.5 1.14.5 +etcd 3.0.17 3.0.17 You can now apply the upgrade by executing the following command: @@ -248,17 +235,17 @@ _____________________________________________________________________ Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply': COMPONENT CURRENT AVAILABLE -Kubelet 1 x v1.8.3 v1.9.0 +kubelet 1 x v1.8.3 v1.9.0 Upgrade to the latest stable version: -COMPONENT CURRENT AVAILABLE -API Server v1.8.3 v1.9.0 -Controller Manager v1.8.3 v1.9.0 -Scheduler v1.8.3 v1.9.0 -Kube Proxy v1.8.3 v1.9.0 -Kube DNS 1.14.5 1.14.13 -Etcd 3.0.17 3.1.12 +COMPONENT CURRENT AVAILABLE +kube-apiserver v1.8.3 v1.9.0 +kube-controller-manager v1.8.3 v1.9.0 +kube-scheduler v1.8.3 v1.9.0 +kube-proxy v1.8.3 v1.9.0 +kube-dns 1.14.5 1.14.13 +etcd 3.0.17 3.1.12 You can now apply the upgrade by executing the following command: @@ -296,17 +283,17 @@ _____________________________________________________________________ }, expectedBytes: []byte(`Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply': COMPONENT CURRENT AVAILABLE -Kubelet 1 x v1.8.5 v1.9.0-beta.1 
+kubelet 1 x v1.8.5 v1.9.0-beta.1 Upgrade to the latest experimental version: -COMPONENT CURRENT AVAILABLE -API Server v1.8.5 v1.9.0-beta.1 -Controller Manager v1.8.5 v1.9.0-beta.1 -Scheduler v1.8.5 v1.9.0-beta.1 -Kube Proxy v1.8.5 v1.9.0-beta.1 -Kube DNS 1.14.5 1.14.13 -Etcd 3.0.17 3.1.12 +COMPONENT CURRENT AVAILABLE +kube-apiserver v1.8.5 v1.9.0-beta.1 +kube-controller-manager v1.8.5 v1.9.0-beta.1 +kube-scheduler v1.8.5 v1.9.0-beta.1 +kube-proxy v1.8.5 v1.9.0-beta.1 +kube-dns 1.14.5 1.14.13 +etcd 3.0.17 3.1.12 You can now apply the upgrade by executing the following command: @@ -344,17 +331,17 @@ _____________________________________________________________________ }, expectedBytes: []byte(`Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply': COMPONENT CURRENT AVAILABLE -Kubelet 1 x v1.8.5 v1.9.0-rc.1 +kubelet 1 x v1.8.5 v1.9.0-rc.1 Upgrade to the latest release candidate version: -COMPONENT CURRENT AVAILABLE -API Server v1.8.5 v1.9.0-rc.1 -Controller Manager v1.8.5 v1.9.0-rc.1 -Scheduler v1.8.5 v1.9.0-rc.1 -Kube Proxy v1.8.5 v1.9.0-rc.1 -Kube DNS 1.14.5 1.14.13 -Etcd 3.0.17 3.1.12 +COMPONENT CURRENT AVAILABLE +kube-apiserver v1.8.5 v1.9.0-rc.1 +kube-controller-manager v1.8.5 v1.9.0-rc.1 +kube-scheduler v1.8.5 v1.9.0-rc.1 +kube-proxy v1.8.5 v1.9.0-rc.1 +kube-dns 1.14.5 1.14.13 +etcd 3.0.17 3.1.12 You can now apply the upgrade by executing the following command: @@ -393,18 +380,18 @@ _____________________________________________________________________ }, expectedBytes: []byte(`Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply': COMPONENT CURRENT AVAILABLE -Kubelet 1 x v1.9.2 v1.9.3 +kubelet 1 x v1.9.2 v1.9.3 2 x v1.9.3 v1.9.3 Upgrade to the latest version in the v1.9 series: -COMPONENT CURRENT AVAILABLE -API Server v1.9.2 v1.9.3 -Controller Manager v1.9.2 v1.9.3 -Scheduler v1.9.2 v1.9.3 -Kube Proxy v1.9.2 v1.9.3 -Kube DNS 1.14.5 1.14.8 -Etcd 3.0.17 3.1.12 +COMPONENT CURRENT AVAILABLE +kube-apiserver v1.9.2 v1.9.3 +kube-controller-manager v1.9.2 v1.9.3 +kube-scheduler v1.9.2 v1.9.3 +kube-proxy v1.9.2 v1.9.3 +kube-dns 1.14.5 1.14.8 +etcd 3.0.17 3.1.12 You can now apply the upgrade by executing the following command: @@ -444,20 +431,20 @@ _____________________________________________________________________ externalEtcd: true, expectedBytes: []byte(`External components that should be upgraded manually before you upgrade the control plane with 'kubeadm upgrade apply': COMPONENT CURRENT AVAILABLE -Etcd 3.0.17 3.1.12 +etcd 3.0.17 3.1.12 Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply': COMPONENT CURRENT AVAILABLE -Kubelet 1 x v1.9.2 v1.9.3 +kubelet 1 x v1.9.2 v1.9.3 Upgrade to the latest version in the v1.9 series: -COMPONENT CURRENT AVAILABLE -API Server v1.9.2 v1.9.3 -Controller Manager v1.9.2 v1.9.3 -Scheduler v1.9.2 v1.9.3 -Kube Proxy v1.9.2 v1.9.3 -Kube DNS 1.14.5 1.14.8 +COMPONENT CURRENT AVAILABLE +kube-apiserver v1.9.2 v1.9.3 +kube-controller-manager v1.9.2 v1.9.3 +kube-scheduler v1.9.2 v1.9.3 +kube-proxy v1.9.2 v1.9.3 +kube-dns 1.14.5 1.14.8 You can now apply the upgrade by executing the following command: @@ -495,18 +482,18 @@ _____________________________________________________________________ }, expectedBytes: []byte(`Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply': COMPONENT CURRENT AVAILABLE -Kubelet 1 x v1.10.2 v1.11.0 +kubelet 1 
x v1.10.2 v1.11.0 Upgrade to the latest kubedns to coredns: -COMPONENT CURRENT AVAILABLE -API Server v1.10.2 v1.11.0 -Controller Manager v1.10.2 v1.11.0 -Scheduler v1.10.2 v1.11.0 -Kube Proxy v1.10.2 v1.11.0 -CoreDNS 1.0.6 -Kube DNS 1.14.7 -Etcd 3.1.11 3.2.18 +COMPONENT CURRENT AVAILABLE +kube-apiserver v1.10.2 v1.11.0 +kube-controller-manager v1.10.2 v1.11.0 +kube-scheduler v1.10.2 v1.11.0 +kube-proxy v1.10.2 v1.11.0 +CoreDNS 1.0.6 +kube-dns 1.14.7 +etcd 3.1.11 3.2.18 You can now apply the upgrade by executing the following command: @@ -542,17 +529,17 @@ _____________________________________________________________________ }, expectedBytes: []byte(`Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply': COMPONENT CURRENT AVAILABLE -Kubelet 1 x v1.10.2 v1.11.0 +kubelet 1 x v1.10.2 v1.11.0 Upgrade to the latest coredns: -COMPONENT CURRENT AVAILABLE -API Server v1.10.2 v1.11.0 -Controller Manager v1.10.2 v1.11.0 -Scheduler v1.10.2 v1.11.0 -Kube Proxy v1.10.2 v1.11.0 -CoreDNS 1.0.5 1.0.6 -Etcd 3.1.11 3.2.18 +COMPONENT CURRENT AVAILABLE +kube-apiserver v1.10.2 v1.11.0 +kube-controller-manager v1.10.2 v1.11.0 +kube-scheduler v1.10.2 v1.11.0 +kube-proxy v1.10.2 v1.11.0 +CoreDNS 1.0.5 1.0.6 +etcd 3.1.11 3.2.18 You can now apply the upgrade by executing the following command: @@ -588,18 +575,18 @@ _____________________________________________________________________ }, expectedBytes: []byte(`Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply': COMPONENT CURRENT AVAILABLE -Kubelet 1 x v1.10.2 v1.11.0 +kubelet 1 x v1.10.2 v1.11.0 Upgrade to the latest coredns to kubedns: -COMPONENT CURRENT AVAILABLE -API Server v1.10.2 v1.11.0 -Controller Manager v1.10.2 v1.11.0 -Scheduler v1.10.2 v1.11.0 -Kube Proxy v1.10.2 v1.11.0 -CoreDNS 1.0.6 -Kube DNS 1.14.9 -Etcd 3.1.11 3.2.18 +COMPONENT CURRENT AVAILABLE +kube-apiserver v1.10.2 v1.11.0 +kube-controller-manager v1.10.2 v1.11.0 +kube-scheduler v1.10.2 v1.11.0 +kube-proxy v1.10.2 v1.11.0 +CoreDNS 1.0.6 +kube-dns 1.14.9 +etcd 3.1.11 3.2.18 You can now apply the upgrade by executing the following command: @@ -613,11 +600,18 @@ _____________________________________________________________________ for _, rt := range tests { t.Run(rt.name, func(t *testing.T) { rt.buf = bytes.NewBufferString("") - printAvailableUpgrades(rt.upgrades, rt.buf, rt.externalEtcd) + // Generate and print upgrade plans + for _, up := range rt.upgrades { + plan, unstableVersionFlag, err := genUpgradePlan(&up, rt.externalEtcd) + if err != nil { + t.Errorf("failed genUpgradePlan, err: %+v", err) + } + printUpgradePlan(&up, plan, unstableVersionFlag, rt.externalEtcd, rt.buf) + } actualBytes := rt.buf.Bytes() if !bytes.Equal(actualBytes, rt.expectedBytes) { t.Errorf( - "failed PrintAvailableUpgrades:\n\texpected: %q\n\t actual: %q", + "failed PrintAvailableUpgrades:\n\texpected: %q\n\n\tactual : %q", string(rt.expectedBytes), string(actualBytes), ) diff --git a/cmd/kubeadm/app/constants/constants.go b/cmd/kubeadm/app/constants/constants.go index a309708c119f..34508d1d784c 100644 --- a/cmd/kubeadm/app/constants/constants.go +++ b/cmd/kubeadm/app/constants/constants.go @@ -279,6 +279,12 @@ const ( KubeProxy = "kube-proxy" // HyperKube defines variable used internally when referring to the hyperkube image HyperKube = "hyperkube" + // CoreDNS defines variable used internally when referring to the CoreDNS component + CoreDNS = "CoreDNS" + // KubeDNS defines variable used internally 
when referring to the KubeDNS component
+	KubeDNS = "kube-dns"
+	// Kubelet defines variable used internally when referring to the Kubelet
+	Kubelet = "kubelet"
 
 	// SelfHostingPrefix describes the prefix that workloads self-hosted by kubeadm have
 	SelfHostingPrefix = "self-hosted-"

From 58f78a53ee1afdc6f656d2517275e52bda34c865 Mon Sep 17 00:00:00 2001
From: Yaseen Hamdulay
Date: Tue, 24 Mar 2020 11:30:48 +0000
Subject: [PATCH 26/92] Add ssh_redirect_user
---
 cluster/gce/gci/master.yaml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/cluster/gce/gci/master.yaml b/cluster/gce/gci/master.yaml
index 70c5ce6716a2..6fd8f876e19c 100644
--- a/cluster/gce/gci/master.yaml
+++ b/cluster/gce/gci/master.yaml
@@ -4,6 +4,7 @@ users:
 - name: etcd
   homedir: /var/etcd
   lock_passwd: true
+  ssh_redirect_user: true
 
 write_files:
 - path: /etc/systemd/system/kube-master-installation.service

From 772eacadc19c62fd0897f9319ee71aa02d85884c Mon Sep 17 00:00:00 2001
From: Caleb Woodbine
Date: Wed, 25 Mar 2020 10:50:03 +1300
Subject: [PATCH 27/92] Update naming of watchEvents
---
 test/e2e/apps/rc.go | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go
index 1211583505ad..4f1fcaee20c5 100644
--- a/test/e2e/apps/rc.go
+++ b/test/e2e/apps/rc.go
@@ -141,9 +141,9 @@ var _ = SIGDescribe("ReplicationController", func() {
 	rcWatchChan := rcWatch.ResultChan()
 	ginkgo.By("waiting for available Replicas")
-	for event := range rcWatchChan {
-		rc, ok := event.Object.(*v1.ReplicationController)
+	for watchEvent := range rcWatchChan {
+		rc, ok := watchEvent.Object.(*v1.ReplicationController)
 		framework.ExpectEqual(ok, true, "Unable to convert type of ReplicationController watch event")
 		if rc.Status.Replicas == testRcInitialReplicaCount && rc.Status.ReadyReplicas == testRcInitialReplicaCount {
 			break
 		}
@@ -198,9 +198,9 @@
 	var rcFromWatch *v1.ReplicationController
 	ginkgo.By("waiting for ReplicationController's scale to be the max amount")
 	foundRcWithMaxScale := false
-	for event := range rcWatchChan {
-		rc, ok := event.Object.(*v1.ReplicationController)
+	for watchEvent := range rcWatchChan {
+		rc, ok := watchEvent.Object.(*v1.ReplicationController)
 		framework.ExpectEqual(ok, true, "Unable to convert type of ReplicationController watch event")
 		if rc.ObjectMeta.Name == testRcName && rc.ObjectMeta.Namespace == testRcNamespace && rc.Status.Replicas == testRcMaxReplicaCount && rc.Status.ReadyReplicas == testRcMaxReplicaCount {
 			foundRcWithMaxScale = true
 			rcFromWatch = rc
 			break
 		}
 	}
@@ -227,9 +227,9 @@
 	framework.ExpectEqual(rcStatus.Status.ReadyReplicas, int32(1), "ReplicationControllerStatus readyReplicas does not equal 1")
 
 	ginkgo.By(fmt.Sprintf("waiting for ReplicationController readyReplicas to be equal to %v", testRcMaxReplicaCount))
-	for event := range rcWatchChan {
-		rc, ok := event.Object.(*v1.ReplicationController)
+	for watchEvent := range rcWatchChan {
+		rc, ok := watchEvent.Object.(*v1.ReplicationController)
 		framework.ExpectEqual(ok, true, "unable to convert type of ReplicationController watch event")
 		if rc.Status.Replicas == testRcMaxReplicaCount && 
rc.Status.ReadyReplicas == testRcMaxReplicaCount { break } @@ -259,9 +259,9 @@ var _ = SIGDescribe("ReplicationController", func() { err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).DeleteCollection(context.TODO(), &metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test-rc-static=true"}) framework.ExpectNoError(err, "Failed to delete ReplicationControllers") - ginkgo.By("waiting for ReplicationController to have a DELETED event") - for event := range rcWatchChan { - if event.Type == "DELETED" { + ginkgo.By("waiting for ReplicationController to have a DELETED watchEvent") + for watchEvent := range rcWatchChan { + if watchEvent.Type == "DELETED" { break } } From dacd32887f502f5046694bd46837be833fbdf6af Mon Sep 17 00:00:00 2001 From: Caleb Woodbine Date: Wed, 25 Mar 2020 11:24:14 +1300 Subject: [PATCH 28/92] Update DeleteOptions --- test/e2e/apps/rc.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go index 4f1fcaee20c5..675774217f06 100644 --- a/test/e2e/apps/rc.go +++ b/test/e2e/apps/rc.go @@ -256,7 +256,7 @@ var _ = SIGDescribe("ReplicationController", func() { // Delete ReplicationController ginkgo.By("deleting ReplicationControllers by collection") - err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).DeleteCollection(context.TODO(), &metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test-rc-static=true"}) + err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test-rc-static=true"}) framework.ExpectNoError(err, "Failed to delete ReplicationControllers") ginkgo.By("waiting for ReplicationController to have a DELETED watchEvent") From 9aa3c23ad36a36a1cae1b57649010e2db3a954f5 Mon Sep 17 00:00:00 2001 From: Caleb Woodbine Date: Wed, 25 Mar 2020 15:10:28 +1300 Subject: [PATCH 29/92] Update DynamicClient fetch of Pod statement --- test/e2e/apps/rc.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go index 675774217f06..d92066a196b1 100644 --- a/test/e2e/apps/rc.go +++ b/test/e2e/apps/rc.go @@ -175,7 +175,7 @@ var _ = SIGDescribe("ReplicationController", func() { framework.ExpectEqual(rcStatus.Status.ReadyReplicas, int32(0), "ReplicationControllerStatus's readyReplicas does not equal 0") ginkgo.By("fetching ReplicationController status") - rcStatusUnstructured, err := dc.Resource(rcResource).Namespace(testRcNamespace).Get(testRcName, metav1.GetOptions{}, "status") + rcStatusUnstructured, err := dc.Resource(rcResource).Namespace(testRcNamespace).Get(context.TODO(), testRcName, metav1.GetOptions{}, "status") framework.ExpectNoError(err, "Failed to fetch ReplicationControllerStatus") rcStatusUjson, err := json.Marshal(rcStatusUnstructured) From dfab6b637ffa87753905c29357655b544837a310 Mon Sep 17 00:00:00 2001 From: drfish Date: Wed, 25 Mar 2020 09:57:32 +0800 Subject: [PATCH 30/92] Update .import-aliases for e2e test framework --- hack/.import-aliases | 14 +- test/e2e/apimachinery/aggregator.go | 8 +- .../apimachinery/crd_conversion_webhook.go | 6 +- test/e2e/apimachinery/webhook.go | 6 +- test/e2e/apps/deployment.go | 74 ++++----- test/e2e/apps/network_partition.go | 16 +- test/e2e/apps/replica_set.go | 4 +- test/e2e/apps/statefulset.go | 150 +++++++++--------- test/e2e/apps/wait.go | 16 +- test/e2e/auth/audit.go | 8 +- test/e2e/auth/audit_dynamic.go | 4 +- test/e2e/auth/pod_security_policy.go | 10 +- 
test/e2e/cloud/gcp/cluster_upgrade.go | 10 +- test/e2e/common/volumes.go | 26 +-- test/e2e/framework/ingress/ingress_utils.go | 10 +- test/e2e/framework/log.go | 6 +- test/e2e/framework/log/logger.go | 4 +- test/e2e/framework/psp.go | 10 +- .../instrumentation/logging/generic_soak.go | 4 +- .../instrumentation/monitoring/accelerator.go | 4 +- .../monitoring/metrics_grabber.go | 6 +- test/e2e/kubectl/kubectl.go | 14 +- test/e2e/manifest/manifest.go | 14 +- test/e2e/network/dual_stack.go | 14 +- test/e2e/network/ingress.go | 142 ++++++++--------- test/e2e/network/scale/ingress.go | 10 +- .../network/scale/localrun/ingress_scale.go | 4 +- test/e2e/network/service.go | 8 +- test/e2e/node/kubelet.go | 4 +- test/e2e/scheduling/nvidia-gpus.go | 4 +- test/e2e/scheduling/preemption.go | 4 +- test/e2e/storage/drivers/csi.go | 6 +- test/e2e/storage/drivers/in_tree.go | 134 ++++++++-------- test/e2e/storage/external/external.go | 32 ++-- test/e2e/storage/external/external_test.go | 4 +- test/e2e/storage/flexvolume.go | 16 +- .../flexvolume_mounted_volume_resize.go | 6 +- test/e2e/storage/mounted_volume_resize.go | 8 +- .../nfs_persistent_volume-disruptive.go | 4 +- test/e2e/storage/persistent_volumes-local.go | 6 +- test/e2e/storage/persistent_volumes.go | 16 +- test/e2e/storage/testpatterns/testpattern.go | 10 +- test/e2e/storage/testsuites/api_test.go | 4 +- test/e2e/storage/testsuites/base.go | 16 +- test/e2e/storage/testsuites/base_test.go | 130 +++++++-------- test/e2e/storage/testsuites/ephemeral.go | 6 +- test/e2e/storage/testsuites/multivolume.go | 4 +- test/e2e/storage/testsuites/provisioning.go | 38 ++--- test/e2e/storage/testsuites/snapshottable.go | 4 +- test/e2e/storage/testsuites/subpath.go | 44 ++--- test/e2e/storage/testsuites/testdriver.go | 6 +- test/e2e/storage/testsuites/volume_expand.go | 4 +- test/e2e/storage/testsuites/volume_io.go | 10 +- test/e2e/storage/testsuites/volumemode.go | 4 +- test/e2e/storage/testsuites/volumes.go | 14 +- test/e2e/storage/utils/create.go | 7 +- test/e2e/storage/volume_metrics.go | 24 +-- test/e2e/storage/volume_provisioning.go | 8 +- test/e2e/storage/volumes.go | 8 +- .../storage/vsphere/vsphere_statefulsets.go | 24 +-- .../vsphere/vsphere_volume_node_poweroff.go | 8 +- test/e2e/upgrades/apps/deployments.go | 16 +- test/e2e/upgrades/apps/replicasets.go | 10 +- test/e2e/upgrades/apps/statefulset.go | 22 +-- test/e2e/upgrades/cassandra.go | 8 +- test/e2e/upgrades/etcd.go | 8 +- test/e2e/upgrades/mysql.go | 8 +- test/e2e_node/device_plugin_test.go | 4 +- test/e2e_node/dynamic_kubelet_config_test.go | 4 +- test/e2e_node/e2e_node_suite_test.go | 4 +- test/e2e_node/gpu_device_plugin_test.go | 14 +- test/e2e_node/image_list.go | 8 +- test/e2e_node/resource_metrics_test.go | 14 +- test/e2e_node/summary_test.go | 68 ++++---- test/e2e_node/topology_manager_test.go | 8 +- 75 files changed, 713 insertions(+), 702 deletions(-) diff --git a/hack/.import-aliases b/hack/.import-aliases index 97b67fcc4522..7bc1c95e7f76 100644 --- a/hack/.import-aliases +++ b/hack/.import-aliases @@ -51,22 +51,34 @@ "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1": "kubeletstatsv1alpha1", "k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1": "proxyconfigv1alpha1", "k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1": "schedulerconfigv1alpha1", + "k8s.io/kubernetes/test/e2e/framework/auth": "e2eauth", "k8s.io/kubernetes/test/e2e/framework/autoscaling": "e2eautoscaling", + "k8s.io/kubernetes/test/e2e/framework/config": "e2econfig", + "k8s.io/kubernetes/test/e2e/framework/deployment": 
"e2edeployment", "k8s.io/kubernetes/test/e2e/framework/endpoints": "e2eendpoints", "k8s.io/kubernetes/test/e2e/framework/events": "e2eevents", + "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper": "e2eginkgowrapper", + "k8s.io/kubernetes/test/e2e/framework/gpu": "e2egpu", + "k8s.io/kubernetes/test/e2e/framework/ingress": "e2eingress", "k8s.io/kubernetes/test/e2e/framework/job": "e2ejob", "k8s.io/kubernetes/test/e2e/framework/kubectl": "e2ekubectl", "k8s.io/kubernetes/test/e2e/framework/kubelet": "e2ekubelet", "k8s.io/kubernetes/test/e2e/framework/log": "e2elog", + "k8s.io/kubernetes/test/e2e/framework/metrics": "e2emetrics", "k8s.io/kubernetes/test/e2e/framework/network": "e2enetwork", "k8s.io/kubernetes/test/e2e/framework/node": "e2enode", "k8s.io/kubernetes/test/e2e/framework/perf": "e2eperf", "k8s.io/kubernetes/test/e2e/framework/pod": "e2epod", "k8s.io/kubernetes/test/e2e/framework/pv": "e2epv", "k8s.io/kubernetes/test/e2e/framework/rc": "e2erc", + "k8s.io/kubernetes/test/e2e/framework/replicaset": "e2ereplicaset", "k8s.io/kubernetes/test/e2e/framework/resource": "e2eresource", "k8s.io/kubernetes/test/e2e/framework/security": "e2esecurity", "k8s.io/kubernetes/test/e2e/framework/service": "e2eservice", "k8s.io/kubernetes/test/e2e/framework/skipper": "e2eskipper", - "k8s.io/kubernetes/test/e2e/framework/ssh": "e2essh" + "k8s.io/kubernetes/test/e2e/framework/ssh": "e2essh", + "k8s.io/kubernetes/test/e2e/framework/statefulset": "e2estatefulset", + "k8s.io/kubernetes/test/e2e/framework/testfiles": "e2etestfiles", + "k8s.io/kubernetes/test/e2e/framework/timer": "e2etimer", + "k8s.io/kubernetes/test/e2e/framework/volume": "e2evolume" } diff --git a/test/e2e/apimachinery/aggregator.go b/test/e2e/apimachinery/aggregator.go index 1af9c17c4744..5bfd2923a7ea 100644 --- a/test/e2e/apimachinery/aggregator.go +++ b/test/e2e/apimachinery/aggregator.go @@ -42,7 +42,7 @@ import ( aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1" "k8s.io/kubernetes/test/e2e/framework" - e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment" + e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" samplev1alpha1 "k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1" @@ -274,9 +274,9 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl } deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace) - err = e2edeploy.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image) + err = e2edeployment.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image) framework.ExpectNoError(err, "waiting for the deployment of image %s in %s in %s to complete", image, deploymentName, namespace) - err = e2edeploy.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", etcdImage) + err = e2edeployment.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", etcdImage) framework.ExpectNoError(err, "waiting for the deployment of image %s in %s to complete", etcdImage, deploymentName, namespace) // kubectl create -f service.yaml @@ -333,7 +333,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl // kubectl get deployments -n && status == Running // NOTE: aggregated apis 
should generally be set up in their own namespace (). As the test framework // is setting up a new namespace, we are just using that. - err = e2edeploy.WaitForDeploymentComplete(client, deployment) + err = e2edeployment.WaitForDeploymentComplete(client, deployment) framework.ExpectNoError(err, "deploying extension apiserver in namespace %s", namespace) // kubectl create -f apiservice.yaml diff --git a/test/e2e/apimachinery/crd_conversion_webhook.go b/test/e2e/apimachinery/crd_conversion_webhook.go index a274133bf19c..28da7ae89c6d 100644 --- a/test/e2e/apimachinery/crd_conversion_webhook.go +++ b/test/e2e/apimachinery/crd_conversion_webhook.go @@ -35,7 +35,7 @@ import ( "k8s.io/client-go/dynamic" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" - e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment" + e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment" "k8s.io/kubernetes/test/utils/crd" imageutils "k8s.io/kubernetes/test/utils/image" "k8s.io/utils/pointer" @@ -339,9 +339,9 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string, deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentCRDName, namespace) ginkgo.By("Wait for the deployment to be ready") - err = e2edeploy.WaitForDeploymentRevisionAndImage(client, namespace, deploymentCRDName, "1", image) + err = e2edeployment.WaitForDeploymentRevisionAndImage(client, namespace, deploymentCRDName, "1", image) framework.ExpectNoError(err, "waiting for the deployment of image %s in %s in %s to complete", image, deploymentName, namespace) - err = e2edeploy.WaitForDeploymentComplete(client, deployment) + err = e2edeployment.WaitForDeploymentComplete(client, deployment) framework.ExpectNoError(err, "waiting for the deployment status valid", image, deploymentCRDName, namespace) ginkgo.By("Deploying the webhook service") diff --git a/test/e2e/apimachinery/webhook.go b/test/e2e/apimachinery/webhook.go index 6338bdb79a6b..2f3f96ebffcf 100644 --- a/test/e2e/apimachinery/webhook.go +++ b/test/e2e/apimachinery/webhook.go @@ -42,7 +42,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/util/retry" "k8s.io/kubernetes/test/e2e/framework" - e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment" + e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/utils/crd" imageutils "k8s.io/kubernetes/test/utils/image" @@ -840,9 +840,9 @@ func deployWebhookAndService(f *framework.Framework, image string, certCtx *cert deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace) ginkgo.By("Wait for the deployment to be ready") - err = e2edeploy.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image) + err = e2edeployment.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image) framework.ExpectNoError(err, "waiting for the deployment of image %s in %s in %s to complete", image, deploymentName, namespace) - err = e2edeploy.WaitForDeploymentComplete(client, deployment) + err = e2edeployment.WaitForDeploymentComplete(client, deployment) framework.ExpectNoError(err, "waiting for the deployment status valid", image, deploymentName, namespace) ginkgo.By("Deploying the webhook 
service") diff --git a/test/e2e/apps/deployment.go b/test/e2e/apps/deployment.go index b9ec1aae1897..59112320572d 100644 --- a/test/e2e/apps/deployment.go +++ b/test/e2e/apps/deployment.go @@ -41,9 +41,9 @@ import ( appsinternal "k8s.io/kubernetes/pkg/apis/apps" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" "k8s.io/kubernetes/test/e2e/framework" - e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment" + e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" - "k8s.io/kubernetes/test/e2e/framework/replicaset" + e2ereplicaset "k8s.io/kubernetes/test/e2e/framework/replicaset" e2eresource "k8s.io/kubernetes/test/e2e/framework/resource" e2eservice "k8s.io/kubernetes/test/e2e/framework/service" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" @@ -233,16 +233,16 @@ func testDeleteDeployment(f *framework.Framework) { podLabels := map[string]string{"name": WebserverImageName} replicas := int32(1) framework.Logf("Creating simple deployment %s", deploymentName) - d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType) + d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType) d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"} deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) framework.ExpectNoError(err) // Wait for it to be updated to revision 1 - err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", WebserverImage) + err = e2edeployment.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", WebserverImage) framework.ExpectNoError(err) - err = e2edeploy.WaitForDeploymentComplete(c, deploy) + err = e2edeployment.WaitForDeploymentComplete(c, deploy) framework.ExpectNoError(err) deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{}) @@ -280,17 +280,17 @@ func testRollingUpdateDeployment(f *framework.Framework) { // Create a deployment to delete webserver pods and instead bring up agnhost pods. deploymentName := "test-rolling-update-deployment" framework.Logf("Creating deployment %q", deploymentName) - d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType) + d := e2edeployment.NewDeployment(deploymentName, replicas, deploymentPodLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType) deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) framework.ExpectNoError(err) // Wait for it to be updated to revision 3546343826724305833. 
framework.Logf("Ensuring deployment %q gets the next revision from the one the adopted replica set %q has", deploy.Name, rs.Name) - err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3546343826724305833", AgnhostImage) + err = e2edeployment.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3546343826724305833", AgnhostImage) framework.ExpectNoError(err) framework.Logf("Ensuring status for deployment %q is the expected", deploy.Name) - err = e2edeploy.WaitForDeploymentComplete(c, deploy) + err = e2edeployment.WaitForDeploymentComplete(c, deploy) framework.ExpectNoError(err) // There should be 1 old RS (webserver-controller, which is adopted) @@ -309,22 +309,22 @@ func testRecreateDeployment(f *framework.Framework) { // Create a deployment that brings up agnhost pods. deploymentName := "test-recreate-deployment" framework.Logf("Creating deployment %q", deploymentName) - d := e2edeploy.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, AgnhostImageName, AgnhostImage, appsv1.RecreateDeploymentStrategyType) + d := e2edeployment.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, AgnhostImageName, AgnhostImage, appsv1.RecreateDeploymentStrategyType) deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) framework.ExpectNoError(err) // Wait for it to be updated to revision 1 framework.Logf("Waiting deployment %q to be updated to revision 1", deploymentName) - err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", AgnhostImage) + err = e2edeployment.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", AgnhostImage) framework.ExpectNoError(err) framework.Logf("Waiting deployment %q to complete", deploymentName) - err = e2edeploy.WaitForDeploymentComplete(c, deployment) + err = e2edeployment.WaitForDeploymentComplete(c, deployment) framework.ExpectNoError(err) // Update deployment to delete agnhost pods and bring up webserver pods. framework.Logf("Triggering a new rollout for deployment %q", deploymentName) - deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *appsv1.Deployment) { + deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *appsv1.Deployment) { update.Spec.Template.Spec.Containers[0].Name = WebserverImageName update.Spec.Template.Spec.Containers[0].Image = WebserverImage }) @@ -395,7 +395,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) { } } }() - d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType) + d := e2edeployment.NewDeployment(deploymentName, replicas, deploymentPodLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType) d.Spec.RevisionHistoryLimit = revisionHistoryLimit _, err = c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) framework.ExpectNoError(err) @@ -427,7 +427,7 @@ func testRolloverDeployment(f *framework.Framework) { // Wait for replica set to become ready before adopting it. framework.Logf("Waiting for pods owned by replica set %q to become ready", rsName) - err = replicaset.WaitForReadyReplicaSet(c, ns, rsName) + err = e2ereplicaset.WaitForReadyReplicaSet(c, ns, rsName) framework.ExpectNoError(err) // Create a deployment to delete webserver pods and instead bring up redis-slave pods. 
@@ -437,7 +437,7 @@ func testRolloverDeployment(f *framework.Framework) { deploymentImage := "gcr.io/google_samples/gb-redisslave:nonexistent" deploymentStrategyType := appsv1.RollingUpdateDeploymentStrategyType framework.Logf("Creating deployment %q", deploymentName) - newDeployment := e2edeploy.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType) + newDeployment := e2edeployment.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType) newDeployment.Spec.Strategy.RollingUpdate = &appsv1.RollingUpdateDeployment{ MaxUnavailable: intOrStrP(0), MaxSurge: intOrStrP(1), @@ -468,7 +468,7 @@ func testRolloverDeployment(f *framework.Framework) { // The deployment is stuck, update it to rollover the above 2 ReplicaSets and bring up agnhost pods. framework.Logf("Rollover old replica sets for deployment %q with new image update", deploymentName) updatedDeploymentImageName, updatedDeploymentImage := AgnhostImageName, AgnhostImage - deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *appsv1.Deployment) { + deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *appsv1.Deployment) { update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage }) @@ -481,7 +481,7 @@ func testRolloverDeployment(f *framework.Framework) { // Wait for it to be updated to revision 2 framework.Logf("Wait for revision update of deployment %q to 2", deploymentName) - err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage) + err = e2edeployment.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage) framework.ExpectNoError(err) framework.Logf("Make sure deployment %q is complete", deploymentName) @@ -528,7 +528,7 @@ func testIterativeDeployments(f *framework.Framework) { // Create a webserver deployment. 
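The testIterativeDeployments hunks below (continuing with the webserver deployment being created here) all funnel through one idiom: mutate the freshest copy of the object inside a callback that e2edeployment.UpdateDeploymentWithRetries re-applies on write conflicts. A sketch of that idiom (triggerRollout is a hypothetical name):

```go
package e2esketch

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
)

// triggerRollout (hypothetical) shows the conflict-safe update idiom: the
// helper fetches the latest Deployment, runs the callback, and retries the
// write if another actor updated the object in between.
func triggerRollout(c clientset.Interface, ns, name string, i int) (*appsv1.Deployment, error) {
	return e2edeployment.UpdateDeploymentWithRetries(c, ns, name, func(update *appsv1.Deployment) {
		// Any pod-template change starts a new rollout.
		update.Spec.Template.Spec.Containers[0].Env = append(
			update.Spec.Template.Spec.Containers[0].Env,
			v1.EnvVar{Name: "A", Value: fmt.Sprintf("%d", i)},
		)
	})
}
```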
deploymentName := "webserver" thirty := int32(30) - d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType) + d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType) d.Spec.ProgressDeadlineSeconds = &thirty d.Spec.RevisionHistoryLimit = &two d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero @@ -546,7 +546,7 @@ func testIterativeDeployments(f *framework.Framework) { case n < 0.2: // trigger a new deployment framework.Logf("%02d: triggering a new rollout for deployment %q", i, deployment.Name) - deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) { + deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) { newEnv := v1.EnvVar{Name: "A", Value: fmt.Sprintf("%d", i)} update.Spec.Template.Spec.Containers[0].Env = append(update.Spec.Template.Spec.Containers[0].Env, newEnv) randomScale(update, i) @@ -556,7 +556,7 @@ func testIterativeDeployments(f *framework.Framework) { case n < 0.4: // rollback to the previous version framework.Logf("%02d: rolling back a rollout for deployment %q", i, deployment.Name) - deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) { + deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) { if update.Annotations == nil { update.Annotations = make(map[string]string) } @@ -567,7 +567,7 @@ func testIterativeDeployments(f *framework.Framework) { case n < 0.6: // just scaling framework.Logf("%02d: scaling deployment %q", i, deployment.Name) - deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) { + deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) { randomScale(update, i) }) framework.ExpectNoError(err) @@ -576,14 +576,14 @@ func testIterativeDeployments(f *framework.Framework) { // toggling the deployment if deployment.Spec.Paused { framework.Logf("%02d: pausing deployment %q", i, deployment.Name) - deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) { + deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) { update.Spec.Paused = true randomScale(update, i) }) framework.ExpectNoError(err) } else { framework.Logf("%02d: resuming deployment %q", i, deployment.Name) - deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) { + deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) { update.Spec.Paused = false randomScale(update, i) }) @@ -620,7 +620,7 @@ func testIterativeDeployments(f *framework.Framework) { deployment, err = c.AppsV1().Deployments(ns).Get(context.TODO(), deployment.Name, metav1.GetOptions{}) framework.ExpectNoError(err) if deployment.Spec.Paused { - deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) { + deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) { update.Spec.Paused = false }) } @@ -630,7 +630,7 @@ func testIterativeDeployments(f 
*framework.Framework) { framework.ExpectNoError(err) framework.Logf("Waiting for deployment %q status", deploymentName) - err = e2edeploy.WaitForDeploymentComplete(c, deployment) + err = e2edeployment.WaitForDeploymentComplete(c, deployment) framework.ExpectNoError(err) framework.Logf("Checking deployment %q for a complete condition", deploymentName) @@ -646,10 +646,10 @@ func testDeploymentsControllerRef(f *framework.Framework) { framework.Logf("Creating Deployment %q", deploymentName) podLabels := map[string]string{"name": WebserverImageName} replicas := int32(1) - d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType) + d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType) deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) framework.ExpectNoError(err) - err = e2edeploy.WaitForDeploymentComplete(c, deploy) + err = e2edeployment.WaitForDeploymentComplete(c, deploy) framework.ExpectNoError(err) framework.Logf("Verifying Deployment %q has only one ReplicaSet", deploymentName) @@ -673,10 +673,10 @@ func testDeploymentsControllerRef(f *framework.Framework) { deploymentName = "test-adopt-deployment" framework.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName) - d = e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType) + d = e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType) deploy, err = c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) framework.ExpectNoError(err) - err = e2edeploy.WaitForDeploymentComplete(c, deploy) + err = e2edeployment.WaitForDeploymentComplete(c, deploy) framework.ExpectNoError(err) framework.Logf("Waiting for the ReplicaSet to have the right controllerRef") @@ -703,7 +703,7 @@ func testProportionalScalingDeployment(f *framework.Framework) { // Create a webserver deployment. deploymentName := "webserver-deployment" - d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType) + d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType) d.Spec.Strategy.RollingUpdate = new(appsv1.RollingUpdateDeployment) d.Spec.Strategy.RollingUpdate.MaxSurge = intOrStrP(3) d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2) @@ -722,7 +722,7 @@ func testProportionalScalingDeployment(f *framework.Framework) { framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err) framework.Logf("Waiting for deployment %q to complete", deployment.Name) - err = e2edeploy.WaitForDeploymentComplete(c, deployment) + err = e2edeployment.WaitForDeploymentComplete(c, deployment) framework.ExpectNoError(err) firstRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1()) @@ -731,7 +731,7 @@ func testProportionalScalingDeployment(f *framework.Framework) { // Update the deployment with a non-existent image so that the new replica set // will be blocked to simulate a partial rollout. 
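As the comment above says and the following hunks show, the proportional-scaling test wedges a rollout on an unpullable image and then checks that the first ReplicaSet still keeps replicas - maxUnavailable pods available. A sketch of that step (blockRollout is a hypothetical name):

```go
package e2esketch

import (
	appsv1 "k8s.io/api/apps/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
	e2ereplicaset "k8s.io/kubernetes/test/e2e/framework/replicaset"
)

// blockRollout (hypothetical) points the Deployment at an image that cannot be
// pulled, then verifies the first ReplicaSet still serves the minimum number
// of available pods while the new ReplicaSet stays blocked.
func blockRollout(c clientset.Interface, ns string, d *appsv1.Deployment, firstRS *appsv1.ReplicaSet, minAvailable int32) error {
	_, err := e2edeployment.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *appsv1.Deployment) {
		update.Spec.Template.Spec.Containers[0].Image = "webserver:404" // non-existent tag
	})
	if err != nil {
		return err
	}
	return e2ereplicaset.WaitForReplicaSetTargetAvailableReplicas(c, firstRS, minAvailable)
}
```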
framework.Logf("Updating deployment %q with a non-existent image", deploymentName) - deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *appsv1.Deployment) { + deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *appsv1.Deployment) { update.Spec.Template.Spec.Containers[0].Image = "webserver:404" }) framework.ExpectNoError(err) @@ -747,7 +747,7 @@ func testProportionalScalingDeployment(f *framework.Framework) { // First rollout's replicaset should have Deployment's (replicas - maxUnavailable) = 10 - 2 = 8 available replicas. minAvailableReplicas := replicas - int32(maxUnavailable) framework.Logf("Waiting for the first rollout's replicaset to have .status.availableReplicas = %d", minAvailableReplicas) - err = replicaset.WaitForReplicaSetTargetAvailableReplicas(c, firstRS, minAvailableReplicas) + err = e2ereplicaset.WaitForReplicaSetTargetAvailableReplicas(c, firstRS, minAvailableReplicas) framework.ExpectNoError(err) // First rollout's replicaset should have .spec.replicas = 8 too. @@ -796,7 +796,7 @@ func testProportionalScalingDeployment(f *framework.Framework) { // Scale the deployment to 30 replicas. newReplicas = int32(30) framework.Logf("Scaling up the deployment %q from %d to %d", deploymentName, replicas, newReplicas) - deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) { + deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) { update.Spec.Replicas = &newReplicas }) framework.ExpectNoError(err) @@ -868,7 +868,7 @@ func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framew framework.Logf("Creating Deployment %q", name) podLabels := map[string]string{"name": name} replicas := int32(3) - d := e2edeploy.NewDeployment(name, replicas, podLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType) + d := e2edeployment.NewDeployment(name, replicas, podLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType) // NewDeployment assigned the same value to both d.Spec.Selector and // d.Spec.Template.Labels, so mutating the one would mutate the other. 
// Thus we need to set d.Spec.Template.Labels to a new value if we want @@ -893,7 +893,7 @@ func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framew } deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{}) framework.ExpectNoError(err) - err = e2edeploy.WaitForDeploymentComplete(c, deployment) + err = e2edeployment.WaitForDeploymentComplete(c, deployment) framework.ExpectNoError(err) framework.Logf("Creating a service %s with type=LoadBalancer and externalTrafficPolicy=Local in namespace %s", name, ns) @@ -939,7 +939,7 @@ func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framew framework.Logf("Triggering a rolling deployment several times") for i := 1; i <= 3; i++ { framework.Logf("Updating label deployment %q pod spec (iteration #%d)", name, i) - deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *appsv1.Deployment) { + deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *appsv1.Deployment) { update.Spec.Template.Labels["iteration"] = fmt.Sprintf("%d", i) setAffinities(update, true) }) diff --git a/test/e2e/apps/network_partition.go b/test/e2e/apps/network_partition.go index 65958f63537b..03b72bb387b2 100644 --- a/test/e2e/apps/network_partition.go +++ b/test/e2e/apps/network_partition.go @@ -43,7 +43,7 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eservice "k8s.io/kubernetes/test/e2e/framework/service" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" - e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset" + e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset" testutils "k8s.io/kubernetes/test/utils" "github.com/onsi/ginkgo" @@ -379,13 +379,13 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { framework.DumpDebugInfo(c, ns) } framework.Logf("Deleting all stateful set in ns %v", ns) - e2esset.DeleteAllStatefulSets(c, ns) + e2estatefulset.DeleteAllStatefulSets(c, ns) }) ginkgo.It("should come back up if node goes down [Slow] [Disruptive]", func() { petMounts := []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}} podMounts := []v1.VolumeMount{{Name: "home", MountPath: "/home"}} - ps := e2esset.NewStatefulSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels) + ps := e2estatefulset.NewStatefulSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels) _, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ps, metav1.CreateOptions{}) framework.ExpectNoError(err) @@ -396,19 +396,19 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { common.RestartNodes(f.ClientSet, nodes) ginkgo.By("waiting for pods to be running again") - e2esset.WaitForRunningAndReady(c, *ps.Spec.Replicas, ps) + e2estatefulset.WaitForRunningAndReady(c, *ps.Spec.Replicas, ps) }) ginkgo.It("should not reschedule stateful pods if there is a network partition [Slow] [Disruptive]", func() { e2eskipper.SkipUnlessSSHKeyPresent() - ps := e2esset.NewStatefulSet(psName, ns, headlessSvcName, 3, []v1.VolumeMount{}, []v1.VolumeMount{}, labels) + ps := e2estatefulset.NewStatefulSet(psName, ns, headlessSvcName, 3, []v1.VolumeMount{}, []v1.VolumeMount{}, labels) _, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ps, metav1.CreateOptions{}) framework.ExpectNoError(err) - e2esset.WaitForRunningAndReady(c, *ps.Spec.Replicas, ps) + e2estatefulset.WaitForRunningAndReady(c, *ps.Spec.Replicas, ps) - pod := e2esset.GetPodList(c, ps).Items[0] + pod := 
e2estatefulset.GetPodList(c, ps).Items[0] node, err := c.CoreV1().Nodes().Get(context.TODO(), pod.Spec.NodeName, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -427,7 +427,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { } ginkgo.By("waiting for pods to be running again") - e2esset.WaitForRunningAndReady(c, *ps.Spec.Replicas, ps) + e2estatefulset.WaitForRunningAndReady(c, *ps.Spec.Replicas, ps) }) }) diff --git a/test/e2e/apps/replica_set.go b/test/e2e/apps/replica_set.go index 3c4ae78c09ff..f6fb4d8bbd78 100644 --- a/test/e2e/apps/replica_set.go +++ b/test/e2e/apps/replica_set.go @@ -32,7 +32,7 @@ import ( "k8s.io/kubernetes/pkg/controller/replicaset" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" - replicasetutil "k8s.io/kubernetes/test/e2e/framework/replicaset" + e2ereplicaset "k8s.io/kubernetes/test/e2e/framework/replicaset" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" "github.com/onsi/ginkgo" @@ -229,7 +229,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) { framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Scaling down replica set %q to satisfy pod quota", name)) - rs, err = replicasetutil.UpdateReplicaSetWithRetries(c, namespace, name, func(update *appsv1.ReplicaSet) { + rs, err = e2ereplicaset.UpdateReplicaSetWithRetries(c, namespace, name, func(update *appsv1.ReplicaSet) { x := int32(2) update.Spec.Replicas = &x }) diff --git a/test/e2e/apps/statefulset.go b/test/e2e/apps/statefulset.go index 0e915bd73c92..1a3456f824bb 100644 --- a/test/e2e/apps/statefulset.go +++ b/test/e2e/apps/statefulset.go @@ -39,7 +39,7 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eservice "k8s.io/kubernetes/test/e2e/framework/service" - e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset" + e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -99,7 +99,7 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.BeforeEach(func() { statefulPodMounts = []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}} podMounts = []v1.VolumeMount{{Name: "home", MountPath: "/home"}} - ss = e2esset.NewStatefulSet(ssName, ns, headlessSvcName, 2, statefulPodMounts, podMounts, labels) + ss = e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 2, statefulPodMounts, podMounts, labels) ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns) headlessService := e2eservice.CreateServiceSpec(headlessSvcName, "", true, labels) @@ -112,7 +112,7 @@ var _ = SIGDescribe("StatefulSet", func() { framework.DumpDebugInfo(c, ns) } framework.Logf("Deleting all statefulset in ns %v", ns) - e2esset.DeleteAllStatefulSets(c, ns) + e2estatefulset.DeleteAllStatefulSets(c, ns) }) // This can't be Conformance yet because it depends on a default @@ -121,37 +121,37 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) e2epv.SkipIfNoDefaultStorageClass(c) *(ss.Spec.Replicas) = 3 - e2esset.PauseNewPods(ss) + e2estatefulset.PauseNewPods(ss) _, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Saturating stateful set " + ss.Name) - e2esset.Saturate(c, ss) + e2estatefulset.Saturate(c, ss) ginkgo.By("Verifying statefulset mounted data directory is usable") - framework.ExpectNoError(e2esset.CheckMount(c, ss, "/data")) + 
framework.ExpectNoError(e2estatefulset.CheckMount(c, ss, "/data")) ginkgo.By("Verifying statefulset provides a stable hostname for each pod") - framework.ExpectNoError(e2esset.CheckHostname(c, ss)) + framework.ExpectNoError(e2estatefulset.CheckHostname(c, ss)) ginkgo.By("Verifying statefulset set proper service name") - framework.ExpectNoError(e2esset.CheckServiceName(ss, headlessSvcName)) + framework.ExpectNoError(e2estatefulset.CheckServiceName(ss, headlessSvcName)) cmd := "echo $(hostname) | dd of=/data/hostname conv=fsync" ginkgo.By("Running " + cmd + " in all stateful pods") - framework.ExpectNoError(e2esset.ExecInStatefulPods(c, ss, cmd)) + framework.ExpectNoError(e2estatefulset.ExecInStatefulPods(c, ss, cmd)) ginkgo.By("Restarting statefulset " + ss.Name) - e2esset.Restart(c, ss) - e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) + e2estatefulset.Restart(c, ss) + e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) ginkgo.By("Verifying statefulset mounted data directory is usable") - framework.ExpectNoError(e2esset.CheckMount(c, ss, "/data")) + framework.ExpectNoError(e2estatefulset.CheckMount(c, ss, "/data")) cmd = "if [ \"$(cat /data/hostname)\" = \"$(hostname)\" ]; then exit 0; else exit 1; fi" ginkgo.By("Running " + cmd + " in all stateful pods") - framework.ExpectNoError(e2esset.ExecInStatefulPods(c, ss, cmd)) + framework.ExpectNoError(e2estatefulset.ExecInStatefulPods(c, ss, cmd)) }) // This can't be Conformance yet because it depends on a default @@ -160,7 +160,7 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) e2epv.SkipIfNoDefaultStorageClass(c) *(ss.Spec.Replicas) = 1 - e2esset.PauseNewPods(ss) + e2estatefulset.PauseNewPods(ss) // Replace ss with the one returned from Create() so it has the UID. // Save Kind since it won't be populated in the returned ss. @@ -170,8 +170,8 @@ var _ = SIGDescribe("StatefulSet", func() { ss.Kind = kind ginkgo.By("Saturating stateful set " + ss.Name) - e2esset.Saturate(c, ss) - pods := e2esset.GetPodList(c, ss) + e2estatefulset.Saturate(c, ss) + pods := e2estatefulset.GetPodList(c, ss) gomega.Expect(pods.Items).To(gomega.HaveLen(int(*ss.Spec.Replicas))) ginkgo.By("Checking that stateful set pods are created with ControllerRef") @@ -245,18 +245,18 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) e2epv.SkipIfNoDefaultStorageClass(c) *(ss.Spec.Replicas) = 2 - e2esset.PauseNewPods(ss) + e2estatefulset.PauseNewPods(ss) _, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) framework.ExpectNoError(err) - e2esset.WaitForRunning(c, 1, 0, ss) + e2estatefulset.WaitForRunning(c, 1, 0, ss) ginkgo.By("Resuming stateful pod at index 0.") - e2esset.ResumeNextPod(c, ss) + e2estatefulset.ResumeNextPod(c, ss) ginkgo.By("Waiting for stateful pod at index 1 to enter running.") - e2esset.WaitForRunning(c, 2, 1, ss) + e2estatefulset.WaitForRunning(c, 2, 1, ss) // Now we have 1 healthy and 1 unhealthy stateful pod. 
Deleting the healthy stateful pod should *not* // create a new stateful pod till the remaining stateful pod becomes healthy, which won't happen till @@ -266,13 +266,13 @@ var _ = SIGDescribe("StatefulSet", func() { deleteStatefulPodAtIndex(c, 0, ss) ginkgo.By("Confirming stateful pod at index 0 is recreated.") - e2esset.WaitForRunning(c, 2, 1, ss) + e2estatefulset.WaitForRunning(c, 2, 1, ss) ginkgo.By("Resuming stateful pod at index 1.") - e2esset.ResumeNextPod(c, ss) + e2estatefulset.ResumeNextPod(c, ss) ginkgo.By("Confirming all stateful pods in statefulset are created.") - e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) + e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) }) // This can't be Conformance yet because it depends on a default @@ -291,7 +291,7 @@ var _ = SIGDescribe("StatefulSet", func() { */ framework.ConformanceIt("should perform rolling updates and roll backs of template modifications", func() { ginkgo.By("Creating a new StatefulSet") - ss := e2esset.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels) + ss := e2estatefulset.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels) rollbackTest(c, ns, ss) }) @@ -302,7 +302,7 @@ var _ = SIGDescribe("StatefulSet", func() { */ framework.ConformanceIt("should perform canary updates and phased rolling updates of template modifications", func() { ginkgo.By("Creating a new StatefulSet") - ss := e2esset.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels) + ss := e2estatefulset.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels) setHTTPProbe(ss) ss.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{ Type: appsv1.RollingUpdateStatefulSetStrategyType, @@ -316,12 +316,12 @@ var _ = SIGDescribe("StatefulSet", func() { } ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) framework.ExpectNoError(err) - e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) + e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) ss = waitForStatus(c, ss) currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision framework.ExpectEqual(currentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s", ss.Namespace, ss.Name, updateRevision, currentRevision)) - pods := e2esset.GetPodList(c, ss) + pods := e2estatefulset.GetPodList(c, ss) for i := range pods.Items { framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s revision %s is not equal to currentRevision %s", pods.Items[i].Namespace, @@ -412,9 +412,9 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By("Restoring Pods to the correct revision when they are deleted") deleteStatefulPodAtIndex(c, 0, ss) deleteStatefulPodAtIndex(c, 2, ss) - e2esset.WaitForRunningAndReady(c, 3, ss) + e2estatefulset.WaitForRunningAndReady(c, 3, ss) ss = getStatefulSet(c, ss.Namespace, ss.Name) - pods = e2esset.GetPodList(c, ss) + pods = e2estatefulset.GetPodList(c, ss) for i := range pods.Items { if i < int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) { framework.ExpectEqual(pods.Items[i].Spec.Containers[0].Image, oldImage, fmt.Sprintf("Pod %s/%s has image %s not equal to current image %s", @@ -494,19 +494,19 @@ var _ = SIGDescribe("StatefulSet", func() { // The legacy OnDelete strategy only exists for backward compatibility with pre-v1 APIs. 
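A sketch of the OnDelete setup used in the test below: the controller records a new update revision but never restarts pods on its own, so a pod only moves to the new revision once it is deleted and recreated (newOnDeleteSet is a hypothetical name):

```go
package e2esketch

import (
	appsv1 "k8s.io/api/apps/v1"
	e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
)

// newOnDeleteSet (hypothetical) builds a 3-replica StatefulSet whose pods are
// only replaced when deleted, mirroring the legacy pre-v1 behavior.
func newOnDeleteSet(ns, svcName string, labels map[string]string) *appsv1.StatefulSet {
	ss := e2estatefulset.NewStatefulSet("ss2", ns, svcName, 3, nil, nil, labels)
	ss.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{
		Type: appsv1.OnDeleteStatefulSetStrategyType,
	}
	return ss
}
```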
ginkgo.It("should implement legacy replacement when the update strategy is OnDelete", func() { ginkgo.By("Creating a new StatefulSet") - ss := e2esset.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels) + ss := e2estatefulset.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels) setHTTPProbe(ss) ss.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{ Type: appsv1.OnDeleteStatefulSetStrategyType, } ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) framework.ExpectNoError(err) - e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) + e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) ss = waitForStatus(c, ss) currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision framework.ExpectEqual(currentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s", ss.Namespace, ss.Name, updateRevision, currentRevision)) - pods := e2esset.GetPodList(c, ss) + pods := e2estatefulset.GetPodList(c, ss) for i := range pods.Items { framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s", pods.Items[i].Namespace, @@ -519,9 +519,9 @@ var _ = SIGDescribe("StatefulSet", func() { deleteStatefulPodAtIndex(c, 0, ss) deleteStatefulPodAtIndex(c, 1, ss) deleteStatefulPodAtIndex(c, 2, ss) - e2esset.WaitForRunningAndReady(c, 3, ss) + e2estatefulset.WaitForRunningAndReady(c, 3, ss) ss = getStatefulSet(c, ss.Namespace, ss.Name) - pods = e2esset.GetPodList(c, ss) + pods = e2estatefulset.GetPodList(c, ss) for i := range pods.Items { framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s", pods.Items[i].Namespace, @@ -548,9 +548,9 @@ var _ = SIGDescribe("StatefulSet", func() { deleteStatefulPodAtIndex(c, 0, ss) deleteStatefulPodAtIndex(c, 1, ss) deleteStatefulPodAtIndex(c, 2, ss) - e2esset.WaitForRunningAndReady(c, 3, ss) + e2estatefulset.WaitForRunningAndReady(c, 3, ss) ss = getStatefulSet(c, ss.Namespace, ss.Name) - pods = e2esset.GetPodList(c, ss) + pods = e2estatefulset.GetPodList(c, ss) for i := range pods.Items { framework.ExpectEqual(pods.Items[i].Spec.Containers[0].Image, newImage, fmt.Sprintf("Pod %s/%s has image %s not equal to new image %s", pods.Items[i].Namespace, @@ -579,24 +579,24 @@ var _ = SIGDescribe("StatefulSet", func() { framework.ExpectNoError(err) ginkgo.By("Creating stateful set " + ssName + " in namespace " + ns) - ss := e2esset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels) + ss := e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels) setHTTPProbe(ss) ss, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns) - e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) + e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) ginkgo.By("Confirming that stateful set scale up will halt with unhealthy stateful pod") breakHTTPProbe(c, ss) waitForRunningAndNotReady(c, *ss.Spec.Replicas, ss) - e2esset.WaitForStatusReadyReplicas(c, ss, 0) - e2esset.UpdateReplicas(c, ss, 3) + e2estatefulset.WaitForStatusReadyReplicas(c, ss, 0) + e2estatefulset.UpdateReplicas(c, ss, 3) confirmStatefulPodCount(c, 1, ss, 
10*time.Second, true) ginkgo.By("Scaling up stateful set " + ssName + " to 3 replicas and waiting until all of them will be running in namespace " + ns) restoreHTTPProbe(c, ss) - e2esset.WaitForRunningAndReady(c, 3, ss) + e2estatefulset.WaitForRunningAndReady(c, 3, ss) ginkgo.By("Verifying that stateful set " + ssName + " was scaled up in order") expectedOrder := []string{ssName + "-0", ssName + "-1", ssName + "-2"} @@ -622,14 +622,14 @@ var _ = SIGDescribe("StatefulSet", func() { framework.ExpectNoError(err) breakHTTPProbe(c, ss) - e2esset.WaitForStatusReadyReplicas(c, ss, 0) + e2estatefulset.WaitForStatusReadyReplicas(c, ss, 0) waitForRunningAndNotReady(c, 3, ss) - e2esset.UpdateReplicas(c, ss, 0) + e2estatefulset.UpdateReplicas(c, ss, 0) confirmStatefulPodCount(c, 3, ss, 10*time.Second, true) ginkgo.By("Scaling down stateful set " + ssName + " to 0 replicas and waiting until none of pods will run in namespace" + ns) restoreHTTPProbe(c, ss) - e2esset.Scale(c, ss, 0) + e2estatefulset.Scale(c, ss, 0) ginkgo.By("Verifying that stateful set " + ssName + " was scaled down in reverse order") expectedOrder = []string{ssName + "-2", ssName + "-1", ssName + "-0"} @@ -658,37 +658,37 @@ var _ = SIGDescribe("StatefulSet", func() { psLabels := klabels.Set(labels) ginkgo.By("Creating stateful set " + ssName + " in namespace " + ns) - ss := e2esset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels) + ss := e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels) ss.Spec.PodManagementPolicy = appsv1.ParallelPodManagement setHTTPProbe(ss) ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns) - e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) + e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) ginkgo.By("Confirming that stateful set scale up will not halt with unhealthy stateful pod") breakHTTPProbe(c, ss) waitForRunningAndNotReady(c, *ss.Spec.Replicas, ss) - e2esset.WaitForStatusReadyReplicas(c, ss, 0) - e2esset.UpdateReplicas(c, ss, 3) + e2estatefulset.WaitForStatusReadyReplicas(c, ss, 0) + e2estatefulset.UpdateReplicas(c, ss, 3) confirmStatefulPodCount(c, 3, ss, 10*time.Second, false) ginkgo.By("Scaling up stateful set " + ssName + " to 3 replicas and waiting until all of them will be running in namespace " + ns) restoreHTTPProbe(c, ss) - e2esset.WaitForRunningAndReady(c, 3, ss) + e2estatefulset.WaitForRunningAndReady(c, 3, ss) ginkgo.By("Scale down will not halt with unhealthy stateful pod") breakHTTPProbe(c, ss) - e2esset.WaitForStatusReadyReplicas(c, ss, 0) + e2estatefulset.WaitForStatusReadyReplicas(c, ss, 0) waitForRunningAndNotReady(c, 3, ss) - e2esset.UpdateReplicas(c, ss, 0) + e2estatefulset.UpdateReplicas(c, ss, 0) confirmStatefulPodCount(c, 0, ss, 10*time.Second, false) ginkgo.By("Scaling down stateful set " + ssName + " to 0 replicas and waiting until none of pods will run in namespace" + ns) restoreHTTPProbe(c, ss) - e2esset.Scale(c, ss, 0) - e2esset.WaitForStatusReplicas(c, ss, 0) + e2estatefulset.Scale(c, ss, 0) + e2estatefulset.WaitForStatusReplicas(c, ss, 0) }) /* @@ -724,7 +724,7 @@ var _ = SIGDescribe("StatefulSet", func() { framework.ExpectNoError(err) ginkgo.By("Creating statefulset with conflicting port in namespace " + f.Namespace.Name) - ss := e2esset.NewStatefulSet(ssName, f.Namespace.Name, headlessSvcName, 1, nil, nil, labels) + ss := 
e2estatefulset.NewStatefulSet(ssName, f.Namespace.Name, headlessSvcName, 1, nil, nil, labels) statefulPodContainer := &ss.Spec.Template.Spec.Containers[0] statefulPodContainer.Ports = append(statefulPodContainer.Ports, conflictingPort) ss.Spec.Template.Spec.NodeName = node.Name @@ -791,11 +791,11 @@ var _ = SIGDescribe("StatefulSet", func() { */ framework.ConformanceIt("should have a working scale subresource", func() { ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) - ss := e2esset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, labels) + ss := e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, labels) setHTTPProbe(ss) ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) framework.ExpectNoError(err) - e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) + e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) ss = waitForStatus(c, ss) ginkgo.By("getting scale subresource") @@ -836,7 +836,7 @@ var _ = SIGDescribe("StatefulSet", func() { framework.DumpDebugInfo(c, ns) } framework.Logf("Deleting all statefulset in ns %v", ns) - e2esset.DeleteAllStatefulSets(c, ns) + e2estatefulset.DeleteAllStatefulSets(c, ns) }) // Do not mark this as Conformance. @@ -907,8 +907,8 @@ func (c *clusterAppTester) run() { default: if restartCluster { ginkgo.By("Restarting stateful set " + ss.Name) - e2esset.Restart(c.client, ss) - e2esset.WaitForRunningAndReady(c.client, *ss.Spec.Replicas, ss) + e2estatefulset.Restart(c.client, ss) + e2estatefulset.WaitForRunningAndReady(c.client, *ss.Spec.Replicas, ss) } } @@ -928,7 +928,7 @@ func (z *zookeeperTester) name() string { } func (z *zookeeperTester) deploy(ns string) *appsv1.StatefulSet { - z.ss = e2esset.CreateStatefulSet(z.client, zookeeperManifestPath, ns) + z.ss = e2estatefulset.CreateStatefulSet(z.client, zookeeperManifestPath, ns) return z.ss } @@ -966,7 +966,7 @@ func (m *mysqlGaleraTester) mysqlExec(cmd, ns, podName string) string { } func (m *mysqlGaleraTester) deploy(ns string) *appsv1.StatefulSet { - m.ss = e2esset.CreateStatefulSet(m.client, mysqlGaleraManifestPath, ns) + m.ss = e2estatefulset.CreateStatefulSet(m.client, mysqlGaleraManifestPath, ns) framework.Logf("Deployed statefulset %v, initializing database", m.ss.Name) for _, cmd := range []string{ @@ -1006,7 +1006,7 @@ func (m *redisTester) redisExec(cmd, ns, podName string) string { } func (m *redisTester) deploy(ns string) *appsv1.StatefulSet { - m.ss = e2esset.CreateStatefulSet(m.client, redisManifestPath, ns) + m.ss = e2estatefulset.CreateStatefulSet(m.client, redisManifestPath, ns) return m.ss } @@ -1037,7 +1037,7 @@ func (c *cockroachDBTester) cockroachDBExec(cmd, ns, podName string) string { } func (c *cockroachDBTester) deploy(ns string) *appsv1.StatefulSet { - c.ss = e2esset.CreateStatefulSet(c.client, cockroachDBManifestPath, ns) + c.ss = e2estatefulset.CreateStatefulSet(c.client, cockroachDBManifestPath, ns) framework.Logf("Deployed statefulset %v, initializing database", c.ss.Name) for _, cmd := range []string{ "CREATE DATABASE IF NOT EXISTS foo;", @@ -1088,12 +1088,12 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) { setHTTPProbe(ss) ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) framework.ExpectNoError(err) - e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) + e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) ss = waitForStatus(c, ss) currentRevision, updateRevision := 
ss.Status.CurrentRevision, ss.Status.UpdateRevision framework.ExpectEqual(currentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s", ss.Namespace, ss.Name, updateRevision, currentRevision)) - pods := e2esset.GetPodList(c, ss) + pods := e2estatefulset.GetPodList(c, ss) for i := range pods.Items { framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s", pods.Items[i].Namespace, @@ -1101,7 +1101,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) { pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision)) } - e2esset.SortStatefulPods(pods) + e2estatefulset.SortStatefulPods(pods) err = breakPodHTTPProbe(ss, &pods.Items[1]) framework.ExpectNoError(err) ss, pods = waitForPodNotReady(c, ss, pods.Items[1].Name) @@ -1121,11 +1121,11 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) { framework.ExpectNotEqual(currentRevision, updateRevision, "Current revision should not equal update revision during rolling update") ginkgo.By("Updating Pods in reverse ordinal order") - pods = e2esset.GetPodList(c, ss) - e2esset.SortStatefulPods(pods) + pods = e2estatefulset.GetPodList(c, ss) + e2estatefulset.SortStatefulPods(pods) err = restorePodHTTPProbe(ss, &pods.Items[1]) framework.ExpectNoError(err) - ss, pods = e2esset.WaitForPodReady(c, ss, pods.Items[1].Name) + ss, pods = e2estatefulset.WaitForPodReady(c, ss, pods.Items[1].Name) ss, pods = waitForRollingUpdate(c, ss) framework.ExpectEqual(ss.Status.CurrentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion", ss.Namespace, @@ -1161,10 +1161,10 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) { framework.ExpectNotEqual(currentRevision, updateRevision, "Current revision should not equal update revision during roll back") ginkgo.By("Rolling back update in reverse ordinal order") - pods = e2esset.GetPodList(c, ss) - e2esset.SortStatefulPods(pods) + pods = e2estatefulset.GetPodList(c, ss) + e2estatefulset.SortStatefulPods(pods) restorePodHTTPProbe(ss, &pods.Items[1]) - ss, pods = e2esset.WaitForPodReady(c, ss, pods.Items[1].Name) + ss, pods = e2estatefulset.WaitForPodReady(c, ss, pods.Items[1].Name) ss, pods = waitForRollingUpdate(c, ss) framework.ExpectEqual(ss.Status.CurrentRevision, priorRevision, fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal prior revision %s on rollback completion", ss.Namespace, @@ -1192,7 +1192,7 @@ func confirmStatefulPodCount(c clientset.Interface, count int, ss *appsv1.Statef start := time.Now() deadline := start.Add(timeout) for t := time.Now(); t.Before(deadline); t = time.Now() { - podList := e2esset.GetPodList(c, ss) + podList := e2estatefulset.GetPodList(c, ss) statefulPodCount := len(podList.Items) if statefulPodCount != count { e2epod.LogPodStates(podList.Items) @@ -1224,7 +1224,7 @@ func breakHTTPProbe(c clientset.Interface, ss *appsv1.StatefulSet) error { } // Ignore 'mv' errors to make this idempotent. cmd := fmt.Sprintf("mv -v /usr/local/apache2/htdocs%v /tmp/ || true", path) - return e2esset.ExecInStatefulPods(c, ss, cmd) + return e2estatefulset.ExecInStatefulPods(c, ss, cmd) } // breakPodHTTPProbe breaks the readiness probe for Nginx StatefulSet containers in one pod. 
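The probe helpers renamed in these hunks share one trick: the readiness probe serves a file under the Apache htdocs directory, so moving that file away or back via ExecInStatefulPods toggles readiness across the whole set. A combined sketch (toggleProbe is a hypothetical name; the paths and the idempotent "|| true" are the ones used in this file):

```go
package e2esketch

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
)

// toggleProbe (hypothetical) breaks or restores the HTTP readiness probe by
// moving the served file out of, or back into, the htdocs directory.
// "|| true" keeps the command idempotent if it is retried.
func toggleProbe(c clientset.Interface, ss *appsv1.StatefulSet, path string, broken bool) error {
	cmd := fmt.Sprintf("mv -v /tmp%v /usr/local/apache2/htdocs/ || true", path)
	if broken {
		cmd = fmt.Sprintf("mv -v /usr/local/apache2/htdocs%v /tmp/ || true", path)
	}
	return e2estatefulset.ExecInStatefulPods(c, ss, cmd)
}
```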
@@ -1248,7 +1248,7 @@ func restoreHTTPProbe(c clientset.Interface, ss *appsv1.StatefulSet) error { } // Ignore 'mv' errors to make this idempotent. cmd := fmt.Sprintf("mv -v /tmp%v /usr/local/apache2/htdocs/ || true", path) - return e2esset.ExecInStatefulPods(c, ss, cmd) + return e2estatefulset.ExecInStatefulPods(c, ss, cmd) } // restorePodHTTPProbe restores the readiness probe for Nginx StatefulSet containers in pod. diff --git a/test/e2e/apps/wait.go b/test/e2e/apps/wait.go index 7e53e48de647..d3e8d0ed480a 100644 --- a/test/e2e/apps/wait.go +++ b/test/e2e/apps/wait.go @@ -23,7 +23,7 @@ import ( podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/test/e2e/framework" - e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset" + e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset" ) // waitForPartitionedRollingUpdate waits for all Pods in set to exist and have the correct revision. set must have @@ -43,7 +43,7 @@ func waitForPartitionedRollingUpdate(c clientset.Interface, set *appsv1.Stateful set.Namespace, set.Name) } - e2esset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) { + e2estatefulset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) { set = set2 pods = pods2 partition := int(*set.Spec.UpdateStrategy.RollingUpdate.Partition) @@ -55,7 +55,7 @@ func waitForPartitionedRollingUpdate(c clientset.Interface, set *appsv1.Stateful set.Namespace, set.Name, ) - e2esset.SortStatefulPods(pods) + e2estatefulset.SortStatefulPods(pods) for i := range pods.Items { if pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel] != set.Status.UpdateRevision { framework.Logf("Waiting for Pod %s/%s to have revision %s update revision %s", @@ -85,7 +85,7 @@ func waitForPartitionedRollingUpdate(c clientset.Interface, set *appsv1.Stateful // waitForStatus waits for the StatefulSetStatus's ObservedGeneration to be greater than or equal to set's Generation. // The returned StatefulSet contains such a StatefulSetStatus func waitForStatus(c clientset.Interface, set *appsv1.StatefulSet) *appsv1.StatefulSet { - e2esset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods *v1.PodList) (bool, error) { + e2estatefulset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods *v1.PodList) (bool, error) { if set2.Status.ObservedGeneration >= set.Generation { set = set2 return true, nil @@ -98,7 +98,7 @@ func waitForStatus(c clientset.Interface, set *appsv1.StatefulSet) *appsv1.State // waitForPodNotReady waits for the Pod named podName in set to exist and to not have a Ready condition. 
func waitForPodNotReady(c clientset.Interface, set *appsv1.StatefulSet, podName string) (*appsv1.StatefulSet, *v1.PodList) { var pods *v1.PodList - e2esset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) { + e2estatefulset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) { set = set2 pods = pods2 for i := range pods.Items { @@ -121,7 +121,7 @@ func waitForRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*apps set.Name, set.Spec.UpdateStrategy.Type) } - e2esset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) { + e2estatefulset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) { set = set2 pods = pods2 if len(pods.Items) < int(*set.Spec.Replicas) { @@ -132,7 +132,7 @@ func waitForRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*apps set.Namespace, set.Name, ) - e2esset.SortStatefulPods(pods) + e2estatefulset.SortStatefulPods(pods) for i := range pods.Items { if pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel] != set.Status.UpdateRevision { framework.Logf("Waiting for Pod %s/%s to have revision %s update revision %s", @@ -151,5 +151,5 @@ func waitForRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*apps // waitForRunningAndNotReady waits for numStatefulPods in ss to be Running and not Ready. func waitForRunningAndNotReady(c clientset.Interface, numStatefulPods int32, ss *appsv1.StatefulSet) { - e2esset.WaitForRunning(c, numStatefulPods, 0, ss) + e2estatefulset.WaitForRunning(c, numStatefulPods, 0, ss) } diff --git a/test/e2e/auth/audit.go b/test/e2e/auth/audit.go index 3fb88cee86d4..94ba76e185d4 100644 --- a/test/e2e/auth/audit.go +++ b/test/e2e/auth/audit.go @@ -36,8 +36,8 @@ import ( clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/auth" - e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment" + e2eauth "k8s.io/kubernetes/test/e2e/framework/auth" + e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" @@ -204,7 +204,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() { ginkgo.It("should audit API calls to create, get, update, patch, delete, list, watch deployments.", func() { podLabels := map[string]string{"name": "audit-deployment-pod"} - d := e2edeploy.NewDeployment("audit-deployment", int32(1), podLabels, "agnhost", imageutils.GetE2EImage(imageutils.Agnhost), appsv1.RecreateDeploymentStrategyType) + d := e2edeployment.NewDeployment("audit-deployment", int32(1), podLabels, "agnhost", imageutils.GetE2EImage(imageutils.Agnhost), appsv1.RecreateDeploymentStrategyType) _, err := f.ClientSet.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create audit-deployment") @@ -656,7 +656,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() { // test authorizer annotations, RBAC is required. 
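The guard renamed in the test below is a skip rather than a failure: audit authorization annotations only appear when RBAC is the active authorizer. A sketch of the pattern under the new e2eauth alias (skipWithoutRBAC is a hypothetical name):

```go
package e2esketch

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
)

// skipWithoutRBAC (hypothetical) skips the current spec on clusters where the
// RBAC authorizer is not enabled, instead of letting the test fail.
func skipWithoutRBAC(f *framework.Framework) {
	if !e2eauth.IsRBACEnabled(f.ClientSet.RbacV1()) {
		e2eskipper.Skipf("RBAC not enabled.")
	}
}
```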
ginkgo.It("should audit API calls to get a pod with unauthorized user.", func() { - if !auth.IsRBACEnabled(f.ClientSet.RbacV1()) { + if !e2eauth.IsRBACEnabled(f.ClientSet.RbacV1()) { e2eskipper.Skipf("RBAC not enabled.") } diff --git a/test/e2e/auth/audit_dynamic.go b/test/e2e/auth/audit_dynamic.go index 2fc99b61eaca..b9fa0ebdedfd 100644 --- a/test/e2e/auth/audit_dynamic.go +++ b/test/e2e/auth/audit_dynamic.go @@ -36,7 +36,7 @@ import ( clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/auth" + e2eauth "k8s.io/kubernetes/test/e2e/framework/auth" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" @@ -347,7 +347,7 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() { }, } - if auth.IsRBACEnabled(f.ClientSet.RbacV1()) { + if e2eauth.IsRBACEnabled(f.ClientSet.RbacV1()) { testCases = append(testCases, annotationTestCases...) } expectedEvents := []utils.AuditEvent{} diff --git a/test/e2e/auth/pod_security_policy.go b/test/e2e/auth/pod_security_policy.go index 3d87baa5593d..202e923f65ae 100644 --- a/test/e2e/auth/pod_security_policy.go +++ b/test/e2e/auth/pod_security_policy.go @@ -33,7 +33,7 @@ import ( "k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp" psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util" "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/auth" + e2eauth "k8s.io/kubernetes/test/e2e/framework/auth" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" imageutils "k8s.io/kubernetes/test/utils/image" @@ -56,7 +56,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() { if !framework.IsPodSecurityPolicyEnabled(f.ClientSet) { e2eskipper.Skipf("PodSecurityPolicy not enabled") } - if !auth.IsRBACEnabled(f.ClientSet.RbacV1()) { + if !e2eauth.IsRBACEnabled(f.ClientSet.RbacV1()) { e2eskipper.Skipf("RBAC not enabled") } ns = f.Namespace.Name @@ -72,7 +72,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() { framework.ExpectNoError(err) ginkgo.By("Binding the edit role to the default SA") - err = auth.BindClusterRole(f.ClientSet.RbacV1(), "edit", ns, + err = e2eauth.BindClusterRole(f.ClientSet.RbacV1(), "edit", ns, rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: ns, Name: "default"}) framework.ExpectNoError(err) }) @@ -233,14 +233,14 @@ func createAndBindPSP(f *framework.Framework, pspTemplate *policyv1beta1.PodSecu framework.ExpectNoError(err, "Failed to create PSP role") // Bind the role to the namespace. 
- err = auth.BindRoleInNamespace(f.ClientSet.RbacV1(), name, ns, rbacv1.Subject{ + err = e2eauth.BindRoleInNamespace(f.ClientSet.RbacV1(), name, ns, rbacv1.Subject{ Kind: rbacv1.ServiceAccountKind, Namespace: ns, Name: "default", }) framework.ExpectNoError(err) - framework.ExpectNoError(auth.WaitForNamedAuthorizationUpdate(f.ClientSet.AuthorizationV1(), + framework.ExpectNoError(e2eauth.WaitForNamedAuthorizationUpdate(f.ClientSet.AuthorizationV1(), serviceaccount.MakeUsername(ns, "default"), ns, "use", name, schema.GroupResource{Group: "policy", Resource: "podsecuritypolicies"}, true)) diff --git a/test/e2e/cloud/gcp/cluster_upgrade.go b/test/e2e/cloud/gcp/cluster_upgrade.go index 3619ff0733c1..a12c8c986ee3 100644 --- a/test/e2e/cloud/gcp/cluster_upgrade.go +++ b/test/e2e/cloud/gcp/cluster_upgrade.go @@ -35,8 +35,8 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/chaosmonkey" "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/config" - "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper" + e2econfig "k8s.io/kubernetes/test/e2e/framework/config" + e2eginkgowrapper "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" "k8s.io/kubernetes/test/e2e/upgrades" @@ -48,8 +48,8 @@ import ( ) var ( - upgradeTarget = config.Flags.String("upgrade-target", "ci/latest", "Version to upgrade to (e.g. 'release/stable', 'release/latest', 'ci/latest', '0.19.1', '0.19.1-669-gabac8c8') if doing an upgrade test.") - upgradeImage = config.Flags.String("upgrade-image", "", "Image to upgrade to (e.g. 'container_vm' or 'gci') if doing an upgrade test.") + upgradeTarget = e2econfig.Flags.String("upgrade-target", "ci/latest", "Version to upgrade to (e.g. 'release/stable', 'release/latest', 'ci/latest', '0.19.1', '0.19.1-669-gabac8c8') if doing an upgrade test.") + upgradeImage = e2econfig.Flags.String("upgrade-image", "", "Image to upgrade to (e.g. 
'container_vm' or 'gci') if doing an upgrade test.") ) var upgradeTests = []upgrades.Test{ @@ -408,7 +408,7 @@ func finalizeUpgradeTest(start time.Time, tc *junit.TestCase) { } switch r := r.(type) { - case ginkgowrapper.FailurePanic: + case e2eginkgowrapper.FailurePanic: tc.Failures = []*junit.Failure{ { Message: r.Message, diff --git a/test/e2e/common/volumes.go b/test/e2e/common/volumes.go index 192e5874a18b..514538894cc6 100644 --- a/test/e2e/common/volumes.go +++ b/test/e2e/common/volumes.go @@ -50,7 +50,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" - "k8s.io/kubernetes/test/e2e/framework/volume" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" "github.com/onsi/ginkgo" ) @@ -77,10 +77,10 @@ var _ = ginkgo.Describe("[sig-storage] GCP Volumes", func() { //////////////////////////////////////////////////////////////////////// ginkgo.Describe("NFSv4", func() { ginkgo.It("should be mountable for NFSv4", func() { - config, _, serverIP := volume.NewNFSServer(c, namespace.Name, []string{}) - defer volume.TestServerCleanup(f, config) + config, _, serverIP := e2evolume.NewNFSServer(c, namespace.Name, []string{}) + defer e2evolume.TestServerCleanup(f, config) - tests := []volume.Test{ + tests := []e2evolume.Test{ { Volume: v1.VolumeSource{ NFS: &v1.NFSVolumeSource{ @@ -95,16 +95,16 @@ var _ = ginkgo.Describe("[sig-storage] GCP Volumes", func() { } // Must match content of test/images/volumes-tester/nfs/index.html - volume.TestVolumeClient(f, config, nil, "" /* fsType */, tests) + e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests) }) }) ginkgo.Describe("NFSv3", func() { ginkgo.It("should be mountable for NFSv3", func() { - config, _, serverIP := volume.NewNFSServer(c, namespace.Name, []string{}) - defer volume.TestServerCleanup(f, config) + config, _, serverIP := e2evolume.NewNFSServer(c, namespace.Name, []string{}) + defer e2evolume.TestServerCleanup(f, config) - tests := []volume.Test{ + tests := []e2evolume.Test{ { Volume: v1.VolumeSource{ NFS: &v1.NFSVolumeSource{ @@ -118,7 +118,7 @@ var _ = ginkgo.Describe("[sig-storage] GCP Volumes", func() { }, } // Must match content of test/images/volume-tester/nfs/index.html - volume.TestVolumeClient(f, config, nil, "" /* fsType */, tests) + e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests) }) }) @@ -128,15 +128,15 @@ var _ = ginkgo.Describe("[sig-storage] GCP Volumes", func() { ginkgo.Describe("GlusterFS", func() { ginkgo.It("should be mountable", func() { // create gluster server and endpoints - config, _, _ := volume.NewGlusterfsServer(c, namespace.Name) + config, _, _ := e2evolume.NewGlusterfsServer(c, namespace.Name) name := config.Prefix + "-server" defer func() { - volume.TestServerCleanup(f, config) + e2evolume.TestServerCleanup(f, config) err := c.CoreV1().Endpoints(namespace.Name).Delete(context.TODO(), name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "defer: Gluster delete endpoints failed") }() - tests := []volume.Test{ + tests := []e2evolume.Test{ { Volume: v1.VolumeSource{ Glusterfs: &v1.GlusterfsVolumeSource{ @@ -151,7 +151,7 @@ var _ = ginkgo.Describe("[sig-storage] GCP Volumes", func() { ExpectedContent: "Hello from GlusterFS!", }, } - volume.TestVolumeClient(f, config, nil, "" /* fsType */, tests) + e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests) }) }) }) diff --git a/test/e2e/framework/ingress/ingress_utils.go b/test/e2e/framework/ingress/ingress_utils.go index 
23ae490f909c..3bdec5a96a22 100644 --- a/test/e2e/framework/ingress/ingress_utils.go +++ b/test/e2e/framework/ingress/ingress_utils.go @@ -57,7 +57,7 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/kubernetes/test/e2e/framework" e2eservice "k8s.io/kubernetes/test/e2e/framework/service" - "k8s.io/kubernetes/test/e2e/framework/testfiles" + e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" @@ -445,10 +445,10 @@ func NewIngressTestJig(c clientset.Interface) *TestJig { func (j *TestJig) CreateIngress(manifestPath, ns string, ingAnnotations map[string]string, svcAnnotations map[string]string) { var err error read := func(file string) string { - return string(testfiles.ReadOrDie(filepath.Join(manifestPath, file))) + return string(e2etestfiles.ReadOrDie(filepath.Join(manifestPath, file))) } exists := func(file string) bool { - return testfiles.Exists(filepath.Join(manifestPath, file)) + return e2etestfiles.Exists(filepath.Join(manifestPath, file)) } j.Logger.Infof("creating replication controller") @@ -499,7 +499,7 @@ func marshalToYaml(obj runtime.Object, gv schema.GroupVersion) ([]byte, error) { // ingressFromManifest reads a .json/yaml file and returns the ingress in it. func ingressFromManifest(fileName string) (*networkingv1beta1.Ingress, error) { var ing networkingv1beta1.Ingress - data, err := testfiles.Read(fileName) + data, err := e2etestfiles.Read(fileName) if err != nil { return nil, err } @@ -1008,7 +1008,7 @@ func (cont *NginxIngressController) Init() { framework.ExpectNoError(err) read := func(file string) string { - return string(testfiles.ReadOrDie(filepath.Join(IngressManifestPath, "nginx", file))) + return string(e2etestfiles.ReadOrDie(filepath.Join(IngressManifestPath, "nginx", file))) } framework.Logf("initializing nginx ingress controller") framework.RunKubectlOrDieInput(cont.Ns, read("rc.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", cont.Ns)) diff --git a/test/e2e/framework/log.go b/test/e2e/framework/log.go index fbc5dd02e18a..3d8ca29e14d9 100644 --- a/test/e2e/framework/log.go +++ b/test/e2e/framework/log.go @@ -26,7 +26,7 @@ import ( "github.com/onsi/ginkgo" // TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245) - "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper" + e2eginkgowrapper "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper" ) func nowStamp() string { @@ -53,7 +53,7 @@ func FailfWithOffset(offset int, format string, args ...interface{}) { msg := fmt.Sprintf(format, args...) 
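The ingress jig above resolves its manifests through the testfiles helpers, which read fixtures from the e2e test data tree instead of the process working directory. A small sketch of the two access styles these hunks use: ReadOrDie for fixtures that must exist (failing the test otherwise) and Read when the caller handles the error itself; manifestPath and the file names are hypothetical:

    package e2esketch

    import (
    	"path/filepath"

    	e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
    )

    func loadManifests(manifestPath string) ([]byte, error) {
    	// Must-exist fixture: ReadOrDie aborts the test if the file is absent.
    	rc := e2etestfiles.ReadOrDie(filepath.Join(manifestPath, "rc.yaml"))
    	_ = rc

    	// Caller-handled variant, as ingressFromManifest uses it.
    	return e2etestfiles.Read(filepath.Join(manifestPath, "ing.yaml"))
    }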
skip := offset + 1 log("FAIL", "%s\n\nFull Stack Trace\n%s", msg, PrunedStack(skip)) - ginkgowrapper.Fail(nowStamp()+": "+msg, skip) + e2eginkgowrapper.Fail(nowStamp()+": "+msg, skip) } // Fail is a replacement for ginkgo.Fail which logs the problem as it occurs @@ -64,7 +64,7 @@ func Fail(msg string, callerSkip ...int) { skip += callerSkip[0] } log("FAIL", "%s\n\nFull Stack Trace\n%s", msg, PrunedStack(skip)) - ginkgowrapper.Fail(nowStamp()+": "+msg, skip) + e2eginkgowrapper.Fail(nowStamp()+": "+msg, skip) } var codeFilterRE = regexp.MustCompile(`/github.com/onsi/ginkgo/`) diff --git a/test/e2e/framework/log/logger.go b/test/e2e/framework/log/logger.go index 786d700b8a75..c7d88172b6d4 100644 --- a/test/e2e/framework/log/logger.go +++ b/test/e2e/framework/log/logger.go @@ -24,7 +24,7 @@ import ( "github.com/onsi/ginkgo" - "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper" + e2eginkgowrapper "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper" ) func nowStamp() string { @@ -50,5 +50,5 @@ func Failf(format string, args ...interface{}) { func FailfWithOffset(offset int, format string, args ...interface{}) { msg := fmt.Sprintf(format, args...) log("FAIL", msg) - ginkgowrapper.Fail(nowStamp()+": "+msg, 1+offset) + e2eginkgowrapper.Fail(nowStamp()+": "+msg, 1+offset) } diff --git a/test/e2e/framework/psp.go b/test/e2e/framework/psp.go index 40a65502bfd0..1f2a9b7326da 100644 --- a/test/e2e/framework/psp.go +++ b/test/e2e/framework/psp.go @@ -33,7 +33,7 @@ import ( "github.com/onsi/ginkgo" // TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245) - "k8s.io/kubernetes/test/e2e/framework/auth" + e2eauth "k8s.io/kubernetes/test/e2e/framework/auth" ) const ( @@ -128,7 +128,7 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string ExpectNoError(err, "Failed to create PSP %s", podSecurityPolicyPrivileged) } - if auth.IsRBACEnabled(kubeClient.RbacV1()) { + if e2eauth.IsRBACEnabled(kubeClient.RbacV1()) { // Create the Role to bind it to the namespace. 
_, err = kubeClient.RbacV1().ClusterRoles().Create(context.TODO(), &rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: podSecurityPolicyPrivileged}, @@ -145,10 +145,10 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string } }) - if auth.IsRBACEnabled(kubeClient.RbacV1()) { + if e2eauth.IsRBACEnabled(kubeClient.RbacV1()) { ginkgo.By(fmt.Sprintf("Binding the %s PodSecurityPolicy to the default service account in %s", podSecurityPolicyPrivileged, namespace)) - err := auth.BindClusterRoleInNamespace(kubeClient.RbacV1(), + err := e2eauth.BindClusterRoleInNamespace(kubeClient.RbacV1(), podSecurityPolicyPrivileged, namespace, rbacv1.Subject{ @@ -157,7 +157,7 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string Name: "default", }) ExpectNoError(err) - ExpectNoError(auth.WaitForNamedAuthorizationUpdate(kubeClient.AuthorizationV1(), + ExpectNoError(e2eauth.WaitForNamedAuthorizationUpdate(kubeClient.AuthorizationV1(), serviceaccount.MakeUsername(namespace, "default"), namespace, "use", podSecurityPolicyPrivileged, schema.GroupResource{Group: "extensions", Resource: "podsecuritypolicies"}, true)) } diff --git a/test/e2e/instrumentation/logging/generic_soak.go b/test/e2e/instrumentation/logging/generic_soak.go index 501a7e21ac87..ec54ec7f7683 100644 --- a/test/e2e/instrumentation/logging/generic_soak.go +++ b/test/e2e/instrumentation/logging/generic_soak.go @@ -25,7 +25,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/config" + e2econfig "k8s.io/kubernetes/test/e2e/framework/config" e2enode "k8s.io/kubernetes/test/e2e/framework/node" instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common" imageutils "k8s.io/kubernetes/test/utils/image" @@ -37,7 +37,7 @@ var loggingSoak struct { Scale int `default:"1" usage:"number of waves of pods"` TimeBetweenWaves time.Duration `default:"5000ms" usage:"time to wait before dumping the next wave of pods"` } -var _ = config.AddOptions(&loggingSoak, "instrumentation.logging.soak") +var _ = e2econfig.AddOptions(&loggingSoak, "instrumentation.logging.soak") var _ = instrumentation.SIGDescribe("Logging soak [Performance] [Slow] [Disruptive]", func() { diff --git a/test/e2e/instrumentation/monitoring/accelerator.go b/test/e2e/instrumentation/monitoring/accelerator.go index 11d65301e79d..90047e46ea1d 100644 --- a/test/e2e/instrumentation/monitoring/accelerator.go +++ b/test/e2e/instrumentation/monitoring/accelerator.go @@ -26,7 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/gpu" + e2egpu "k8s.io/kubernetes/test/e2e/framework/gpu" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common" "k8s.io/kubernetes/test/e2e/scheduling" @@ -93,7 +93,7 @@ func testStackdriverAcceleratorMonitoring(f *framework.Framework) { Args: []string{"nvidia-smi && sleep infinity"}, Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ - gpu.NVIDIAGPUResourceName: *resource.NewQuantity(1, resource.DecimalSI), + e2egpu.NVIDIAGPUResourceName: *resource.NewQuantity(1, resource.DecimalSI), }, }, }, diff --git a/test/e2e/instrumentation/monitoring/metrics_grabber.go b/test/e2e/instrumentation/monitoring/metrics_grabber.go index 89b0bd6299dc..abb337fa852a 100644 --- a/test/e2e/instrumentation/monitoring/metrics_grabber.go +++ 
b/test/e2e/instrumentation/monitoring/metrics_grabber.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/metrics" + e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" e2enode "k8s.io/kubernetes/test/e2e/framework/node" instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common" @@ -34,13 +34,13 @@ import ( var _ = instrumentation.SIGDescribe("MetricsGrabber", func() { f := framework.NewDefaultFramework("metrics-grabber") var c, ec clientset.Interface - var grabber *metrics.Grabber + var grabber *e2emetrics.Grabber gin.BeforeEach(func() { var err error c = f.ClientSet ec = f.KubemarkExternalClusterClientSet framework.ExpectNoError(err) - grabber, err = metrics.NewMetricsGrabber(c, ec, true, true, true, true, true) + grabber, err = e2emetrics.NewMetricsGrabber(c, ec, true, true, true, true, true) framework.ExpectNoError(err) }) diff --git a/test/e2e/kubectl/kubectl.go b/test/e2e/kubectl/kubectl.go index 66c8b6e9b9b4..40896fa36dd4 100644 --- a/test/e2e/kubectl/kubectl.go +++ b/test/e2e/kubectl/kubectl.go @@ -62,12 +62,12 @@ import ( "k8s.io/kubernetes/pkg/controller" commonutils "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/auth" + e2eauth "k8s.io/kubernetes/test/e2e/framework/auth" e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints" e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eservice "k8s.io/kubernetes/test/e2e/framework/service" - "k8s.io/kubernetes/test/e2e/framework/testfiles" + e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles" "k8s.io/kubernetes/test/e2e/scheduling" testutils "k8s.io/kubernetes/test/utils" "k8s.io/kubernetes/test/utils/crd" @@ -178,7 +178,7 @@ func cleanupKubectlInputs(fileContents string, ns string, selectors ...string) { } func readTestFileOrDie(file string) []byte { - return testfiles.ReadOrDie(path.Join(kubeCtlManifestPath, file)) + return e2etestfiles.ReadOrDie(path.Join(kubeCtlManifestPath, file)) } func runKubectlRetryOrDie(ns string, args ...string) string { @@ -270,7 +270,7 @@ var _ = SIGDescribe("Kubectl client", func() { var nautilus string ginkgo.BeforeEach(func() { updateDemoRoot := "test/fixtures/doc-yaml/user-guide/update-demo" - nautilus = commonutils.SubstituteImageName(string(testfiles.ReadOrDie(filepath.Join(updateDemoRoot, "nautilus-rc.yaml.in")))) + nautilus = commonutils.SubstituteImageName(string(e2etestfiles.ReadOrDie(filepath.Join(updateDemoRoot, "nautilus-rc.yaml.in")))) }) /* Release : v1.9 @@ -318,7 +318,7 @@ var _ = SIGDescribe("Kubectl client", func() { "agnhost-master-deployment.yaml.in", "agnhost-slave-deployment.yaml.in", } { - contents := commonutils.SubstituteImageName(string(testfiles.ReadOrDie(filepath.Join(guestbookRoot, gbAppFile)))) + contents := commonutils.SubstituteImageName(string(e2etestfiles.ReadOrDie(filepath.Join(guestbookRoot, gbAppFile)))) run(contents) } } @@ -591,11 +591,11 @@ var _ = SIGDescribe("Kubectl client", func() { ginkgo.It("should handle in-cluster config", func() { ginkgo.By("adding rbac permissions") // grant the view permission widely to allow inspection of the `invalid` namespace and the default namespace - err := auth.BindClusterRole(f.ClientSet.RbacV1(), "view", f.Namespace.Name, + err := e2eauth.BindClusterRole(f.ClientSet.RbacV1(), "view", f.Namespace.Name, rbacv1.Subject{Kind: 
rbacv1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"}) framework.ExpectNoError(err) - err = auth.WaitForAuthorizationUpdate(f.ClientSet.AuthorizationV1(), + err = e2eauth.WaitForAuthorizationUpdate(f.ClientSet.AuthorizationV1(), serviceaccount.MakeUsername(f.Namespace.Name, "default"), f.Namespace.Name, "list", schema.GroupResource{Resource: "pods"}, true) framework.ExpectNoError(err) diff --git a/test/e2e/manifest/manifest.go b/test/e2e/manifest/manifest.go index 5edeaed53640..53834beab561 100644 --- a/test/e2e/manifest/manifest.go +++ b/test/e2e/manifest/manifest.go @@ -24,13 +24,13 @@ import ( "k8s.io/apimachinery/pkg/runtime" utilyaml "k8s.io/apimachinery/pkg/util/yaml" scheme "k8s.io/client-go/kubernetes/scheme" - "k8s.io/kubernetes/test/e2e/framework/testfiles" + e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles" ) // PodFromManifest reads a .json/yaml file and returns the pod in it. func PodFromManifest(filename string) (*v1.Pod, error) { var pod v1.Pod - data, err := testfiles.Read(filename) + data, err := e2etestfiles.Read(filename) if err != nil { return nil, err } @@ -48,7 +48,7 @@ func PodFromManifest(filename string) (*v1.Pod, error) { // RcFromManifest reads a .json/yaml file and returns the rc in it. func RcFromManifest(fileName string) (*v1.ReplicationController, error) { var controller v1.ReplicationController - data, err := testfiles.Read(fileName) + data, err := e2etestfiles.Read(fileName) if err != nil { return nil, err } @@ -66,7 +66,7 @@ func RcFromManifest(fileName string) (*v1.ReplicationController, error) { // SvcFromManifest reads a .json/yaml file and returns the service in it. func SvcFromManifest(fileName string) (*v1.Service, error) { var svc v1.Service - data, err := testfiles.Read(fileName) + data, err := e2etestfiles.Read(fileName) if err != nil { return nil, err } @@ -84,7 +84,7 @@ func SvcFromManifest(fileName string) (*v1.Service, error) { // StatefulSetFromManifest returns a StatefulSet from a manifest stored in fileName in the Namespace indicated by ns. func StatefulSetFromManifest(fileName, ns string) (*appsv1.StatefulSet, error) { var ss appsv1.StatefulSet - data, err := testfiles.Read(fileName) + data, err := e2etestfiles.Read(fileName) if err != nil { return nil, err } @@ -108,7 +108,7 @@ func StatefulSetFromManifest(fileName, ns string) (*appsv1.StatefulSet, error) { // DaemonSetFromManifest returns a DaemonSet from a manifest stored in fileName in the Namespace indicated by ns. func DaemonSetFromManifest(fileName, ns string) (*appsv1.DaemonSet, error) { var ds appsv1.DaemonSet - data, err := testfiles.Read(fileName) + data, err := e2etestfiles.Read(fileName) if err != nil { return nil, err } @@ -128,7 +128,7 @@ func DaemonSetFromManifest(fileName, ns string) (*appsv1.DaemonSet, error) { // RoleFromManifest returns a Role from a manifest stored in fileName in the Namespace indicated by ns. 
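The manifest helpers in this file all follow the same read-then-decode shape, but only the e2etestfiles.Read half is inside the hunk context. A sketch of one plausible whole, using the utilyaml import the file already carries; the YAML-to-JSON decode step is an assumption, not a quote of the elided code:

    package e2esketch

    import (
    	"encoding/json"

    	v1 "k8s.io/api/core/v1"
    	utilyaml "k8s.io/apimachinery/pkg/util/yaml"
    	e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
    )

    // svcFromManifestSketch mirrors the visible half of SvcFromManifest: read
    // the fixture, then decode it into a typed object.
    func svcFromManifestSketch(fileName string) (*v1.Service, error) {
    	var svc v1.Service
    	data, err := e2etestfiles.Read(fileName)
    	if err != nil {
    		return nil, err
    	}
    	jsonData, err := utilyaml.ToJSON(data) // assumed decode path
    	if err != nil {
    		return nil, err
    	}
    	if err := json.Unmarshal(jsonData, &svc); err != nil {
    		return nil, err
    	}
    	return &svc, nil
    }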
func RoleFromManifest(fileName, ns string) (*rbacv1.Role, error) { var role rbacv1.Role - data, err := testfiles.Read(fileName) + data, err := e2etestfiles.Read(fileName) if err != nil { return nil, err } diff --git a/test/e2e/network/dual_stack.go b/test/e2e/network/dual_stack.go index ba57003923dc..6feb9aa95123 100644 --- a/test/e2e/network/dual_stack.go +++ b/test/e2e/network/dual_stack.go @@ -31,7 +31,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" - e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment" + e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eservice "k8s.io/kubernetes/test/e2e/framework/service" @@ -136,7 +136,7 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() { replicas := int32(len(nodeList.Items)) - serverDeploymentSpec := e2edeploy.NewDeployment(serverDeploymentName, + serverDeploymentSpec := e2edeployment.NewDeployment(serverDeploymentName, replicas, map[string]string{"test": "dual-stack-server"}, "dualstack-test-server", @@ -165,7 +165,7 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() { }, } - clientDeploymentSpec := e2edeploy.NewDeployment(clientDeploymentName, + clientDeploymentSpec := e2edeployment.NewDeployment(clientDeploymentName, replicas, map[string]string{"test": "dual-stack-client"}, "dualstack-test-client", @@ -198,15 +198,15 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() { clientDeployment, err := cs.AppsV1().Deployments(f.Namespace.Name).Create(context.TODO(), clientDeploymentSpec, metav1.CreateOptions{}) framework.ExpectNoError(err) - err = e2edeploy.WaitForDeploymentComplete(cs, serverDeployment) + err = e2edeployment.WaitForDeploymentComplete(cs, serverDeployment) framework.ExpectNoError(err) - err = e2edeploy.WaitForDeploymentComplete(cs, clientDeployment) + err = e2edeployment.WaitForDeploymentComplete(cs, clientDeployment) framework.ExpectNoError(err) - serverPods, err := e2edeploy.GetPodsForDeployment(cs, serverDeployment) + serverPods, err := e2edeployment.GetPodsForDeployment(cs, serverDeployment) framework.ExpectNoError(err) - clientPods, err := e2edeploy.GetPodsForDeployment(cs, clientDeployment) + clientPods, err := e2edeployment.GetPodsForDeployment(cs, clientDeployment) framework.ExpectNoError(err) assertNetworkConnectivity(f, *serverPods, *clientPods, "dualstack-test-client", "80") diff --git a/test/e2e/network/ingress.go b/test/e2e/network/ingress.go index 2b1bdfdf22b6..f8e11cae0ca4 100644 --- a/test/e2e/network/ingress.go +++ b/test/e2e/network/ingress.go @@ -36,8 +36,8 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/authentication/serviceaccount" "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/auth" - "k8s.io/kubernetes/test/e2e/framework/ingress" + e2eauth "k8s.io/kubernetes/test/e2e/framework/auth" + e2eingress "k8s.io/kubernetes/test/e2e/framework/ingress" "k8s.io/kubernetes/test/e2e/framework/providers/gce" e2eservice "k8s.io/kubernetes/test/e2e/framework/service" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" @@ -54,22 +54,22 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { defer ginkgo.GinkgoRecover() var ( ns string - jig *ingress.TestJig - conformanceTests []ingress.ConformanceTests + jig *e2eingress.TestJig + conformanceTests []e2eingress.ConformanceTests ) f := 
framework.NewDefaultFramework("ingress") ginkgo.BeforeEach(func() { - jig = ingress.NewIngressTestJig(f.ClientSet) + jig = e2eingress.NewIngressTestJig(f.ClientSet) ns = f.Namespace.Name // this test wants powerful permissions. Since the namespace names are unique, we can leave this // lying around so we don't have to race any caches - err := auth.BindClusterRole(jig.Client.RbacV1(), "cluster-admin", f.Namespace.Name, + err := e2eauth.BindClusterRole(jig.Client.RbacV1(), "cluster-admin", f.Namespace.Name, rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"}) framework.ExpectNoError(err) - err = auth.WaitForAuthorizationUpdate(jig.Client.AuthorizationV1(), + err = e2eauth.WaitForAuthorizationUpdate(jig.Client.AuthorizationV1(), serviceaccount.MakeUsername(f.Namespace.Name, "default"), "", "create", schema.GroupResource{Resource: "pods"}, true) framework.ExpectNoError(err) @@ -116,7 +116,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { }) ginkgo.It("should conform to Ingress spec", func() { - conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{}) + conformanceTests = e2eingress.CreateIngressComformanceTests(jig, ns, map[string]string{}) for _, t := range conformanceTests { ginkgo.By(t.EntryLog) t.Execute() @@ -131,8 +131,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { ginkgo.It("should support multiple TLS certs", func() { ginkgo.By("Creating an ingress with no certs.") - jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "multiple-certs"), ns, map[string]string{ - ingress.IngressStaticIPKey: ns, + jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "multiple-certs"), ns, map[string]string{ + e2eingress.IngressStaticIPKey: ns, }, map[string]string{}) ginkgo.By("Adding multiple certs to the ingress.") @@ -167,8 +167,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { ginkgo.It("multicluster ingress should get instance group annotation", func() { name := "echomap" - jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "http"), ns, map[string]string{ - ingress.IngressClassKey: ingress.MulticlusterIngressClassValue, + jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "http"), ns, map[string]string{ + e2eingress.IngressClassKey: e2eingress.MulticlusterIngressClassValue, }, map[string]string{}) ginkgo.By(fmt.Sprintf("waiting for Ingress %s to get instance group annotation", name)) @@ -186,13 +186,13 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { // Verify that the ingress does not get other annotations like url-map, target-proxy, backends, etc. // Note: All resources except the firewall rule have an annotation. 
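The dual-stack hunks just above exercise the full e2edeployment helper lifecycle: build a Deployment spec, create it, wait for the rollout, then fetch the resulting pods. A condensed sketch under the new alias, with simplified names; the NewDeployment argument order (name, replicas, pod labels, container name, image, strategy) is the one visible in the audit and dual-stack hunks:

    package e2esketch

    import (
    	"context"

    	appsv1 "k8s.io/api/apps/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/kubernetes/test/e2e/framework"
    	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
    )

    func runServerDeployment(f *framework.Framework, image string) {
    	d := e2edeployment.NewDeployment("dualstack-test-server", int32(2),
    		map[string]string{"test": "dual-stack-server"}, "dualstack-test-server",
    		image, appsv1.RollingUpdateDeploymentStrategyType)

    	created, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).
    		Create(context.TODO(), d, metav1.CreateOptions{})
    	framework.ExpectNoError(err)

    	// Block until the rollout finishes, then collect the pods it produced.
    	framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(f.ClientSet, created))
    	pods, err := e2edeployment.GetPodsForDeployment(f.ClientSet, created)
    	framework.ExpectNoError(err)
    	_ = pods
    }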
- umKey := ingress.StatusPrefix + "/url-map" - fwKey := ingress.StatusPrefix + "/forwarding-rule" - tpKey := ingress.StatusPrefix + "/target-proxy" - fwsKey := ingress.StatusPrefix + "/https-forwarding-rule" - tpsKey := ingress.StatusPrefix + "/https-target-proxy" - scKey := ingress.StatusPrefix + "/ssl-cert" - beKey := ingress.StatusPrefix + "/backends" + umKey := e2eingress.StatusPrefix + "/url-map" + fwKey := e2eingress.StatusPrefix + "/forwarding-rule" + tpKey := e2eingress.StatusPrefix + "/target-proxy" + fwsKey := e2eingress.StatusPrefix + "/https-forwarding-rule" + tpsKey := e2eingress.StatusPrefix + "/https-target-proxy" + scKey := e2eingress.StatusPrefix + "/ssl-cert" + beKey := e2eingress.StatusPrefix + "/backends" wait.Poll(2*time.Second, time.Minute, func() (bool, error) { ing, err := f.ClientSet.NetworkingV1beta1().Ingresses(ns).Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -272,8 +272,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { ginkgo.It("should conform to Ingress spec", func() { jig.PollInterval = 5 * time.Second - conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{ - ingress.NEGAnnotation: `{"ingress": true}`, + conformanceTests = e2eingress.CreateIngressComformanceTests(jig, ns, map[string]string{ + e2eingress.NEGAnnotation: `{"ingress": true}`, }) for _, t := range conformanceTests { ginkgo.By(t.EntryLog) @@ -288,7 +288,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { ginkgo.It("should be able to switch between IG and NEG modes", func() { var err error ginkgo.By("Create a basic HTTP ingress using NEG") - jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) + jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(true) err = gceController.WaitForNegBackendService(jig.GetServicePorts(false)) framework.ExpectNoError(err) @@ -297,7 +297,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { svcList, err := f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) for _, svc := range svcList.Items { - svc.Annotations[ingress.NEGAnnotation] = `{"ingress": false}` + svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress": false}` _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{}) framework.ExpectNoError(err) } @@ -315,7 +315,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { svcList, err = f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) for _, svc := range svcList.Items { - svc.Annotations[ingress.NEGAnnotation] = `{"ingress": true}` + svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress": true}` _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{}) framework.ExpectNoError(err) } @@ -332,7 +332,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { ginkgo.It("should be able to create a ClusterIP service", func() { ginkgo.By("Create a basic HTTP ingress using NEG") - jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-clusterip"), ns, map[string]string{}, map[string]string{}) + jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "neg-clusterip"), ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(true) svcPorts := jig.GetServicePorts(false) err := gceController.WaitForNegBackendService(svcPorts) @@ -367,7 +367,7 @@ var _ = 
SIGDescribe("Loadbalancing: L7", func() { } ginkgo.By("Create a basic HTTP ingress using NEG") - jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) + jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(true) jig.WaitForIngressToStable() err := gceController.WaitForNegBackendService(jig.GetServicePorts(false)) @@ -392,7 +392,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { name := "hostname" replicas := 8 ginkgo.By("Create a basic HTTP ingress using NEG") - jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) + jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(true) jig.WaitForIngressToStable() err := gceController.WaitForNegBackendService(jig.GetServicePorts(false)) @@ -459,11 +459,11 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { svc, err := f.ClientSet.CoreV1().Services(ns).Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err) - var status ingress.NegStatus - v, ok := svc.Annotations[ingress.NEGStatusAnnotation] + var status e2eingress.NegStatus + v, ok := svc.Annotations[e2eingress.NEGStatusAnnotation] if !ok { // Wait for NEG sync loop to find NEGs - framework.Logf("Waiting for %v, got: %+v", ingress.NEGStatusAnnotation, svc.Annotations) + framework.Logf("Waiting for %v, got: %+v", e2eingress.NEGStatusAnnotation, svc.Annotations) return false, nil } err = json.Unmarshal([]byte(v), &status) @@ -471,7 +471,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { framework.Logf("Error in parsing Expose NEG annotation: %v", err) return false, nil } - framework.Logf("Got %v: %v", ingress.NEGStatusAnnotation, v) + framework.Logf("Got %v: %v", e2eingress.NEGStatusAnnotation, v) // Expect 2 NEGs to be created based on the test setup (neg-exposed) if len(status.NetworkEndpointGroups) != 2 { @@ -506,7 +506,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { } ginkgo.By("Create a basic HTTP ingress using NEG") - jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{}) + jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(true) err := gceController.WaitForNegBackendService(jig.GetServicePorts(false)) framework.ExpectNoError(err) @@ -528,7 +528,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { ginkgo.It("should create NEGs for all ports with the Ingress annotation, and NEGs for the standalone annotation otherwise", func() { ginkgo.By("Create a basic HTTP ingress using standalone NEG") - jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{}) + jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(true) name := "hostname" @@ -539,7 +539,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { svcList, err := f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) for _, svc := range svcList.Items { - svc.Annotations[ingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"80":{},"443":{}}}` + svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"80":{},"443":{}}}` _, err = 
f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{}) framework.ExpectNoError(err) } @@ -550,7 +550,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { svcList, err = f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) for _, svc := range svcList.Items { - svc.Annotations[ingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"443":{}}}` + svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"443":{}}}` _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{}) framework.ExpectNoError(err) } @@ -561,7 +561,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { svcList, err = f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) for _, svc := range svcList.Items { - svc.Annotations[ingress.NEGAnnotation] = `{"ingress":false,"exposed_ports":{"443":{}}}` + svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress":false,"exposed_ports":{"443":{}}}` _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{}) framework.ExpectNoError(err) } @@ -572,7 +572,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { svcList, err = f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) for _, svc := range svcList.Items { - delete(svc.Annotations, ingress.NEGAnnotation) + delete(svc.Annotations, e2eingress.NEGAnnotation) // Service cannot be ClusterIP if it's using Instance Groups. svc.Spec.Type = v1.ServiceTypeNodePort _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{}) @@ -589,7 +589,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { // Platform specific setup ginkgo.BeforeEach(func() { e2eskipper.SkipUnlessProviderIs("gce", "gke") - jig.Class = ingress.MulticlusterIngressClassValue + jig.Class = e2eingress.MulticlusterIngressClassValue jig.PollInterval = 5 * time.Second ginkgo.By("Initializing gce controller") gceController = &gce.IngressController{ @@ -626,8 +626,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { }) ginkgo.It("should conform to Ingress spec", func() { - conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{ - ingress.IngressStaticIPKey: ipName, + conformanceTests = e2eingress.CreateIngressComformanceTests(jig, ns, map[string]string{ + e2eingress.IngressStaticIPKey: ipName, }) for _, t := range conformanceTests { ginkgo.By(t.EntryLog) @@ -651,9 +651,9 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { ginkgo.It("should remove clusters as expected", func() { ingAnnotations := map[string]string{ - ingress.IngressStaticIPKey: ipName, + e2eingress.IngressStaticIPKey: ipName, } - ingFilePath := filepath.Join(ingress.IngressManifestPath, "http") + ingFilePath := filepath.Join(e2eingress.IngressManifestPath, "http") jig.CreateIngress(ingFilePath, ns, ingAnnotations, map[string]string{}) jig.WaitForIngress(false /*waitForNodePort*/) name := jig.Ingress.Name @@ -681,7 +681,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { ginkgo.It("single and multi-cluster ingresses should be able to exist together", func() { ginkgo.By("Creating a single cluster ingress first") jig.Class = "" - singleIngFilePath := filepath.Join(ingress.GCEIngressManifestPath, "static-ip-2") + singleIngFilePath := filepath.Join(e2eingress.GCEIngressManifestPath, "static-ip-2") jig.CreateIngress(singleIngFilePath, ns, map[string]string{}, 
map[string]string{}) jig.WaitForIngress(false /*waitForNodePort*/) // jig.Ingress will be overwritten when we create MCI, so keep a reference. @@ -689,11 +689,11 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { // Create the multi-cluster ingress next. ginkgo.By("Creating a multi-cluster ingress next") - jig.Class = ingress.MulticlusterIngressClassValue + jig.Class = e2eingress.MulticlusterIngressClassValue ingAnnotations := map[string]string{ - ingress.IngressStaticIPKey: ipName, + e2eingress.IngressStaticIPKey: ipName, } - multiIngFilePath := filepath.Join(ingress.IngressManifestPath, "http") + multiIngFilePath := filepath.Join(e2eingress.IngressManifestPath, "http") jig.CreateIngress(multiIngFilePath, ns, ingAnnotations, map[string]string{}) jig.WaitForIngress(false /*waitForNodePort*/) mciIngress := jig.Ingress @@ -703,7 +703,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { jig.Class = "" jig.TryDeleteIngress() jig.Ingress = mciIngress - jig.Class = ingress.MulticlusterIngressClassValue + jig.Class = e2eingress.MulticlusterIngressClassValue jig.WaitForIngress(false /*waitForNodePort*/) ginkgo.By("Cleanup: Deleting the multi-cluster ingress") @@ -713,13 +713,13 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { // Time: borderline 5m, slow by design ginkgo.Describe("[Slow] Nginx", func() { - var nginxController *ingress.NginxIngressController + var nginxController *e2eingress.NginxIngressController ginkgo.BeforeEach(func() { e2eskipper.SkipUnlessProviderIs("gce", "gke") ginkgo.By("Initializing nginx controller") jig.Class = "nginx" - nginxController = &ingress.NginxIngressController{Ns: ns, Client: jig.Client} + nginxController = &e2eingress.NginxIngressController{Ns: ns, Client: jig.Client} // TODO: This test may fail on other platforms. We can simply skip it // but we want to allow easy testing where a user might've hand @@ -753,7 +753,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { // Poll more frequently to reduce e2e completion time. // This test runs in presubmit. jig.PollInterval = 5 * time.Second - conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{}) + conformanceTests = e2eingress.CreateIngressComformanceTests(jig, ns, map[string]string{}) for _, t := range conformanceTests { ginkgo.By(t.EntryLog) t.Execute() @@ -775,11 +775,11 @@ func verifyKubemciStatusHas(name, expectedSubStr string) { } } -func executePresharedCertTest(f *framework.Framework, jig *ingress.TestJig, staticIPName string) { +func executePresharedCertTest(f *framework.Framework, jig *e2eingress.TestJig, staticIPName string) { preSharedCertName := "test-pre-shared-cert" ginkgo.By(fmt.Sprintf("Creating ssl certificate %q on GCE", preSharedCertName)) testHostname := "test.ingress.com" - cert, key, err := ingress.GenerateRSACerts(testHostname, true) + cert, key, err := e2eingress.GenerateRSACerts(testHostname, true) framework.ExpectNoError(err) gceCloud, err := gce.GetGCECloud() framework.ExpectNoError(err) @@ -811,36 +811,36 @@ func executePresharedCertTest(f *framework.Framework, jig *ingress.TestJig, stat ginkgo.By("Creating an ingress referencing the pre-shared certificate") // Create an ingress referencing this cert using pre-shared-cert annotation. ingAnnotations := map[string]string{ - ingress.IngressPreSharedCertKey: preSharedCertName, + e2eingress.IngressPreSharedCertKey: preSharedCertName, // Disallow HTTP to save resources. This is irrelevant to the // pre-shared cert test. 
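These GCE ingress tests configure behavior almost entirely through annotations: a static IP key, an allow-HTTP key, and a pre-shared certificate key. A sketch of the annotation-driven setup that executeStaticIPHttpsOnlyTest (just below) performs, using only exported names from these hunks:

    package e2esketch

    import (
    	"path/filepath"

    	e2eingress "k8s.io/kubernetes/test/e2e/framework/ingress"
    )

    // createHTTPSOnlyIngress drives the jig the way executeStaticIPHttpsOnlyTest
    // does: annotations select the static IP and disable plain HTTP.
    func createHTTPSOnlyIngress(jig *e2eingress.TestJig, ns, ipName string) {
    	annotations := map[string]string{
    		e2eingress.IngressStaticIPKey:  ipName,
    		e2eingress.IngressAllowHTTPKey: "false",
    	}
    	jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "static-ip"), ns,
    		annotations, map[string]string{})
    }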
- ingress.IngressAllowHTTPKey: "false", + e2eingress.IngressAllowHTTPKey: "false", } if staticIPName != "" { - ingAnnotations[ingress.IngressStaticIPKey] = staticIPName + ingAnnotations[e2eingress.IngressStaticIPKey] = staticIPName } - jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "pre-shared-cert"), f.Namespace.Name, ingAnnotations, map[string]string{}) + jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "pre-shared-cert"), f.Namespace.Name, ingAnnotations, map[string]string{}) ginkgo.By("Test that ingress works with the pre-shared certificate") err = jig.WaitForIngressWithCert(true, []string{testHostname}, cert) framework.ExpectNoError(err, fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) } -func executeStaticIPHttpsOnlyTest(f *framework.Framework, jig *ingress.TestJig, ipName, ip string) { - jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "static-ip"), f.Namespace.Name, map[string]string{ - ingress.IngressStaticIPKey: ipName, - ingress.IngressAllowHTTPKey: "false", +func executeStaticIPHttpsOnlyTest(f *framework.Framework, jig *e2eingress.TestJig, ipName, ip string) { + jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "static-ip"), f.Namespace.Name, map[string]string{ + e2eingress.IngressStaticIPKey: ipName, + e2eingress.IngressAllowHTTPKey: "false", }, map[string]string{}) ginkgo.By("waiting for Ingress to come up with ip: " + ip) - httpClient := ingress.BuildInsecureClient(ingress.IngressReqTimeout) - framework.ExpectNoError(ingress.PollURL(fmt.Sprintf("https://%s/", ip), "", e2eservice.LoadBalancerPollTimeout, jig.PollInterval, httpClient, false)) + httpClient := e2eingress.BuildInsecureClient(e2eingress.IngressReqTimeout) + framework.ExpectNoError(e2eingress.PollURL(fmt.Sprintf("https://%s/", ip), "", e2eservice.LoadBalancerPollTimeout, jig.PollInterval, httpClient, false)) ginkgo.By("should reject HTTP traffic") - framework.ExpectNoError(ingress.PollURL(fmt.Sprintf("http://%s/", ip), "", e2eservice.LoadBalancerPollTimeout, jig.PollInterval, httpClient, true)) + framework.ExpectNoError(e2eingress.PollURL(fmt.Sprintf("http://%s/", ip), "", e2eservice.LoadBalancerPollTimeout, jig.PollInterval, httpClient, true)) } -func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.TestJig, staticIPName string) { +func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *e2eingress.TestJig, staticIPName string) { ginkgo.By("Creating a set of ingress, service and deployment that have backside re-encryption configured") deployCreated, svcCreated, ingCreated, err := jig.SetUpBacksideHTTPSIngress(f.ClientSet, f.Namespace.Name, staticIPName) defer func() { @@ -856,9 +856,9 @@ func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.TestJ framework.ExpectNoError(err, "ginkgo.Failed to wait for ingress IP") ginkgo.By(fmt.Sprintf("Polling on address %s and verify the backend is serving HTTPS", ingIP)) - timeoutClient := &http.Client{Timeout: ingress.IngressReqTimeout} + timeoutClient := &http.Client{Timeout: e2eingress.IngressReqTimeout} err = wait.PollImmediate(e2eservice.LoadBalancerPollInterval, e2eservice.LoadBalancerPollTimeout, func() (bool, error) { - resp, err := ingress.SimpleGET(timeoutClient, fmt.Sprintf("http://%s", ingIP), "") + resp, err := e2eingress.SimpleGET(timeoutClient, fmt.Sprintf("http://%s", ingIP), "") if err != nil { framework.Logf("SimpleGET failed: %v", err) return false, nil @@ -872,7 +872,7 @@ func executeBacksideBacksideHTTPSTest(f *framework.Framework, 
jig *ingress.TestJ framework.ExpectNoError(err, "ginkgo.Failed to verify backside re-encryption ingress") } -func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceController *gce.IngressController, ns, name string, negs int) { +func detectNegAnnotation(f *framework.Framework, jig *e2eingress.TestJig, gceController *gce.IngressController, ns, name string, negs int) { if err := wait.Poll(5*time.Second, negUpdateTimeout, func() (bool, error) { svc, err := f.ClientSet.CoreV1().Services(ns).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { @@ -889,10 +889,10 @@ func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceContro return true, nil } - var status ingress.NegStatus - v, ok := svc.Annotations[ingress.NEGStatusAnnotation] + var status e2eingress.NegStatus + v, ok := svc.Annotations[e2eingress.NEGStatusAnnotation] if !ok { - framework.Logf("Waiting for %v, got: %+v", ingress.NEGStatusAnnotation, svc.Annotations) + framework.Logf("Waiting for %v, got: %+v", e2eingress.NEGStatusAnnotation, svc.Annotations) return false, nil } @@ -901,7 +901,7 @@ func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceContro framework.Logf("Error in parsing Expose NEG annotation: %v", err) return false, nil } - framework.Logf("Got %v: %v", ingress.NEGStatusAnnotation, v) + framework.Logf("Got %v: %v", e2eingress.NEGStatusAnnotation, v) if len(status.NetworkEndpointGroups) != negs { framework.Logf("Expected %d NEGs, got %d", negs, len(status.NetworkEndpointGroups)) diff --git a/test/e2e/network/scale/ingress.go b/test/e2e/network/scale/ingress.go index 8c7864077557..954296beb529 100644 --- a/test/e2e/network/scale/ingress.go +++ b/test/e2e/network/scale/ingress.go @@ -32,7 +32,7 @@ import ( imageutils "k8s.io/kubernetes/test/utils/image" "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/ingress" + e2eingress "k8s.io/kubernetes/test/e2e/framework/ingress" "k8s.io/kubernetes/test/e2e/framework/providers/gce" ) @@ -64,10 +64,10 @@ var ( // IngressScaleFramework defines the framework for ingress scale testing. type IngressScaleFramework struct { Clientset clientset.Interface - Jig *ingress.TestJig + Jig *e2eingress.TestJig GCEController *gce.IngressController CloudConfig framework.CloudConfig - Logger ingress.TestLogger + Logger e2eingress.TestLogger Namespace string EnableTLS bool @@ -97,7 +97,7 @@ func NewIngressScaleFramework(cs clientset.Interface, ns string, cloudConfig fra Namespace: ns, Clientset: cs, CloudConfig: cloudConfig, - Logger: &ingress.E2ELogger{}, + Logger: &e2eingress.E2ELogger{}, EnableTLS: true, NumIngressesTest: []int{ numIngressesSmall, @@ -111,7 +111,7 @@ func NewIngressScaleFramework(cs clientset.Interface, ns string, cloudConfig fra // PrepareScaleTest prepares framework for ingress scale testing. 
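detectNegAnnotation above shows the canonical way these tests observe NEG convergence: poll the Service, decode the NEG status annotation, and compare the group count. A trimmed sketch of that loop; the 3-minute bound stands in for the package's negUpdateTimeout constant, which is defined outside these hunks:

    package e2esketch

    import (
    	"context"
    	"encoding/json"
    	"time"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/apimachinery/pkg/util/wait"
    	clientset "k8s.io/client-go/kubernetes"
    	"k8s.io/kubernetes/test/e2e/framework"
    	e2eingress "k8s.io/kubernetes/test/e2e/framework/ingress"
    )

    // waitForNegs polls until the service reports the expected number of NEGs.
    func waitForNegs(cs clientset.Interface, ns, name string, negs int) error {
    	return wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) {
    		svc, err := cs.CoreV1().Services(ns).Get(context.TODO(), name, metav1.GetOptions{})
    		if err != nil {
    			return false, err
    		}
    		v, ok := svc.Annotations[e2eingress.NEGStatusAnnotation]
    		if !ok {
    			// NEG sync loop has not written the annotation yet; keep polling.
    			return false, nil
    		}
    		var status e2eingress.NegStatus
    		if err := json.Unmarshal([]byte(v), &status); err != nil {
    			framework.Logf("Error in parsing Expose NEG annotation: %v", err)
    			return false, nil
    		}
    		return len(status.NetworkEndpointGroups) == negs, nil
    	})
    }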
func (f *IngressScaleFramework) PrepareScaleTest() error { f.Logger.Infof("Initializing ingress test suite and gce controller...") - f.Jig = ingress.NewIngressTestJig(f.Clientset) + f.Jig = e2eingress.NewIngressTestJig(f.Clientset) f.Jig.Logger = f.Logger f.Jig.PollInterval = scaleTestPollInterval f.GCEController = &gce.IngressController{ diff --git a/test/e2e/network/scale/localrun/ingress_scale.go b/test/e2e/network/scale/localrun/ingress_scale.go index e4b54f0e9743..147a393ad9c2 100644 --- a/test/e2e/network/scale/localrun/ingress_scale.go +++ b/test/e2e/network/scale/localrun/ingress_scale.go @@ -34,7 +34,7 @@ import ( gcecloud "k8s.io/legacy-cloud-providers/gce" "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/ingress" + e2eingress "k8s.io/kubernetes/test/e2e/framework/ingress" "k8s.io/kubernetes/test/e2e/framework/providers/gce" "k8s.io/kubernetes/test/e2e/network/scale" ) @@ -153,7 +153,7 @@ func main() { // Setting up a localized scale test framework. f := scale.NewIngressScaleFramework(cs, ns.Name, cloudConfig) - f.Logger = &ingress.GLogger{} + f.Logger = &e2eingress.GLogger{} // Customizing scale test. f.EnableTLS = enableTLS f.OutputFile = outputFile diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index 13239e99c976..070c95758c6b 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -43,7 +43,7 @@ import ( clientset "k8s.io/client-go/kubernetes" cloudprovider "k8s.io/cloud-provider" "k8s.io/kubernetes/test/e2e/framework" - e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment" + e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment" e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints" e2enetwork "k8s.io/kubernetes/test/e2e/framework/network" e2enode "k8s.io/kubernetes/test/e2e/framework/node" @@ -926,7 +926,7 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(err, "Failed to delete deployment %s", deployment.Name) }() - framework.ExpectNoError(e2edeploy.WaitForDeploymentComplete(cs, deployment), "Failed to complete pause pod deployment") + framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(cs, deployment), "Failed to complete pause pod deployment") deployment, err = cs.AppsV1().Deployments(ns).Get(context.TODO(), deployment.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Error in retrieving pause pod deployment") @@ -2874,7 +2874,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { ginkgo.By("Creating pause pod deployment to make sure, pausePods are in desired state") deployment := createPausePodDeployment(cs, "pause-pod-deployment", namespace, 1) - framework.ExpectNoError(e2edeploy.WaitForDeploymentComplete(cs, deployment), "Failed to complete pause pod deployment") + framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(cs, deployment), "Failed to complete pause pod deployment") defer func() { framework.Logf("Deleting deployment") @@ -3242,7 +3242,7 @@ func createAndGetExternalServiceFQDN(cs clientset.Interface, ns, serviceName str func createPausePodDeployment(cs clientset.Interface, name, ns string, replicas int) *appsv1.Deployment { labels := map[string]string{"deployment": "agnhost-pause"} - pauseDeployment := e2edeploy.NewDeployment(name, int32(replicas), labels, "", "", appsv1.RollingUpdateDeploymentStrategyType) + pauseDeployment := e2edeployment.NewDeployment(name, int32(replicas), labels, "", "", appsv1.RollingUpdateDeploymentStrategyType) pauseDeployment.Spec.Template.Spec.Containers[0] = 
v1.Container{ Name: "agnhost-pause", diff --git a/test/e2e/node/kubelet.go b/test/e2e/node/kubelet.go index e91b268aed8b..350648654534 100644 --- a/test/e2e/node/kubelet.go +++ b/test/e2e/node/kubelet.go @@ -36,7 +36,7 @@ import ( e2erc "k8s.io/kubernetes/test/e2e/framework/rc" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" - "k8s.io/kubernetes/test/e2e/framework/volume" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" @@ -418,7 +418,7 @@ var _ = SIGDescribe("kubelet", func() { ginkgo.BeforeEach(func() { e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...) - _, nfsServerPod, nfsIP = volume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"}) + _, nfsServerPod, nfsIP = e2evolume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"}) }) ginkgo.AfterEach(func() { diff --git a/test/e2e/scheduling/nvidia-gpus.go b/test/e2e/scheduling/nvidia-gpus.go index 51942901da7b..9c751de1cd26 100644 --- a/test/e2e/scheduling/nvidia-gpus.go +++ b/test/e2e/scheduling/nvidia-gpus.go @@ -28,7 +28,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/gpu" + e2egpu "k8s.io/kubernetes/test/e2e/framework/gpu" e2ejob "k8s.io/kubernetes/test/e2e/framework/job" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -133,7 +133,7 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra } else { dsYamlURL = "https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/daemonset.yaml" } - gpuResourceName = gpu.NVIDIAGPUResourceName + gpuResourceName = e2egpu.NVIDIAGPUResourceName framework.Logf("Using %v", dsYamlURL) // Creates the DaemonSet that installs Nvidia Drivers. 
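Both the kubelet hunk above and the GCP volume hunks earlier stand up a throwaway NFS server with the e2evolume helpers and then verify mounts with TestVolumeClient. A hedged sketch of that flow; only the Volume and ExpectedContent fields of e2evolume.Test are visible in these hunks, and the expected string here is an assumption about the NFS test image, not a quote:

    package e2esketch

    import (
    	v1 "k8s.io/api/core/v1"
    	clientset "k8s.io/client-go/kubernetes"
    	"k8s.io/kubernetes/test/e2e/framework"
    	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
    )

    func testNFSMount(f *framework.Framework, c clientset.Interface, ns string) {
    	// NewNFSServer returns the server config, the server pod, and its IP.
    	config, _, serverIP := e2evolume.NewNFSServer(c, ns, []string{})
    	defer e2evolume.TestServerCleanup(f, config)

    	tests := []e2evolume.Test{
    		{
    			Volume: v1.VolumeSource{
    				NFS: &v1.NFSVolumeSource{
    					Server:   serverIP,
    					Path:     "/",
    					ReadOnly: true,
    				},
    			},
    			// Assumed content of the NFS test image's index.html.
    			ExpectedContent: "Hello from NFS!",
    		},
    	}
    	e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)
    }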
diff --git a/test/e2e/scheduling/preemption.go b/test/e2e/scheduling/preemption.go index 50f3a26f1b38..7130ed8a8e10 100644 --- a/test/e2e/scheduling/preemption.go +++ b/test/e2e/scheduling/preemption.go @@ -40,7 +40,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" - "k8s.io/kubernetes/test/e2e/framework/replicaset" + e2ereplicaset "k8s.io/kubernetes/test/e2e/framework/replicaset" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" "github.com/onsi/ginkgo" @@ -686,7 +686,7 @@ func createPauseRS(f *framework.Framework, conf pauseRSConfig) *appsv1.ReplicaSe func runPauseRS(f *framework.Framework, conf pauseRSConfig) *appsv1.ReplicaSet { rs := createPauseRS(f, conf) - framework.ExpectNoError(replicaset.WaitForReplicaSetTargetAvailableReplicasWithTimeout(f.ClientSet, rs, conf.Replicas, framework.PodGetTimeout)) + framework.ExpectNoError(e2ereplicaset.WaitForReplicaSetTargetAvailableReplicasWithTimeout(f.ClientSet, rs, conf.Replicas, framework.PodGetTimeout)) return rs } diff --git a/test/e2e/storage/drivers/csi.go b/test/e2e/storage/drivers/csi.go index 71b4dcf8f957..5ffb894430ed 100644 --- a/test/e2e/storage/drivers/csi.go +++ b/test/e2e/storage/drivers/csi.go @@ -54,7 +54,7 @@ import ( e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" - "k8s.io/kubernetes/test/e2e/framework/volume" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/utils" @@ -83,7 +83,7 @@ func initHostPathCSIDriver(name string, capabilities map[testsuites.Capability]b SupportedFsType: sets.NewString( "", // Default fsType ), - SupportedSizeRange: volume.SizeRange{ + SupportedSizeRange: e2evolume.SizeRange{ Min: "1Mi", }, Capabilities: capabilities, @@ -363,7 +363,7 @@ func InitGcePDCSIDriver() testsuites.TestDriver { Name: GCEPDCSIDriverName, FeatureTag: "[Serial]", MaxFileSize: testpatterns.FileSizeMedium, - SupportedSizeRange: volume.SizeRange{ + SupportedSizeRange: e2evolume.SizeRange{ Min: "5Gi", }, SupportedFsType: sets.NewString( diff --git a/test/e2e/storage/drivers/in_tree.go b/test/e2e/storage/drivers/in_tree.go index d66a20f89514..e8473a736960 100644 --- a/test/e2e/storage/drivers/in_tree.go +++ b/test/e2e/storage/drivers/in_tree.go @@ -54,12 +54,12 @@ import ( "k8s.io/apiserver/pkg/authentication/serviceaccount" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/auth" + e2eauth "k8s.io/kubernetes/test/e2e/framework/auth" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" - "k8s.io/kubernetes/test/e2e/framework/volume" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/utils" @@ -99,7 +99,7 @@ func InitNFSDriver() testsuites.TestDriver { Name: "nfs", InTreePluginName: "kubernetes.io/nfs", MaxFileSize: testpatterns.FileSizeLarge, - SupportedSizeRange: volume.SizeRange{ + SupportedSizeRange: e2evolume.SizeRange{ Min: "5Gi", }, SupportedFsType: sets.NewString( @@ -124,8 +124,8 @@ func (n *nfsDriver) 
GetDriverInfo() *testsuites.DriverInfo { func (n *nfsDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { } -func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { - nv, ok := volume.(*nfsVolume) +func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource { + nv, ok := e2evolume.(*nfsVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to NFS test volume") return &v1.VolumeSource{ NFS: &v1.NFSVolumeSource{ @@ -136,8 +136,8 @@ func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string, volume testsui } } -func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { - nv, ok := volume.(*nfsVolume) +func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { + nv, ok := e2evolume.(*nfsVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to NFS test volume") return &v1.PersistentVolumeSource{ NFS: &v1.NFSVolumeSource{ @@ -164,11 +164,11 @@ func (n *nfsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConf // TODO(mkimuram): cluster-admin gives too much right but system:persistent-volume-provisioner // is not enough. We should create new clusterrole for testing. - err := auth.BindClusterRole(cs.RbacV1(), "cluster-admin", ns.Name, + err := e2eauth.BindClusterRole(cs.RbacV1(), "cluster-admin", ns.Name, rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: ns.Name, Name: "default"}) framework.ExpectNoError(err) - err = auth.WaitForAuthorizationUpdate(cs.AuthorizationV1(), + err = e2eauth.WaitForAuthorizationUpdate(cs.AuthorizationV1(), serviceaccount.MakeUsername(ns.Name, "default"), "", "get", schema.GroupResource{Group: "storage.k8s.io", Resource: "storageclasses"}, true) framework.ExpectNoError(err, "Failed to update authorization: %v", err) @@ -199,7 +199,7 @@ func (n *nfsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testp case testpatterns.InlineVolume: fallthrough case testpatterns.PreprovisionedPV: - c, serverPod, serverIP := volume.NewNFSServer(cs, ns.Name, []string{}) + c, serverPod, serverIP := e2evolume.NewNFSServer(cs, ns.Name, []string{}) config.ServerConfig = &c return &nfsVolume{ serverIP: serverIP, @@ -241,7 +241,7 @@ func InitGlusterFSDriver() testsuites.TestDriver { Name: "gluster", InTreePluginName: "kubernetes.io/glusterfs", MaxFileSize: testpatterns.FileSizeMedium, - SupportedSizeRange: volume.SizeRange{ + SupportedSizeRange: e2evolume.SizeRange{ Min: "5Gi", }, SupportedFsType: sets.NewString( @@ -265,8 +265,8 @@ func (g *glusterFSDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) e2eskipper.SkipUnlessNodeOSDistroIs("gci", "ubuntu", "custom") } -func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { - gv, ok := volume.(*glusterVolume) +func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource { + gv, ok := e2evolume.(*glusterVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to Gluster test volume") name := gv.prefix + "-server" @@ -280,8 +280,8 @@ func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, volume t } } -func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume 
testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { - gv, ok := volume.(*glusterVolume) +func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { + gv, ok := e2evolume.(*glusterVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to Gluster test volume") name := gv.prefix + "-server" @@ -308,7 +308,7 @@ func (g *glusterFSDriver) CreateVolume(config *testsuites.PerTestConfig, volType cs := f.ClientSet ns := f.Namespace - c, serverPod, _ := volume.NewGlusterfsServer(cs, ns.Name) + c, serverPod, _ := e2evolume.NewGlusterfsServer(cs, ns.Name) config.ServerConfig = &c return &glusterVolume{ prefix: config.Prefix, @@ -391,8 +391,8 @@ func (i *iSCSIDriver) GetDriverInfo() *testsuites.DriverInfo { func (i *iSCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { } -func (i *iSCSIDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { - iv, ok := volume.(*iSCSIVolume) +func (i *iSCSIDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource { + iv, ok := e2evolume.(*iSCSIVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to iSCSI test volume") volSource := v1.VolumeSource{ @@ -409,8 +409,8 @@ func (i *iSCSIDriver) GetVolumeSource(readOnly bool, fsType string, volume tests return &volSource } -func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { - iv, ok := volume.(*iSCSIVolume) +func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { + iv, ok := e2evolume.(*iSCSIVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to iSCSI test volume") pvSource := v1.PersistentVolumeSource{ @@ -452,10 +452,10 @@ func (i *iSCSIDriver) CreateVolume(config *testsuites.PerTestConfig, volType tes } // newISCSIServer is an iSCSI-specific wrapper for CreateStorageServer. -func newISCSIServer(cs clientset.Interface, namespace string) (config volume.TestConfig, pod *v1.Pod, ip, iqn string) { +func newISCSIServer(cs clientset.Interface, namespace string) (config e2evolume.TestConfig, pod *v1.Pod, ip, iqn string) { // Generate cluster-wide unique IQN iqn = fmt.Sprintf(iSCSIIQNTemplate, namespace) - config = volume.TestConfig{ + config = e2evolume.TestConfig{ Namespace: namespace, Prefix: "iscsi", ServerImage: imageutils.GetE2EImage(imageutils.VolumeISCSIServer), @@ -471,15 +471,15 @@ func newISCSIServer(cs clientset.Interface, namespace string) (config volume.Tes ServerReadyMessage: "iscsi target started", ServerHostNetwork: true, } - pod, ip = volume.CreateStorageServer(cs, config) + pod, ip = e2evolume.CreateStorageServer(cs, config) // Make sure the client runs on the same node as server so we don't need to open any firewalls. config.ClientNodeSelection = e2epod.NodeSelection{Name: pod.Spec.NodeName} return config, pod, ip, iqn } // newRBDServer is a CephRBD-specific wrapper for CreateStorageServer. 
-func newRBDServer(cs clientset.Interface, namespace string) (config volume.TestConfig, pod *v1.Pod, secret *v1.Secret, ip string) { - config = volume.TestConfig{ +func newRBDServer(cs clientset.Interface, namespace string) (config e2evolume.TestConfig, pod *v1.Pod, secret *v1.Secret, ip string) { + config = e2evolume.TestConfig{ Namespace: namespace, Prefix: "rbd", ServerImage: imageutils.GetE2EImage(imageutils.VolumeRBDServer), @@ -489,7 +489,7 @@ func newRBDServer(cs clientset.Interface, namespace string) (config volume.TestC }, ServerReadyMessage: "Ceph is ready", } - pod, ip = volume.CreateStorageServer(cs, config) + pod, ip = e2evolume.CreateStorageServer(cs, config) // create secrets for the server secret = &v1.Secret{ TypeMeta: metav1.TypeMeta{ @@ -543,7 +543,7 @@ func InitRbdDriver() testsuites.TestDriver { InTreePluginName: "kubernetes.io/rbd", FeatureTag: "[Feature:Volumes][Serial]", MaxFileSize: testpatterns.FileSizeMedium, - SupportedSizeRange: volume.SizeRange{ + SupportedSizeRange: e2evolume.SizeRange{ Min: "5Gi", }, SupportedFsType: sets.NewString( @@ -571,8 +571,8 @@ func (r *rbdDriver) GetDriverInfo() *testsuites.DriverInfo { func (r *rbdDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { } -func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { - rv, ok := volume.(*rbdVolume) +func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource { + rv, ok := e2evolume.(*rbdVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to RBD test volume") volSource := v1.VolumeSource{ @@ -593,8 +593,8 @@ func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, volume testsui return &volSource } -func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { - rv, ok := volume.(*rbdVolume) +func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { + rv, ok := e2evolume.(*rbdVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to RBD test volume") f := rv.f @@ -671,7 +671,7 @@ func InitCephFSDriver() testsuites.TestDriver { InTreePluginName: "kubernetes.io/cephfs", FeatureTag: "[Feature:Volumes][Serial]", MaxFileSize: testpatterns.FileSizeMedium, - SupportedSizeRange: volume.SizeRange{ + SupportedSizeRange: e2evolume.SizeRange{ Min: "5Gi", }, SupportedFsType: sets.NewString( @@ -694,8 +694,8 @@ func (c *cephFSDriver) GetDriverInfo() *testsuites.DriverInfo { func (c *cephFSDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { } -func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { - cv, ok := volume.(*cephVolume) +func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource { + cv, ok := e2evolume.(*cephVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to Ceph test volume") return &v1.VolumeSource{ @@ -710,8 +710,8 @@ func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, volume test } } -func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { - cv, ok := volume.(*cephVolume) +func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, 
e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { + cv, ok := e2evolume.(*cephVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to Ceph test volume") ns := cv.f.Namespace @@ -793,7 +793,7 @@ func (h *hostPathDriver) GetDriverInfo() *testsuites.DriverInfo { func (h *hostPathDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { } -func (h *hostPathDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { +func (h *hostPathDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource { // hostPath doesn't support readOnly volume if readOnly { return nil @@ -868,8 +868,8 @@ func (h *hostPathSymlinkDriver) GetDriverInfo() *testsuites.DriverInfo { func (h *hostPathSymlinkDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { } -func (h *hostPathSymlinkDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { - hv, ok := volume.(*hostPathSymlinkVolume) +func (h *hostPathSymlinkDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource { + hv, ok := e2evolume.(*hostPathSymlinkVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to Hostpath Symlink test volume") // hostPathSymlink doesn't support readOnly volume @@ -1010,7 +1010,7 @@ func (e *emptydirDriver) GetDriverInfo() *testsuites.DriverInfo { func (e *emptydirDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { } -func (e *emptydirDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { +func (e *emptydirDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource { // emptydir doesn't support readOnly volume if readOnly { return nil @@ -1059,7 +1059,7 @@ func InitCinderDriver() testsuites.TestDriver { Name: "cinder", InTreePluginName: "kubernetes.io/cinder", MaxFileSize: testpatterns.FileSizeMedium, - SupportedSizeRange: volume.SizeRange{ + SupportedSizeRange: e2evolume.SizeRange{ Min: "5Gi", }, SupportedFsType: sets.NewString( @@ -1089,8 +1089,8 @@ func (c *cinderDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { e2eskipper.SkipUnlessProviderIs("openstack") } -func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { - cv, ok := volume.(*cinderVolume) +func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource { + cv, ok := e2evolume.(*cinderVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to Cinder test volume") volSource := v1.VolumeSource{ @@ -1105,8 +1105,8 @@ func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, volume test return &volSource } -func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { - cv, ok := volume.(*cinderVolume) +func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { + cv, ok := e2evolume.(*cinderVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to Cinder test volume") pvSource := v1.PersistentVolumeSource{ @@ -1233,7 +1233,7 @@ func InitGcePdDriver() testsuites.TestDriver { Name: "gcepd", InTreePluginName: "kubernetes.io/gce-pd", MaxFileSize: 
testpatterns.FileSizeMedium, - SupportedSizeRange: volume.SizeRange{ + SupportedSizeRange: e2evolume.SizeRange{ Min: "5Gi", }, SupportedFsType: supportedTypes, @@ -1267,8 +1267,8 @@ func (g *gcePdDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { } } -func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { - gv, ok := volume.(*gcePdVolume) +func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource { + gv, ok := e2evolume.(*gcePdVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to GCE PD test volume") volSource := v1.VolumeSource{ GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ @@ -1282,8 +1282,8 @@ func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, volume tests return &volSource } -func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { - gv, ok := volume.(*gcePdVolume) +func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { + gv, ok := e2evolume.(*gcePdVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to GCE PD test volume") pvSource := v1.PersistentVolumeSource{ GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ @@ -1372,7 +1372,7 @@ func InitVSphereDriver() testsuites.TestDriver { Name: "vsphere", InTreePluginName: "kubernetes.io/vsphere-volume", MaxFileSize: testpatterns.FileSizeMedium, - SupportedSizeRange: volume.SizeRange{ + SupportedSizeRange: e2evolume.SizeRange{ Min: "5Gi", }, SupportedFsType: sets.NewString( @@ -1398,8 +1398,8 @@ func (v *vSphereDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { e2eskipper.SkipUnlessProviderIs("vsphere") } -func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { - vsv, ok := volume.(*vSphereVolume) +func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource { + vsv, ok := e2evolume.(*vSphereVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to vSphere test volume") // vSphere driver doesn't seem to support readOnly volume @@ -1418,8 +1418,8 @@ func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, volume tes return &volSource } -func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { - vsv, ok := volume.(*vSphereVolume) +func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { + vsv, ok := e2evolume.(*vSphereVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to vSphere test volume") // vSphere driver doesn't seem to support readOnly volume @@ -1496,7 +1496,7 @@ func InitAzureDiskDriver() testsuites.TestDriver { Name: "azure-disk", InTreePluginName: "kubernetes.io/azure-disk", MaxFileSize: testpatterns.FileSizeMedium, - SupportedSizeRange: volume.SizeRange{ + SupportedSizeRange: e2evolume.SizeRange{ Min: "5Gi", }, SupportedFsType: sets.NewString( @@ -1529,8 +1529,8 @@ func (a *azureDiskDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) e2eskipper.SkipUnlessProviderIs("azure") } -func (a *azureDiskDriver) GetVolumeSource(readOnly 
bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { - av, ok := volume.(*azureDiskVolume) +func (a *azureDiskDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource { + av, ok := e2evolume.(*azureDiskVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume") diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):] @@ -1549,8 +1549,8 @@ func (a *azureDiskDriver) GetVolumeSource(readOnly bool, fsType string, volume t return &volSource } -func (a *azureDiskDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { - av, ok := volume.(*azureDiskVolume) +func (a *azureDiskDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { + av, ok := e2evolume.(*azureDiskVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume") diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):] @@ -1626,7 +1626,7 @@ func InitAwsDriver() testsuites.TestDriver { Name: "aws", InTreePluginName: "kubernetes.io/aws-ebs", MaxFileSize: testpatterns.FileSizeMedium, - SupportedSizeRange: volume.SizeRange{ + SupportedSizeRange: e2evolume.SizeRange{ Min: "5Gi", }, SupportedFsType: sets.NewString( @@ -1664,8 +1664,8 @@ func (a *awsDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { e2eskipper.SkipUnlessProviderIs("aws") } -func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { - av, ok := volume.(*awsVolume) +func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource { + av, ok := e2evolume.(*awsVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to AWS test volume") volSource := v1.VolumeSource{ AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{ @@ -1679,8 +1679,8 @@ func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, volume testsui return &volSource } -func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { - av, ok := volume.(*awsVolume) +func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { + av, ok := e2evolume.(*awsVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to AWS test volume") pvSource := v1.PersistentVolumeSource{ AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{ @@ -1921,8 +1921,8 @@ func (l *localDriver) nodeAffinityForNode(node *v1.Node) *v1.VolumeNodeAffinity } } -func (l *localDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { - lv, ok := volume.(*localVolume) +func (l *localDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { + lv, ok := e2evolume.(*localVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to local test volume") return &v1.PersistentVolumeSource{ Local: &v1.LocalVolumeSource{ diff --git a/test/e2e/storage/external/external.go b/test/e2e/storage/external/external.go index e04e69c68734..9e963576acb4 100644 --- 
a/test/e2e/storage/external/external.go +++ b/test/e2e/storage/external/external.go @@ -31,10 +31,10 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/kubernetes/scheme" "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/config" + e2econfig "k8s.io/kubernetes/test/e2e/framework/config" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" - "k8s.io/kubernetes/test/e2e/framework/volume" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/utils" @@ -122,7 +122,7 @@ type driverDefinition struct { // SupportedSizeRange defines the desired size of dynamically // provisioned volumes. - SupportedSizeRange volume.SizeRange + SupportedSizeRange e2evolume.SizeRange // ClientNodeName selects a specific node for scheduling test pods. // Can be left empty. Most drivers should not need this and instead @@ -146,7 +146,7 @@ var csiTestSuites = []func() testsuites.TestSuite{ } func init() { - config.Flags.Var(testDriverParameter{}, "storage.testdriver", "name of a .yaml or .json file that defines a driver for storage testing, can be used more than once") + e2econfig.Flags.Var(testDriverParameter{}, "storage.testdriver", "name of a .yaml or .json file that defines a driver for storage testing, can be used more than once") } // testDriverParameter is used to hook loading of the driver @@ -203,7 +203,7 @@ func loadDriverDefinition(filename string) (*driverDefinition, error) { "", // Default fsType ), }, - SupportedSizeRange: volume.SizeRange{ + SupportedSizeRange: e2evolume.SizeRange{ Min: "5Gi", }, } @@ -276,13 +276,13 @@ func (d *driverDefinition) SkipUnsupportedTest(pattern testpatterns.TestPattern) } } -func (d *driverDefinition) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass { +func (d *driverDefinition) GetDynamicProvisionStorageClass(e2econfig *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass { var ( sc *storagev1.StorageClass err error ) - f := config.Framework + f := e2econfig.Framework switch { case d.StorageClass.FromName: @@ -331,15 +331,15 @@ func loadSnapshotClass(filename string) (*unstructured.Unstructured, error) { return snapshotClass, nil } -func (d *driverDefinition) GetSnapshotClass(config *testsuites.PerTestConfig) *unstructured.Unstructured { +func (d *driverDefinition) GetSnapshotClass(e2econfig *testsuites.PerTestConfig) *unstructured.Unstructured { if !d.SnapshotClass.FromName && d.SnapshotClass.FromFile == "" && d.SnapshotClass.FromExistingClassName == "" { e2eskipper.Skipf("Driver %q does not support snapshotting - skipping", d.DriverInfo.Name) } - f := config.Framework + f := e2econfig.Framework snapshotter := d.DriverInfo.Name parameters := map[string]string{} - ns := config.Framework.Namespace.Name + ns := e2econfig.Framework.Namespace.Name suffix := "vsc" switch { @@ -368,24 +368,24 @@ func (d *driverDefinition) GetSnapshotClass(config *testsuites.PerTestConfig) *u return testsuites.GetSnapshotClass(snapshotter, parameters, ns, suffix) } -func (d *driverDefinition) GetVolume(config *testsuites.PerTestConfig, volumeNumber int) (map[string]string, bool, bool) { +func (d *driverDefinition) GetVolume(e2econfig *testsuites.PerTestConfig, volumeNumber int) (map[string]string, bool, bool) { if len(d.InlineVolumes) == 0 { e2eskipper.Skipf("%s does not have any 
InlineVolumeAttributes defined", d.DriverInfo.Name) } - volume := d.InlineVolumes[volumeNumber%len(d.InlineVolumes)] - return volume.Attributes, volume.Shared, volume.ReadOnly + e2evolume := d.InlineVolumes[volumeNumber%len(d.InlineVolumes)] + return e2evolume.Attributes, e2evolume.Shared, e2evolume.ReadOnly } -func (d *driverDefinition) GetCSIDriverName(config *testsuites.PerTestConfig) string { +func (d *driverDefinition) GetCSIDriverName(e2econfig *testsuites.PerTestConfig) string { return d.DriverInfo.Name } func (d *driverDefinition) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { - config := &testsuites.PerTestConfig{ + e2econfig := &testsuites.PerTestConfig{ Driver: d, Prefix: "external", Framework: f, ClientNodeSelection: e2epod.NodeSelection{Name: d.ClientNodeName}, } - return config, func() {} + return e2econfig, func() {} } diff --git a/test/e2e/storage/external/external_test.go b/test/e2e/storage/external/external_test.go index 2cf52312a0e8..fbeeb55b9b65 100644 --- a/test/e2e/storage/external/external_test.go +++ b/test/e2e/storage/external/external_test.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/assert" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/test/e2e/framework/volume" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/testsuites" ) @@ -34,7 +34,7 @@ func TestDriverParameter(t *testing.T) { "", // Default fsType ), }, - SupportedSizeRange: volume.SizeRange{ + SupportedSizeRange: e2evolume.SizeRange{ Min: "5Gi", }, } diff --git a/test/e2e/storage/flexvolume.go b/test/e2e/storage/flexvolume.go index 2cf7bdf002d2..cad2692ef026 100644 --- a/test/e2e/storage/flexvolume.go +++ b/test/e2e/storage/flexvolume.go @@ -32,8 +32,8 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" - "k8s.io/kubernetes/test/e2e/framework/testfiles" - "k8s.io/kubernetes/test/e2e/framework/volume" + e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/utils" ) @@ -51,8 +51,8 @@ const ( // testFlexVolume tests that a client pod using a given flexvolume driver // successfully mounts it and runs -func testFlexVolume(driver string, config volume.TestConfig, f *framework.Framework) { - tests := []volume.Test{ +func testFlexVolume(driver string, config e2evolume.TestConfig, f *framework.Framework) { + tests := []e2evolume.Test{ { Volume: v1.VolumeSource{ FlexVolume: &v1.FlexVolumeSource{ @@ -64,7 +64,7 @@ func testFlexVolume(driver string, config volume.TestConfig, f *framework.Framew ExpectedContent: "Hello from flexvolume!", }, } - volume.TestVolumeClient(f, config, nil, "" /* fsType */, tests) + e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests) } // installFlex installs the driver found at filePath on the node, and restarts @@ -92,7 +92,7 @@ func installFlex(c clientset.Interface, node *v1.Node, vendor, driver, filePath cmd := fmt.Sprintf("sudo mkdir -p %s", flexDir) sshAndLog(cmd, host, true /*failOnError*/) - data := testfiles.ReadOrDie(filePath) + data := e2etestfiles.ReadOrDie(filePath) cmd = fmt.Sprintf("sudo tee <<'EOF' %s\n%s\nEOF", flexFile, string(data)) sshAndLog(cmd, host, true /*failOnError*/) @@ -161,7 +161,7 @@ var _ = utils.SIGDescribe("Flexvolumes", func() { var cs clientset.Interface var ns *v1.Namespace var node *v1.Node - var config volume.TestConfig + var 
config e2evolume.TestConfig var suffix string ginkgo.BeforeEach(func() { @@ -175,7 +175,7 @@ var _ = utils.SIGDescribe("Flexvolumes", func() { var err error node, err = e2enode.GetRandomReadySchedulableNode(f.ClientSet) framework.ExpectNoError(err) - config = volume.TestConfig{ + config = e2evolume.TestConfig{ Namespace: ns.Name, Prefix: "flex", ClientNodeSelection: e2epod.NodeSelection{Name: node.Name}, diff --git a/test/e2e/storage/flexvolume_mounted_volume_resize.go b/test/e2e/storage/flexvolume_mounted_volume_resize.go index 3246a83d9b33..bb40ff9c00c5 100644 --- a/test/e2e/storage/flexvolume_mounted_volume_resize.go +++ b/test/e2e/storage/flexvolume_mounted_volume_resize.go @@ -31,7 +31,7 @@ import ( utilerrors "k8s.io/apimachinery/pkg/util/errors" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" - e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment" + e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" @@ -153,7 +153,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() { framework.ExpectEqual(len(pvs), 1) ginkgo.By("Creating a deployment with the provisioned volume") - deployment, err := e2edeploy.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "") + deployment, err := e2edeployment.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "") framework.ExpectNoError(err, "Failed creating deployment %v", err) defer c.AppsV1().Deployments(ns).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{}) @@ -174,7 +174,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() { framework.ExpectNoError(err, "While waiting for pvc resize to finish") ginkgo.By("Getting a pod from deployment") - podList, err := e2edeploy.GetPodsForDeployment(c, deployment) + podList, err := e2edeployment.GetPodsForDeployment(c, deployment) framework.ExpectNoError(err, "While getting pods from deployment") gomega.Expect(podList.Items).NotTo(gomega.BeEmpty()) pod := podList.Items[0] diff --git a/test/e2e/storage/mounted_volume_resize.go b/test/e2e/storage/mounted_volume_resize.go index 763975208a68..3edc41a2b80e 100644 --- a/test/e2e/storage/mounted_volume_resize.go +++ b/test/e2e/storage/mounted_volume_resize.go @@ -33,7 +33,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/pkg/client/conditions" "k8s.io/kubernetes/test/e2e/framework" - e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment" + e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" @@ -120,7 +120,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() { // Keeping pod on same node reproduces the scenario that volume might already be mounted when resize is attempted. // We should consider adding a unit test that exercises this better. 
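The deployment hunks that follow use the same mechanical recipe as every other file in this series: the import path is unchanged, only the alias moves (here e2edeploy becomes e2edeployment), and each call site is updated to use the new qualifier. A minimal standalone sketch of the pattern, assuming the e2e framework module is importable; the wrapper name podsOf is illustrative and not part of this patch:

package example

import (
	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"

	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
)

// podsOf is an illustrative wrapper: the rename touches only the alias on
// the import line above and the package qualifier in the call below.
func podsOf(c clientset.Interface, d *appsv1.Deployment) (*v1.PodList, error) {
	// Before this patch the same call read: e2edeploy.GetPodsForDeployment(c, d)
	return e2edeployment.GetPodsForDeployment(c, d)
}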
ginkgo.By("Creating a deployment with selected PVC") - deployment, err := e2edeploy.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "") + deployment, err := e2edeployment.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "") framework.ExpectNoError(err, "Failed creating deployment %v", err) defer c.AppsV1().Deployments(ns).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{}) @@ -147,7 +147,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() { framework.ExpectNoError(err, "While waiting for pvc resize to finish") ginkgo.By("Getting a pod from deployment") - podList, err := e2edeploy.GetPodsForDeployment(c, deployment) + podList, err := e2edeployment.GetPodsForDeployment(c, deployment) framework.ExpectNoError(err, "While getting pods from deployment") gomega.Expect(podList.Items).NotTo(gomega.BeEmpty()) pod := podList.Items[0] @@ -172,7 +172,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() { func waitForDeploymentToRecreatePod(client clientset.Interface, deployment *appsv1.Deployment) (v1.Pod, error) { var runningPod v1.Pod waitErr := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) { - podList, err := e2edeploy.GetPodsForDeployment(client, deployment) + podList, err := e2edeployment.GetPodsForDeployment(client, deployment) if err != nil { return false, fmt.Errorf("failed to get pods for deployment: %v", err) } diff --git a/test/e2e/storage/nfs_persistent_volume-disruptive.go b/test/e2e/storage/nfs_persistent_volume-disruptive.go index 7083f2f3cccd..0762404a1949 100644 --- a/test/e2e/storage/nfs_persistent_volume-disruptive.go +++ b/test/e2e/storage/nfs_persistent_volume-disruptive.go @@ -36,7 +36,7 @@ import ( e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" - "k8s.io/kubernetes/test/e2e/framework/volume" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/utils" ) @@ -98,7 +98,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() { volLabel = labels.Set{e2epv.VolumeSelectorKey: ns} selector = metav1.SetAsLabelSelector(volLabel) // Start the NFS server pod. 
- _, nfsServerPod, nfsServerIP = volume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"}) + _, nfsServerPod, nfsServerIP = e2evolume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"}) nfsPVconfig = e2epv.PersistentVolumeConfig{ NamePrefix: "nfs-", Labels: volLabel, diff --git a/test/e2e/storage/persistent_volumes-local.go b/test/e2e/storage/persistent_volumes-local.go index 14773e8d4f81..812f8dba8c3e 100644 --- a/test/e2e/storage/persistent_volumes-local.go +++ b/test/e2e/storage/persistent_volumes-local.go @@ -44,7 +44,7 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" - e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset" + e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset" "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -1194,12 +1194,12 @@ func createStatefulSet(config *localTestConfig, ssReplicas int32, volumeCount in ss, err := config.client.AppsV1().StatefulSets(config.ns).Create(context.TODO(), spec, metav1.CreateOptions{}) framework.ExpectNoError(err) - e2esset.WaitForRunningAndReady(config.client, ssReplicas, ss) + e2estatefulset.WaitForRunningAndReady(config.client, ssReplicas, ss) return ss } func validateStatefulSet(config *localTestConfig, ss *appsv1.StatefulSet, anti bool) { - pods := e2esset.GetPodList(config.client, ss) + pods := e2estatefulset.GetPodList(config.client, ss) nodes := sets.NewString() for _, pod := range pods.Items { diff --git a/test/e2e/storage/persistent_volumes.go b/test/e2e/storage/persistent_volumes.go index c63b49e53619..5635b0c9acb7 100644 --- a/test/e2e/storage/persistent_volumes.go +++ b/test/e2e/storage/persistent_volumes.go @@ -33,8 +33,8 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" - e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset" - "k8s.io/kubernetes/test/e2e/framework/volume" + e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -127,7 +127,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { ) ginkgo.BeforeEach(func() { - _, nfsServerPod, serverIP = volume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"}) + _, nfsServerPod, serverIP = e2evolume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"}) pvConfig = e2epv.PersistentVolumeConfig{ NamePrefix: "nfs-", Labels: volLabel, @@ -315,7 +315,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { ginkgo.Context("pods that use multiple volumes", func() { ginkgo.AfterEach(func() { - e2esset.DeleteAllStatefulSets(c, ns) + e2estatefulset.DeleteAllStatefulSets(c, ns) }) ginkgo.It("should be reschedulable [Slow]", func() { @@ -355,13 +355,13 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { spec := makeStatefulSetWithPVCs(ns, writeCmd, mounts, claims, probe) ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), spec, metav1.CreateOptions{}) framework.ExpectNoError(err) - e2esset.WaitForRunningAndReady(c, 1, ss) + e2estatefulset.WaitForRunningAndReady(c, 1, ss) ginkgo.By("Deleting the StatefulSet but not the volumes") // Scale down to 0 first so that the Delete is quick - ss, err = e2esset.Scale(c, ss, 0) + ss, err = e2estatefulset.Scale(c, ss, 0) framework.ExpectNoError(err) 
- e2esset.WaitForStatusReplicas(c, ss, 0) + e2estatefulset.WaitForStatusReplicas(c, ss, 0) err = c.AppsV1().StatefulSets(ns).Delete(context.TODO(), ss.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) @@ -375,7 +375,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { spec = makeStatefulSetWithPVCs(ns, validateCmd, mounts, claims, probe) ss, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), spec, metav1.CreateOptions{}) framework.ExpectNoError(err) - e2esset.WaitForRunningAndReady(c, 1, ss) + e2estatefulset.WaitForRunningAndReady(c, 1, ss) }) }) }) diff --git a/test/e2e/storage/testpatterns/testpattern.go b/test/e2e/storage/testpatterns/testpattern.go index 99b86501504c..f76ca3152182 100644 --- a/test/e2e/storage/testpatterns/testpattern.go +++ b/test/e2e/storage/testpatterns/testpattern.go @@ -19,19 +19,19 @@ package testpatterns import ( v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" - "k8s.io/kubernetes/test/e2e/framework/volume" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" ) const ( // MinFileSize represents minimum file size (1 MiB) for testing - MinFileSize = 1 * volume.MiB + MinFileSize = 1 * e2evolume.MiB // FileSizeSmall represents small file size (1 MiB) for testing - FileSizeSmall = 1 * volume.MiB + FileSizeSmall = 1 * e2evolume.MiB // FileSizeMedium represents medium file size (100 MiB) for testing - FileSizeMedium = 100 * volume.MiB + FileSizeMedium = 100 * e2evolume.MiB // FileSizeLarge represents large file size (1 GiB) for testing - FileSizeLarge = 1 * volume.GiB + FileSizeLarge = 1 * e2evolume.GiB ) // TestVolType represents a volume type to be tested in a TestSuite diff --git a/test/e2e/storage/testsuites/api_test.go b/test/e2e/storage/testsuites/api_test.go index 11f3021aeaaf..53f60579f97c 100644 --- a/test/e2e/storage/testsuites/api_test.go +++ b/test/e2e/storage/testsuites/api_test.go @@ -26,7 +26,7 @@ limitations under the License. 
 package testsuites_test
 
 import (
-	"k8s.io/kubernetes/test/e2e/framework/volume"
+	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 	"k8s.io/kubernetes/test/e2e/storage/testsuites"
 )
@@ -39,7 +39,7 @@ func (f *fakeSuite) GetTestSuiteInfo() testsuites.TestSuiteInfo {
 		Name:               "fake",
 		FeatureTag:         "",
 		TestPatterns:       []testpatterns.TestPattern{testpatterns.DefaultFsDynamicPV},
-		SupportedSizeRange: volume.SizeRange{Min: "1Mi", Max: "1Gi"},
+		SupportedSizeRange: e2evolume.SizeRange{Min: "1Mi", Max: "1Gi"},
 	}
 }
diff --git a/test/e2e/storage/testsuites/base.go b/test/e2e/storage/testsuites/base.go
index 39bf405018e5..22fbda431c4e 100644
--- a/test/e2e/storage/testsuites/base.go
+++ b/test/e2e/storage/testsuites/base.go
@@ -40,10 +40,10 @@ import (
 	"k8s.io/component-base/metrics/testutil"
 	csitrans "k8s.io/csi-translation-lib"
 	"k8s.io/kubernetes/test/e2e/framework"
-	"k8s.io/kubernetes/test/e2e/framework/metrics"
+	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
-	"k8s.io/kubernetes/test/e2e/framework/volume"
+	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
 	"k8s.io/kubernetes/test/e2e/storage/podlogs"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 )
@@ -77,7 +77,7 @@ type TestSuiteInfo struct {
 	Name               string                     // name of the TestSuite
 	FeatureTag         string                     // featureTag for the TestSuite
 	TestPatterns       []testpatterns.TestPattern // Slice of TestPattern for the TestSuite
-	SupportedSizeRange volume.SizeRange           // Size range supported by the test suite
+	SupportedSizeRange e2evolume.SizeRange        // Size range supported by the test suite
 }
 
 func getTestNameStr(suite TestSuite, pattern testpatterns.TestPattern) string {
@@ -181,7 +181,7 @@ type VolumeResource struct {
 
 // CreateVolumeResource constructs a VolumeResource for the current test. It knows how to deal with
 // different test pattern volume types.
-func CreateVolumeResource(driver TestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, testVolumeSizeRange volume.SizeRange) *VolumeResource {
+func CreateVolumeResource(driver TestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, testVolumeSizeRange e2evolume.SizeRange) *VolumeResource {
 	r := VolumeResource{
 		Config:  config,
 		Pattern: pattern,
@@ -423,12 +423,12 @@ func deleteStorageClass(cs clientset.Interface, className string) error {
 // the testsuites package whereas volume.TestConfig is merely
 // an implementation detail. It contains fields that have no effect,
 // which makes it unsuitable for use in the testsuites public API.
-func convertTestConfig(in *PerTestConfig) volume.TestConfig {
+func convertTestConfig(in *PerTestConfig) e2evolume.TestConfig {
 	if in.ServerConfig != nil {
 		return *in.ServerConfig
 	}
 
-	return volume.TestConfig{
+	return e2evolume.TestConfig{
 		Namespace:           in.Framework.Namespace.Name,
 		Prefix:              in.Prefix,
 		ClientNodeSelection: in.ClientNodeSelection,
@@ -439,7 +439,7 @@
 // intersection of the intervals (if it exists) and return the minimum of the intersection
 // to be used as the claim size for the test.
 // if value not set, that means there's no minimum or maximum size limitation and we set default size for it.
-func getSizeRangesIntersection(first volume.SizeRange, second volume.SizeRange) (string, error) { +func getSizeRangesIntersection(first e2evolume.SizeRange, second e2evolume.SizeRange) (string, error) { var firstMin, firstMax, secondMin, secondMax resource.Quantity var err error @@ -575,7 +575,7 @@ func getVolumeOpCounts(c clientset.Interface, pluginName string) opCounts { nodeLimit := 25 - metricsGrabber, err := metrics.NewMetricsGrabber(c, nil, true, false, true, false, false) + metricsGrabber, err := e2emetrics.NewMetricsGrabber(c, nil, true, false, true, false, false) if err != nil { framework.ExpectNoError(err, "Error creating metrics grabber: %v", err) diff --git a/test/e2e/storage/testsuites/base_test.go b/test/e2e/storage/testsuites/base_test.go index fc690fd5e301..1cb4eef32c04 100644 --- a/test/e2e/storage/testsuites/base_test.go +++ b/test/e2e/storage/testsuites/base_test.go @@ -19,7 +19,7 @@ package testsuites import ( "testing" - "k8s.io/kubernetes/test/e2e/framework/volume" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" ) // getSizeRangesIntersection takes two instances of storage size ranges and determines the @@ -43,8 +43,8 @@ import ( // |---------------------------------------------------------------| func Test_getSizeRangesIntersection(t *testing.T) { type args struct { - first volume.SizeRange - second volume.SizeRange + first e2evolume.SizeRange + second e2evolume.SizeRange } tests := []struct { name string @@ -55,10 +55,10 @@ func Test_getSizeRangesIntersection(t *testing.T) { { name: "case #1: first{min=A,max=?} second{min=C,max=?} where C > A ", args: args{ - first: volume.SizeRange{ + first: e2evolume.SizeRange{ Min: "5Gi", }, - second: volume.SizeRange{ + second: e2evolume.SizeRange{ Min: "10Gi", }, }, @@ -68,10 +68,10 @@ func Test_getSizeRangesIntersection(t *testing.T) { { name: "case #1: first{min=A,max=?} second{min=C,max=?} where C < A ", args: args{ - first: volume.SizeRange{ + first: e2evolume.SizeRange{ Min: "5Gi", }, - second: volume.SizeRange{ + second: e2evolume.SizeRange{ Min: "1Gi", }, }, @@ -81,10 +81,10 @@ func Test_getSizeRangesIntersection(t *testing.T) { { name: "case #2: first{min=A,max=?} second{min=C,max=D} where A > D ", args: args{ - first: volume.SizeRange{ + first: e2evolume.SizeRange{ Min: "5Gi", }, - second: volume.SizeRange{ + second: e2evolume.SizeRange{ Min: "1Gi", Max: "4Gi", }, @@ -95,11 +95,11 @@ func Test_getSizeRangesIntersection(t *testing.T) { { name: "case #2: first{min=A,max=?} second{min=C,max=D} where D > A > C ", args: args{ - first: volume.SizeRange{ + first: e2evolume.SizeRange{ Min: "5Gi", Max: "", }, - second: volume.SizeRange{ + second: e2evolume.SizeRange{ Min: "3Gi", Max: "10Gi", }, @@ -110,11 +110,11 @@ func Test_getSizeRangesIntersection(t *testing.T) { { name: "case #2: first{min=A,max=?} second{min=C,max=D} where A < C ", args: args{ - first: volume.SizeRange{ + first: e2evolume.SizeRange{ Min: "5Gi", Max: "", }, - second: volume.SizeRange{ + second: e2evolume.SizeRange{ Min: "6Gi", Max: "10Gi", }, @@ -125,11 +125,11 @@ func Test_getSizeRangesIntersection(t *testing.T) { { name: "case #3: first{min=A,max=?} second{min=?,max=D} where A > D", args: args{ - first: volume.SizeRange{ + first: e2evolume.SizeRange{ Min: "5Gi", Max: "", }, - second: volume.SizeRange{ + second: e2evolume.SizeRange{ Max: "1Gi", }, }, @@ -139,11 +139,11 @@ func Test_getSizeRangesIntersection(t *testing.T) { { name: "case #3: first{min=A,max=?} second{min=?,max=D} where A < D", args: args{ - first: volume.SizeRange{ + first: 
e2evolume.SizeRange{ Min: "5Gi", Max: "", }, - second: volume.SizeRange{ + second: e2evolume.SizeRange{ Max: "10Gi", }, }, @@ -153,11 +153,11 @@ func Test_getSizeRangesIntersection(t *testing.T) { { name: "case #4: first{min=A,max=?} second{min=?,max=?} ", args: args{ - first: volume.SizeRange{ + first: e2evolume.SizeRange{ Min: "5Gi", Max: "", }, - second: volume.SizeRange{}, + second: e2evolume.SizeRange{}, }, want: "5Gi", wantErr: false, @@ -166,11 +166,11 @@ func Test_getSizeRangesIntersection(t *testing.T) { { name: "case #5: first{min=A,max=B} second{min=C,max=?} where C < A ", args: args{ - first: volume.SizeRange{ + first: e2evolume.SizeRange{ Min: "5Gi", Max: "10Gi", }, - second: volume.SizeRange{ + second: e2evolume.SizeRange{ Min: "1Gi", }, }, @@ -180,11 +180,11 @@ func Test_getSizeRangesIntersection(t *testing.T) { { name: "case #5: first{min=A,max=B} second{min=C,max=?} where B > C > A ", args: args{ - first: volume.SizeRange{ + first: e2evolume.SizeRange{ Min: "5Gi", Max: "10Gi", }, - second: volume.SizeRange{ + second: e2evolume.SizeRange{ Min: "6Gi", }, }, @@ -194,11 +194,11 @@ func Test_getSizeRangesIntersection(t *testing.T) { { name: "case #5: first{min=A,max=B} second{min=C,max=?} where C > B ", args: args{ - first: volume.SizeRange{ + first: e2evolume.SizeRange{ Min: "5Gi", Max: "10Gi", }, - second: volume.SizeRange{ + second: e2evolume.SizeRange{ Min: "15Gi", }, }, @@ -208,11 +208,11 @@ func Test_getSizeRangesIntersection(t *testing.T) { { name: "case #6: first{min=A,max=B} second{min=C,max=D} where A < B < C < D", args: args{ - first: volume.SizeRange{ + first: e2evolume.SizeRange{ Min: "5Gi", Max: "6Gi", }, - second: volume.SizeRange{ + second: e2evolume.SizeRange{ Min: "7Gi", Max: "8Gi", }, @@ -223,11 +223,11 @@ func Test_getSizeRangesIntersection(t *testing.T) { { name: "case #6: first{min=A,max=B} second{min=C,max=D} where A < C < B < D ", args: args{ - first: volume.SizeRange{ + first: e2evolume.SizeRange{ Min: "5Gi", Max: "10Gi", }, - second: volume.SizeRange{ + second: e2evolume.SizeRange{ Min: "8Gi", Max: "15Gi", }, @@ -238,11 +238,11 @@ func Test_getSizeRangesIntersection(t *testing.T) { { name: "case #7: first{min=A,max=B} second{min=?,max=D} where D < A", args: args{ - first: volume.SizeRange{ + first: e2evolume.SizeRange{ Min: "5Gi", Max: "10Gi", }, - second: volume.SizeRange{ + second: e2evolume.SizeRange{ Max: "3Gi", }, }, @@ -252,11 +252,11 @@ func Test_getSizeRangesIntersection(t *testing.T) { { name: "case #7: first{min=A,max=B} second{min=?,max=D} where B > D > A", args: args{ - first: volume.SizeRange{ + first: e2evolume.SizeRange{ Min: "5Gi", Max: "10Gi", }, - second: volume.SizeRange{ + second: e2evolume.SizeRange{ Max: "8Gi", }, }, @@ -266,11 +266,11 @@ func Test_getSizeRangesIntersection(t *testing.T) { { name: "case #7: first{min=A,max=B} second{min=?,max=D} where D > B", args: args{ - first: volume.SizeRange{ + first: e2evolume.SizeRange{ Min: "5Gi", Max: "10Gi", }, - second: volume.SizeRange{ + second: e2evolume.SizeRange{ Max: "15Gi", }, }, @@ -280,11 +280,11 @@ func Test_getSizeRangesIntersection(t *testing.T) { { name: "case #8: first{min=A,max=B} second{min=?,max=?}", args: args{ - first: volume.SizeRange{ + first: e2evolume.SizeRange{ Min: "5Gi", Max: "10Gi", }, - second: volume.SizeRange{}, + second: e2evolume.SizeRange{}, }, want: "5Gi", wantErr: false, @@ -292,10 +292,10 @@ func Test_getSizeRangesIntersection(t *testing.T) { { name: "case #9: first{min=?,max=B} second{min=C,max=?} where C > B", args: args{ - first: volume.SizeRange{ 
+ first: e2evolume.SizeRange{ Max: "5Gi", }, - second: volume.SizeRange{ + second: e2evolume.SizeRange{ Min: "10Gi", }, }, @@ -305,10 +305,10 @@ func Test_getSizeRangesIntersection(t *testing.T) { { name: "case #9: first{min=?,max=B} second{min=C,max=?} where C < B", args: args{ - first: volume.SizeRange{ + first: e2evolume.SizeRange{ Max: "10Gi", }, - second: volume.SizeRange{ + second: e2evolume.SizeRange{ Min: "5Gi", }, }, @@ -318,10 +318,10 @@ func Test_getSizeRangesIntersection(t *testing.T) { { name: "case #10: first{min=?,max=B} second{min=C,max=D} where B > D", args: args{ - first: volume.SizeRange{ + first: e2evolume.SizeRange{ Max: "10Gi", }, - second: volume.SizeRange{ + second: e2evolume.SizeRange{ Min: "1Gi", Max: "5Gi", }, @@ -332,10 +332,10 @@ func Test_getSizeRangesIntersection(t *testing.T) { { name: "case #10: first{min=?,max=B} second{min=C,max=D} where C < B < D", args: args{ - first: volume.SizeRange{ + first: e2evolume.SizeRange{ Max: "10Gi", }, - second: volume.SizeRange{ + second: e2evolume.SizeRange{ Min: "5Gi", Max: "15Gi", }, @@ -346,10 +346,10 @@ func Test_getSizeRangesIntersection(t *testing.T) { { name: "case #10: first{min=?,max=B} second{min=C,max=D} where B < C", args: args{ - first: volume.SizeRange{ + first: e2evolume.SizeRange{ Max: "10Gi", }, - second: volume.SizeRange{ + second: e2evolume.SizeRange{ Min: "15Gi", Max: "20Gi", }, @@ -360,10 +360,10 @@ func Test_getSizeRangesIntersection(t *testing.T) { { name: "case #11: first{min=?,max=B} second{min=?,max=D} where D < B", args: args{ - first: volume.SizeRange{ + first: e2evolume.SizeRange{ Max: "10Gi", }, - second: volume.SizeRange{ + second: e2evolume.SizeRange{ Max: "5Gi", }, }, @@ -373,10 +373,10 @@ func Test_getSizeRangesIntersection(t *testing.T) { { name: "case #11: first{min=?,max=B} second{min=?,max=D} where D > B", args: args{ - first: volume.SizeRange{ + first: e2evolume.SizeRange{ Max: "10Gi", }, - second: volume.SizeRange{ + second: e2evolume.SizeRange{ Max: "15Gi", }, }, @@ -386,10 +386,10 @@ func Test_getSizeRangesIntersection(t *testing.T) { { name: "case #12: first{min=?,max=B} second{min=?,max=?} ", args: args{ - first: volume.SizeRange{ + first: e2evolume.SizeRange{ Max: "10Gi", }, - second: volume.SizeRange{}, + second: e2evolume.SizeRange{}, }, want: minValidSize, wantErr: false, @@ -397,8 +397,8 @@ func Test_getSizeRangesIntersection(t *testing.T) { { name: "case #13: first{min=?,max=?} second{min=C,max=?} ", args: args{ - first: volume.SizeRange{}, - second: volume.SizeRange{ + first: e2evolume.SizeRange{}, + second: e2evolume.SizeRange{ Min: "5Gi", }, }, @@ -408,8 +408,8 @@ func Test_getSizeRangesIntersection(t *testing.T) { { name: "case #14: first{min=?,max=?} second{min=C,max=D} where C < D", args: args{ - first: volume.SizeRange{}, - second: volume.SizeRange{ + first: e2evolume.SizeRange{}, + second: e2evolume.SizeRange{ Min: "5Gi", Max: "10Gi", }, @@ -420,8 +420,8 @@ func Test_getSizeRangesIntersection(t *testing.T) { { name: "case #14: first{min=?,max=?} second{min=C,max=D} where C > D", args: args{ - first: volume.SizeRange{}, - second: volume.SizeRange{ + first: e2evolume.SizeRange{}, + second: e2evolume.SizeRange{ Min: "10Gi", Max: "5Gi", }, @@ -432,8 +432,8 @@ func Test_getSizeRangesIntersection(t *testing.T) { { name: "case #14: first{min=?,max=?} second{min=C,max=D} where C = D", args: args{ - first: volume.SizeRange{}, - second: volume.SizeRange{ + first: e2evolume.SizeRange{}, + second: e2evolume.SizeRange{ Min: "1Mi", Max: "1Mi", }, @@ -444,8 +444,8 @@ func 
Test_getSizeRangesIntersection(t *testing.T) { { name: "case #15: first{min=?,max=?} second{min=?,max=D}", args: args{ - first: volume.SizeRange{}, - second: volume.SizeRange{ + first: e2evolume.SizeRange{}, + second: e2evolume.SizeRange{ Max: "10Gi", }, }, @@ -455,8 +455,8 @@ func Test_getSizeRangesIntersection(t *testing.T) { { name: "case #16: first{min=?,max=?} second{min=?,max=?}", args: args{ - first: volume.SizeRange{}, - second: volume.SizeRange{}, + first: e2evolume.SizeRange{}, + second: e2evolume.SizeRange{}, }, want: minValidSize, wantErr: false, diff --git a/test/e2e/storage/testsuites/ephemeral.go b/test/e2e/storage/testsuites/ephemeral.go index b44324182f30..1c7eed0f9c12 100644 --- a/test/e2e/storage/testsuites/ephemeral.go +++ b/test/e2e/storage/testsuites/ephemeral.go @@ -32,7 +32,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" - "k8s.io/kubernetes/test/e2e/framework/volume" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/testpatterns" storageutils "k8s.io/kubernetes/test/e2e/storage/utils" ) @@ -294,8 +294,8 @@ func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command stri Containers: []v1.Container{ { Name: "csi-volume-tester", - Image: volume.GetTestImage(framework.BusyBoxImage), - Command: volume.GenerateScriptCmd(command), + Image: e2evolume.GetTestImage(framework.BusyBoxImage), + Command: e2evolume.GenerateScriptCmd(command), }, }, RestartPolicy: v1.RestartPolicyNever, diff --git a/test/e2e/storage/testsuites/multivolume.go b/test/e2e/storage/testsuites/multivolume.go index d167c82e7370..a7c31ecc68e1 100644 --- a/test/e2e/storage/testsuites/multivolume.go +++ b/test/e2e/storage/testsuites/multivolume.go @@ -32,7 +32,7 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" - "k8s.io/kubernetes/test/e2e/framework/volume" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/utils" ) @@ -54,7 +54,7 @@ func InitMultiVolumeTestSuite() TestSuite { testpatterns.BlockVolModePreprovisionedPV, testpatterns.BlockVolModeDynamicPV, }, - SupportedSizeRange: volume.SizeRange{ + SupportedSizeRange: e2evolume.SizeRange{ Min: "1Mi", }, }, diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go index 3828d1c2a971..7a2e33846346 100644 --- a/test/e2e/storage/testsuites/provisioning.go +++ b/test/e2e/storage/testsuites/provisioning.go @@ -38,7 +38,7 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" - "k8s.io/kubernetes/test/e2e/framework/volume" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/testpatterns" ) @@ -78,7 +78,7 @@ func InitProvisioningTestSuite() TestSuite { testpatterns.BlockVolModeDynamicPV, testpatterns.NtfsDynamicPV, }, - SupportedSizeRange: volume.SizeRange{ + SupportedSizeRange: e2evolume.SizeRange{ Min: "1Mi", }, }, @@ -221,7 +221,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte l.pvc.Spec.DataSource = dataSource l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) { ginkgo.By("checking whether the created volume has the pre-populated data") - 
tests := []volume.Test{ + tests := []e2evolume.Test{ { Volume: *createVolumeSource(claim.Name, false /* readOnly */), Mode: pattern.VolMode, @@ -229,7 +229,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte ExpectedContent: expectedContent, }, } - volume.TestVolumeClientSlow(f, testConfig, nil, "", tests) + e2evolume.TestVolumeClientSlow(f, testConfig, nil, "", tests) } l.testCase.TestDynamicProvisioning() }) @@ -249,7 +249,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte l.pvc.Spec.DataSource = dataSource l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) { ginkgo.By("checking whether the created volume has the pre-populated data") - tests := []volume.Test{ + tests := []e2evolume.Test{ { Volume: *createVolumeSource(claim.Name, false /* readOnly */), Mode: pattern.VolMode, @@ -257,7 +257,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte ExpectedContent: expectedContent, }, } - volume.TestVolumeClientSlow(f, testConfig, nil, "", tests) + e2evolume.TestVolumeClientSlow(f, testConfig, nil, "", tests) } l.testCase.TestDynamicProvisioning() }) @@ -297,7 +297,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte myTestCase.Class = nil // Do not create/delete the storage class in TestDynamicProvisioning, it already exists. myTestCase.PvCheck = func(claim *v1.PersistentVolumeClaim) { ginkgo.By(fmt.Sprintf("checking whether the created volume %d has the pre-populated data", i)) - tests := []volume.Test{ + tests := []e2evolume.Test{ { Volume: *createVolumeSource(claim.Name, false /* readOnly */), Mode: pattern.VolMode, @@ -305,7 +305,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte ExpectedContent: expectedContent, }, } - volume.TestVolumeClientSlow(f, myTestConfig, nil, "", tests) + e2evolume.TestVolumeClientSlow(f, myTestConfig, nil, "", tests) } myTestCase.TestDynamicProvisioning() }(i) @@ -467,7 +467,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent pod = nil // Don't stop twice. // Get a new copy of the PV - volume, err := getBoundPV(client, claim) + e2evolume, err := getBoundPV(client, claim) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("checking the created volume has the correct mount options, is readable and retains data on the same node %q", actualNodeName)) @@ -475,7 +475,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent // We give the second pod the additional responsibility of checking the volume has // been mounted with the PV's mount options, if the PV was provisioned with any - for _, option := range volume.Spec.MountOptions { + for _, option := range e2evolume.Spec.MountOptions { // Get entry, get mount options at 6th word, replace brackets with commas command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option) } @@ -486,7 +486,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent } RunInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, e2epod.NodeSelection{Name: actualNodeName}) - return volume + return e2evolume } // PVMultiNodeCheck checks that a PV retains data when moved between nodes. 
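One consequence of this rename worth flagging for reviewers: in hunks like PVWriteReadSingleNodeCheck above, a local variable is now also called e2evolume, so it shadows the e2evolume package alias for the rest of that function. That is legal Go, but it means such a function body can no longer reach the volume framework package through that name. A minimal sketch of the scoping rule, assuming the framework module is importable; describeVolume and defaultRange are illustrative names, not from this patch:

package example

import (
	"fmt"

	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
)

// At package scope nothing shadows the alias, so it refers to the package.
var defaultRange = e2evolume.SizeRange{Min: "1Mi"}

// Inside describeVolume the parameter reuses the alias name, so e2evolume is
// the fmt.Stringer argument here and the package alias is unreachable until
// the function returns.
func describeVolume(e2evolume fmt.Stringer) string {
	return fmt.Sprintf("volume: %s", e2evolume.String())
}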
@@ -650,8 +650,8 @@ func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command Containers: []v1.Container{ { Name: "volume-tester", - Image: volume.GetTestImage(framework.BusyBoxImage), - Command: volume.GenerateScriptCmd(command), + Image: e2evolume.GetTestImage(framework.BusyBoxImage), + Command: e2evolume.GenerateScriptCmd(command), VolumeMounts: []v1.VolumeMount{ { Name: "my-volume", @@ -708,7 +708,7 @@ func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeCl func prepareSnapshotDataSourceForProvisioning( f *framework.Framework, - config volume.TestConfig, + config e2evolume.TestConfig, client clientset.Interface, dynamicClient dynamic.Interface, initClaim *v1.PersistentVolumeClaim, @@ -729,7 +729,7 @@ func prepareSnapshotDataSourceForProvisioning( framework.ExpectNoError(err) // write namespace to the /mnt/test (= the volume). - tests := []volume.Test{ + tests := []e2evolume.Test{ { Volume: *createVolumeSource(updatedClaim.Name, false /* readOnly */), Mode: mode, @@ -737,7 +737,7 @@ func prepareSnapshotDataSourceForProvisioning( ExpectedContent: injectContent, }, } - volume.InjectContent(f, config, nil, "", tests) + e2evolume.InjectContent(f, config, nil, "", tests) ginkgo.By("[Initialize dataSource]creating a SnapshotClass") snapshotClass, err = dynamicClient.Resource(SnapshotClassGVR).Create(context.TODO(), snapshotClass, metav1.CreateOptions{}) @@ -784,7 +784,7 @@ func prepareSnapshotDataSourceForProvisioning( func preparePVCDataSourceForProvisioning( f *framework.Framework, - config volume.TestConfig, + config e2evolume.TestConfig, client clientset.Interface, source *v1.PersistentVolumeClaim, class *storagev1.StorageClass, @@ -802,7 +802,7 @@ func preparePVCDataSourceForProvisioning( sourcePVC, err := client.CoreV1().PersistentVolumeClaims(source.Namespace).Create(context.TODO(), source, metav1.CreateOptions{}) framework.ExpectNoError(err) - tests := []volume.Test{ + tests := []e2evolume.Test{ { Volume: *createVolumeSource(sourcePVC.Name, false /* readOnly */), Mode: mode, @@ -810,7 +810,7 @@ func preparePVCDataSourceForProvisioning( ExpectedContent: injectContent, }, } - volume.InjectContent(f, config, nil, "", tests) + e2evolume.InjectContent(f, config, nil, "", tests) dataSourceRef := &v1.TypedLocalObjectReference{ Kind: "PersistentVolumeClaim", diff --git a/test/e2e/storage/testsuites/snapshottable.go b/test/e2e/storage/testsuites/snapshottable.go index 34918f413de3..26fa52f1f781 100644 --- a/test/e2e/storage/testsuites/snapshottable.go +++ b/test/e2e/storage/testsuites/snapshottable.go @@ -31,7 +31,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" - "k8s.io/kubernetes/test/e2e/framework/volume" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/testpatterns" ) @@ -64,7 +64,7 @@ func InitSnapshottableTestSuite() TestSuite { TestPatterns: []testpatterns.TestPattern{ testpatterns.DynamicSnapshot, }, - SupportedSizeRange: volume.SizeRange{ + SupportedSizeRange: e2evolume.SizeRange{ Min: "1Mi", }, }, diff --git a/test/e2e/storage/testsuites/subpath.go b/test/e2e/storage/testsuites/subpath.go index 925f6dcc9312..fc6b64412cdf 100644 --- a/test/e2e/storage/testsuites/subpath.go +++ b/test/e2e/storage/testsuites/subpath.go @@ -37,7 +37,7 @@ import ( e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper 
"k8s.io/kubernetes/test/e2e/framework/skipper" - "k8s.io/kubernetes/test/e2e/framework/volume" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" @@ -70,7 +70,7 @@ func InitSubPathTestSuite() TestSuite { testpatterns.DefaultFsDynamicPV, testpatterns.NtfsDynamicPV, }, - SupportedSizeRange: volume.SizeRange{ + SupportedSizeRange: e2evolume.SizeRange{ Min: "1Mi", }, }, @@ -441,8 +441,8 @@ func (s *subPathTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T defer cleanup() // Change volume container to busybox so we can exec later - l.pod.Spec.Containers[1].Image = volume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox)) - l.pod.Spec.Containers[1].Command = volume.GenerateScriptCmd("sleep 100000") + l.pod.Spec.Containers[1].Image = e2evolume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox)) + l.pod.Spec.Containers[1].Command = e2evolume.GenerateScriptCmd("sleep 100000") ginkgo.By(fmt.Sprintf("Creating pod %s", l.pod.Name)) removeUnusedContainers(l.pod) @@ -516,7 +516,7 @@ func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source * InitContainers: []v1.Container{ { Name: fmt.Sprintf("init-volume-%s", suffix), - Image: volume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox)), + Image: e2evolume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox)), VolumeMounts: []v1.VolumeMount{ { Name: volumeName, @@ -527,7 +527,7 @@ func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source * MountPath: probeVolumePath, }, }, - SecurityContext: volume.GenerateSecurityContext(privilegedSecurityContext), + SecurityContext: e2evolume.GenerateSecurityContext(privilegedSecurityContext), }, { Name: fmt.Sprintf("test-init-subpath-%s", suffix), @@ -543,7 +543,7 @@ func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source * MountPath: probeVolumePath, }, }, - SecurityContext: volume.GenerateSecurityContext(privilegedSecurityContext), + SecurityContext: e2evolume.GenerateSecurityContext(privilegedSecurityContext), }, { Name: fmt.Sprintf("test-init-volume-%s", suffix), @@ -558,7 +558,7 @@ func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source * MountPath: probeVolumePath, }, }, - SecurityContext: volume.GenerateSecurityContext(privilegedSecurityContext), + SecurityContext: e2evolume.GenerateSecurityContext(privilegedSecurityContext), }, }, Containers: []v1.Container{ @@ -576,7 +576,7 @@ func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source * MountPath: probeVolumePath, }, }, - SecurityContext: volume.GenerateSecurityContext(privilegedSecurityContext), + SecurityContext: e2evolume.GenerateSecurityContext(privilegedSecurityContext), }, { Name: fmt.Sprintf("test-container-volume-%s", suffix), @@ -591,7 +591,7 @@ func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source * MountPath: probeVolumePath, }, }, - SecurityContext: volume.GenerateSecurityContext(privilegedSecurityContext), + SecurityContext: e2evolume.GenerateSecurityContext(privilegedSecurityContext), }, }, RestartPolicy: v1.RestartPolicyNever, @@ -608,7 +608,7 @@ func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source * }, }, }, - SecurityContext: volume.GeneratePodSecurityContext(nil, seLinuxOptions), + SecurityContext: e2evolume.GeneratePodSecurityContext(nil, seLinuxOptions), }, } } @@ -651,8 +651,8 @@ func volumeFormatPod(f 
*framework.Framework, volumeSource *v1.VolumeSource) *v1. Containers: []v1.Container{ { Name: fmt.Sprintf("init-volume-%s", f.Namespace.Name), - Image: volume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox)), - Command: volume.GenerateScriptCmd("echo nothing"), + Image: e2evolume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox)), + Command: e2evolume.GenerateScriptCmd("echo nothing"), VolumeMounts: []v1.VolumeMount{ { Name: volumeName, @@ -673,7 +673,7 @@ func volumeFormatPod(f *framework.Framework, volumeSource *v1.VolumeSource) *v1. } func setInitCommand(pod *v1.Pod, command string) { - pod.Spec.InitContainers[0].Command = volume.GenerateScriptCmd(command) + pod.Spec.InitContainers[0].Command = e2evolume.GenerateScriptCmd(command) } func setWriteCommand(file string, container *v1.Container) { @@ -796,10 +796,10 @@ func waitForPodSubpathError(f *framework.Framework, pod *v1.Pod, allowContainerT func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure - pod.Spec.Containers[0].Image = volume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox)) - pod.Spec.Containers[0].Command = volume.GenerateScriptCmd("sleep 100000") - pod.Spec.Containers[1].Image = volume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox)) - pod.Spec.Containers[1].Command = volume.GenerateScriptCmd("sleep 100000") + pod.Spec.Containers[0].Image = e2evolume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox)) + pod.Spec.Containers[0].Command = e2evolume.GenerateScriptCmd("sleep 100000") + pod.Spec.Containers[1].Image = e2evolume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox)) + pod.Spec.Containers[1].Command = e2evolume.GenerateScriptCmd("sleep 100000") // Add liveness probe to subpath container pod.Spec.Containers[0].LivenessProbe = &v1.Probe{ Handler: v1.Handler{ @@ -905,10 +905,10 @@ func testSubpathReconstruction(f *framework.Framework, hostExec utils.HostExec, } // Change to busybox - pod.Spec.Containers[0].Image = volume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox)) - pod.Spec.Containers[0].Command = volume.GenerateScriptCmd("sleep 100000") - pod.Spec.Containers[1].Image = volume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox)) - pod.Spec.Containers[1].Command = volume.GenerateScriptCmd("sleep 100000") + pod.Spec.Containers[0].Image = e2evolume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox)) + pod.Spec.Containers[0].Command = e2evolume.GenerateScriptCmd("sleep 100000") + pod.Spec.Containers[1].Image = e2evolume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox)) + pod.Spec.Containers[1].Command = e2evolume.GenerateScriptCmd("sleep 100000") // If grace period is too short, then there is not enough time for the volume // manager to cleanup the volumes diff --git a/test/e2e/storage/testsuites/testdriver.go b/test/e2e/storage/testsuites/testdriver.go index 4db7d27a29ab..0cfce354fc09 100644 --- a/test/e2e/storage/testsuites/testdriver.go +++ b/test/e2e/storage/testsuites/testdriver.go @@ -23,7 +23,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" - "k8s.io/kubernetes/test/e2e/framework/volume" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/testpatterns" ) @@ -170,7 +170,7 @@ type DriverInfo struct { // Maximum single file size supported by this driver MaxFileSize int64 // The range of disk size supported by this driver - SupportedSizeRange volume.SizeRange + 
SupportedSizeRange e2evolume.SizeRange // Map of string for supported fs type SupportedFsType sets.String // Map of string for supported mount option @@ -214,7 +214,7 @@ type PerTestConfig struct { // Some test drivers initialize a storage server. This is // the configuration that then has to be used to run tests. // The values above are ignored for such tests. - ServerConfig *volume.TestConfig + ServerConfig *e2evolume.TestConfig } // GetUniqueDriverName returns unique driver name that can be used parallelly in tests diff --git a/test/e2e/storage/testsuites/volume_expand.go b/test/e2e/storage/testsuites/volume_expand.go index 2fb9a496bdc5..e2656790f184 100644 --- a/test/e2e/storage/testsuites/volume_expand.go +++ b/test/e2e/storage/testsuites/volume_expand.go @@ -34,7 +34,7 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" - "k8s.io/kubernetes/test/e2e/framework/volume" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/testpatterns" ) @@ -63,7 +63,7 @@ func InitVolumeExpandTestSuite() TestSuite { testpatterns.DefaultFsDynamicPVAllowExpansion, testpatterns.BlockVolModeDynamicPVAllowExpansion, }, - SupportedSizeRange: volume.SizeRange{ + SupportedSizeRange: e2evolume.SizeRange{ Min: "1Mi", }, }, diff --git a/test/e2e/storage/testsuites/volume_io.go b/test/e2e/storage/testsuites/volume_io.go index ab79ba3198dc..ce916cbdbdca 100644 --- a/test/e2e/storage/testsuites/volume_io.go +++ b/test/e2e/storage/testsuites/volume_io.go @@ -39,7 +39,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" - "k8s.io/kubernetes/test/e2e/framework/volume" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/utils" ) @@ -71,7 +71,7 @@ func InitVolumeIOTestSuite() TestSuite { testpatterns.DefaultFsPreprovisionedPV, testpatterns.DefaultFsDynamicPV, }, - SupportedSizeRange: volume.SizeRange{ + SupportedSizeRange: e2evolume.SizeRange{ Min: "1Mi", }, }, @@ -180,7 +180,7 @@ func createFileSizes(maxFileSize int64) []int64 { } // Return the plugin's client pod spec. Use an InitContainer to setup the file i/o test env. -func makePodSpec(config volume.TestConfig, initCmd string, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext) *v1.Pod { +func makePodSpec(config e2evolume.TestConfig, initCmd string, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext) *v1.Pod { var gracePeriod int64 = 1 volName := fmt.Sprintf("io-volume-%s", config.Namespace) pod := &v1.Pod{ @@ -305,7 +305,7 @@ func deleteFile(f *framework.Framework, pod *v1.Pod, fpath string) { // Note: nil can be passed for the podSecContext parm, in which case it is ignored. // Note: `fsizes` values are enforced to each be at least `MinFileSize` and a multiple of `MinFileSize` // bytes. 
-func testVolumeIO(f *framework.Framework, cs clientset.Interface, config volume.TestConfig, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext, file string, fsizes []int64) (err error) { +func testVolumeIO(f *framework.Framework, cs clientset.Interface, config e2evolume.TestConfig, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext, file string, fsizes []int64) (err error) { ddInput := filepath.Join(mountPath, fmt.Sprintf("%s-%s-dd_if", config.Prefix, config.Namespace)) writeBlk := strings.Repeat("abcdefghijklmnopqrstuvwxyz123456", 32) // 1KiB value loopCnt := testpatterns.MinFileSize / int64(len(writeBlk)) @@ -333,7 +333,7 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config volume. } } else { framework.Logf("sleeping a bit so kubelet can unmount and detach the volume") - time.Sleep(volume.PodCleanupTimeout) + time.Sleep(e2evolume.PodCleanupTimeout) } }() diff --git a/test/e2e/storage/testsuites/volumemode.go b/test/e2e/storage/testsuites/volumemode.go index c035878f7ef8..5925327094ee 100644 --- a/test/e2e/storage/testsuites/volumemode.go +++ b/test/e2e/storage/testsuites/volumemode.go @@ -38,7 +38,7 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" - "k8s.io/kubernetes/test/e2e/framework/volume" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/utils" ) @@ -65,7 +65,7 @@ func InitVolumeModeTestSuite() TestSuite { testpatterns.BlockVolModePreprovisionedPV, testpatterns.BlockVolModeDynamicPV, }, - SupportedSizeRange: volume.SizeRange{ + SupportedSizeRange: e2evolume.SizeRange{ Min: "1Mi", }, }, diff --git a/test/e2e/storage/testsuites/volumes.go b/test/e2e/storage/testsuites/volumes.go index 9bf41a655a34..3178da169e49 100644 --- a/test/e2e/storage/testsuites/volumes.go +++ b/test/e2e/storage/testsuites/volumes.go @@ -33,7 +33,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" - "k8s.io/kubernetes/test/e2e/framework/volume" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/testpatterns" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -74,7 +74,7 @@ func InitVolumesTestSuite() TestSuite { testpatterns.BlockVolModePreprovisionedPV, testpatterns.BlockVolModeDynamicPV, }, - SupportedSizeRange: volume.SizeRange{ + SupportedSizeRange: e2evolume.SizeRange{ Min: "1Mi", }, }, @@ -156,11 +156,11 @@ func (t *volumesTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T init() defer func() { - volume.TestServerCleanup(f, convertTestConfig(l.config)) + e2evolume.TestServerCleanup(f, convertTestConfig(l.config)) cleanup() }() - tests := []volume.Test{ + tests := []e2evolume.Test{ { Volume: *l.resource.VolSource, Mode: pattern.VolMode, @@ -180,9 +180,9 @@ func (t *volumesTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T // local), plugin skips setting fsGroup if volume is already mounted // and we don't have reliable way to detect volumes are unmounted or // not before starting the second pod. 
- volume.InjectContent(f, config, fsGroup, pattern.FsType, tests) + e2evolume.InjectContent(f, config, fsGroup, pattern.FsType, tests) if driver.GetDriverInfo().Capabilities[CapPersistence] { - volume.TestVolumeClient(f, config, fsGroup, pattern.FsType, tests) + e2evolume.TestVolumeClient(f, config, fsGroup, pattern.FsType, tests) } else { ginkgo.By("Skipping persistence check for non-persistent volume") } @@ -228,7 +228,7 @@ func testScriptInPod( Containers: []v1.Container{ { Name: fmt.Sprintf("exec-container-%s", suffix), - Image: volume.GetTestImage(imageutils.GetE2EImage(imageutils.Nginx)), + Image: e2evolume.GetTestImage(imageutils.GetE2EImage(imageutils.Nginx)), Command: command, VolumeMounts: []v1.VolumeMount{ { diff --git a/test/e2e/storage/utils/create.go b/test/e2e/storage/utils/create.go index b76ab8d0b647..06daf34846c7 100644 --- a/test/e2e/storage/utils/create.go +++ b/test/e2e/storage/utils/create.go @@ -22,8 +22,6 @@ import ( "encoding/json" "fmt" - imageutils "k8s.io/kubernetes/test/utils/image" - "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" @@ -37,7 +35,8 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/testfiles" + e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles" + imageutils "k8s.io/kubernetes/test/utils/image" ) // LoadFromManifests loads .yaml or .json manifest files and returns @@ -80,7 +79,7 @@ func LoadFromManifests(files ...string) ([]interface{}, error) { func visitManifests(cb func([]byte) error, files ...string) error { for _, fileName := range files { - data, err := testfiles.Read(fileName) + data, err := e2etestfiles.Read(fileName) if err != nil { framework.Failf("reading manifest file: %v", err) } diff --git a/test/e2e/storage/volume_metrics.go b/test/e2e/storage/volume_metrics.go index 9b7b320afb68..894c8a5e45ee 100644 --- a/test/e2e/storage/volume_metrics.go +++ b/test/e2e/storage/volume_metrics.go @@ -32,7 +32,7 @@ import ( "k8s.io/component-base/metrics/testutil" kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics" "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/metrics" + e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" @@ -47,7 +47,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { c clientset.Interface ns string pvc *v1.PersistentVolumeClaim - metricsGrabber *metrics.Grabber + metricsGrabber *e2emetrics.Grabber invalidSc *storagev1.StorageClass defaultScName string ) @@ -72,7 +72,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { VolumeMode: &test.VolumeMode, }, ns) - metricsGrabber, err = metrics.NewMetricsGrabber(c, nil, true, false, true, false, false) + metricsGrabber, err = e2emetrics.NewMetricsGrabber(c, nil, true, false, true, false, false) if err != nil { framework.Failf("Error creating metrics grabber : %v", err) @@ -231,7 +231,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { kubeletKeyName := fmt.Sprintf("%s_%s", kubeletmetrics.KubeletSubsystem, key) // Poll kubelet metrics waiting for the volume to be picked up // by the volume stats collector - var kubeMetrics metrics.KubeletMetrics + var kubeMetrics e2emetrics.KubeletMetrics waitErr := wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) { framework.Logf("Grabbing Kubelet metrics") // Grab kubelet metrics from 
the node the pod was scheduled on @@ -405,7 +405,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { } pvcConfig = e2epv.PersistentVolumeClaimConfig{StorageClassName: &className} - metrics = []struct { + e2emetrics = []struct { name string dimension string }{ @@ -431,7 +431,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { controllerMetrics, err := metricsGrabber.GrabFromControllerManager() framework.ExpectNoError(err, "Error getting c-m metricValues: %v", err) - for i, metric := range metrics { + for i, metric := range e2emetrics { expectValues := metricValues[i] if expectValues == nil { expectValues = make(map[string]int64) @@ -457,7 +457,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { // Initializes all original metric values. controllerMetrics, err := metricsGrabber.GrabFromControllerManager() framework.ExpectNoError(err, "Error getting c-m metricValues: %v", err) - for _, metric := range metrics { + for _, metric := range e2emetrics { originMetricValues = append(originMetricValues, testutil.GetMetricValuesForLabel(testutil.Metrics(controllerMetrics), metric.name, metric.dimension)) } @@ -528,7 +528,7 @@ func newStorageControllerMetrics() *storageControllerMetrics { } } -func waitForDetachAndGrabMetrics(oldMetrics *storageControllerMetrics, metricsGrabber *metrics.Grabber, pluginName string) *storageControllerMetrics { +func waitForDetachAndGrabMetrics(oldMetrics *storageControllerMetrics, metricsGrabber *e2emetrics.Grabber, pluginName string) *storageControllerMetrics { backoff := wait.Backoff{ Duration: 10 * time.Second, Factor: 1.2, @@ -610,7 +610,7 @@ func verifyMetricCount(oldMetrics, newMetrics *storageControllerMetrics, metricN gomega.Expect(newStatusCount).To(gomega.BeNumerically(">", oldStatusCount), "New status count %d should be more than old count %d for action %s", newStatusCount, oldStatusCount, metricName) } -func getControllerStorageMetrics(ms metrics.ControllerManagerMetrics, pluginName string) *storageControllerMetrics { +func getControllerStorageMetrics(ms e2emetrics.ControllerManagerMetrics, pluginName string) *storageControllerMetrics { result := newStorageControllerMetrics() for method, samples := range ms { @@ -654,7 +654,7 @@ func getControllerStorageMetrics(ms metrics.ControllerManagerMetrics, pluginName // Finds the sample in the specified metric from `KubeletMetrics` tagged with // the specified namespace and pvc name -func findVolumeStatMetric(metricKeyName string, namespace string, pvcName string, kubeletMetrics metrics.KubeletMetrics) bool { +func findVolumeStatMetric(metricKeyName string, namespace string, pvcName string, kubeletMetrics e2emetrics.KubeletMetrics) bool { found := false errCount := 0 framework.Logf("Looking for sample in metric `%s` tagged with namespace `%s`, PVC `%s`", metricKeyName, namespace, pvcName) @@ -683,7 +683,7 @@ func findVolumeStatMetric(metricKeyName string, namespace string, pvcName string } // Wait for the count of a pv controller's metric specified by metricName and dimension bigger than zero. 
-func waitForPVControllerSync(metricsGrabber *metrics.Grabber, metricName, dimension string) { +func waitForPVControllerSync(metricsGrabber *e2emetrics.Grabber, metricName, dimension string) { backoff := wait.Backoff{ Duration: 10 * time.Second, Factor: 1.2, @@ -728,7 +728,7 @@ func getStatesMetrics(metricKey string, givenMetrics testutil.Metrics) map[strin return states } -func waitForADControllerStatesMetrics(metricsGrabber *metrics.Grabber, metricName string, dimensions []string, stateNames []string) { +func waitForADControllerStatesMetrics(metricsGrabber *e2emetrics.Grabber, metricName string, dimensions []string, stateNames []string) { backoff := wait.Backoff{ Duration: 10 * time.Second, Factor: 1.2, diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go index 680b9841db10..61ca473860df 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -45,7 +45,7 @@ import ( clientset "k8s.io/client-go/kubernetes" storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util" "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/auth" + e2eauth "k8s.io/kubernetes/test/e2e/framework/auth" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/framework/providers/gce" @@ -603,7 +603,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { Name: serviceAccountName, } - err := auth.BindClusterRole(c.RbacV1(), "system:persistent-volume-provisioner", ns, subject) + err := e2eauth.BindClusterRole(c.RbacV1(), "system:persistent-volume-provisioner", ns, subject) framework.ExpectNoError(err) roleName := "leader-locking-nfs-provisioner" @@ -619,10 +619,10 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create leader-locking role") - err = auth.BindRoleInNamespace(c.RbacV1(), roleName, ns, subject) + err = e2eauth.BindRoleInNamespace(c.RbacV1(), roleName, ns, subject) framework.ExpectNoError(err) - err = auth.WaitForAuthorizationUpdate(c.AuthorizationV1(), + err = e2eauth.WaitForAuthorizationUpdate(c.AuthorizationV1(), serviceaccount.MakeUsername(ns, serviceAccountName), "", "get", schema.GroupResource{Group: "storage.k8s.io", Resource: "storageclasses"}, true) framework.ExpectNoError(err, "Failed to update authorization") diff --git a/test/e2e/storage/volumes.go b/test/e2e/storage/volumes.go index 7595514eabe6..cee369576085 100644 --- a/test/e2e/storage/volumes.go +++ b/test/e2e/storage/volumes.go @@ -26,7 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/volume" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/utils" ) @@ -46,7 +46,7 @@ var _ = utils.SIGDescribe("Volumes", func() { ginkgo.Describe("ConfigMap", func() { ginkgo.It("should be mountable", func() { - config := volume.TestConfig{ + config := e2evolume.TestConfig{ Namespace: namespace.Name, Prefix: "configmap", } @@ -72,7 +72,7 @@ var _ = utils.SIGDescribe("Volumes", func() { }() // Test one ConfigMap mounted several times to test #28502 - tests := []volume.Test{ + tests := []e2evolume.Test{ { Volume: v1.VolumeSource{ ConfigMap: &v1.ConfigMapVolumeSource{ @@ -108,7 +108,7 @@ var _ = utils.SIGDescribe("Volumes", func() { ExpectedContent: "this is the second file", }, } - volume.TestVolumeClient(f, config, 
nil, "" /* fsType */, tests) + e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests) }) }) }) diff --git a/test/e2e/storage/vsphere/vsphere_statefulsets.go b/test/e2e/storage/vsphere/vsphere_statefulsets.go index d4b2ccd774e6..4ac7fca1818c 100644 --- a/test/e2e/storage/vsphere/vsphere_statefulsets.go +++ b/test/e2e/storage/vsphere/vsphere_statefulsets.go @@ -28,7 +28,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" - e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset" + e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset" "k8s.io/kubernetes/test/e2e/storage/utils" ) @@ -68,7 +68,7 @@ var _ = utils.SIGDescribe("vsphere statefulset [Feature:vsphere]", func() { }) ginkgo.AfterEach(func() { framework.Logf("Deleting all statefulset in namespace: %v", namespace) - e2esset.DeleteAllStatefulSets(client, namespace) + e2estatefulset.DeleteAllStatefulSets(client, namespace) }) ginkgo.It("vsphere statefulset testing", func() { @@ -82,12 +82,12 @@ var _ = utils.SIGDescribe("vsphere statefulset [Feature:vsphere]", func() { ginkgo.By("Creating statefulset") - statefulset := e2esset.CreateStatefulSet(client, manifestPath, namespace) + statefulset := e2estatefulset.CreateStatefulSet(client, manifestPath, namespace) replicas := *(statefulset.Spec.Replicas) // Waiting for pods status to be Ready - e2esset.WaitForStatusReadyReplicas(client, statefulset, replicas) - framework.ExpectNoError(e2esset.CheckMount(client, statefulset, mountPath)) - ssPodsBeforeScaleDown := e2esset.GetPodList(client, statefulset) + e2estatefulset.WaitForStatusReadyReplicas(client, statefulset, replicas) + framework.ExpectNoError(e2estatefulset.CheckMount(client, statefulset, mountPath)) + ssPodsBeforeScaleDown := e2estatefulset.GetPodList(client, statefulset) gomega.Expect(ssPodsBeforeScaleDown.Items).NotTo(gomega.BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) framework.ExpectEqual(len(ssPodsBeforeScaleDown.Items), int(replicas), "Number of Pods in the statefulset should match with number of replicas") @@ -105,9 +105,9 @@ var _ = utils.SIGDescribe("vsphere statefulset [Feature:vsphere]", func() { } ginkgo.By(fmt.Sprintf("Scaling down statefulsets to number of Replica: %v", replicas-1)) - _, scaledownErr := e2esset.Scale(client, statefulset, replicas-1) + _, scaledownErr := e2estatefulset.Scale(client, statefulset, replicas-1) framework.ExpectNoError(scaledownErr) - e2esset.WaitForStatusReadyReplicas(client, statefulset, replicas-1) + e2estatefulset.WaitForStatusReadyReplicas(client, statefulset, replicas-1) // After scale down, verify vsphere volumes are detached from deleted pods ginkgo.By("Verify Volumes are detached from Nodes after Statefulsets is scaled down") @@ -126,12 +126,12 @@ var _ = utils.SIGDescribe("vsphere statefulset [Feature:vsphere]", func() { } ginkgo.By(fmt.Sprintf("Scaling up statefulsets to number of Replica: %v", replicas)) - _, scaleupErr := e2esset.Scale(client, statefulset, replicas) + _, scaleupErr := e2estatefulset.Scale(client, statefulset, replicas) framework.ExpectNoError(scaleupErr) - e2esset.WaitForStatusReplicas(client, statefulset, replicas) - e2esset.WaitForStatusReadyReplicas(client, statefulset, replicas) + e2estatefulset.WaitForStatusReplicas(client, statefulset, replicas) + e2estatefulset.WaitForStatusReadyReplicas(client, statefulset, replicas) - ssPodsAfterScaleUp := e2esset.GetPodList(client, 
statefulset) + ssPodsAfterScaleUp := e2estatefulset.GetPodList(client, statefulset) gomega.Expect(ssPodsAfterScaleUp.Items).NotTo(gomega.BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) framework.ExpectEqual(len(ssPodsAfterScaleUp.Items), int(replicas), "Number of Pods in the statefulset should match with number of replicas") diff --git a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go index b400a0d3d1da..e294714287d6 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go +++ b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go @@ -32,7 +32,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" - e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment" + e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" @@ -97,12 +97,12 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", volumePath := pvs[0].Spec.VsphereVolume.VolumePath ginkgo.By("Creating a Deployment") - deployment, err := e2edeploy.CreateDeployment(client, int32(1), map[string]string{"test": "app"}, nil, namespace, pvclaims, "") + deployment, err := e2edeployment.CreateDeployment(client, int32(1), map[string]string{"test": "app"}, nil, namespace, pvclaims, "") framework.ExpectNoError(err, fmt.Sprintf("Failed to create Deployment with err: %v", err)) defer client.AppsV1().Deployments(namespace).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{}) ginkgo.By("Get pod from the deployment") - podList, err := e2edeploy.GetPodsForDeployment(client, deployment) + podList, err := e2edeployment.GetPodsForDeployment(client, deployment) framework.ExpectNoError(err, fmt.Sprintf("Failed to get pod from the deployment with err: %v", err)) gomega.Expect(podList.Items).NotTo(gomega.BeEmpty()) pod := podList.Items[0] @@ -179,7 +179,7 @@ func waitForPodToFailover(client clientset.Interface, deployment *appsv1.Deploym // getNodeForDeployment returns node name for the Deployment func getNodeForDeployment(client clientset.Interface, deployment *appsv1.Deployment) (string, error) { - podList, err := e2edeploy.GetPodsForDeployment(client, deployment) + podList, err := e2edeployment.GetPodsForDeployment(client, deployment) if err != nil { return "", err } diff --git a/test/e2e/upgrades/apps/deployments.go b/test/e2e/upgrades/apps/deployments.go index 7d885b26fefb..f03032c9af06 100644 --- a/test/e2e/upgrades/apps/deployments.go +++ b/test/e2e/upgrades/apps/deployments.go @@ -28,7 +28,7 @@ import ( clientset "k8s.io/client-go/kubernetes" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" "k8s.io/kubernetes/test/e2e/framework" - e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment" + e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment" "k8s.io/kubernetes/test/e2e/upgrades" "github.com/onsi/ginkgo" @@ -66,12 +66,12 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) { rsClient := c.AppsV1().ReplicaSets(ns) ginkgo.By(fmt.Sprintf("Creating a deployment %q with 1 replica in namespace %q", deploymentName, ns)) - d := e2edeploy.NewDeployment(deploymentName, int32(1), map[string]string{"test": "upgrade"}, "nginx", nginxImage, appsv1.RollingUpdateDeploymentStrategyType) + d := 
e2edeployment.NewDeployment(deploymentName, int32(1), map[string]string{"test": "upgrade"}, "nginx", nginxImage, appsv1.RollingUpdateDeploymentStrategyType) deployment, err := deploymentClient.Create(context.TODO(), d, metav1.CreateOptions{}) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName)) - framework.ExpectNoError(e2edeploy.WaitForDeploymentComplete(c, deployment)) + framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(c, deployment)) ginkgo.By(fmt.Sprintf("Getting replicaset revision 1 of deployment %q", deploymentName)) rsSelector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector) @@ -87,13 +87,13 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) { // Trigger a new rollout so that we have some history. ginkgo.By(fmt.Sprintf("Triggering a new rollout for deployment %q", deploymentName)) - deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *appsv1.Deployment) { + deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *appsv1.Deployment) { update.Spec.Template.Spec.Containers[0].Name = "updated-name" }) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName)) - framework.ExpectNoError(e2edeploy.WaitForDeploymentComplete(c, deployment)) + framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(c, deployment)) ginkgo.By(fmt.Sprintf("Getting replicasets revision 1 and 2 of deployment %q", deploymentName)) rsList, err = rsClient.List(context.TODO(), metav1.ListOptions{LabelSelector: rsSelector.String()}) @@ -155,17 +155,17 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{ framework.ExpectEqual(deployment.Annotations[deploymentutil.RevisionAnnotation], "2") ginkgo.By(fmt.Sprintf("Waiting for deployment %q to complete adoption", deploymentName)) - framework.ExpectNoError(e2edeploy.WaitForDeploymentComplete(c, deployment)) + framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(c, deployment)) // Verify the upgraded deployment is active by scaling up the deployment by 1 ginkgo.By(fmt.Sprintf("Scaling up replicaset of deployment %q by 1", deploymentName)) - deploymentWithUpdatedReplicas, err := e2edeploy.UpdateDeploymentWithRetries(c, ns, deploymentName, func(deployment *appsv1.Deployment) { + deploymentWithUpdatedReplicas, err := e2edeployment.UpdateDeploymentWithRetries(c, ns, deploymentName, func(deployment *appsv1.Deployment) { *deployment.Spec.Replicas = *deployment.Spec.Replicas + 1 }) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Waiting for deployment %q to complete after scaling", deploymentName)) - framework.ExpectNoError(e2edeploy.WaitForDeploymentComplete(c, deploymentWithUpdatedReplicas)) + framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(c, deploymentWithUpdatedReplicas)) } // Teardown cleans up any remaining resources. 
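The deployments.go hunks above complete the e2edeploy-to-e2edeployment rename, and the surrounding files apply the same convention to the statefulset, replicaset, and volume helpers. The illustrative file below is not part of the patch; it is a minimal sketch of the naming convention the series converges on, assuming the package paths shown in the hunks: each framework subpackage is aliased as e2e<package> at the import site so call sites are unambiguous and do not collide with local identifiers.

```go
// Illustrative only; shows the e2e<package> alias convention.
package conventions

import (
	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
	e2ereplicaset "k8s.io/kubernetes/test/e2e/framework/replicaset"
	e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
)

// Referencing helpers used elsewhere in this series keeps the
// illustration compiling despite Go's unused-import rule.
var (
	_ = e2edeployment.NewDeployment
	_ = e2ereplicaset.WaitForReadyReplicaSet
	_ = e2estatefulset.DeleteAllStatefulSets
	_ e2evolume.SizeRange
)
```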
diff --git a/test/e2e/upgrades/apps/replicasets.go b/test/e2e/upgrades/apps/replicasets.go index 76ea1d49e0ba..f38d6a967400 100644 --- a/test/e2e/upgrades/apps/replicasets.go +++ b/test/e2e/upgrades/apps/replicasets.go @@ -26,7 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/replicaset" + e2ereplicaset "k8s.io/kubernetes/test/e2e/framework/replicaset" "k8s.io/kubernetes/test/e2e/upgrades" "github.com/onsi/ginkgo" @@ -63,7 +63,7 @@ func (r *ReplicaSetUpgradeTest) Setup(f *framework.Framework) { framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready", rsName)) - framework.ExpectNoError(replicaset.WaitForReadyReplicaSet(c, ns, rsName)) + framework.ExpectNoError(e2ereplicaset.WaitForReadyReplicaSet(c, ns, rsName)) r.UID = rs.UID } @@ -87,17 +87,17 @@ func (r *ReplicaSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{ } ginkgo.By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready after upgrade", rsName)) - framework.ExpectNoError(replicaset.WaitForReadyReplicaSet(c, ns, rsName)) + framework.ExpectNoError(e2ereplicaset.WaitForReadyReplicaSet(c, ns, rsName)) // Verify the upgraded RS is active by scaling up the RS to scaleNum and ensuring all pods are Ready ginkgo.By(fmt.Sprintf("Scaling up replicaset %s to %d", rsName, scaleNum)) - _, err = replicaset.UpdateReplicaSetWithRetries(c, ns, rsName, func(rs *appsv1.ReplicaSet) { + _, err = e2ereplicaset.UpdateReplicaSetWithRetries(c, ns, rsName, func(rs *appsv1.ReplicaSet) { *rs.Spec.Replicas = scaleNum }) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready after scaling", rsName)) - framework.ExpectNoError(replicaset.WaitForReadyReplicaSet(c, ns, rsName)) + framework.ExpectNoError(e2ereplicaset.WaitForReadyReplicaSet(c, ns, rsName)) } // Teardown cleans up any remaining resources. 
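Both upgrade tests above lean on Update*WithRetries helpers that take a mutation closure, e.g. `e2ereplicaset.UpdateReplicaSetWithRetries(c, ns, rsName, func(rs *appsv1.ReplicaSet) { *rs.Spec.Replicas = scaleNum })`. The sketch below illustrates the optimistic-concurrency pattern such helpers implement — re-read the latest copy, apply the caller's mutation, write, and retry only on conflict. The types and the errConflict sentinel are stand-ins for illustration, not the framework's actual implementation.

```go
package main

import (
	"errors"
	"fmt"
)

var errConflict = errors.New("conflict: object changed, re-read and retry")

type replicaSet struct{ replicas int32 }

// updateWithRetries sketches the retry loop: fetch the latest copy,
// apply the caller's mutation, attempt the write, and retry only when
// the write fails because a concurrent writer updated the object first.
func updateWithRetries(fetch func() replicaSet, write func(replicaSet) error, mutate func(*replicaSet)) (replicaSet, error) {
	var last error
	for attempt := 0; attempt < 5; attempt++ {
		rs := fetch()
		mutate(&rs) // e.g. rs.replicas = scaleNum, mirroring the closure above
		if last = write(rs); last == nil {
			return rs, nil
		}
		if !errors.Is(last, errConflict) {
			return replicaSet{}, last // non-conflict errors are fatal
		}
	}
	return replicaSet{}, fmt.Errorf("giving up after retries: %w", last)
}

func main() {
	stored := replicaSet{replicas: 1}
	conflictOnce := true
	rs, err := updateWithRetries(
		func() replicaSet { return stored },
		func(rs replicaSet) error {
			if conflictOnce { // simulate losing the first write to a concurrent update
				conflictOnce = false
				return errConflict
			}
			stored = rs
			return nil
		},
		func(rs *replicaSet) { rs.replicas = 5 },
	)
	fmt.Println(rs.replicas, err) // 5 <nil>
}
```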
diff --git a/test/e2e/upgrades/apps/statefulset.go b/test/e2e/upgrades/apps/statefulset.go index cd47dfe480cc..312c6ca2951f 100644 --- a/test/e2e/upgrades/apps/statefulset.go +++ b/test/e2e/upgrades/apps/statefulset.go @@ -26,7 +26,7 @@ import ( "k8s.io/apimachinery/pkg/util/version" "k8s.io/kubernetes/test/e2e/framework" - e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset" + e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset" "k8s.io/kubernetes/test/e2e/upgrades" ) @@ -79,10 +79,10 @@ func (t *StatefulSetUpgradeTest) Setup(f *framework.Framework) { statefulPodMounts := []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}} podMounts := []v1.VolumeMount{{Name: "home", MountPath: "/home"}} ns := f.Namespace.Name - t.set = e2esset.NewStatefulSet(ssName, ns, headlessSvcName, 2, statefulPodMounts, podMounts, labels) + t.set = e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 2, statefulPodMounts, podMounts, labels) t.service = createStatefulSetService(ssName, labels) *(t.set.Spec.Replicas) = 3 - e2esset.PauseNewPods(t.set) + e2estatefulset.PauseNewPods(t.set) ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns) _, err := f.ClientSet.CoreV1().Services(ns).Create(context.TODO(), t.service, metav1.CreateOptions{}) @@ -94,7 +94,7 @@ func (t *StatefulSetUpgradeTest) Setup(f *framework.Framework) { framework.ExpectNoError(err) ginkgo.By("Saturating stateful set " + t.set.Name) - e2esset.Saturate(f.ClientSet, t.set) + e2estatefulset.Saturate(f.ClientSet, t.set) t.verify(f) t.restart(f) t.verify(f) @@ -108,26 +108,26 @@ func (t *StatefulSetUpgradeTest) Test(f *framework.Framework, done <-chan struct // Teardown deletes all StatefulSets func (t *StatefulSetUpgradeTest) Teardown(f *framework.Framework) { - e2esset.DeleteAllStatefulSets(f.ClientSet, t.set.Name) + e2estatefulset.DeleteAllStatefulSets(f.ClientSet, t.set.Name) } func (t *StatefulSetUpgradeTest) verify(f *framework.Framework) { ginkgo.By("Verifying statefulset mounted data directory is usable") - framework.ExpectNoError(e2esset.CheckMount(f.ClientSet, t.set, "/data")) + framework.ExpectNoError(e2estatefulset.CheckMount(f.ClientSet, t.set, "/data")) ginkgo.By("Verifying statefulset provides a stable hostname for each pod") - framework.ExpectNoError(e2esset.CheckHostname(f.ClientSet, t.set)) + framework.ExpectNoError(e2estatefulset.CheckHostname(f.ClientSet, t.set)) ginkgo.By("Verifying statefulset set proper service name") - framework.ExpectNoError(e2esset.CheckServiceName(t.set, t.set.Spec.ServiceName)) + framework.ExpectNoError(e2estatefulset.CheckServiceName(t.set, t.set.Spec.ServiceName)) cmd := "echo $(hostname) > /data/hostname; sync;" ginkgo.By("Running " + cmd + " in all stateful pods") - framework.ExpectNoError(e2esset.ExecInStatefulPods(f.ClientSet, t.set, cmd)) + framework.ExpectNoError(e2estatefulset.ExecInStatefulPods(f.ClientSet, t.set, cmd)) } func (t *StatefulSetUpgradeTest) restart(f *framework.Framework) { ginkgo.By("Restarting statefulset " + t.set.Name) - e2esset.Restart(f.ClientSet, t.set) - e2esset.WaitForRunningAndReady(f.ClientSet, *t.set.Spec.Replicas, t.set) + e2estatefulset.Restart(f.ClientSet, t.set) + e2estatefulset.WaitForRunningAndReady(f.ClientSet, *t.set.Spec.Replicas, t.set) } diff --git a/test/e2e/upgrades/cassandra.go b/test/e2e/upgrades/cassandra.go index 1c987f5983c2..b093586c42e9 100644 --- a/test/e2e/upgrades/cassandra.go +++ b/test/e2e/upgrades/cassandra.go @@ -32,8 +32,8 @@ import ( "k8s.io/apimachinery/pkg/util/version" 
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" - e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset" - "k8s.io/kubernetes/test/e2e/framework/testfiles" + e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset" + e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles" ) const cassandraManifestPath = "test/e2e/testing-manifests/statefulset/cassandra" @@ -60,7 +60,7 @@ func (CassandraUpgradeTest) Skip(upgCtx UpgradeContext) bool { } func cassandraKubectlCreate(ns, file string) { - input := string(testfiles.ReadOrDie(filepath.Join(cassandraManifestPath, file))) + input := string(e2etestfiles.ReadOrDie(filepath.Join(cassandraManifestPath, file))) framework.RunKubectlOrDieInput(ns, input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns)) } @@ -78,7 +78,7 @@ func (t *CassandraUpgradeTest) Setup(f *framework.Framework) { cassandraKubectlCreate(ns, "pdb.yaml") ginkgo.By("Creating a Cassandra StatefulSet") - e2esset.CreateStatefulSet(f.ClientSet, cassandraManifestPath, ns) + e2estatefulset.CreateStatefulSet(f.ClientSet, cassandraManifestPath, ns) ginkgo.By("Creating a cassandra-test-server deployment") cassandraKubectlCreate(ns, "tester.yaml") diff --git a/test/e2e/upgrades/etcd.go b/test/e2e/upgrades/etcd.go index d5bbb8e4d799..feeafd2b0813 100644 --- a/test/e2e/upgrades/etcd.go +++ b/test/e2e/upgrades/etcd.go @@ -32,8 +32,8 @@ import ( "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" - e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset" - "k8s.io/kubernetes/test/e2e/framework/testfiles" + e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset" + e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles" ) const manifestPath = "test/e2e/testing-manifests/statefulset/etcd" @@ -59,7 +59,7 @@ func (EtcdUpgradeTest) Skip(upgCtx UpgradeContext) bool { } func kubectlCreate(ns, file string) { - input := string(testfiles.ReadOrDie(filepath.Join(manifestPath, file))) + input := string(e2etestfiles.ReadOrDie(filepath.Join(manifestPath, file))) framework.RunKubectlOrDieInput(ns, input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns)) } @@ -73,7 +73,7 @@ func (t *EtcdUpgradeTest) Setup(f *framework.Framework) { kubectlCreate(ns, "pdb.yaml") ginkgo.By("Creating an etcd StatefulSet") - e2esset.CreateStatefulSet(f.ClientSet, manifestPath, ns) + e2estatefulset.CreateStatefulSet(f.ClientSet, manifestPath, ns) ginkgo.By("Creating an etcd--test-server deployment") kubectlCreate(ns, "tester.yaml") diff --git a/test/e2e/upgrades/mysql.go b/test/e2e/upgrades/mysql.go index 0cece723ad14..edb3a599a21b 100644 --- a/test/e2e/upgrades/mysql.go +++ b/test/e2e/upgrades/mysql.go @@ -32,8 +32,8 @@ import ( "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" - e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset" - "k8s.io/kubernetes/test/e2e/framework/testfiles" + e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset" + e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles" ) const mysqlManifestPath = "test/e2e/testing-manifests/statefulset/mysql-upgrade" @@ -61,7 +61,7 @@ func (MySQLUpgradeTest) Skip(upgCtx UpgradeContext) bool { } func mysqlKubectlCreate(ns, file string) { - input := string(testfiles.ReadOrDie(filepath.Join(mysqlManifestPath, file))) + input := string(e2etestfiles.ReadOrDie(filepath.Join(mysqlManifestPath, file))) framework.RunKubectlOrDieInput(ns, input, "create", "-f", "-", 
fmt.Sprintf("--namespace=%s", ns)) } @@ -88,7 +88,7 @@ func (t *MySQLUpgradeTest) Setup(f *framework.Framework) { mysqlKubectlCreate(ns, "configmap.yaml") ginkgo.By("Creating a mysql StatefulSet") - e2esset.CreateStatefulSet(f.ClientSet, mysqlManifestPath, ns) + e2estatefulset.CreateStatefulSet(f.ClientSet, mysqlManifestPath, ns) ginkgo.By("Creating a mysql-test-server deployment") mysqlKubectlCreate(ns, "tester.yaml") diff --git a/test/e2e_node/device_plugin_test.go b/test/e2e_node/device_plugin_test.go index 91e749f42c3a..83a962493ddd 100644 --- a/test/e2e_node/device_plugin_test.go +++ b/test/e2e_node/device_plugin_test.go @@ -25,7 +25,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/kubernetes/test/e2e/framework/testfiles" + e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles" "regexp" @@ -80,7 +80,7 @@ func numberOfSampleResources(node *v1.Node) int64 { // getSampleDevicePluginPod returns the Device Plugin pod for sample resources in e2e tests. func getSampleDevicePluginPod() *v1.Pod { - ds := readDaemonSetV1OrDie(testfiles.ReadOrDie(sampleDevicePluginDSYAML)) + ds := readDaemonSetV1OrDie(e2etestfiles.ReadOrDie(sampleDevicePluginDSYAML)) p := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: sampleDevicePluginName, diff --git a/test/e2e_node/dynamic_kubelet_config_test.go b/test/e2e_node/dynamic_kubelet_config_test.go index 3dbaa53d8df8..c89d8467c36f 100644 --- a/test/e2e_node/dynamic_kubelet_config_test.go +++ b/test/e2e_node/dynamic_kubelet_config_test.go @@ -34,7 +34,7 @@ import ( controller "k8s.io/kubernetes/pkg/kubelet/kubeletconfig" "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status" "k8s.io/kubernetes/pkg/kubelet/metrics" - frameworkmetrics "k8s.io/kubernetes/test/e2e/framework/metrics" + e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" "k8s.io/kubernetes/test/e2e/framework" @@ -1161,7 +1161,7 @@ func (tc *nodeConfigTestCase) checkConfigMetrics(f *framework.Framework) { // error errorSamples := model.Samples{mkErrorSample(len(tc.expectConfigStatus.err) > 0)} // expected metrics - expect := frameworkmetrics.KubeletMetrics(map[string]model.Samples{ + expect := e2emetrics.KubeletMetrics(map[string]model.Samples{ assignedConfigKey: assignedSamples, activeConfigKey: activeSamples, lastKnownGoodConfigKey: lastKnownGoodSamples, diff --git a/test/e2e_node/e2e_node_suite_test.go b/test/e2e_node/e2e_node_suite_test.go index 0237f7d38b9e..0fc19aeb31bf 100644 --- a/test/e2e_node/e2e_node_suite_test.go +++ b/test/e2e_node/e2e_node_suite_test.go @@ -43,7 +43,7 @@ import ( commontest "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" e2econfig "k8s.io/kubernetes/test/e2e/framework/config" - "k8s.io/kubernetes/test/e2e/framework/testfiles" + e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles" "k8s.io/kubernetes/test/e2e/generated" "k8s.io/kubernetes/test/e2e_node/services" system "k8s.io/system-validators/validators" @@ -85,7 +85,7 @@ func registerNodeFlags(flags *flag.FlagSet) { func init() { // Enable bindata file lookup as fallback. 
- testfiles.AddFileSource(testfiles.BindataFileSource{ + e2etestfiles.AddFileSource(e2etestfiles.BindataFileSource{ Asset: generated.Asset, AssetNames: generated.AssetNames, }) diff --git a/test/e2e_node/gpu_device_plugin_test.go b/test/e2e_node/gpu_device_plugin_test.go index 64c6f985d631..518e944e9058 100644 --- a/test/e2e_node/gpu_device_plugin_test.go +++ b/test/e2e_node/gpu_device_plugin_test.go @@ -27,8 +27,8 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics" "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/gpu" - "k8s.io/kubernetes/test/e2e/framework/metrics" + e2egpu "k8s.io/kubernetes/test/e2e/framework/gpu" + e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -40,7 +40,7 @@ import ( // After the NVIDIA drivers were installed // TODO make this generic and not linked to COS only func numberOfNVIDIAGPUs(node *v1.Node) int64 { - val, ok := node.Status.Capacity[gpu.NVIDIAGPUResourceName] + val, ok := node.Status.Capacity[e2egpu.NVIDIAGPUResourceName] if !ok { return 0 } @@ -49,7 +49,7 @@ func numberOfNVIDIAGPUs(node *v1.Node) int64 { // NVIDIADevicePlugin returns the official Google Device Plugin pod for NVIDIA GPU in GKE func NVIDIADevicePlugin() *v1.Pod { - ds, err := framework.DsFromManifest(gpu.GPUDevicePluginDSYAML) + ds, err := framework.DsFromManifest(e2egpu.GPUDevicePluginDSYAML) framework.ExpectNoError(err) p := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -106,7 +106,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi ginkgo.It("checks that when Kubelet restarts exclusive GPU assignation to pods is kept.", func() { ginkgo.By("Creating one GPU pod on a node with at least two GPUs") podRECMD := "devs=$(ls /dev/ | egrep '^nvidia[0-9]+$') && echo gpu devices: $devs" - p1 := f.PodClient().CreateSync(makeBusyboxPod(gpu.NVIDIAGPUResourceName, podRECMD)) + p1 := f.PodClient().CreateSync(makeBusyboxPod(e2egpu.NVIDIAGPUResourceName, podRECMD)) deviceIDRE := "gpu devices: (nvidia[0-9]+)" devID1 := parseLog(f, p1.Name, p1.Name, deviceIDRE) @@ -127,7 +127,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi gomega.Eventually(func() bool { return numberOfNVIDIAGPUs(getLocalNode(f)) > 0 }, 5*time.Minute, framework.Poll).Should(gomega.BeTrue()) - p2 := f.PodClient().CreateSync(makeBusyboxPod(gpu.NVIDIAGPUResourceName, podRECMD)) + p2 := f.PodClient().CreateSync(makeBusyboxPod(e2egpu.NVIDIAGPUResourceName, podRECMD)) ginkgo.By("Checking that pods got a different GPU") devID2 := parseLog(f, p2.Name, p2.Name, deviceIDRE) @@ -179,7 +179,7 @@ func checkIfNvidiaGPUsExistOnNode() bool { } func logDevicePluginMetrics() { - ms, err := metrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics") + ms, err := e2emetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics") framework.ExpectNoError(err) for msKey, samples := range ms { switch msKey { diff --git a/test/e2e_node/image_list.go b/test/e2e_node/image_list.go index e896ae365227..233e8bac3b4c 100644 --- a/test/e2e_node/image_list.go +++ b/test/e2e_node/image_list.go @@ -30,8 +30,8 @@ import ( runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" commontest "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/gpu" - "k8s.io/kubernetes/test/e2e/framework/testfiles" + e2egpu "k8s.io/kubernetes/test/e2e/framework/gpu" + 
e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -171,7 +171,7 @@ func PrePullAllImages() error { // getGPUDevicePluginImage returns the image of GPU device plugin. func getGPUDevicePluginImage() string { - ds, err := framework.DsFromManifest(gpu.GPUDevicePluginDSYAML) + ds, err := framework.DsFromManifest(e2egpu.GPUDevicePluginDSYAML) if err != nil { klog.Errorf("Failed to parse the device plugin image: %v", err) return "" @@ -189,7 +189,7 @@ func getGPUDevicePluginImage() string { // getSRIOVDevicePluginImage returns the image of SRIOV device plugin. func getSRIOVDevicePluginImage() string { - data, err := testfiles.Read(SRIOVDevicePluginDSYAML) + data, err := e2etestfiles.Read(SRIOVDevicePluginDSYAML) if err != nil { klog.Errorf("Failed to read the device plugin manifest: %v", err) return "" diff --git a/test/e2e_node/resource_metrics_test.go b/test/e2e_node/resource_metrics_test.go index 5c65d3fc7150..215ef1e400d6 100644 --- a/test/e2e_node/resource_metrics_test.go +++ b/test/e2e_node/resource_metrics_test.go @@ -24,8 +24,8 @@ import ( kubeletresourcemetricsv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/resourcemetrics/v1alpha1" "k8s.io/kubernetes/test/e2e/framework" e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" - "k8s.io/kubernetes/test/e2e/framework/metrics" - "k8s.io/kubernetes/test/e2e/framework/volume" + e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" "github.com/prometheus/common/model" @@ -76,7 +76,7 @@ var _ = framework.KubeDescribe("ResourceMetricsAPI", func() { "": boundedSample(1, 1e6), }), "node_memory_working_set_bytes": gstruct.MatchAllElements(nodeID, gstruct.Elements{ - "": boundedSample(10*volume.Mb, memoryLimit), + "": boundedSample(10*e2evolume.Mb, memoryLimit), }), "container_cpu_usage_seconds_total": gstruct.MatchElements(containerID, gstruct.IgnoreExtras, gstruct.Elements{ @@ -85,8 +85,8 @@ var _ = framework.KubeDescribe("ResourceMetricsAPI", func() { }), "container_memory_working_set_bytes": gstruct.MatchAllElements(containerID, gstruct.Elements{ - fmt.Sprintf("%s::%s::%s", f.Namespace.Name, pod0, "busybox-container"): boundedSample(10*volume.Kb, 80*volume.Mb), - fmt.Sprintf("%s::%s::%s", f.Namespace.Name, pod1, "busybox-container"): boundedSample(10*volume.Kb, 80*volume.Mb), + fmt.Sprintf("%s::%s::%s", f.Namespace.Name, pod0, "busybox-container"): boundedSample(10*e2evolume.Kb, 80*e2evolume.Mb), + fmt.Sprintf("%s::%s::%s", f.Namespace.Name, pod1, "busybox-container"): boundedSample(10*e2evolume.Kb, 80*e2evolume.Mb), }), }) ginkgo.By("Giving pods a minute to start up and produce metrics") @@ -110,8 +110,8 @@ var _ = framework.KubeDescribe("ResourceMetricsAPI", func() { }) }) -func getV1alpha1ResourceMetrics() (metrics.KubeletMetrics, error) { - return metrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics/resource/"+kubeletresourcemetricsv1alpha1.Version) +func getV1alpha1ResourceMetrics() (e2emetrics.KubeletMetrics, error) { + return e2emetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics/resource/"+kubeletresourcemetricsv1alpha1.Version) } func nodeID(element interface{}) string { diff --git a/test/e2e_node/summary_test.go b/test/e2e_node/summary_test.go index 5de99e1b803c..acd6fe553758 100644 --- a/test/e2e_node/summary_test.go +++ b/test/e2e_node/summary_test.go @@ -29,7 +29,7 @@ import ( kubeletstatsv1alpha1 
"k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" "k8s.io/kubernetes/test/e2e/framework" e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" - "k8s.io/kubernetes/test/e2e/framework/volume" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" systemdutil "github.com/coreos/go-systemd/util" "github.com/onsi/ginkgo" @@ -82,7 +82,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() { node := getLocalNode(f) memoryCapacity := node.Status.Capacity["memory"] memoryLimit := memoryCapacity.Value() - fsCapacityBounds := bounded(100*volume.Mb, 10*volume.Tb) + fsCapacityBounds := bounded(100*e2evolume.Mb, 10*e2evolume.Tb) // Expectations for system containers. sysContExpectations := func() types.GomegaMatcher { return gstruct.MatchAllFields(gstruct.Fields{ @@ -97,10 +97,10 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() { "Time": recent(maxStatsAge), // We don't limit system container memory. "AvailableBytes": gomega.BeNil(), - "UsageBytes": bounded(1*volume.Mb, memoryLimit), - "WorkingSetBytes": bounded(1*volume.Mb, memoryLimit), + "UsageBytes": bounded(1*e2evolume.Mb, memoryLimit), + "WorkingSetBytes": bounded(1*e2evolume.Mb, memoryLimit), // this now returns /sys/fs/cgroup/memory.stat total_rss - "RSSBytes": bounded(1*volume.Mb, memoryLimit), + "RSSBytes": bounded(1*e2evolume.Mb, memoryLimit), "PageFaults": bounded(1000, 1e9), "MajorPageFaults": bounded(0, 100000), }), @@ -114,10 +114,10 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() { podsContExpectations.Fields["Memory"] = ptrMatchAllFields(gstruct.Fields{ "Time": recent(maxStatsAge), // Pods are limited by Node Allocatable - "AvailableBytes": bounded(1*volume.Kb, memoryLimit), - "UsageBytes": bounded(10*volume.Kb, memoryLimit), - "WorkingSetBytes": bounded(10*volume.Kb, memoryLimit), - "RSSBytes": bounded(1*volume.Kb, memoryLimit), + "AvailableBytes": bounded(1*e2evolume.Kb, memoryLimit), + "UsageBytes": bounded(10*e2evolume.Kb, memoryLimit), + "WorkingSetBytes": bounded(10*e2evolume.Kb, memoryLimit), + "RSSBytes": bounded(1*e2evolume.Kb, memoryLimit), "PageFaults": bounded(0, 1000000), "MajorPageFaults": bounded(0, 10), }) @@ -159,9 +159,9 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() { "Time": recent(maxStatsAge), // We don't limit system container memory. 
"AvailableBytes": gomega.BeNil(), - "UsageBytes": bounded(100*volume.Kb, memoryLimit), - "WorkingSetBytes": bounded(100*volume.Kb, memoryLimit), - "RSSBytes": bounded(100*volume.Kb, memoryLimit), + "UsageBytes": bounded(100*e2evolume.Kb, memoryLimit), + "WorkingSetBytes": bounded(100*e2evolume.Kb, memoryLimit), + "RSSBytes": bounded(100*e2evolume.Kb, memoryLimit), "PageFaults": bounded(1000, 1e9), "MajorPageFaults": bounded(0, 100000), }) @@ -182,10 +182,10 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() { }), "Memory": ptrMatchAllFields(gstruct.Fields{ "Time": recent(maxStatsAge), - "AvailableBytes": bounded(1*volume.Kb, 80*volume.Mb), - "UsageBytes": bounded(10*volume.Kb, 80*volume.Mb), - "WorkingSetBytes": bounded(10*volume.Kb, 80*volume.Mb), - "RSSBytes": bounded(1*volume.Kb, 80*volume.Mb), + "AvailableBytes": bounded(1*e2evolume.Kb, 80*e2evolume.Mb), + "UsageBytes": bounded(10*e2evolume.Kb, 80*e2evolume.Mb), + "WorkingSetBytes": bounded(10*e2evolume.Kb, 80*e2evolume.Mb), + "RSSBytes": bounded(1*e2evolume.Kb, 80*e2evolume.Mb), "PageFaults": bounded(100, 1000000), "MajorPageFaults": bounded(0, 10), }), @@ -194,7 +194,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() { "Time": recent(maxStatsAge), "AvailableBytes": fsCapacityBounds, "CapacityBytes": fsCapacityBounds, - "UsedBytes": bounded(volume.Kb, 10*volume.Mb), + "UsedBytes": bounded(e2evolume.Kb, 10*e2evolume.Mb), "InodesFree": bounded(1e4, 1e8), "Inodes": bounded(1e4, 1e8), "InodesUsed": bounded(0, 1e8), @@ -203,7 +203,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() { "Time": recent(maxStatsAge), "AvailableBytes": fsCapacityBounds, "CapacityBytes": fsCapacityBounds, - "UsedBytes": bounded(volume.Kb, 10*volume.Mb), + "UsedBytes": bounded(e2evolume.Kb, 10*e2evolume.Mb), "InodesFree": bounded(1e4, 1e8), "Inodes": bounded(1e4, 1e8), "InodesUsed": bounded(0, 1e8), @@ -215,9 +215,9 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() { "Time": recent(maxStatsAge), "InterfaceStats": gstruct.MatchAllFields(gstruct.Fields{ "Name": gomega.Equal("eth0"), - "RxBytes": bounded(10, 10*volume.Mb), + "RxBytes": bounded(10, 10*e2evolume.Mb), "RxErrors": bounded(0, 1000), - "TxBytes": bounded(10, 10*volume.Mb), + "TxBytes": bounded(10, 10*e2evolume.Mb), "TxErrors": bounded(0, 1000), }), "Interfaces": gomega.Not(gomega.BeNil()), @@ -229,10 +229,10 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() { }), "Memory": ptrMatchAllFields(gstruct.Fields{ "Time": recent(maxStatsAge), - "AvailableBytes": bounded(1*volume.Kb, 80*volume.Mb), - "UsageBytes": bounded(10*volume.Kb, 80*volume.Mb), - "WorkingSetBytes": bounded(10*volume.Kb, 80*volume.Mb), - "RSSBytes": bounded(1*volume.Kb, 80*volume.Mb), + "AvailableBytes": bounded(1*e2evolume.Kb, 80*e2evolume.Mb), + "UsageBytes": bounded(10*e2evolume.Kb, 80*e2evolume.Mb), + "WorkingSetBytes": bounded(10*e2evolume.Kb, 80*e2evolume.Mb), + "RSSBytes": bounded(1*e2evolume.Kb, 80*e2evolume.Mb), "PageFaults": bounded(0, 1000000), "MajorPageFaults": bounded(0, 10), }), @@ -244,7 +244,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() { "Time": recent(maxStatsAge), "AvailableBytes": fsCapacityBounds, "CapacityBytes": fsCapacityBounds, - "UsedBytes": bounded(volume.Kb, 1*volume.Mb), + "UsedBytes": bounded(e2evolume.Kb, 1*e2evolume.Mb), "InodesFree": bounded(1e4, 1e8), "Inodes": bounded(1e4, 1e8), "InodesUsed": bounded(0, 1e8), @@ -255,7 +255,7 @@ var _ = 
framework.KubeDescribe("Summary API [NodeConformance]", func() { "Time": recent(maxStatsAge), "AvailableBytes": fsCapacityBounds, "CapacityBytes": fsCapacityBounds, - "UsedBytes": bounded(volume.Kb, 21*volume.Mb), + "UsedBytes": bounded(e2evolume.Kb, 21*e2evolume.Mb), "InodesFree": bounded(1e4, 1e8), "Inodes": bounded(1e4, 1e8), "InodesUsed": bounded(0, 1e8), @@ -277,11 +277,11 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() { }), "Memory": ptrMatchAllFields(gstruct.Fields{ "Time": recent(maxStatsAge), - "AvailableBytes": bounded(100*volume.Mb, memoryLimit), - "UsageBytes": bounded(10*volume.Mb, memoryLimit), - "WorkingSetBytes": bounded(10*volume.Mb, memoryLimit), + "AvailableBytes": bounded(100*e2evolume.Mb, memoryLimit), + "UsageBytes": bounded(10*e2evolume.Mb, memoryLimit), + "WorkingSetBytes": bounded(10*e2evolume.Mb, memoryLimit), // this now returns /sys/fs/cgroup/memory.stat total_rss - "RSSBytes": bounded(1*volume.Kb, memoryLimit), + "RSSBytes": bounded(1*e2evolume.Kb, memoryLimit), "PageFaults": bounded(1000, 1e9), "MajorPageFaults": bounded(0, 100000), }), @@ -290,9 +290,9 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() { "Time": recent(maxStatsAge), "InterfaceStats": gstruct.MatchAllFields(gstruct.Fields{ "Name": gomega.Or(gomega.BeEmpty(), gomega.Equal("eth0")), - "RxBytes": gomega.Or(gomega.BeNil(), bounded(1*volume.Mb, 100*volume.Gb)), + "RxBytes": gomega.Or(gomega.BeNil(), bounded(1*e2evolume.Mb, 100*e2evolume.Gb)), "RxErrors": gomega.Or(gomega.BeNil(), bounded(0, 100000)), - "TxBytes": gomega.Or(gomega.BeNil(), bounded(10*volume.Kb, 10*volume.Gb)), + "TxBytes": gomega.Or(gomega.BeNil(), bounded(10*e2evolume.Kb, 10*e2evolume.Gb)), "TxErrors": gomega.Or(gomega.BeNil(), bounded(0, 100000)), }), "Interfaces": gomega.Not(gomega.BeNil()), @@ -302,7 +302,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() { "AvailableBytes": fsCapacityBounds, "CapacityBytes": fsCapacityBounds, // we assume we are not running tests on machines < 10tb of disk - "UsedBytes": bounded(volume.Kb, 10*volume.Tb), + "UsedBytes": bounded(e2evolume.Kb, 10*e2evolume.Tb), "InodesFree": bounded(1e4, 1e8), "Inodes": bounded(1e4, 1e8), "InodesUsed": bounded(0, 1e8), @@ -313,7 +313,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() { "AvailableBytes": fsCapacityBounds, "CapacityBytes": fsCapacityBounds, // we assume we are not running tests on machines < 10tb of disk - "UsedBytes": bounded(volume.Kb, 10*volume.Tb), + "UsedBytes": bounded(e2evolume.Kb, 10*e2evolume.Tb), "InodesFree": bounded(1e4, 1e8), "Inodes": bounded(1e4, 1e8), "InodesUsed": bounded(0, 1e8), diff --git a/test/e2e_node/topology_manager_test.go b/test/e2e_node/topology_manager_test.go index f1732c3b3848..f2f29af426ac 100644 --- a/test/e2e_node/topology_manager_test.go +++ b/test/e2e_node/topology_manager_test.go @@ -41,7 +41,7 @@ import ( e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" - "k8s.io/kubernetes/test/e2e/framework/testfiles" + e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -246,7 +246,7 @@ func configureTopologyManagerInKubelet(f *framework.Framework, oldCfg *kubeletco // getSRIOVDevicePluginPod returns the Device Plugin pod for sriov resources in e2e tests. 
func getSRIOVDevicePluginPod() *v1.Pod { - ds := readDaemonSetV1OrDie(testfiles.ReadOrDie(SRIOVDevicePluginDSYAML)) + ds := readDaemonSetV1OrDie(e2etestfiles.ReadOrDie(SRIOVDevicePluginDSYAML)) p := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: SRIOVDevicePluginName, @@ -415,7 +415,7 @@ func isTopologyAffinityError(pod *v1.Pod) bool { } func getSRIOVDevicePluginConfigMap(cmFile string) *v1.ConfigMap { - cmData := testfiles.ReadOrDie(SRIOVDevicePluginCMYAML) + cmData := e2etestfiles.ReadOrDie(SRIOVDevicePluginCMYAML) var err error // the SRIOVDP configuration is hw-dependent, so we allow per-test-host customization. @@ -449,7 +449,7 @@ func setupSRIOVConfigOrFail(f *framework.Framework, configMap *v1.ConfigMap) *sr framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } - serviceAccount := readServiceAccountV1OrDie(testfiles.ReadOrDie(SRIOVDevicePluginSAYAML)) + serviceAccount := readServiceAccountV1OrDie(e2etestfiles.ReadOrDie(SRIOVDevicePluginSAYAML)) ginkgo.By(fmt.Sprintf("Creating serviceAccount %v/%v", metav1.NamespaceSystem, serviceAccount.Name)) if _, err = f.ClientSet.CoreV1().ServiceAccounts(metav1.NamespaceSystem).Create(context.TODO(), serviceAccount, metav1.CreateOptions{}); err != nil { framework.Failf("unable to create test serviceAccount %s: %v", serviceAccount.Name, err) From c9314dde593b0b845fa5345e58230e620ad41195 Mon Sep 17 00:00:00 2001 From: Aldo Culquicondor Date: Fri, 20 Mar 2020 16:18:44 -0400 Subject: [PATCH 31/92] Add support for multiple label values in test cases They are assigned in a round robin fashion Signed-off-by: Aldo Culquicondor --- .../config/performance-config.yaml | 2 +- test/utils/runners.go | 17 +++++++++++------ 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/test/integration/scheduler_perf/config/performance-config.yaml b/test/integration/scheduler_perf/config/performance-config.yaml index 4ea03b9959a1..34ac6dc4b16f 100644 --- a/test/integration/scheduler_perf/config/performance-config.yaml +++ b/test/integration/scheduler_perf/config/performance-config.yaml @@ -162,7 +162,7 @@ nodeTemplatePath: config/node-default.yaml labelNodePrepareStrategy: labelKey: "failure-domain.beta.kubernetes.io/zone" - labelValue: "zone1" + labelValues: ["zone1"] initPods: - podTemplatePath: config/pod-with-node-affinity.yaml podsToSchedule: diff --git a/test/utils/runners.go b/test/utils/runners.go index aaac8bd37d5b..fde5a7030710 100644 --- a/test/utils/runners.go +++ b/test/utils/runners.go @@ -980,22 +980,27 @@ func (*TrivialNodePrepareStrategy) CleanupDependentObjects(nodeName string, clie } type LabelNodePrepareStrategy struct { - LabelKey string - LabelValue string + LabelKey string + LabelValues []string + roundRobinIdx int } var _ PrepareNodeStrategy = &LabelNodePrepareStrategy{} -func NewLabelNodePrepareStrategy(labelKey string, labelValue string) *LabelNodePrepareStrategy { +func NewLabelNodePrepareStrategy(labelKey string, labelValues ...string) *LabelNodePrepareStrategy { return &LabelNodePrepareStrategy{ - LabelKey: labelKey, - LabelValue: labelValue, + LabelKey: labelKey, + LabelValues: labelValues, } } func (s *LabelNodePrepareStrategy) PreparePatch(*v1.Node) []byte { - labelString := fmt.Sprintf("{\"%v\":\"%v\"}", s.LabelKey, s.LabelValue) + labelString := fmt.Sprintf("{\"%v\":\"%v\"}", s.LabelKey, s.LabelValues[s.roundRobinIdx]) patch := fmt.Sprintf(`{"metadata":{"labels":%v}}`, labelString) + s.roundRobinIdx++ + if s.roundRobinIdx == len(s.LabelValues) { + s.roundRobinIdx = 0 + } return []byte(patch) } From 
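To make the round-robin assignment above concrete, here is a minimal standalone sketch of the cycling logic that PreparePatch now performs; the labelCycler type and its names are hypothetical and deliberately free of the k8s test/utils dependencies so the snippet runs on its own:

```go
package main

import "fmt"

// labelCycler mirrors the round-robin index added to LabelNodePrepareStrategy:
// successive calls hand out the configured values in order and wrap around.
type labelCycler struct {
	values []string
	idx    int
}

func (c *labelCycler) next() string {
	v := c.values[c.idx]
	c.idx++
	if c.idx == len(c.values) {
		c.idx = 0
	}
	return v
}

func main() {
	c := &labelCycler{values: []string{"zone1", "zone2", "zone3"}}
	for i := 0; i < 5; i++ {
		fmt.Println(c.next()) // zone1 zone2 zone3 zone1 zone2
	}
}
```

Nodes prepared in sequence therefore receive the label values in a round-robin fashion, which is what lets the test cases below spread nodes across several zones.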
671cd339862cab311df0aa62ea7f9a490f0f79cc Mon Sep 17 00:00:00 2001 From: Aldo Culquicondor Date: Mon, 23 Mar 2020 14:33:46 -0400 Subject: [PATCH 32/92] Add perf test cases for topology spreading Signed-off-by: Aldo Culquicondor --- .../config/performance-config.yaml | 36 +++++++++++++++++++ ...pod-with-preferred-topology-spreading.yaml | 21 +++++++++++ .../config/pod-with-topology-spreading.yaml | 21 +++++++++++ 3 files changed, 78 insertions(+) create mode 100644 test/integration/scheduler_perf/config/pod-with-preferred-topology-spreading.yaml create mode 100644 test/integration/scheduler_perf/config/pod-with-topology-spreading.yaml diff --git a/test/integration/scheduler_perf/config/performance-config.yaml b/test/integration/scheduler_perf/config/performance-config.yaml index 34ac6dc4b16f..f9cb6a254aaa 100644 --- a/test/integration/scheduler_perf/config/performance-config.yaml +++ b/test/integration/scheduler_perf/config/performance-config.yaml @@ -174,6 +174,42 @@ - numNodes: 5000 numInitPods: [5000] numPodsToSchedule: 1000 +- template: + desc: TopologySpreading + nodes: + nodeTemplatePath: config/node-default.yaml + labelNodePrepareStrategy: + labelKey: "topology.kubernetes.io/zone" + labelValues: ["moon-1", "moon-2", "moon-3"] + initPods: + - podTemplatePath: config/pod-default.yaml + podsToSchedule: + podTemplatePath: config/pod-with-topology-spreading.yaml + params: + - numNodes: 500 + numInitPods: [1000] + numPodsToSchedule: 1000 + - numNodes: 5000 + numInitPods: [5000] + numPodsToSchedule: 2000 +- template: + desc: PreferredTopologySpreading + nodes: + nodeTemplatePath: config/node-default.yaml + labelNodePrepareStrategy: + labelKey: "topology.kubernetes.io/zone" + labelValues: ["moon-1", "moon-2", "moon-3"] + initPods: + - podTemplatePath: config/pod-default.yaml + podsToSchedule: + podTemplatePath: config/pod-with-preferred-topology-spreading.yaml + params: + - numNodes: 500 + numInitPods: [1000] + numPodsToSchedule: 1000 + - numNodes: 5000 + numInitPods: [5000] + numPodsToSchedule: 2000 - template: desc: MixedSchedulingBasePod initPods: diff --git a/test/integration/scheduler_perf/config/pod-with-preferred-topology-spreading.yaml b/test/integration/scheduler_perf/config/pod-with-preferred-topology-spreading.yaml new file mode 100644 index 000000000000..39a171f393ab --- /dev/null +++ b/test/integration/scheduler_perf/config/pod-with-preferred-topology-spreading.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Pod +metadata: + generateName: spreading-pod- +spec: + topologySpreadConstraints: + - maxSkew: 5 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + containers: + - image: k8s.gcr.io/pause:3.2 + name: pause + ports: + - containerPort: 80 + resources: + limits: + cpu: 100m + memory: 500Mi + requests: + cpu: 100m + memory: 500Mi diff --git a/test/integration/scheduler_perf/config/pod-with-topology-spreading.yaml b/test/integration/scheduler_perf/config/pod-with-topology-spreading.yaml new file mode 100644 index 000000000000..cb3769675ed5 --- /dev/null +++ b/test/integration/scheduler_perf/config/pod-with-topology-spreading.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Pod +metadata: + generateName: spreading-pod- +spec: + topologySpreadConstraints: + - maxSkew: 5 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: DoNotSchedule + containers: + - image: k8s.gcr.io/pause:3.2 + name: pause + ports: + - containerPort: 80 + resources: + limits: + cpu: 100m + memory: 500Mi + requests: + cpu: 100m + memory: 500Mi From 
4b31b554992fc50a65fdeca47bbd1f48f69a91b3 Mon Sep 17 00:00:00 2001 From: Aldo Culquicondor Date: Wed, 25 Mar 2020 13:29:27 -0400 Subject: [PATCH 33/92] Count spreading node matches for hostname topology in Score --- .../plugins/podtopologyspread/common.go | 14 +++++++ .../plugins/podtopologyspread/scoring.go | 37 +++++++++---------- 2 files changed, 32 insertions(+), 19 deletions(-) diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/common.go b/pkg/scheduler/framework/plugins/podtopologyspread/common.go index b87af00c88e5..fa53a9ac48b1 100644 --- a/pkg/scheduler/framework/plugins/podtopologyspread/common.go +++ b/pkg/scheduler/framework/plugins/podtopologyspread/common.go @@ -82,3 +82,17 @@ func filterTopologySpreadConstraints(constraints []v1.TopologySpreadConstraint, } return result, nil } + +func countPodsMatchSelector(pods []*v1.Pod, selector labels.Selector, ns string) int { + count := 0 + for _, p := range pods { + // Bypass terminating Pod (see #87621). + if p.DeletionTimestamp != nil || p.Namespace != ns { + continue + } + if selector.Matches(labels.Set(p.Labels)) { + count++ + } + } + return count +} diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/scoring.go b/pkg/scheduler/framework/plugins/podtopologyspread/scoring.go index 6686361f85a0..bac11cea11a9 100644 --- a/pkg/scheduler/framework/plugins/podtopologyspread/scoring.go +++ b/pkg/scheduler/framework/plugins/podtopologyspread/scoring.go @@ -23,7 +23,6 @@ import ( "sync/atomic" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog" pluginhelper "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper" @@ -74,6 +73,10 @@ func (pl *PodTopologySpread) initPreScoreState(s *preScoreState, pod *v1.Pod, fi continue } for _, constraint := range s.Constraints { + // per-node counts are calculated during Score. + if constraint.TopologyKey == v1.LabelHostname { + continue + } pair := topologyPair{key: constraint.TopologyKey, value: node.Labels[constraint.TopologyKey]} if s.TopologyPairToPodCounts[pair] == nil { s.TopologyPairToPodCounts[pair] = new(int64) @@ -104,7 +107,7 @@ func (pl *PodTopologySpread) PreScore( } state := &preScoreState{ - NodeNameSet: sets.String{}, + NodeNameSet: make(sets.String, len(filteredNodes)), TopologyPairToPodCounts: make(map[topologyPair]*int64), } err = pl.initPreScoreState(state, pod, filteredNodes) @@ -135,22 +138,13 @@ func (pl *PodTopologySpread) PreScore( pair := topologyPair{key: c.TopologyKey, value: node.Labels[c.TopologyKey]} // If current topology pair is not associated with any candidate node, // continue to avoid unnecessary calculation. - if state.TopologyPairToPodCounts[pair] == nil { + // Per-node counts are also skipped, as they are done during Score. + tpCount := state.TopologyPairToPodCounts[pair] + if tpCount == nil { continue } - - // indicates how many pods (on current node) match the . - matchSum := int64(0) - for _, existingPod := range nodeInfo.Pods() { - // Bypass terminating Pod (see #87621). 
- if existingPod.DeletionTimestamp != nil || existingPod.Namespace != pod.Namespace { - continue - } - if c.Selector.Matches(labels.Set(existingPod.Labels)) { - matchSum++ - } - } - atomic.AddInt64(state.TopologyPairToPodCounts[pair], matchSum) + count := countPodsMatchSelector(nodeInfo.Pods(), c.Selector, pod.Namespace) + atomic.AddInt64(tpCount, int64(count)) } } parallelize.Until(ctx, len(allNodes), processAllNode) @@ -184,9 +178,14 @@ func (pl *PodTopologySpread) Score(ctx context.Context, cycleState *framework.Cy var score int64 for _, c := range s.Constraints { if tpVal, ok := node.Labels[c.TopologyKey]; ok { - pair := topologyPair{key: c.TopologyKey, value: tpVal} - matchSum := *s.TopologyPairToPodCounts[pair] - score += matchSum + if c.TopologyKey == v1.LabelHostname { + count := countPodsMatchSelector(nodeInfo.Pods(), c.Selector, pod.Namespace) + score += int64(count) + } else { + pair := topologyPair{key: c.TopologyKey, value: tpVal} + matchSum := *s.TopologyPairToPodCounts[pair] + score += matchSum + } } } return score, nil From c9004e704d6be7e08277791537370f76c5816692 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Thu, 13 Feb 2020 16:20:24 +0100 Subject: [PATCH 34/92] e2e/storage: check result of WaitForPersistentVolumeDeleted When deleting fails, the tests should be considered as failed, too. Ignoring the error caused a wrong return code in the CSI mock driver to go unnoticed (see https://github.com/kubernetes-csi/csi-test/pull/250). The v3.1.0 release of the CSI mock driver fixes that. --- test/e2e/storage/csi_mock_volume.go | 2 +- test/e2e/storage/pv_protection.go | 6 ++++-- .../testing-manifests/storage-csi/mock/csi-mock-driver.yaml | 2 +- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/test/e2e/storage/csi_mock_volume.go b/test/e2e/storage/csi_mock_volume.go index 1ed09acd6a30..830342e1d56b 100644 --- a/test/e2e/storage/csi_mock_volume.go +++ b/test/e2e/storage/csi_mock_volume.go @@ -188,7 +188,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { claim, err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{}) if err == nil { cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(context.TODO(), claim.Name, metav1.DeleteOptions{}) - e2epv.WaitForPersistentVolumeDeleted(cs, claim.Spec.VolumeName, framework.Poll, 2*time.Minute) + errs = append(errs, e2epv.WaitForPersistentVolumeDeleted(cs, claim.Spec.VolumeName, framework.Poll, 2*time.Minute)) } } diff --git a/test/e2e/storage/pv_protection.go b/test/e2e/storage/pv_protection.go index f38261331275..c87735ec73d0 100644 --- a/test/e2e/storage/pv_protection.go +++ b/test/e2e/storage/pv_protection.go @@ -100,7 +100,8 @@ var _ = utils.SIGDescribe("PV Protection", func() { ginkgo.By("Deleting the PV") err = client.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err, "Error deleting PV") - e2epv.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, e2epv.PVDeletingTimeout) + err = e2epv.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, e2epv.PVDeletingTimeout) + framework.ExpectNoError(err, "waiting for PV to be deleted") }) ginkgo.It("Verify that PV bound to a PVC is not removed immediately", func() { @@ -127,6 +128,7 @@ var _ = utils.SIGDescribe("PV Protection", func() { framework.ExpectNoError(err, "Error deleting PVC") ginkgo.By("Checking that the PV is automatically removed from the system because it's no longer bound to a PVC") - 
e2epv.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, e2epv.PVDeletingTimeout) + err = e2epv.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, e2epv.PVDeletingTimeout) + framework.ExpectNoError(err, "waiting for PV to be deleted") }) }) diff --git a/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml b/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml index 4d21f5bb4203..3a1a87a12ed7 100644 --- a/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml +++ b/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml @@ -45,7 +45,7 @@ spec: - mountPath: /registration name: registration-dir - name: mock - image: quay.io/k8scsi/mock-driver:v2.1.0 + image: quay.io/k8scsi/mock-driver:v3.1.0 args: - "--name=mock.storage.k8s.io" - "--permissive-target-path" # because of https://github.com/kubernetes/kubernetes/issues/75535 From 5c48f209a541fdac43bf2d2cbb62f4007f98eb75 Mon Sep 17 00:00:00 2001 From: Ricardo Pchevuzinske Katz Date: Thu, 26 Mar 2020 11:20:01 -0300 Subject: [PATCH 35/92] Update ipvs library to the new repo and upgrade library version --- Godeps/LICENSES | 420 +++---- go.mod | 7 +- go.sum | 9 +- pkg/util/ipvs/BUILD | 8 +- pkg/util/ipvs/ipvs_linux.go | 2 +- pkg/util/ipvs/ipvs_linux_test.go | 2 +- vendor/BUILD | 2 +- vendor/github.com/moby/ipvs/.gitignore | 38 + .../{docker/libnetwork => moby}/ipvs/BUILD | 5 +- .../{docker/libnetwork => moby/ipvs}/LICENSE | 0 vendor/github.com/moby/ipvs/README.md | 34 + .../libnetwork => moby}/ipvs/constants.go | 0 vendor/github.com/moby/ipvs/doc.go | 1 + vendor/github.com/moby/ipvs/go.mod | 12 + vendor/github.com/moby/ipvs/go.sum | 35 + .../{docker/libnetwork => moby}/ipvs/ipvs.go | 0 .../libnetwork => moby}/ipvs/netlink.go | 2 +- .../github.com/vishvananda/netlink/.gitignore | 1 + .../vishvananda/netlink/.travis.yml | 6 + vendor/github.com/vishvananda/netlink/BUILD | 4 + .../vishvananda/netlink/addr_linux.go | 105 +- .../vishvananda/netlink/bridge_linux.go | 9 +- .../github.com/vishvananda/netlink/class.go | 141 ++- .../vishvananda/netlink/class_linux.go | 145 ++- .../vishvananda/netlink/conntrack_linux.go | 119 +- .../vishvananda/netlink/devlink_linux.go | 272 ++++ .../github.com/vishvananda/netlink/filter.go | 110 +- .../vishvananda/netlink/filter_linux.go | 240 +++- .../vishvananda/netlink/fou_linux.go | 6 +- .../vishvananda/netlink/genetlink_linux.go | 3 + vendor/github.com/vishvananda/netlink/go.mod | 8 + vendor/github.com/vishvananda/netlink/go.sum | 4 + .../vishvananda/netlink/handle_linux.go | 2 +- .../vishvananda/netlink/handle_unspecified.go | 12 + .../vishvananda/netlink/ioctl_linux.go | 10 +- vendor/github.com/vishvananda/netlink/link.go | 242 +++- .../vishvananda/netlink/link_linux.go | 1120 +++++++++++++---- .../github.com/vishvananda/netlink/neigh.go | 7 + .../vishvananda/netlink/neigh_linux.go | 189 ++- .../github.com/vishvananda/netlink/netlink.go | 3 +- .../netlink/netlink_unspecified.go | 12 + .../vishvananda/netlink/netns_linux.go | 141 +++ .../vishvananda/netlink/netns_unspecified.go | 19 + .../github.com/vishvananda/netlink/nl/BUILD | 3 + .../vishvananda/netlink/nl/bridge_linux.go | 4 +- .../vishvananda/netlink/nl/conntrack_linux.go | 40 +- .../vishvananda/netlink/nl/devlink_linux.go | 40 + .../vishvananda/netlink/nl/link_linux.go | 72 +- .../vishvananda/netlink/nl/nl_linux.go | 66 +- .../vishvananda/netlink/nl/rdma_link_linux.go | 35 + .../vishvananda/netlink/nl/route_linux.go | 26 + .../vishvananda/netlink/nl/seg6_linux.go | 43 + 
.../vishvananda/netlink/nl/seg6local_linux.go | 76 ++ .../vishvananda/netlink/nl/syscall.go | 11 +- .../vishvananda/netlink/nl/tc_linux.go | 166 ++- .../vishvananda/netlink/nl/xfrm_linux.go | 62 +- .../vishvananda/netlink/protinfo.go | 4 + .../vishvananda/netlink/protinfo_linux.go | 7 +- .../github.com/vishvananda/netlink/qdisc.go | 48 + .../vishvananda/netlink/qdisc_linux.go | 69 +- .../vishvananda/netlink/rdma_link_linux.go | 264 ++++ .../github.com/vishvananda/netlink/route.go | 2 + .../vishvananda/netlink/route_linux.go | 229 +++- .../vishvananda/netlink/rule_linux.go | 2 +- .../vishvananda/netlink/socket_linux.go | 5 +- .../vishvananda/netlink/xfrm_monitor_linux.go | 6 +- .../vishvananda/netlink/xfrm_policy.go | 26 +- .../vishvananda/netlink/xfrm_policy_linux.go | 19 +- .../vishvananda/netlink/xfrm_state.go | 6 +- .../vishvananda/netlink/xfrm_state_linux.go | 27 +- vendor/modules.txt | 6 +- 71 files changed, 4060 insertions(+), 811 deletions(-) create mode 100644 vendor/github.com/moby/ipvs/.gitignore rename vendor/github.com/{docker/libnetwork => moby}/ipvs/BUILD (90%) rename vendor/github.com/{docker/libnetwork => moby/ipvs}/LICENSE (100%) create mode 100644 vendor/github.com/moby/ipvs/README.md rename vendor/github.com/{docker/libnetwork => moby}/ipvs/constants.go (100%) create mode 100644 vendor/github.com/moby/ipvs/doc.go create mode 100644 vendor/github.com/moby/ipvs/go.mod create mode 100644 vendor/github.com/moby/ipvs/go.sum rename vendor/github.com/{docker/libnetwork => moby}/ipvs/ipvs.go (100%) rename vendor/github.com/{docker/libnetwork => moby}/ipvs/netlink.go (99%) create mode 100644 vendor/github.com/vishvananda/netlink/.gitignore create mode 100644 vendor/github.com/vishvananda/netlink/devlink_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/go.mod create mode 100644 vendor/github.com/vishvananda/netlink/go.sum create mode 100644 vendor/github.com/vishvananda/netlink/netns_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/netns_unspecified.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/devlink_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/rdma_link_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/rdma_link_linux.go diff --git a/Godeps/LICENSES b/Godeps/LICENSES index 0376a5574601..77dc9941a33c 100644 --- a/Godeps/LICENSES +++ b/Godeps/LICENSES @@ -6649,216 +6649,6 @@ Apache License ================================================================================ -================================================================================ -= vendor/github.com/docker/libnetwork licensed under: = - -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- - -= vendor/github.com/docker/libnetwork/LICENSE d2794c0df5b907fdace235a619d80314 -================================================================================ - - ================================================================================ = vendor/github.com/docker/spdystream licensed under: = @@ -14403,6 +14193,216 @@ THE SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/moby/ipvs licensed under: = + +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + += vendor/github.com/moby/ipvs/LICENSE d2794c0df5b907fdace235a619d80314 +================================================================================ + + ================================================================================ = vendor/github.com/moby/term licensed under: = diff --git a/go.mod b/go.mod index 4495bf684983..965deb6fe994 100644 --- a/go.mod +++ b/go.mod @@ -42,7 +42,6 @@ require ( github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0 github.com/docker/go-connections v0.3.0 github.com/docker/go-units v0.4.0 - github.com/docker/libnetwork v0.8.0-dev.2.0.20190925143933-c8a5fca4a652 github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 github.com/emicklei/go-restful v2.9.5+incompatible github.com/evanphx/json-patch v4.2.0+incompatible @@ -71,6 +70,7 @@ require ( github.com/lithammer/dedent v1.1.0 github.com/lpabon/godbc v0.1.1 // indirect github.com/miekg/dns v1.1.4 + github.com/moby/ipvs v1.0.0 github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb // indirect github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 @@ -97,7 +97,7 @@ require ( github.com/stretchr/testify v1.4.0 github.com/thecodeteam/goscaleio v0.1.0 github.com/urfave/negroni v1.0.0 // indirect - github.com/vishvananda/netlink v1.0.0 + github.com/vishvananda/netlink v1.1.0 github.com/vmware/govmomi v0.20.3 go.etcd.io/etcd v0.5.0-alpha.5.0.20200224211402-c65a9e2dd1fd golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 @@ -380,6 +380,7 @@ replace ( github.com/mitchellh/go-ps => github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936 github.com/mitchellh/go-wordwrap => github.com/mitchellh/go-wordwrap v1.0.0 github.com/mitchellh/mapstructure => github.com/mitchellh/mapstructure v1.1.2 + github.com/moby/ipvs => github.com/moby/ipvs v1.0.0 github.com/moby/term => github.com/moby/term v0.0.0-20200312100748-672ec06f55cd github.com/modern-go/concurrent => github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd github.com/modern-go/reflect2 => github.com/modern-go/reflect2 v1.0.1 @@ -456,7 +457,7 @@ replace ( github.com/valyala/quicktemplate => github.com/valyala/quicktemplate v1.1.1 github.com/valyala/tcplisten => github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a github.com/vektah/gqlparser => 
github.com/vektah/gqlparser v1.1.2 - github.com/vishvananda/netlink => github.com/vishvananda/netlink v1.0.0 + github.com/vishvananda/netlink => github.com/vishvananda/netlink v1.1.0 github.com/vishvananda/netns => github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df github.com/vmware/govmomi => github.com/vmware/govmomi v0.20.3 github.com/xiang90/probing => github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 diff --git a/go.sum b/go.sum index e03a32af092b..0b57d8d54f3f 100644 --- a/go.sum +++ b/go.sum @@ -140,8 +140,6 @@ github.com/docker/go-connections v0.3.0 h1:3lOnM9cSzgGwx8VfK/NGOW5fLQ0GjIlCkaktF github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/libnetwork v0.8.0-dev.2.0.20190925143933-c8a5fca4a652 h1:alzR0hpQ/vaeYQAiqpCzrcbDbGMBAghmjT8nYe0To3I= -github.com/docker/libnetwork v0.8.0-dev.2.0.20190925143933-c8a5fca4a652/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= @@ -370,6 +368,8 @@ github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9 github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/ipvs v1.0.0 h1:89i8bPaL2DC0cjyRDv0QQOYUOU4fujziJmhF4ca/mtY= +github.com/moby/ipvs v1.0.0/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd h1:aY7OQNf2XqY/JQ6qREWamhI/81os/agb2BAGpcx5yWI= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= @@ -497,8 +497,8 @@ github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk github.com/valyala/quicktemplate v1.1.1/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4= github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= -github.com/vishvananda/netlink v1.0.0 h1:bqNY2lgheFIu1meHUFSH3d7vG93AFyqg3oGbJCOJgSM= -github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vmware/govmomi v0.20.3 h1:gpw/0Ku+6RgF3jsi7fnCLmlcikBHfKBCUcu1qgc16OU= @@ -592,6 +592,7 @@ gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod 
h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/gotestsum v0.3.5 h1:VePOWRsuWFYpfp/G8mbmOZKxO5T3501SEGQRUdvq7h0= gotest.tools/gotestsum v0.3.5/go.mod h1:Mnf3e5FUzXbkCfynWBGOwLssY7gTQgCHObK9tMpAriY= +gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.1-2019.2.2 h1:TEgegKbBqByGUb1Coo1pc2qIdf2xw6v0mYyLSYtyopE= diff --git a/pkg/util/ipvs/BUILD b/pkg/util/ipvs/BUILD index c4ac2cf9249c..a25cae757deb 100644 --- a/pkg/util/ipvs/BUILD +++ b/pkg/util/ipvs/BUILD @@ -17,10 +17,10 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:android": [ - "//vendor/github.com/docker/libnetwork/ipvs:go_default_library", + "//vendor/github.com/moby/ipvs:go_default_library", ], "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/github.com/docker/libnetwork/ipvs:go_default_library", + "//vendor/github.com/moby/ipvs:go_default_library", ], "//conditions:default": [], }), @@ -39,7 +39,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:android": [ - "//vendor/github.com/docker/libnetwork/ipvs:go_default_library", + "//vendor/github.com/moby/ipvs:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], @@ -56,7 +56,7 @@ go_library( "//vendor/k8s.io/utils/exec:go_default_library", ], "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/github.com/docker/libnetwork/ipvs:go_default_library", + "//vendor/github.com/moby/ipvs:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], diff --git a/pkg/util/ipvs/ipvs_linux.go b/pkg/util/ipvs/ipvs_linux.go index 9acfc570a4ab..69b0acf7d5b2 100644 --- a/pkg/util/ipvs/ipvs_linux.go +++ b/pkg/util/ipvs/ipvs_linux.go @@ -27,7 +27,7 @@ import ( "syscall" "time" - libipvs "github.com/docker/libnetwork/ipvs" + libipvs "github.com/moby/ipvs" "k8s.io/klog" utilexec "k8s.io/utils/exec" ) diff --git a/pkg/util/ipvs/ipvs_linux_test.go b/pkg/util/ipvs/ipvs_linux_test.go index 706073870d6c..9fa92823c3b2 100644 --- a/pkg/util/ipvs/ipvs_linux_test.go +++ b/pkg/util/ipvs/ipvs_linux_test.go @@ -25,7 +25,7 @@ import ( "syscall" "testing" - libipvs "github.com/docker/libnetwork/ipvs" + libipvs "github.com/moby/ipvs" ) func Test_toVirtualServer(t *testing.T) { diff --git a/vendor/BUILD b/vendor/BUILD index ef54d6ab0a90..dd06a245693c 100644 --- a/vendor/BUILD +++ b/vendor/BUILD @@ -121,7 +121,6 @@ filegroup( "//vendor/github.com/docker/go-connections/sockets:all-srcs", "//vendor/github.com/docker/go-connections/tlsconfig:all-srcs", "//vendor/github.com/docker/go-units:all-srcs", - "//vendor/github.com/docker/libnetwork/ipvs:all-srcs", "//vendor/github.com/docker/spdystream:all-srcs", "//vendor/github.com/dustin/go-humanize:all-srcs", "//vendor/github.com/elazarl/goproxy:all-srcs", @@ -246,6 +245,7 @@ filegroup( "//vendor/github.com/mistifyio/go-zfs:all-srcs", "//vendor/github.com/mitchellh/go-wordwrap:all-srcs", "//vendor/github.com/mitchellh/mapstructure:all-srcs", + "//vendor/github.com/moby/ipvs:all-srcs", "//vendor/github.com/moby/term:all-srcs", "//vendor/github.com/modern-go/concurrent:all-srcs", "//vendor/github.com/modern-go/reflect2:all-srcs", diff --git 
a/vendor/github.com/moby/ipvs/.gitignore b/vendor/github.com/moby/ipvs/.gitignore new file mode 100644 index 000000000000..4fdfea1d9ec4 --- /dev/null +++ b/vendor/github.com/moby/ipvs/.gitignore @@ -0,0 +1,38 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so +*~ +.gtm +tags +.DS_Store + +# Folders +_obj +_test + + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# Coverage +*.tmp +*.coverprofile + +# IDE files and folders +.project +.settings/ +.idea/ diff --git a/vendor/github.com/docker/libnetwork/ipvs/BUILD b/vendor/github.com/moby/ipvs/BUILD similarity index 90% rename from vendor/github.com/docker/libnetwork/ipvs/BUILD rename to vendor/github.com/moby/ipvs/BUILD index 0df884b9a46e..aa2852f1747e 100644 --- a/vendor/github.com/docker/libnetwork/ipvs/BUILD +++ b/vendor/github.com/moby/ipvs/BUILD @@ -4,11 +4,12 @@ go_library( name = "go_default_library", srcs = [ "constants.go", + "doc.go", "ipvs.go", "netlink.go", ], - importmap = "k8s.io/kubernetes/vendor/github.com/docker/libnetwork/ipvs", - importpath = "github.com/docker/libnetwork/ipvs", + importmap = "k8s.io/kubernetes/vendor/github.com/moby/ipvs", + importpath = "github.com/moby/ipvs", visibility = ["//visibility:public"], deps = select({ "@io_bazel_rules_go//go/platform:android": [ diff --git a/vendor/github.com/docker/libnetwork/LICENSE b/vendor/github.com/moby/ipvs/LICENSE similarity index 100% rename from vendor/github.com/docker/libnetwork/LICENSE rename to vendor/github.com/moby/ipvs/LICENSE diff --git a/vendor/github.com/moby/ipvs/README.md b/vendor/github.com/moby/ipvs/README.md new file mode 100644 index 000000000000..a45cf049a559 --- /dev/null +++ b/vendor/github.com/moby/ipvs/README.md @@ -0,0 +1,34 @@ +# ipvs - networking for containers + +![Test](https://github.com/moby/ipvs/workflows/Test/badge.svg) [![GoDoc](https://godoc.org/github.com/moby/ipvs?status.svg)](https://godoc.org/github.com/moby/ipvs) [![Go Report Card](https://goreportcard.com/badge/github.com/moby/ipvs)](https://goreportcard.com/report/github.com/moby/ipvs) + +ipvs provides a native Go implementation for communicating with IPVS kernel module using a netlink socket. + + +#### Using ipvs + +```go +import ( + "log" + + "github.com/moby/ipvs" +) + +func main() { + handle, err := ipvs.New("") + if err != nil { + log.Fatalf("ipvs.New: %s", err) + } + svcs, err := handle.GetServices() + if err != nil { + log.Fatalf("handle.GetServices: %s", err) + } +} +``` + +## Contributing + +Want to hack on ipvs? [Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md) apply. + +## Copyright and license +Code and documentation copyright 2015 Docker, inc. Code released under the Apache 2.0 license. Docs released under Creative commons. 
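Extending the README snippet, a hedged sketch of creating and tearing down a virtual service via the new import path; the Service fields, Handle methods, and the RoundRobin constant are taken from the vendored ipvs.go and constants.go as read here, so treat the exact signatures as assumptions (requires Linux with CAP_NET_ADMIN):

```go
package main

import (
	"log"
	"net"
	"syscall"

	"github.com/moby/ipvs"
)

func main() {
	// "" keeps the handle in the current network namespace.
	handle, err := ipvs.New("")
	if err != nil {
		log.Fatalf("ipvs.New: %s", err)
	}
	defer handle.Close()

	svc := &ipvs.Service{
		AddressFamily: syscall.AF_INET,
		Address:       net.ParseIP("10.0.0.1"),
		Protocol:      syscall.IPPROTO_TCP,
		Port:          80,
		SchedName:     ipvs.RoundRobin, // "rr", from constants.go
	}
	if err := handle.NewService(svc); err != nil {
		log.Fatalf("handle.NewService: %s", err)
	}
	// Clean up the virtual service when done.
	if err := handle.DelService(svc); err != nil {
		log.Fatalf("handle.DelService: %s", err)
	}
}
```

This is the same Handle/Service surface the old github.com/docker/libnetwork/ipvs exposed, which is why the kube-proxy call sites in pkg/util/ipvs only needed their import path updated.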
diff --git a/vendor/github.com/docker/libnetwork/ipvs/constants.go b/vendor/github.com/moby/ipvs/constants.go similarity index 100% rename from vendor/github.com/docker/libnetwork/ipvs/constants.go rename to vendor/github.com/moby/ipvs/constants.go diff --git a/vendor/github.com/moby/ipvs/doc.go b/vendor/github.com/moby/ipvs/doc.go new file mode 100644 index 000000000000..91ce6808ad7e --- /dev/null +++ b/vendor/github.com/moby/ipvs/doc.go @@ -0,0 +1 @@ +package ipvs diff --git a/vendor/github.com/moby/ipvs/go.mod b/vendor/github.com/moby/ipvs/go.mod new file mode 100644 index 000000000000..a57cc9d210b3 --- /dev/null +++ b/vendor/github.com/moby/ipvs/go.mod @@ -0,0 +1,12 @@ +module github.com/moby/ipvs + +go 1.13 + +require ( + github.com/pkg/errors v0.9.1 // indirect + github.com/sirupsen/logrus v1.4.2 + github.com/vishvananda/netlink v1.1.0 + github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df + golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 + gotest.tools/v3 v3.0.2 +) diff --git a/vendor/github.com/moby/ipvs/go.sum b/vendor/github.com/moby/ipvs/go.sum new file mode 100644 index 000000000000..324acf7c9404 --- /dev/null +++ b/vendor/github.com/moby/ipvs/go.sum @@ -0,0 +1,35 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444 h1:/d2cWp6PSamH4jDPFLyO150psQdqvtoNX8Zjg3AQ31g= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= diff --git a/vendor/github.com/docker/libnetwork/ipvs/ipvs.go b/vendor/github.com/moby/ipvs/ipvs.go similarity index 100% rename from vendor/github.com/docker/libnetwork/ipvs/ipvs.go rename to vendor/github.com/moby/ipvs/ipvs.go diff --git a/vendor/github.com/docker/libnetwork/ipvs/netlink.go b/vendor/github.com/moby/ipvs/netlink.go similarity index 99% rename from vendor/github.com/docker/libnetwork/ipvs/netlink.go rename to vendor/github.com/moby/ipvs/netlink.go index 7673659aa86f..1a822da4b60f 100644 --- a/vendor/github.com/docker/libnetwork/ipvs/netlink.go +++ b/vendor/github.com/moby/ipvs/netlink.go @@ -217,7 +217,7 @@ func execute(s *nl.NetlinkSocket, req *nl.NetlinkRequest, resType uint16) ([][]b done: for { - msgs, err := s.Receive() + msgs, _, err := s.Receive() if err != nil { if s.GetFd() == -1 { return nil, fmt.Errorf("Socket got closed on receive") diff --git a/vendor/github.com/vishvananda/netlink/.gitignore b/vendor/github.com/vishvananda/netlink/.gitignore new file mode 100644 index 000000000000..9f11b755a17d --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/.gitignore @@ -0,0 +1 @@ +.idea/ diff --git a/vendor/github.com/vishvananda/netlink/.travis.yml b/vendor/github.com/vishvananda/netlink/.travis.yml index f5c0b3eb5f7f..7d14af4d6da1 100644 --- a/vendor/github.com/vishvananda/netlink/.travis.yml +++ b/vendor/github.com/vishvananda/netlink/.travis.yml @@ -1,4 +1,8 @@ language: go +go: + - "1.10.x" + - "1.11.x" + - "1.12.x" before_script: # make sure we keep path in tact when we sudo - sudo sed -i -e 's/^Defaults\tsecure_path.*$//' /etc/sudoers @@ -9,5 +13,7 @@ before_script: - sudo modprobe nf_conntrack_netlink - sudo modprobe nf_conntrack_ipv4 - sudo modprobe nf_conntrack_ipv6 + - sudo modprobe sch_hfsc install: - go get github.com/vishvananda/netns +go_import_path: github.com/vishvananda/netlink diff --git a/vendor/github.com/vishvananda/netlink/BUILD b/vendor/github.com/vishvananda/netlink/BUILD index f9f912e2963f..d11d34533844 100644 --- a/vendor/github.com/vishvananda/netlink/BUILD +++ b/vendor/github.com/vishvananda/netlink/BUILD @@ -11,6 +11,7 @@ go_library( "class_linux.go", "conntrack_linux.go", "conntrack_unspecified.go", + "devlink_linux.go", "filter.go", "filter_linux.go", "fou.go", @@ -30,11 +31,14 @@ go_library( "netlink.go", "netlink_linux.go", "netlink_unspecified.go", + "netns_linux.go", + "netns_unspecified.go", "order.go", "protinfo.go", "protinfo_linux.go", "qdisc.go", "qdisc_linux.go", + "rdma_link_linux.go", "route.go", "route_linux.go", "route_unspecified.go", diff --git 
a/vendor/github.com/vishvananda/netlink/addr_linux.go b/vendor/github.com/vishvananda/netlink/addr_linux.go index d59c3281ec78..28746d5afecd 100644 --- a/vendor/github.com/vishvananda/netlink/addr_linux.go +++ b/vendor/github.com/vishvananda/netlink/addr_linux.go @@ -15,39 +15,62 @@ import ( const IFA_FLAGS = 0x8 // AddrAdd will add an IP address to a link device. +// // Equivalent to: `ip addr add $addr dev $link` +// +// If `addr` is an IPv4 address and the broadcast address is not given, it +// will be automatically computed based on the IP mask if /30 or larger. func AddrAdd(link Link, addr *Addr) error { return pkgHandle.AddrAdd(link, addr) } // AddrAdd will add an IP address to a link device. +// // Equivalent to: `ip addr add $addr dev $link` +// +// If `addr` is an IPv4 address and the broadcast address is not given, it +// will be automatically computed based on the IP mask if /30 or larger. func (h *Handle) AddrAdd(link Link, addr *Addr) error { req := h.newNetlinkRequest(unix.RTM_NEWADDR, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) return h.addrHandle(link, addr, req) } // AddrReplace will replace (or, if not present, add) an IP address on a link device. +// // Equivalent to: `ip addr replace $addr dev $link` +// +// If `addr` is an IPv4 address and the broadcast address is not given, it +// will be automatically computed based on the IP mask if /30 or larger. func AddrReplace(link Link, addr *Addr) error { return pkgHandle.AddrReplace(link, addr) } // AddrReplace will replace (or, if not present, add) an IP address on a link device. +// // Equivalent to: `ip addr replace $addr dev $link` +// +// If `addr` is an IPv4 address and the broadcast address is not given, it +// will be automatically computed based on the IP mask if /30 or larger. func (h *Handle) AddrReplace(link Link, addr *Addr) error { req := h.newNetlinkRequest(unix.RTM_NEWADDR, unix.NLM_F_CREATE|unix.NLM_F_REPLACE|unix.NLM_F_ACK) return h.addrHandle(link, addr, req) } // AddrDel will delete an IP address from a link device. +// // Equivalent to: `ip addr del $addr dev $link` +// +// If `addr` is an IPv4 address and the broadcast address is not given, it +// will be automatically computed based on the IP mask if /30 or larger. func AddrDel(link Link, addr *Addr) error { return pkgHandle.AddrDel(link, addr) } // AddrDel will delete an IP address from a link device. // Equivalent to: `ip addr del $addr dev $link` +// +// If `addr` is an IPv4 address and the broadcast address is not given, it +// will be automatically computed based on the IP mask if /30 or larger. func (h *Handle) AddrDel(link Link, addr *Addr) error { req := h.newNetlinkRequest(unix.RTM_DELADDR, unix.NLM_F_ACK) return h.addrHandle(link, addr, req) @@ -65,7 +88,11 @@ func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error msg := nl.NewIfAddrmsg(family) msg.Index = uint32(base.Index) msg.Scope = uint8(addr.Scope) - prefixlen, masklen := addr.Mask.Size() + mask := addr.Mask + if addr.Peer != nil { + mask = addr.Peer.Mask + } + prefixlen, masklen := mask.Size() msg.Prefixlen = uint8(prefixlen) req.AddData(msg) @@ -104,14 +131,20 @@ func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error } if family == FAMILY_V4 { - if addr.Broadcast == nil { + // Automatically set the broadcast address if it is unset and the + // subnet is large enough to sensibly have one (/30 or larger). 
+ // See: RFC 3021 + if addr.Broadcast == nil && prefixlen < 31 { calcBroadcast := make(net.IP, masklen/8) for i := range localAddrData { - calcBroadcast[i] = localAddrData[i] | ^addr.Mask[i] + calcBroadcast[i] = localAddrData[i] | ^mask[i] } addr.Broadcast = calcBroadcast } - req.AddData(nl.NewRtAttr(unix.IFA_BROADCAST, addr.Broadcast)) + + if addr.Broadcast != nil { + req.AddData(nl.NewRtAttr(unix.IFA_BROADCAST, addr.Broadcast)) + } if addr.Label != "" { labelData := nl.NewRtAttr(unix.IFA_LABEL, nl.ZeroTerminated(addr.Label)) @@ -206,13 +239,17 @@ func parseAddr(m []byte) (addr Addr, family, index int, err error) { IP: attr.Value, Mask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)), } - addr.Peer = dst case unix.IFA_LOCAL: + // iproute2 manual: + // If a peer address is specified, the local address + // cannot have a prefix length. The network prefix is + // associated with the peer rather than with the local + // address. + n := 8 * len(attr.Value) local = &net.IPNet{ IP: attr.Value, - Mask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)), + Mask: net.CIDRMask(n, n), } - addr.IPNet = local case unix.IFA_BROADCAST: addr.Broadcast = attr.Value case unix.IFA_LABEL: @@ -226,12 +263,24 @@ func parseAddr(m []byte) (addr Addr, family, index int, err error) { } } - // IFA_LOCAL should be there but if not, fall back to IFA_ADDRESS + // libnl addr.c comment: + // IPv6 sends the local address as IFA_ADDRESS with no + // IFA_LOCAL, IPv4 sends both IFA_LOCAL and IFA_ADDRESS + // with IFA_ADDRESS being the peer address if they differ + // + // But obviously, as there are IPv6 PtP addresses, too, + // IFA_LOCAL should also be handled for IPv6. if local != nil { - addr.IPNet = local + if family == FAMILY_V4 && local.IP.Equal(dst.IP) { + addr.IPNet = dst + } else { + addr.IPNet = local + addr.Peer = dst + } } else { addr.IPNet = dst } + addr.Scope = int(msg.Scope) return @@ -250,21 +299,22 @@ type AddrUpdate struct { // AddrSubscribe takes a chan down which notifications will be sent // when addresses change. Close the 'done' chan to stop subscription. func AddrSubscribe(ch chan<- AddrUpdate, done <-chan struct{}) error { - return addrSubscribeAt(netns.None(), netns.None(), ch, done, nil, false) + return addrSubscribeAt(netns.None(), netns.None(), ch, done, nil, false, 0) } // AddrSubscribeAt works like AddrSubscribe plus it allows the caller // to choose the network namespace in which to subscribe (ns). func AddrSubscribeAt(ns netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}) error { - return addrSubscribeAt(ns, netns.None(), ch, done, nil, false) + return addrSubscribeAt(ns, netns.None(), ch, done, nil, false, 0) } // AddrSubscribeOptions contains a set of options to use with // AddrSubscribeWithOptions. 
type AddrSubscribeOptions struct { - Namespace *netns.NsHandle - ErrorCallback func(error) - ListExisting bool + Namespace *netns.NsHandle + ErrorCallback func(error) + ListExisting bool + ReceiveBufferSize int } // AddrSubscribeWithOptions work like AddrSubscribe but enable to @@ -275,10 +325,10 @@ func AddrSubscribeWithOptions(ch chan<- AddrUpdate, done <-chan struct{}, option none := netns.None() options.Namespace = &none } - return addrSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting) + return addrSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting, options.ReceiveBufferSize) } -func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}, cberr func(error), listExisting bool) error { +func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}, cberr func(error), listExisting bool, rcvbuf int) error { s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_IPV4_IFADDR, unix.RTNLGRP_IPV6_IFADDR) if err != nil { return err @@ -289,6 +339,12 @@ func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-c s.Close() }() } + if rcvbuf != 0 { + err = pkgHandle.SetSocketReceiveBufferSize(rcvbuf, false) + if err != nil { + return err + } + } if listExisting { req := pkgHandle.newNetlinkRequest(unix.RTM_GETADDR, unix.NLM_F_DUMP) @@ -301,13 +357,19 @@ func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-c go func() { defer close(ch) for { - msgs, err := s.Receive() + msgs, from, err := s.Receive() if err != nil { if cberr != nil { cberr(err) } return } + if from.Pid != nl.PidKernel { + if cberr != nil { + cberr(fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel)) + } + continue + } for _, m := range msgs { if m.Header.Type == unix.NLMSG_DONE { continue @@ -319,16 +381,17 @@ func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-c continue } if cberr != nil { - cberr(syscall.Errno(-error)) + cberr(fmt.Errorf("error message: %v", + syscall.Errno(-error))) } - return + continue } msgType := m.Header.Type if msgType != unix.RTM_NEWADDR && msgType != unix.RTM_DELADDR { if cberr != nil { cberr(fmt.Errorf("bad message type: %d", msgType)) } - return + continue } addr, _, ifindex, err := parseAddr(m.Data) @@ -336,7 +399,7 @@ func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-c if cberr != nil { cberr(fmt.Errorf("could not parse address: %v", err)) } - return + continue } ch <- AddrUpdate{LinkAddress: *addr.IPNet, diff --git a/vendor/github.com/vishvananda/netlink/bridge_linux.go b/vendor/github.com/vishvananda/netlink/bridge_linux.go index 350ab0db4b01..6e1224c47b8f 100644 --- a/vendor/github.com/vishvananda/netlink/bridge_linux.go +++ b/vendor/github.com/vishvananda/netlink/bridge_linux.go @@ -96,7 +96,7 @@ func (h *Handle) bridgeVlanModify(cmd int, link Link, vid uint16, pvid, untagged flags |= nl.BRIDGE_FLAGS_MASTER } if flags > 0 { - nl.NewRtAttrChild(br, nl.IFLA_BRIDGE_FLAGS, nl.Uint16Attr(flags)) + br.AddRtAttr(nl.IFLA_BRIDGE_FLAGS, nl.Uint16Attr(flags)) } vlanInfo := &nl.BridgeVlanInfo{Vid: vid} if pvid { @@ -105,11 +105,8 @@ func (h *Handle) bridgeVlanModify(cmd int, link Link, vid uint16, pvid, untagged if untagged { vlanInfo.Flags |= nl.BRIDGE_VLAN_INFO_UNTAGGED } - nl.NewRtAttrChild(br, nl.IFLA_BRIDGE_VLAN_INFO, vlanInfo.Serialize()) + br.AddRtAttr(nl.IFLA_BRIDGE_VLAN_INFO, vlanInfo.Serialize()) 
req.AddData(br) _, err := req.Execute(unix.NETLINK_ROUTE, 0) - if err != nil { - return err - } - return nil + return err } diff --git a/vendor/github.com/vishvananda/netlink/class.go b/vendor/github.com/vishvananda/netlink/class.go index 8ee13af48eb9..dcc22d9e97ef 100644 --- a/vendor/github.com/vishvananda/netlink/class.go +++ b/vendor/github.com/vishvananda/netlink/class.go @@ -4,25 +4,76 @@ import ( "fmt" ) +// Class is the interface for all classes type Class interface { Attrs() *ClassAttrs Type() string } +// Generic networking statistics for netlink users. +// This file contains "gnet_" prefixed structs and relevant functions. +// See Documentation/networking/gen_stats.txt in Linux source code for more details. + +// GnetStatsBasic Ref: struct gnet_stats_basic { ... } +type GnetStatsBasic struct { + Bytes uint64 // number of seen bytes + Packets uint32 // number of seen packets +} + +// GnetStatsRateEst Ref: struct gnet_stats_rate_est { ... } +type GnetStatsRateEst struct { + Bps uint32 // current byte rate + Pps uint32 // current packet rate +} + +// GnetStatsRateEst64 Ref: struct gnet_stats_rate_est64 { ... } +type GnetStatsRateEst64 struct { + Bps uint64 // current byte rate + Pps uint64 // current packet rate +} + +// GnetStatsQueue Ref: struct gnet_stats_queue { ... } +type GnetStatsQueue struct { + Qlen uint32 // queue length + Backlog uint32 // backlog size of queue + Drops uint32 // number of dropped packets + Requeues uint32 // number of requeues + Overlimits uint32 // number of enqueues over the limit +} + +// ClassStatistics representation based on generic networking statistics for netlink. +// See Documentation/networking/gen_stats.txt in Linux source code for more details. +type ClassStatistics struct { + Basic *GnetStatsBasic + Queue *GnetStatsQueue + RateEst *GnetStatsRateEst +} + +// NewClassStatistics constructs a ClassStatistics struct whose fields are all initialized to 0. +func NewClassStatistics() *ClassStatistics { + return &ClassStatistics{ + Basic: &GnetStatsBasic{}, + Queue: &GnetStatsQueue{}, + RateEst: &GnetStatsRateEst{}, + } +} + // ClassAttrs represents a netlink class. A filter is associated with a link, // has a handle and a parent. The root filter of a device should have a // parent == HANDLE_ROOT.
type ClassAttrs struct { - LinkIndex int - Handle uint32 - Parent uint32 - Leaf uint32 + LinkIndex int + Handle uint32 + Parent uint32 + Leaf uint32 + Statistics *ClassStatistics } func (q ClassAttrs) String() string { return fmt.Sprintf("{LinkIndex: %d, Handle: %s, Parent: %s, Leaf: %d}", q.LinkIndex, HandleStr(q.Handle), HandleStr(q.Parent), q.Leaf) } +// HtbClassAttrs stores the attributes of an HTB class type HtbClassAttrs struct { // TODO handle all attributes Rate uint64 @@ -54,10 +105,12 @@ func (q HtbClass) String() string { return fmt.Sprintf("{Rate: %d, Ceil: %d, Buffer: %d, Cbuffer: %d}", q.Rate, q.Ceil, q.Buffer, q.Cbuffer) } +// Attrs returns the class attributes func (q *HtbClass) Attrs() *ClassAttrs { return &q.ClassAttrs } +// Type returns the class type func (q *HtbClass) Type() string { return "htb" } @@ -69,10 +122,90 @@ type GenericClass struct { ClassType string } +// Attrs returns the class attributes func (class *GenericClass) Attrs() *ClassAttrs { return &class.ClassAttrs } +// Type returns the class type func (class *GenericClass) Type() string { return class.ClassType } + +// ServiceCurve is the way an HFSC curve is represented +type ServiceCurve struct { + m1 uint32 + d uint32 + m2 uint32 +} + +// Attrs returns the parameters of the service curve +func (c *ServiceCurve) Attrs() (uint32, uint32, uint32) { + return c.m1, c.d, c.m2 +} + +// HfscClass is a representation of the HFSC class +type HfscClass struct { + ClassAttrs + Rsc ServiceCurve + Fsc ServiceCurve + Usc ServiceCurve +} + +// SetUsc sets the Usc curve +func (hfsc *HfscClass) SetUsc(m1 uint32, d uint32, m2 uint32) { + hfsc.Usc = ServiceCurve{m1: m1 / 8, d: d, m2: m2 / 8} +} + +// SetFsc sets the Fsc curve +func (hfsc *HfscClass) SetFsc(m1 uint32, d uint32, m2 uint32) { + hfsc.Fsc = ServiceCurve{m1: m1 / 8, d: d, m2: m2 / 8} +} + +// SetRsc sets the Rsc curve +func (hfsc *HfscClass) SetRsc(m1 uint32, d uint32, m2 uint32) { + hfsc.Rsc = ServiceCurve{m1: m1 / 8, d: d, m2: m2 / 8} +} + +// SetSC implements the SC from the tc CLI +func (hfsc *HfscClass) SetSC(m1 uint32, d uint32, m2 uint32) { + hfsc.Rsc = ServiceCurve{m1: m1 / 8, d: d, m2: m2 / 8} + hfsc.Fsc = ServiceCurve{m1: m1 / 8, d: d, m2: m2 / 8} +} + +// SetUL implements the UL from the tc CLI +func (hfsc *HfscClass) SetUL(m1 uint32, d uint32, m2 uint32) { + hfsc.Usc = ServiceCurve{m1: m1 / 8, d: d, m2: m2 / 8} +} + +// SetLS implements the LS from the tc CLI +func (hfsc *HfscClass) SetLS(m1 uint32, d uint32, m2 uint32) { + hfsc.Fsc = ServiceCurve{m1: m1 / 8, d: d, m2: m2 / 8} +} + +// NewHfscClass returns a new HFSC struct with the set parameters +func NewHfscClass(attrs ClassAttrs) *HfscClass { + return &HfscClass{ + ClassAttrs: attrs, + Rsc: ServiceCurve{}, + Fsc: ServiceCurve{}, + Usc: ServiceCurve{}, + } +} + +func (hfsc *HfscClass) String() string { + return fmt.Sprintf( + "{%s -- {RSC: {m1=%d d=%d m2=%d}} {FSC: {m1=%d d=%d m2=%d}} {USC: {m1=%d d=%d m2=%d}}}", + hfsc.Attrs(), hfsc.Rsc.m1*8, hfsc.Rsc.d, hfsc.Rsc.m2*8, hfsc.Fsc.m1*8, hfsc.Fsc.d, hfsc.Fsc.m2*8, hfsc.Usc.m1*8, hfsc.Usc.d, hfsc.Usc.m2*8, + ) +} + +// Attrs returns the Hfsc parameters +func (hfsc *HfscClass) Attrs() *ClassAttrs { + return &hfsc.ClassAttrs +} + +// Type returns the type of the class +func (hfsc *HfscClass) Type() string { + return "hfsc" +} diff --git a/vendor/github.com/vishvananda/netlink/class_linux.go b/vendor/github.com/vishvananda/netlink/class_linux.go index a4997740e292..31091e5010a3 --- a/vendor/github.com/vishvananda/netlink/class_linux.go +++
b/vendor/github.com/vishvananda/netlink/class_linux.go @@ -1,14 +1,34 @@ package netlink import ( + "bytes" + "encoding/binary" + "encoding/hex" "errors" + "fmt" "syscall" "github.com/vishvananda/netlink/nl" "golang.org/x/sys/unix" ) -// NOTE: function is in here because it uses other linux functions +// Internal tc_stats representation in Go struct. +// This is for internal uses only to deserialize the payload of rtattr. +// After the deserialization, this should be converted into the canonical stats +// struct, ClassStatistics, in case of statistics of a class. +// Ref: struct tc_stats { ... } +type tcStats struct { + Bytes uint64 // Number of enqueued bytes + Packets uint32 // Number of enqueued packets + Drops uint32 // Packets dropped because of lack of resources + Overlimits uint32 // Number of throttle events when this flow goes out of allocated bandwidth + Bps uint32 // Current flow byte rate + Pps uint32 // Current flow packet rate + Qlen uint32 + Backlog uint32 +} + +// NewHtbClass NOTE: function is in here because it uses other linux functions func NewHtbClass(attrs ClassAttrs, cattrs HtbClassAttrs) *HtbClass { mtu := 1600 rate := cattrs.Rate / 8 @@ -126,7 +146,9 @@ func classPayload(req *nl.NetlinkRequest, class Class) error { req.AddData(nl.NewRtAttr(nl.TCA_KIND, nl.ZeroTerminated(class.Type()))) options := nl.NewRtAttr(nl.TCA_OPTIONS, nil) - if htb, ok := class.(*HtbClass); ok { + switch class.Type() { + case "htb": + htb := class.(*HtbClass) opt := nl.TcHtbCopt{} opt.Buffer = htb.Buffer opt.Cbuffer = htb.Cbuffer @@ -151,9 +173,18 @@ func classPayload(req *nl.NetlinkRequest, class Class) error { return errors.New("HTB: failed to calculate ceil rate table") } opt.Ceil = tcceil - nl.NewRtAttrChild(options, nl.TCA_HTB_PARMS, opt.Serialize()) - nl.NewRtAttrChild(options, nl.TCA_HTB_RTAB, SerializeRtab(rtab)) - nl.NewRtAttrChild(options, nl.TCA_HTB_CTAB, SerializeRtab(ctab)) + options.AddRtAttr(nl.TCA_HTB_PARMS, opt.Serialize()) + options.AddRtAttr(nl.TCA_HTB_RTAB, SerializeRtab(rtab)) + options.AddRtAttr(nl.TCA_HTB_CTAB, SerializeRtab(ctab)) + case "hfsc": + hfsc := class.(*HfscClass) + opt := nl.HfscCopt{} + opt.Rsc.Set(hfsc.Rsc.Attrs()) + opt.Fsc.Set(hfsc.Fsc.Attrs()) + opt.Usc.Set(hfsc.Usc.Attrs()) + options.AddRtAttr(nl.TCA_HFSC_RSC, nl.SerializeHfscCurve(&opt.Rsc)) + options.AddRtAttr(nl.TCA_HFSC_FSC, nl.SerializeHfscCurve(&opt.Fsc)) + options.AddRtAttr(nl.TCA_HFSC_USC, nl.SerializeHfscCurve(&opt.Usc)) } req.AddData(options) return nil @@ -197,9 +228,10 @@ func (h *Handle) ClassList(link Link, parent uint32) ([]Class, error) { } base := ClassAttrs{ - LinkIndex: int(msg.Ifindex), - Handle: msg.Handle, - Parent: msg.Parent, + LinkIndex: int(msg.Ifindex), + Handle: msg.Handle, + Parent: msg.Parent, + Statistics: nil, } var class Class @@ -211,6 +243,8 @@ func (h *Handle) ClassList(link Link, parent uint32) ([]Class, error) { switch classType { case "htb": class = &HtbClass{} + case "hfsc": + class = &HfscClass{} default: class = &GenericClass{ClassType: classType} } @@ -225,6 +259,26 @@ func (h *Handle) ClassList(link Link, parent uint32) ([]Class, error) { if err != nil { return nil, err } + case "hfsc": + data, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return nil, err + } + _, err = parseHfscClassData(class, data) + if err != nil { + return nil, err + } + } + // For backward compatibility. 
+ case nl.TCA_STATS: + base.Statistics, err = parseTcStats(attr.Value) + if err != nil { + return nil, err + } + case nl.TCA_STATS2: + base.Statistics, err = parseTcStats2(attr.Value) + if err != nil { + return nil, err } } } @@ -253,3 +307,78 @@ func parseHtbClassData(class Class, data []syscall.NetlinkRouteAttr) (bool, erro } return detailed, nil } + +func parseHfscClassData(class Class, data []syscall.NetlinkRouteAttr) (bool, error) { + hfsc := class.(*HfscClass) + detailed := false + for _, datum := range data { + m1, d, m2 := nl.DeserializeHfscCurve(datum.Value).Attrs() + switch datum.Attr.Type { + case nl.TCA_HFSC_RSC: + hfsc.Rsc = ServiceCurve{m1: m1, d: d, m2: m2} + case nl.TCA_HFSC_FSC: + hfsc.Fsc = ServiceCurve{m1: m1, d: d, m2: m2} + case nl.TCA_HFSC_USC: + hfsc.Usc = ServiceCurve{m1: m1, d: d, m2: m2} + } + } + return detailed, nil +} + +func parseTcStats(data []byte) (*ClassStatistics, error) { + buf := &bytes.Buffer{} + buf.Write(data) + native := nl.NativeEndian() + tcStats := &tcStats{} + if err := binary.Read(buf, native, tcStats); err != nil { + return nil, err + } + + stats := NewClassStatistics() + stats.Basic.Bytes = tcStats.Bytes + stats.Basic.Packets = tcStats.Packets + stats.Queue.Qlen = tcStats.Qlen + stats.Queue.Backlog = tcStats.Backlog + stats.Queue.Drops = tcStats.Drops + stats.Queue.Overlimits = tcStats.Overlimits + stats.RateEst.Bps = tcStats.Bps + stats.RateEst.Pps = tcStats.Pps + + return stats, nil +} + +func parseGnetStats(data []byte, gnetStats interface{}) error { + buf := &bytes.Buffer{} + buf.Write(data) + native := nl.NativeEndian() + return binary.Read(buf, native, gnetStats) +} + +func parseTcStats2(data []byte) (*ClassStatistics, error) { + rtAttrs, err := nl.ParseRouteAttr(data) + if err != nil { + return nil, err + } + stats := NewClassStatistics() + for _, datum := range rtAttrs { + switch datum.Attr.Type { + case nl.TCA_STATS_BASIC: + if err := parseGnetStats(datum.Value, stats.Basic); err != nil { + return nil, fmt.Errorf("Failed to parse ClassStatistics.Basic with: %v\n%s", + err, hex.Dump(datum.Value)) + } + case nl.TCA_STATS_QUEUE: + if err := parseGnetStats(datum.Value, stats.Queue); err != nil { + return nil, fmt.Errorf("Failed to parse ClassStatistics.Queue with: %v\n%s", + err, hex.Dump(datum.Value)) + } + case nl.TCA_STATS_RATE_EST: + if err := parseGnetStats(datum.Value, stats.RateEst); err != nil { + return nil, fmt.Errorf("Failed to parse ClassStatistics.RateEst with: %v\n%s", + err, hex.Dump(datum.Value)) + } + } + } + + return stats, nil +} diff --git a/vendor/github.com/vishvananda/netlink/conntrack_linux.go b/vendor/github.com/vishvananda/netlink/conntrack_linux.go index a0fc74a37224..4bff0dcbab03 100644 --- a/vendor/github.com/vishvananda/netlink/conntrack_linux.go +++ b/vendor/github.com/vishvananda/netlink/conntrack_linux.go @@ -22,11 +22,7 @@ const ( // https://github.com/torvalds/linux/blob/master/include/uapi/linux/netfilter/nfnetlink.h -> #define NFNL_SUBSYS_CTNETLINK_EXP 2 ConntrackExpectTable = 2 ) -const ( - // For Parsing Mark - TCP_PROTO = 6 - UDP_PROTO = 17 -) + const ( // backward compatibility with golang 1.6 which does not have io.SeekCurrent seekCurrent = 1 @@ -135,11 +131,13 @@ func (h *Handle) dumpConntrackTable(table ConntrackTableType, family InetFamily) // http://git.netfilter.org/libnetfilter_conntrack/tree/include/internal/object.h // For the time being, the structure below allows to parse and extract the base information of a flow type ipTuple struct { - SrcIP net.IP + Bytes uint64 DstIP net.IP + 
DstPort uint16 + Packets uint64 Protocol uint8 + SrcIP net.IP SrcPort uint16 - DstPort uint16 } type ConntrackFlow struct { @@ -151,11 +149,12 @@ type ConntrackFlow struct { func (s *ConntrackFlow) String() string { // conntrack cmd output: - // udp 17 src=127.0.0.1 dst=127.0.0.1 sport=4001 dport=1234 [UNREPLIED] src=127.0.0.1 dst=127.0.0.1 sport=1234 dport=4001 mark=0 - return fmt.Sprintf("%s\t%d src=%s dst=%s sport=%d dport=%d\tsrc=%s dst=%s sport=%d dport=%d mark=%d", + // udp 17 src=127.0.0.1 dst=127.0.0.1 sport=4001 dport=1234 packets=5 bytes=532 [UNREPLIED] src=127.0.0.1 dst=127.0.0.1 sport=1234 dport=4001 packets=10 bytes=1078 mark=0 + return fmt.Sprintf("%s\t%d src=%s dst=%s sport=%d dport=%d packets=%d bytes=%d\tsrc=%s dst=%s sport=%d dport=%d packets=%d bytes=%d mark=%d", nl.L4ProtoMap[s.Forward.Protocol], s.Forward.Protocol, - s.Forward.SrcIP.String(), s.Forward.DstIP.String(), s.Forward.SrcPort, s.Forward.DstPort, - s.Reverse.SrcIP.String(), s.Reverse.DstIP.String(), s.Reverse.SrcPort, s.Reverse.DstPort, s.Mark) + s.Forward.SrcIP.String(), s.Forward.DstIP.String(), s.Forward.SrcPort, s.Forward.DstPort, s.Forward.Packets, s.Forward.Bytes, + s.Reverse.SrcIP.String(), s.Reverse.DstIP.String(), s.Reverse.SrcPort, s.Reverse.DstPort, s.Reverse.Packets, s.Reverse.Bytes, + s.Mark) } // This method parse the ip tuple structure @@ -220,9 +219,35 @@ func parseBERaw16(r *bytes.Reader, v *uint16) { binary.Read(r, binary.BigEndian, v) } +func parseBERaw32(r *bytes.Reader, v *uint32) { + binary.Read(r, binary.BigEndian, v) +} + +func parseBERaw64(r *bytes.Reader, v *uint64) { + binary.Read(r, binary.BigEndian, v) +} + +func parseByteAndPacketCounters(r *bytes.Reader) (bytes, packets uint64) { + for i := 0; i < 2; i++ { + switch _, t, _ := parseNfAttrTL(r); t { + case nl.CTA_COUNTERS_BYTES: + parseBERaw64(r, &bytes) + case nl.CTA_COUNTERS_PACKETS: + parseBERaw64(r, &packets) + default: + return + } + } + return +} + +func parseConnectionMark(r *bytes.Reader) (mark uint32) { + parseBERaw32(r, &mark) + return +} + func parseRawData(data []byte) *ConntrackFlow { s := &ConntrackFlow{} - var proto uint8 // First there is the Nfgenmsg header // consume only the family field reader := bytes.NewReader(data) @@ -238,36 +263,31 @@ func parseRawData(data []byte) *ConntrackFlow { // 4 bytes // flow information of the reverse flow for reader.Len() > 0 { - nested, t, l := parseNfAttrTL(reader) - if nested && t == nl.CTA_TUPLE_ORIG { - if nested, t, _ = parseNfAttrTL(reader); nested && t == nl.CTA_TUPLE_IP { - proto = parseIpTuple(reader, &s.Forward) + if nested, t, l := parseNfAttrTL(reader); nested { + switch t { + case nl.CTA_TUPLE_ORIG: + if nested, t, _ = parseNfAttrTL(reader); nested && t == nl.CTA_TUPLE_IP { + parseIpTuple(reader, &s.Forward) + } + case nl.CTA_TUPLE_REPLY: + if nested, t, _ = parseNfAttrTL(reader); nested && t == nl.CTA_TUPLE_IP { + parseIpTuple(reader, &s.Reverse) + } else { + // Header not recognized skip it + reader.Seek(int64(l), seekCurrent) + } + case nl.CTA_COUNTERS_ORIG: + s.Forward.Bytes, s.Forward.Packets = parseByteAndPacketCounters(reader) + case nl.CTA_COUNTERS_REPLY: + s.Reverse.Bytes, s.Reverse.Packets = parseByteAndPacketCounters(reader) } - } else if nested && t == nl.CTA_TUPLE_REPLY { - if nested, t, _ = parseNfAttrTL(reader); nested && t == nl.CTA_TUPLE_IP { - parseIpTuple(reader, &s.Reverse) - - // Got all the useful information stop parsing - break - } else { - // Header not recognized skip it - reader.Seek(int64(l), seekCurrent) + } else { + switch t { + case 
nl.CTA_MARK: + s.Mark = parseConnectionMark(reader) } } } - if proto == TCP_PROTO { - reader.Seek(64, seekCurrent) - _, t, _, v := parseNfAttrTLV(reader) - if t == nl.CTA_MARK { - s.Mark = uint32(v[3]) - } - } else if proto == UDP_PROTO { - reader.Seek(16, seekCurrent) - _, t, _, v := parseNfAttrTLV(reader) - if t == nl.CTA_MARK { - s.Mark = uint32(v[3]) - } - } return s } @@ -285,7 +305,7 @@ func parseRawData(data []byte) *ConntrackFlow { // Common parameters and options: // -s, --src, --orig-src ip Source address from original direction // -d, --dst, --orig-dst ip Destination address from original direction -// -r, --reply-src ip Source addres from reply direction +// -r, --reply-src ip Source address from reply direction // -q, --reply-dst ip Destination address from reply direction // -p, --protonum proto Layer 4 Protocol, eg. 'tcp' // -f, --family proto Layer 3 Protocol, eg. 'ipv6' @@ -302,11 +322,14 @@ func parseRawData(data []byte) *ConntrackFlow { type ConntrackFilterType uint8 const ( - ConntrackOrigSrcIP = iota // -orig-src ip Source address from original direction - ConntrackOrigDstIP // -orig-dst ip Destination address from original direction - ConntrackNatSrcIP // -src-nat ip Source NAT ip - ConntrackNatDstIP // -dst-nat ip Destination NAT ip - ConntrackNatAnyIP // -any-nat ip Source or destination NAT ip + ConntrackOrigSrcIP = iota // -orig-src ip Source address from original direction + ConntrackOrigDstIP // -orig-dst ip Destination address from original direction + ConntrackReplySrcIP // --reply-src ip Reply Source IP + ConntrackReplyDstIP // --reply-dst ip Reply Destination IP + ConntrackReplyAnyIP // Match source or destination reply IP + ConntrackNatSrcIP = ConntrackReplySrcIP // deprecated use instead ConntrackReplySrcIP + ConntrackNatDstIP = ConntrackReplyDstIP // deprecated use instead ConntrackReplyDstIP + ConntrackNatAnyIP = ConntrackReplyAnyIP // deprecated use instead ConntrackReplyAnyIP ) type CustomConntrackFilter interface { @@ -351,17 +374,17 @@ func (f *ConntrackFilter) MatchConntrackFlow(flow *ConntrackFlow) bool { } // -src-nat ip Source NAT ip - if elem, found := f.ipFilter[ConntrackNatSrcIP]; match && found { + if elem, found := f.ipFilter[ConntrackReplySrcIP]; match && found { match = match && elem.Equal(flow.Reverse.SrcIP) } // -dst-nat ip Destination NAT ip - if elem, found := f.ipFilter[ConntrackNatDstIP]; match && found { + if elem, found := f.ipFilter[ConntrackReplyDstIP]; match && found { match = match && elem.Equal(flow.Reverse.DstIP) } - // -any-nat ip Source or destination NAT ip - if elem, found := f.ipFilter[ConntrackNatAnyIP]; match && found { + // Match source or destination reply IP + if elem, found := f.ipFilter[ConntrackReplyAnyIP]; match && found { match = match && (elem.Equal(flow.Reverse.SrcIP) || elem.Equal(flow.Reverse.DstIP)) } diff --git a/vendor/github.com/vishvananda/netlink/devlink_linux.go b/vendor/github.com/vishvananda/netlink/devlink_linux.go new file mode 100644 index 000000000000..29b3f8ec1d2e --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/devlink_linux.go @@ -0,0 +1,272 @@ +package netlink + +import ( + "syscall" + + "fmt" + "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" +) + +// DevlinkDevEswitchAttr represents a device's eswitch attributes +type DevlinkDevEswitchAttr struct { + Mode string + InlineMode string + EncapMode string +} + +// DevlinkDevAttrs represents device attributes +type DevlinkDevAttrs struct { + Eswitch DevlinkDevEswitchAttr +} + +// DevlinkDevice represents a device and its
attributes +type DevlinkDevice struct { + BusName string + DeviceName string + Attrs DevlinkDevAttrs +} + +func parseDevLinkDeviceList(msgs [][]byte) ([]*DevlinkDevice, error) { + devices := make([]*DevlinkDevice, 0, len(msgs)) + for _, m := range msgs { + attrs, err := nl.ParseRouteAttr(m[nl.SizeofGenlmsg:]) + if err != nil { + return nil, err + } + dev := &DevlinkDevice{} + if err = dev.parseAttributes(attrs); err != nil { + return nil, err + } + devices = append(devices, dev) + } + return devices, nil +} + +func eswitchStringToMode(modeName string) (uint16, error) { + if modeName == "legacy" { + return nl.DEVLINK_ESWITCH_MODE_LEGACY, nil + } else if modeName == "switchdev" { + return nl.DEVLINK_ESWITCH_MODE_SWITCHDEV, nil + } else { + return 0xffff, fmt.Errorf("invalid switchdev mode") + } +} + +func parseEswitchMode(mode uint16) string { + var eswitchMode = map[uint16]string{ + nl.DEVLINK_ESWITCH_MODE_LEGACY: "legacy", + nl.DEVLINK_ESWITCH_MODE_SWITCHDEV: "switchdev", + } + if eswitchMode[mode] == "" { + return "unknown" + } else { + return eswitchMode[mode] + } +} + +func parseEswitchInlineMode(inlinemode uint8) string { + var eswitchInlineMode = map[uint8]string{ + nl.DEVLINK_ESWITCH_INLINE_MODE_NONE: "none", + nl.DEVLINK_ESWITCH_INLINE_MODE_LINK: "link", + nl.DEVLINK_ESWITCH_INLINE_MODE_NETWORK: "network", + nl.DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT: "transport", + } + if eswitchInlineMode[inlinemode] == "" { + return "unknown" + } else { + return eswitchInlineMode[inlinemode] + } +} + +func parseEswitchEncapMode(encapmode uint8) string { + var eswitchEncapMode = map[uint8]string{ + nl.DEVLINK_ESWITCH_ENCAP_MODE_NONE: "disable", + nl.DEVLINK_ESWITCH_ENCAP_MODE_BASIC: "enable", + } + if eswitchEncapMode[encapmode] == "" { + return "unknown" + } else { + return eswitchEncapMode[encapmode] + } +} + +func (d *DevlinkDevice) parseAttributes(attrs []syscall.NetlinkRouteAttr) error { + for _, a := range attrs { + switch a.Attr.Type { + case nl.DEVLINK_ATTR_BUS_NAME: + d.BusName = string(a.Value) + case nl.DEVLINK_ATTR_DEV_NAME: + d.DeviceName = string(a.Value) + case nl.DEVLINK_ATTR_ESWITCH_MODE: + d.Attrs.Eswitch.Mode = parseEswitchMode(native.Uint16(a.Value)) + case nl.DEVLINK_ATTR_ESWITCH_INLINE_MODE: + d.Attrs.Eswitch.InlineMode = parseEswitchInlineMode(uint8(a.Value[0])) + case nl.DEVLINK_ATTR_ESWITCH_ENCAP_MODE: + d.Attrs.Eswitch.EncapMode = parseEswitchEncapMode(uint8(a.Value[0])) + } + } + return nil +} + +func (dev *DevlinkDevice) parseEswitchAttrs(msgs [][]byte) { + m := msgs[0] + attrs, err := nl.ParseRouteAttr(m[nl.SizeofGenlmsg:]) + if err != nil { + return + } + dev.parseAttributes(attrs) +} + +func (h *Handle) getEswitchAttrs(family *GenlFamily, dev *DevlinkDevice) { + msg := &nl.Genlmsg{ + Command: nl.DEVLINK_CMD_ESWITCH_GET, + Version: nl.GENL_DEVLINK_VERSION, + } + req := h.newNetlinkRequest(int(family.ID), unix.NLM_F_REQUEST|unix.NLM_F_ACK) + req.AddData(msg) + + b := make([]byte, len(dev.BusName)) + copy(b, dev.BusName) + data := nl.NewRtAttr(nl.DEVLINK_ATTR_BUS_NAME, b) + req.AddData(data) + + b = make([]byte, len(dev.DeviceName)) + copy(b, dev.DeviceName) + data = nl.NewRtAttr(nl.DEVLINK_ATTR_DEV_NAME, b) + req.AddData(data) + + msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return + } + dev.parseEswitchAttrs(msgs) +} + +// DevLinkGetDeviceList provides a pointer to devlink devices and nil error, +// otherwise returns an error code. 
+func (h *Handle) DevLinkGetDeviceList() ([]*DevlinkDevice, error) { + f, err := h.GenlFamilyGet(nl.GENL_DEVLINK_NAME) + if err != nil { + return nil, err + } + msg := &nl.Genlmsg{ + Command: nl.DEVLINK_CMD_GET, + Version: nl.GENL_DEVLINK_VERSION, + } + req := h.newNetlinkRequest(int(f.ID), + unix.NLM_F_REQUEST|unix.NLM_F_ACK|unix.NLM_F_DUMP) + req.AddData(msg) + msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return nil, err + } + devices, err := parseDevLinkDeviceList(msgs) + if err != nil { + return nil, err + } + for _, d := range devices { + h.getEswitchAttrs(f, d) + } + return devices, nil +} + +// DevLinkGetDeviceList provides a pointer to devlink devices and nil error, +// otherwise returns an error code. +func DevLinkGetDeviceList() ([]*DevlinkDevice, error) { + return pkgHandle.DevLinkGetDeviceList() +} + +func parseDevlinkDevice(msgs [][]byte) (*DevlinkDevice, error) { + m := msgs[0] + attrs, err := nl.ParseRouteAttr(m[nl.SizeofGenlmsg:]) + if err != nil { + return nil, err + } + dev := &DevlinkDevice{} + if err = dev.parseAttributes(attrs); err != nil { + return nil, err + } + return dev, nil +} + +func (h *Handle) createCmdReq(cmd uint8, bus string, device string) (*GenlFamily, *nl.NetlinkRequest, error) { + f, err := h.GenlFamilyGet(nl.GENL_DEVLINK_NAME) + if err != nil { + return nil, nil, err + } + + msg := &nl.Genlmsg{ + Command: cmd, + Version: nl.GENL_DEVLINK_VERSION, + } + req := h.newNetlinkRequest(int(f.ID), + unix.NLM_F_REQUEST|unix.NLM_F_ACK) + req.AddData(msg) + + b := make([]byte, len(bus)+1) + copy(b, bus) + data := nl.NewRtAttr(nl.DEVLINK_ATTR_BUS_NAME, b) + req.AddData(data) + + b = make([]byte, len(device)+1) + copy(b, device) + data = nl.NewRtAttr(nl.DEVLINK_ATTR_DEV_NAME, b) + req.AddData(data) + + return f, req, nil +} + +// DevLinkGetDeviceByName provides a pointer to a devlink device and nil error, +// otherwise returns an error code. +func (h *Handle) DevLinkGetDeviceByName(Bus string, Device string) (*DevlinkDevice, error) { + f, req, err := h.createCmdReq(nl.DEVLINK_CMD_GET, Bus, Device) + if err != nil { + return nil, err + } + + respmsg, err := req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return nil, err + } + dev, err := parseDevlinkDevice(respmsg) + if err == nil { + h.getEswitchAttrs(f, dev) + } + return dev, err +} + +// DevLinkGetDeviceByName provides a pointer to a devlink device and nil error, +// otherwise returns an error code. +func DevLinkGetDeviceByName(Bus string, Device string) (*DevlinkDevice, error) { + return pkgHandle.DevLinkGetDeviceByName(Bus, Device) +} + +// DevLinkSetEswitchMode sets eswitch mode if able to set successfully or +// returns an error code. +// Equivalent to: `devlink dev eswitch set $dev mode switchdev` +// Equivalent to: `devlink dev eswitch set $dev mode legacy` +func (h *Handle) DevLinkSetEswitchMode(Dev *DevlinkDevice, NewMode string) error { + mode, err := eswitchStringToMode(NewMode) + if err != nil { + return err + } + + _, req, err := h.createCmdReq(nl.DEVLINK_CMD_ESWITCH_SET, Dev.BusName, Dev.DeviceName) + if err != nil { + return err + } + + req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_ESWITCH_MODE, nl.Uint16Attr(mode))) + + _, err = req.Execute(unix.NETLINK_GENERIC, 0) + return err +} + +// DevLinkSetEswitchMode sets eswitch mode if able to set successfully or +// returns an error code.
+// Equivalent to: `devlink dev eswitch set $dev mode switchdev` +// Equivalent to: `devlink dev eswitch set $dev mode legacy` +func DevLinkSetEswitchMode(Dev *DevlinkDevice, NewMode string) error { + return pkgHandle.DevLinkSetEswitchMode(Dev, NewMode) +} diff --git a/vendor/github.com/vishvananda/netlink/filter.go b/vendor/github.com/vishvananda/netlink/filter.go index c2cf8e4dcf51..88792eab0ee7 100644 --- a/vendor/github.com/vishvananda/netlink/filter.go +++ b/vendor/github.com/vishvananda/netlink/filter.go @@ -2,6 +2,7 @@ package netlink import ( "fmt" + "net" ) type Filter interface { @@ -135,6 +136,27 @@ func (action *BpfAction) Attrs() *ActionAttrs { return &action.ActionAttrs } +type ConnmarkAction struct { + ActionAttrs + Zone uint16 +} + +func (action *ConnmarkAction) Type() string { + return "connmark" +} + +func (action *ConnmarkAction) Attrs() *ActionAttrs { + return &action.ActionAttrs +} + +func NewConnmarkAction() *ConnmarkAction { + return &ConnmarkAction{ + ActionAttrs: ActionAttrs{ + Action: TC_ACT_PIPE, + }, + } +} + type MirredAct uint8 func (a MirredAct) String() string { @@ -182,47 +204,59 @@ func NewMirredAction(redirIndex int) *MirredAction { } } -// Sel of the U32 filters that contains multiple TcU32Key. This is the copy -// and the frontend representation of nl.TcU32Sel. It is serialized into canonical -// nl.TcU32Sel with the appropriate endianness. -type TcU32Sel struct { - Flags uint8 - Offshift uint8 - Nkeys uint8 - Pad uint8 - Offmask uint16 - Off uint16 - Offoff int16 - Hoff int16 - Hmask uint32 - Keys []TcU32Key -} - -// TcU32Key contained of Sel in the U32 filters. This is the copy and the frontend -// representation of nl.TcU32Key. It is serialized into chanonical nl.TcU32Sel -// with the appropriate endianness. -type TcU32Key struct { - Mask uint32 - Val uint32 - Off int32 - OffMask int32 -} - -// U32 filters on many packet related properties -type U32 struct { - FilterAttrs - ClassId uint32 - RedirIndex int - Sel *TcU32Sel - Actions []Action +type TunnelKeyAct int8 + +const ( + TCA_TUNNEL_KEY_SET TunnelKeyAct = 1 // set tunnel key + TCA_TUNNEL_KEY_UNSET TunnelKeyAct = 2 // unset tunnel key +) + +type TunnelKeyAction struct { + ActionAttrs + Action TunnelKeyAct + SrcAddr net.IP + DstAddr net.IP + KeyID uint32 } -func (filter *U32) Attrs() *FilterAttrs { - return &filter.FilterAttrs +func (action *TunnelKeyAction) Type() string { + return "tunnel_key" } -func (filter *U32) Type() string { - return "u32" +func (action *TunnelKeyAction) Attrs() *ActionAttrs { + return &action.ActionAttrs +} + +func NewTunnelKeyAction() *TunnelKeyAction { + return &TunnelKeyAction{ + ActionAttrs: ActionAttrs{ + Action: TC_ACT_PIPE, + }, + } +} + +type SkbEditAction struct { + ActionAttrs + QueueMapping *uint16 + PType *uint16 + Priority *uint32 + Mark *uint32 +} + +func (action *SkbEditAction) Type() string { + return "skbedit" +} + +func (action *SkbEditAction) Attrs() *ActionAttrs { + return &action.ActionAttrs +} + +func NewSkbEditAction() *SkbEditAction { + return &SkbEditAction{ + ActionAttrs: ActionAttrs{ + Action: TC_ACT_PIPE, + }, + } } // MatchAll filters match all packets @@ -262,6 +296,8 @@ type BpfFilter struct { Fd int Name string DirectAction bool + Id int + Tag string } func (filter *BpfFilter) Type() string { diff --git a/vendor/github.com/vishvananda/netlink/filter_linux.go b/vendor/github.com/vishvananda/netlink/filter_linux.go index f0eac6b78cc5..c56f314cd3ff 100644 --- a/vendor/github.com/vishvananda/netlink/filter_linux.go +++ 
b/vendor/github.com/vishvananda/netlink/filter_linux.go @@ -3,10 +3,11 @@ package netlink import ( "bytes" "encoding/binary" + "encoding/hex" "errors" "fmt" + "net" "syscall" - "unsafe" "github.com/vishvananda/netlink/nl" "golang.org/x/sys/unix" @@ -20,6 +21,35 @@ const ( TC_U32_EAT = nl.TC_U32_EAT ) +// Sel of the U32 filters that contains multiple TcU32Key. This is the type +// alias and the frontend representation of nl.TcU32Sel. It is serialized into +// canonical nl.TcU32Sel with the appropriate endianness. +type TcU32Sel = nl.TcU32Sel + +// TcU32Key is contained in Sel of the U32 filters. This is the type alias and the +// frontend representation of nl.TcU32Key. It is serialized into canonical +// nl.TcU32Sel with the appropriate endianness. +type TcU32Key = nl.TcU32Key + +// U32 filters on many packet related properties +type U32 struct { + FilterAttrs + ClassId uint32 + Divisor uint32 // Divisor MUST be power of 2. + Hash uint32 + RedirIndex int + Sel *TcU32Sel + Actions []Action +} + +func (filter *U32) Attrs() *FilterAttrs { + return &filter.FilterAttrs +} + +func (filter *U32) Type() string { + return "u32" +} + // Fw filter filters on firewall marks // NOTE: this is in filter_linux because it refers to nl.TcPolice which // is defined in nl/tc_linux.go @@ -123,8 +153,24 @@ func FilterAdd(filter Filter) error { // FilterAdd will add a filter to the system. // Equivalent to: `tc filter add $filter` func (h *Handle) FilterAdd(filter Filter) error { + return h.filterModify(filter, unix.NLM_F_CREATE|unix.NLM_F_EXCL) +} + +// FilterReplace will replace a filter. +// Equivalent to: `tc filter replace $filter` +func FilterReplace(filter Filter) error { + return pkgHandle.FilterReplace(filter) +} + +// FilterReplace will replace a filter. +// Equivalent to: `tc filter replace $filter` +func (h *Handle) FilterReplace(filter Filter) error { + return h.filterModify(filter, unix.NLM_F_CREATE) +} + +func (h *Handle) filterModify(filter Filter, flags int) error { native = nl.NativeEndian() - req := h.newNetlinkRequest(unix.RTM_NEWTFILTER, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_NEWTFILTER, flags|unix.NLM_F_ACK) base := filter.Attrs() msg := &nl.TcMsg{ Family: nl.FAMILY_ALL, @@ -140,8 +186,7 @@ func (h *Handle) FilterAdd(filter Filter) error { switch filter := filter.(type) { case *U32: - // Convert TcU32Sel into nl.TcU32Sel as it is without copy. - sel := (*nl.TcU32Sel)(unsafe.Pointer(filter.Sel)) + sel := filter.Sel if sel == nil { // match all sel = &nl.TcU32Sel{ @@ -168,11 +213,20 @@ } } sel.Nkeys = uint8(len(sel.Keys)) - nl.NewRtAttrChild(options, nl.TCA_U32_SEL, sel.Serialize()) + options.AddRtAttr(nl.TCA_U32_SEL, sel.Serialize()) if filter.ClassId != 0 { - nl.NewRtAttrChild(options, nl.TCA_U32_CLASSID, nl.Uint32Attr(filter.ClassId)) + options.AddRtAttr(nl.TCA_U32_CLASSID, nl.Uint32Attr(filter.ClassId)) + } + if filter.Divisor != 0 { + if (filter.Divisor-1)&filter.Divisor != 0 { + return fmt.Errorf("illegal divisor %d. Must be a power of 2", filter.Divisor) + } + options.AddRtAttr(nl.TCA_U32_DIVISOR, nl.Uint32Attr(filter.Divisor)) } - actionsAttr := nl.NewRtAttrChild(options, nl.TCA_U32_ACT, nil) + if filter.Hash != 0 { + options.AddRtAttr(nl.TCA_U32_HASH, nl.Uint32Attr(filter.Hash)) + } + actionsAttr := options.AddRtAttr(nl.TCA_U32_ACT, nil) // backwards compatibility if filter.RedirIndex != 0 { filter.Actions = append([]Action{NewMirredAction(filter.RedirIndex)}, filter.Actions...)
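The hunk above funnels both FilterAdd and the new FilterReplace through filterModify, differing only in whether NLM_F_EXCL is set, and exposes the new U32 `Divisor` and `Hash` fields. A minimal sketch of the replace path, assuming an interface named `eth0` that already has an ingress qdisc attached:

```go
package main

import (
	"log"

	"github.com/vishvananda/netlink"
	"golang.org/x/sys/unix"
)

func main() {
	link, err := netlink.LinkByName("eth0") // assumed interface name
	if err != nil {
		log.Fatalf("LinkByName: %v", err)
	}
	filter := &netlink.U32{
		FilterAttrs: netlink.FilterAttrs{
			LinkIndex: link.Attrs().Index,
			Parent:    netlink.MakeHandle(0xffff, 0), // ingress qdisc parent (ffff:)
			Priority:  1,
			Protocol:  unix.ETH_P_IP,
		},
		// A non-zero Divisor allocates a U32 hash table; filterModify
		// rejects any value that is not a power of two.
		Divisor: 256,
	}
	// FilterReplace passes NLM_F_CREATE without NLM_F_EXCL, so the call
	// succeeds whether the filter is being created or updated in place.
	if err := netlink.FilterReplace(filter); err != nil {
		log.Fatalf("FilterReplace: %v", err)
	}
}
```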
@@ -184,51 +238,51 @@ func (h *Handle) FilterAdd(filter Filter) error { if filter.Mask != 0 { b := make([]byte, 4) native.PutUint32(b, filter.Mask) - nl.NewRtAttrChild(options, nl.TCA_FW_MASK, b) + options.AddRtAttr(nl.TCA_FW_MASK, b) } if filter.InDev != "" { - nl.NewRtAttrChild(options, nl.TCA_FW_INDEV, nl.ZeroTerminated(filter.InDev)) + options.AddRtAttr(nl.TCA_FW_INDEV, nl.ZeroTerminated(filter.InDev)) } if (filter.Police != nl.TcPolice{}) { - police := nl.NewRtAttrChild(options, nl.TCA_FW_POLICE, nil) - nl.NewRtAttrChild(police, nl.TCA_POLICE_TBF, filter.Police.Serialize()) + police := options.AddRtAttr(nl.TCA_FW_POLICE, nil) + police.AddRtAttr(nl.TCA_POLICE_TBF, filter.Police.Serialize()) if (filter.Police.Rate != nl.TcRateSpec{}) { payload := SerializeRtab(filter.Rtab) - nl.NewRtAttrChild(police, nl.TCA_POLICE_RATE, payload) + police.AddRtAttr(nl.TCA_POLICE_RATE, payload) } if (filter.Police.PeakRate != nl.TcRateSpec{}) { payload := SerializeRtab(filter.Ptab) - nl.NewRtAttrChild(police, nl.TCA_POLICE_PEAKRATE, payload) + police.AddRtAttr(nl.TCA_POLICE_PEAKRATE, payload) } } if filter.ClassId != 0 { b := make([]byte, 4) native.PutUint32(b, filter.ClassId) - nl.NewRtAttrChild(options, nl.TCA_FW_CLASSID, b) + options.AddRtAttr(nl.TCA_FW_CLASSID, b) } case *BpfFilter: var bpfFlags uint32 if filter.ClassId != 0 { - nl.NewRtAttrChild(options, nl.TCA_BPF_CLASSID, nl.Uint32Attr(filter.ClassId)) + options.AddRtAttr(nl.TCA_BPF_CLASSID, nl.Uint32Attr(filter.ClassId)) } if filter.Fd >= 0 { - nl.NewRtAttrChild(options, nl.TCA_BPF_FD, nl.Uint32Attr((uint32(filter.Fd)))) + options.AddRtAttr(nl.TCA_BPF_FD, nl.Uint32Attr((uint32(filter.Fd)))) } if filter.Name != "" { - nl.NewRtAttrChild(options, nl.TCA_BPF_NAME, nl.ZeroTerminated(filter.Name)) + options.AddRtAttr(nl.TCA_BPF_NAME, nl.ZeroTerminated(filter.Name)) } if filter.DirectAction { bpfFlags |= nl.TCA_BPF_FLAG_ACT_DIRECT } - nl.NewRtAttrChild(options, nl.TCA_BPF_FLAGS, nl.Uint32Attr(bpfFlags)) + options.AddRtAttr(nl.TCA_BPF_FLAGS, nl.Uint32Attr(bpfFlags)) case *MatchAll: - actionsAttr := nl.NewRtAttrChild(options, nl.TCA_MATCHALL_ACT, nil) + actionsAttr := options.AddRtAttr(nl.TCA_MATCHALL_ACT, nil) if err := EncodeActions(actionsAttr, filter.Actions); err != nil { return err } if filter.ClassId != 0 { - nl.NewRtAttrChild(options, nl.TCA_MATCHALL_CLASSID, nl.Uint32Attr(filter.ClassId)) + options.AddRtAttr(nl.TCA_MATCHALL_CLASSID, nl.Uint32Attr(filter.ClassId)) } } @@ -366,34 +420,91 @@ func EncodeActions(attr *nl.RtAttr, actions []Action) error { default: return fmt.Errorf("unknown action type %s", action.Type()) case *MirredAction: - table := nl.NewRtAttrChild(attr, tabIndex, nil) + table := attr.AddRtAttr(tabIndex, nil) tabIndex++ - nl.NewRtAttrChild(table, nl.TCA_ACT_KIND, nl.ZeroTerminated("mirred")) - aopts := nl.NewRtAttrChild(table, nl.TCA_ACT_OPTIONS, nil) + table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("mirred")) + aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil) mirred := nl.TcMirred{ Eaction: int32(action.MirredAction), Ifindex: uint32(action.Ifindex), } toTcGen(action.Attrs(), &mirred.TcGen) - nl.NewRtAttrChild(aopts, nl.TCA_MIRRED_PARMS, mirred.Serialize()) + aopts.AddRtAttr(nl.TCA_MIRRED_PARMS, mirred.Serialize()) + case *TunnelKeyAction: + table := attr.AddRtAttr(tabIndex, nil) + tabIndex++ + table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("tunnel_key")) + aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil) + tun := nl.TcTunnelKey{ + Action: int32(action.Action), + } + toTcGen(action.Attrs(), &tun.TcGen) + 
aopts.AddRtAttr(nl.TCA_TUNNEL_KEY_PARMS, tun.Serialize()) + if action.Action == TCA_TUNNEL_KEY_SET { + aopts.AddRtAttr(nl.TCA_TUNNEL_KEY_ENC_KEY_ID, htonl(action.KeyID)) + if v4 := action.SrcAddr.To4(); v4 != nil { + aopts.AddRtAttr(nl.TCA_TUNNEL_KEY_ENC_IPV4_SRC, v4[:]) + } else if v6 := action.SrcAddr.To16(); v6 != nil { + aopts.AddRtAttr(nl.TCA_TUNNEL_KEY_ENC_IPV6_SRC, v6[:]) + } else { + return fmt.Errorf("invalid src addr %s for tunnel_key action", action.SrcAddr) + } + if v4 := action.DstAddr.To4(); v4 != nil { + aopts.AddRtAttr(nl.TCA_TUNNEL_KEY_ENC_IPV4_DST, v4[:]) + } else if v6 := action.DstAddr.To16(); v6 != nil { + aopts.AddRtAttr(nl.TCA_TUNNEL_KEY_ENC_IPV6_DST, v6[:]) + } else { + return fmt.Errorf("invalid dst addr %s for tunnel_key action", action.DstAddr) + } + } + case *SkbEditAction: + table := attr.AddRtAttr(tabIndex, nil) + tabIndex++ + table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("skbedit")) + aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil) + skbedit := nl.TcSkbEdit{} + toTcGen(action.Attrs(), &skbedit.TcGen) + aopts.AddRtAttr(nl.TCA_SKBEDIT_PARMS, skbedit.Serialize()) + if action.QueueMapping != nil { + aopts.AddRtAttr(nl.TCA_SKBEDIT_QUEUE_MAPPING, nl.Uint16Attr(*action.QueueMapping)) + } + if action.Priority != nil { + aopts.AddRtAttr(nl.TCA_SKBEDIT_PRIORITY, nl.Uint32Attr(*action.Priority)) + } + if action.PType != nil { + aopts.AddRtAttr(nl.TCA_SKBEDIT_PTYPE, nl.Uint16Attr(*action.PType)) + } + if action.Mark != nil { + aopts.AddRtAttr(nl.TCA_SKBEDIT_MARK, nl.Uint32Attr(*action.Mark)) + } + case *ConnmarkAction: + table := attr.AddRtAttr(tabIndex, nil) + tabIndex++ + table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("connmark")) + aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil) + connmark := nl.TcConnmark{ + Zone: action.Zone, + } + toTcGen(action.Attrs(), &connmark.TcGen) + aopts.AddRtAttr(nl.TCA_CONNMARK_PARMS, connmark.Serialize()) case *BpfAction: - table := nl.NewRtAttrChild(attr, tabIndex, nil) + table := attr.AddRtAttr(tabIndex, nil) tabIndex++ - nl.NewRtAttrChild(table, nl.TCA_ACT_KIND, nl.ZeroTerminated("bpf")) - aopts := nl.NewRtAttrChild(table, nl.TCA_ACT_OPTIONS, nil) + table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("bpf")) + aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil) gen := nl.TcGen{} toTcGen(action.Attrs(), &gen) - nl.NewRtAttrChild(aopts, nl.TCA_ACT_BPF_PARMS, gen.Serialize()) - nl.NewRtAttrChild(aopts, nl.TCA_ACT_BPF_FD, nl.Uint32Attr(uint32(action.Fd))) - nl.NewRtAttrChild(aopts, nl.TCA_ACT_BPF_NAME, nl.ZeroTerminated(action.Name)) + aopts.AddRtAttr(nl.TCA_ACT_BPF_PARMS, gen.Serialize()) + aopts.AddRtAttr(nl.TCA_ACT_BPF_FD, nl.Uint32Attr(uint32(action.Fd))) + aopts.AddRtAttr(nl.TCA_ACT_BPF_NAME, nl.ZeroTerminated(action.Name)) case *GenericAction: - table := nl.NewRtAttrChild(attr, tabIndex, nil) + table := attr.AddRtAttr(tabIndex, nil) tabIndex++ - nl.NewRtAttrChild(table, nl.TCA_ACT_KIND, nl.ZeroTerminated("gact")) - aopts := nl.NewRtAttrChild(table, nl.TCA_ACT_OPTIONS, nil) + table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("gact")) + aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil) gen := nl.TcGen{} toTcGen(action.Attrs(), &gen) - nl.NewRtAttrChild(aopts, nl.TCA_GACT_PARMS, gen.Serialize()) + aopts.AddRtAttr(nl.TCA_GACT_PARMS, gen.Serialize()) } } return nil @@ -419,8 +530,14 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { action = &MirredAction{} case "bpf": action = &BpfAction{} + case "connmark": + action = &ConnmarkAction{} case "gact": action = &GenericAction{} + case "tunnel_key": + 
action = &TunnelKeyAction{}
+		case "skbedit":
+			action = &SkbEditAction{}
 		default:
 			break nextattr
 		}
@@ -435,11 +552,46 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) {
 				switch adatum.Attr.Type {
 				case nl.TCA_MIRRED_PARMS:
 					mirred := *nl.DeserializeTcMirred(adatum.Value)
-					toAttrs(&mirred.TcGen, action.Attrs())
 					action.(*MirredAction).ActionAttrs = ActionAttrs{}
+					toAttrs(&mirred.TcGen, action.Attrs())
 					action.(*MirredAction).Ifindex = int(mirred.Ifindex)
 					action.(*MirredAction).MirredAction = MirredAct(mirred.Eaction)
 				}
+			case "tunnel_key":
+				switch adatum.Attr.Type {
+				case nl.TCA_TUNNEL_KEY_PARMS:
+					tun := *nl.DeserializeTunnelKey(adatum.Value)
+					action.(*TunnelKeyAction).ActionAttrs = ActionAttrs{}
+					toAttrs(&tun.TcGen, action.Attrs())
+					action.(*TunnelKeyAction).Action = TunnelKeyAct(tun.Action)
+				case nl.TCA_TUNNEL_KEY_ENC_KEY_ID:
+					action.(*TunnelKeyAction).KeyID = networkOrder.Uint32(adatum.Value[0:4])
+				case nl.TCA_TUNNEL_KEY_ENC_IPV6_SRC, nl.TCA_TUNNEL_KEY_ENC_IPV4_SRC:
+					action.(*TunnelKeyAction).SrcAddr = net.IP(adatum.Value[:])
+				case nl.TCA_TUNNEL_KEY_ENC_IPV6_DST, nl.TCA_TUNNEL_KEY_ENC_IPV4_DST:
+					action.(*TunnelKeyAction).DstAddr = net.IP(adatum.Value[:])
+				}
+			case "skbedit":
+				switch adatum.Attr.Type {
+				case nl.TCA_SKBEDIT_PARMS:
+					skbedit := *nl.DeserializeSkbEdit(adatum.Value)
+					action.(*SkbEditAction).ActionAttrs = ActionAttrs{}
+					toAttrs(&skbedit.TcGen, action.Attrs())
+				case nl.TCA_SKBEDIT_MARK:
+					mark := native.Uint32(adatum.Value[0:4])
+					action.(*SkbEditAction).Mark = &mark
+				case nl.TCA_SKBEDIT_PRIORITY:
+					priority := native.Uint32(adatum.Value[0:4])
+					action.(*SkbEditAction).Priority = &priority
+				case nl.TCA_SKBEDIT_PTYPE:
+					ptype := native.Uint16(adatum.Value[0:2])
+					action.(*SkbEditAction).PType = &ptype
+				case nl.TCA_SKBEDIT_QUEUE_MAPPING:
+					mapping := native.Uint16(adatum.Value[0:2])
+					action.(*SkbEditAction).QueueMapping = &mapping
+				}
 			case "bpf":
 				switch adatum.Attr.Type {
 				case nl.TCA_ACT_BPF_PARMS:
@@ -450,6 +602,14 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) {
 				case nl.TCA_ACT_BPF_NAME:
 					action.(*BpfAction).Name = string(adatum.Value[:len(adatum.Value)-1])
 				}
+			case "connmark":
+				switch adatum.Attr.Type {
+				case nl.TCA_CONNMARK_PARMS:
+					connmark := *nl.DeserializeTcConnmark(adatum.Value)
+					action.(*ConnmarkAction).ActionAttrs = ActionAttrs{}
+					toAttrs(&connmark.TcGen, action.Attrs())
+					action.(*ConnmarkAction).Zone = connmark.Zone
+				}
 			case "gact":
 				switch adatum.Attr.Type {
 				case nl.TCA_GACT_PARMS:
@@ -474,7 +634,7 @@ func parseU32Data(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error)
 		case nl.TCA_U32_SEL:
 			detailed = true
 			sel := nl.DeserializeTcU32Sel(datum.Value)
-			u32.Sel = (*TcU32Sel)(unsafe.Pointer(sel))
+			u32.Sel = sel
 			if native != networkOrder {
 				// Handle the endianness of attributes
 				u32.Sel.Offmask = native.Uint16(htons(sel.Offmask))
@@ -500,6 +660,10 @@ func parseU32Data(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error)
 			}
 		case nl.TCA_U32_CLASSID:
 			u32.ClassId = native.Uint32(datum.Value)
+		case nl.TCA_U32_DIVISOR:
+			u32.Divisor = native.Uint32(datum.Value)
+		case nl.TCA_U32_HASH:
+			u32.Hash = native.Uint32(datum.Value)
 		}
 	}
 	return detailed, nil
@@ -551,6 +715,10 @@ func parseBpfData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error)
 			if (flags & nl.TCA_BPF_FLAG_ACT_DIRECT) != 0 {
 				bpf.DirectAction = true
 			}
+		case nl.TCA_BPF_ID:
+			bpf.Id = int(native.Uint32(datum.Value[0:4]))
+		case nl.TCA_BPF_TAG:
+			bpf.Tag = 
hex.EncodeToString(datum.Value[:len(datum.Value)-1]) } } return detailed, nil diff --git a/vendor/github.com/vishvananda/netlink/fou_linux.go b/vendor/github.com/vishvananda/netlink/fou_linux.go index 62d59bd2d093..ed55b2b790d4 100644 --- a/vendor/github.com/vishvananda/netlink/fou_linux.go +++ b/vendor/github.com/vishvananda/netlink/fou_linux.go @@ -90,11 +90,7 @@ func (h *Handle) FouAdd(f Fou) error { req.AddRawData(raw) _, err = req.Execute(unix.NETLINK_GENERIC, 0) - if err != nil { - return err - } - - return nil + return err } func FouDel(f Fou) error { diff --git a/vendor/github.com/vishvananda/netlink/genetlink_linux.go b/vendor/github.com/vishvananda/netlink/genetlink_linux.go index ce7969907d43..772e5834a26a 100644 --- a/vendor/github.com/vishvananda/netlink/genetlink_linux.go +++ b/vendor/github.com/vishvananda/netlink/genetlink_linux.go @@ -157,6 +157,9 @@ func (h *Handle) GenlFamilyGet(name string) (*GenlFamily, error) { return nil, err } families, err := parseFamilies(msgs) + if err != nil { + return nil, err + } if len(families) != 1 { return nil, fmt.Errorf("invalid response for GENL_CTRL_CMD_GETFAMILY") } diff --git a/vendor/github.com/vishvananda/netlink/go.mod b/vendor/github.com/vishvananda/netlink/go.mod new file mode 100644 index 000000000000..09ee60e7784d --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/go.mod @@ -0,0 +1,8 @@ +module github.com/vishvananda/netlink + +go 1.12 + +require ( + github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df + golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444 +) diff --git a/vendor/github.com/vishvananda/netlink/go.sum b/vendor/github.com/vishvananda/netlink/go.sum new file mode 100644 index 000000000000..402d14ec5528 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/go.sum @@ -0,0 +1,4 @@ +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444 h1:/d2cWp6PSamH4jDPFLyO150psQdqvtoNX8Zjg3AQ31g= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/vishvananda/netlink/handle_linux.go b/vendor/github.com/vishvananda/netlink/handle_linux.go index 9f6d7fe0fbd1..26887b759612 100644 --- a/vendor/github.com/vishvananda/netlink/handle_linux.go +++ b/vendor/github.com/vishvananda/netlink/handle_linux.go @@ -91,7 +91,7 @@ func (h *Handle) GetSocketReceiveBufferSize() ([]int, error) { return results, nil } -// NewHandle returns a netlink handle on the network namespace +// NewHandleAt returns a netlink handle on the network namespace // specified by ns. 
If ns=netns.None(), current network namespace // will be assumed func NewHandleAt(ns netns.NsHandle, nlFamilies ...int) (*Handle, error) { diff --git a/vendor/github.com/vishvananda/netlink/handle_unspecified.go b/vendor/github.com/vishvananda/netlink/handle_unspecified.go index 915b765ded77..ef914dcb8cc5 100644 --- a/vendor/github.com/vishvananda/netlink/handle_unspecified.go +++ b/vendor/github.com/vishvananda/netlink/handle_unspecified.go @@ -73,10 +73,18 @@ func (h *Handle) LinkSetVfVlan(link Link, vf, vlan int) error { return ErrNotImplemented } +func (h *Handle) LinkSetVfVlanQos(link Link, vf, vlan, qos int) error { + return ErrNotImplemented +} + func (h *Handle) LinkSetVfTxRate(link Link, vf, rate int) error { return ErrNotImplemented } +func (h *Handle) LinkSetVfRate(link Link, vf, minRate, maxRate int) error { + return ErrNotImplemented +} + func (h *Handle) LinkSetMaster(link Link, master *Bridge) error { return ErrNotImplemented } @@ -149,6 +157,10 @@ func (h *Handle) LinkSetTxQLen(link Link, qlen int) error { return ErrNotImplemented } +func (h *Handle) LinkSetGroup(link Link, group int) error { + return ErrNotImplemented +} + func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error { return ErrNotImplemented } diff --git a/vendor/github.com/vishvananda/netlink/ioctl_linux.go b/vendor/github.com/vishvananda/netlink/ioctl_linux.go index a8503126d560..4d33db5da513 100644 --- a/vendor/github.com/vishvananda/netlink/ioctl_linux.go +++ b/vendor/github.com/vishvananda/netlink/ioctl_linux.go @@ -56,18 +56,10 @@ type ethtoolSset struct { data [1]uint32 } -// ethtoolGstrings is string set for data tagging -type ethtoolGstrings struct { - cmd uint32 - stringSet uint32 - length uint32 - data [32]byte -} - type ethtoolStats struct { cmd uint32 nStats uint32 - data [1]uint64 + // Followed by nStats * []uint64. } // newIocltSlaveReq returns filled IfreqSlave with proper interface names diff --git a/vendor/github.com/vishvananda/netlink/link.go b/vendor/github.com/vishvananda/netlink/link.go index fe74ffab9640..886d88d1b2f5 100644 --- a/vendor/github.com/vishvananda/netlink/link.go +++ b/vendor/github.com/vishvananda/netlink/link.go @@ -4,6 +4,7 @@ import ( "fmt" "net" "os" + "strconv" ) // Link represents a link device from netlink. Shared link attributes @@ -41,6 +42,29 @@ type LinkAttrs struct { NetNsID int NumTxQueues int NumRxQueues int + GSOMaxSize uint32 + GSOMaxSegs uint32 + Vfs []VfInfo // virtual functions available on link + Group uint32 + Slave LinkSlave +} + +// LinkSlave represents a slave device. 
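
The Vfs field added to LinkAttrs above carries per-VF state parsed from
IFLA_VFINFO_LIST; later hunks in this patch make the link getters request it
via IFLA_EXT_MASK/RTEXT_FILTER_VF. A hedged read-side sketch (the PF device
name and the presence of VFs are assumptions):

package main

import (
	"fmt"

	"github.com/vishvananda/netlink"
)

func main() {
	// "enp3s0f0" is a placeholder for an SR-IOV capable physical function.
	link, err := netlink.LinkByName("enp3s0f0")
	if err != nil {
		panic(err)
	}
	// Vfs is only populated when the kernel reports IFLA_VFINFO_LIST.
	for _, vf := range link.Attrs().Vfs {
		fmt.Printf("vf %d: mac=%s vlan=%d qos=%d spoofchk=%v\n",
			vf.ID, vf.Mac, vf.Vlan, vf.Qos, vf.Spoofchk)
	}
}
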
+type LinkSlave interface { + SlaveType() string +} + +// VfInfo represents configuration of virtual function +type VfInfo struct { + ID int + Mac net.HardwareAddr + Vlan int + Qos int + TxRate int // IFLA_VF_TX_RATE Max TxRate + Spoofchk bool + LinkState uint32 + MaxTxRate uint32 // IFLA_VF_RATE Max TxRate + MinTxRate uint32 // IFLA_VF_RATE Min TxRate } // LinkOperState represents the values of the IFLA_OPERSTATE link @@ -223,6 +247,7 @@ type Bridge struct { LinkAttrs MulticastSnooping *bool HelloTime *uint32 + VlanFiltering *bool } func (bridge *Bridge) Attrs() *LinkAttrs { @@ -236,7 +261,8 @@ func (bridge *Bridge) Type() string { // Vlan links have ParentIndex set in their Attrs() type Vlan struct { LinkAttrs - VlanId int + VlanId int + VlanProtocol VlanProtocol } func (vlan *Vlan) Attrs() *LinkAttrs { @@ -290,10 +316,13 @@ type TuntapFlag uint16 // Tuntap links created via /dev/tun/tap, but can be destroyed via netlink type Tuntap struct { LinkAttrs - Mode TuntapMode - Flags TuntapFlag - Queues int - Fds []*os.File + Mode TuntapMode + Flags TuntapFlag + NonPersist bool + Queues int + Fds []*os.File + Owner uint32 + Group uint32 } func (tuntap *Tuntap) Attrs() *LinkAttrs { @@ -307,7 +336,8 @@ func (tuntap *Tuntap) Type() string { // Veth devices must specify PeerName on create type Veth struct { LinkAttrs - PeerName string // veth on create only + PeerName string // veth on create only + PeerHardwareAddr net.HardwareAddr } func (veth *Veth) Attrs() *LinkAttrs { @@ -376,9 +406,18 @@ const ( IPVLAN_MODE_MAX ) +type IPVlanFlag uint16 + +const ( + IPVLAN_FLAG_BRIDGE IPVlanFlag = iota + IPVLAN_FLAG_PRIVATE + IPVLAN_FLAG_VEPA +) + type IPVlan struct { LinkAttrs Mode IPVlanMode + Flag IPVlanFlag } func (ipvlan *IPVlan) Attrs() *LinkAttrs { @@ -389,6 +428,43 @@ func (ipvlan *IPVlan) Type() string { return "ipvlan" } +// VlanProtocol type +type VlanProtocol int + +func (p VlanProtocol) String() string { + s, ok := VlanProtocolToString[p] + if !ok { + return fmt.Sprintf("VlanProtocol(%d)", p) + } + return s +} + +// StringToVlanProtocol returns vlan protocol, or unknown is the s is invalid. +func StringToVlanProtocol(s string) VlanProtocol { + mode, ok := StringToVlanProtocolMap[s] + if !ok { + return VLAN_PROTOCOL_UNKNOWN + } + return mode +} + +// VlanProtocol possible values +const ( + VLAN_PROTOCOL_UNKNOWN VlanProtocol = 0 + VLAN_PROTOCOL_8021Q VlanProtocol = 0x8100 + VLAN_PROTOCOL_8021AD VlanProtocol = 0x88A8 +) + +var VlanProtocolToString = map[VlanProtocol]string{ + VLAN_PROTOCOL_8021Q: "802.1q", + VLAN_PROTOCOL_8021AD: "802.1ad", +} + +var StringToVlanProtocolMap = map[string]VlanProtocol{ + "802.1q": VLAN_PROTOCOL_8021Q, + "802.1ad": VLAN_PROTOCOL_8021AD, +} + // BondMode type type BondMode int @@ -400,7 +476,7 @@ func (b BondMode) String() string { return s } -// StringToBondMode returns bond mode, or uknonw is the s is invalid. +// StringToBondMode returns bond mode, or unknown is the s is invalid. func StringToBondMode(s string) BondMode { mode, ok := StringToBondModeMap[s] if !ok { @@ -491,7 +567,7 @@ func (b BondXmitHashPolicy) String() string { return s } -// StringToBondXmitHashPolicy returns bond lacp arte, or uknonw is the s is invalid. +// StringToBondXmitHashPolicy returns bond lacp arte, or unknown is the s is invalid. 
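
The new VlanProtocol field and constants above let callers pick 802.1ad for
the outer tag of a QinQ setup; a later hunk serializes it as
IFLA_VLAN_PROTOCOL in network byte order. A usage sketch (parent device and
names are placeholders):

package main

import "github.com/vishvananda/netlink"

func main() {
	parent, err := netlink.LinkByName("eth0") // placeholder parent device
	if err != nil {
		panic(err)
	}
	vlan := &netlink.Vlan{
		LinkAttrs: netlink.LinkAttrs{
			Name:        "eth0.100",
			ParentIndex: parent.Attrs().Index,
		},
		VlanId:       100,
		VlanProtocol: netlink.VLAN_PROTOCOL_8021AD, // outer tag for QinQ
	}
	// Equivalent to `ip link add link eth0 eth0.100 type vlan id 100 protocol 802.1ad`.
	if err := netlink.LinkAdd(vlan); err != nil {
		panic(err)
	}
}
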
func StringToBondXmitHashPolicy(s string) BondXmitHashPolicy { lacp, ok := StringToBondXmitHashPolicyMap[s] if !ok { @@ -536,7 +612,7 @@ func (b BondLacpRate) String() string { return s } -// StringToBondLacpRate returns bond lacp arte, or uknonw is the s is invalid. +// StringToBondLacpRate returns bond lacp arte, or unknown is the s is invalid. func StringToBondLacpRate(s string) BondLacpRate { lacp, ok := StringToBondLacpRateMap[s] if !ok { @@ -680,6 +756,67 @@ func (bond *Bond) Type() string { return "bond" } +// BondSlaveState represents the values of the IFLA_BOND_SLAVE_STATE bond slave +// attribute, which contains the state of the bond slave. +type BondSlaveState uint8 + +const ( + BondStateActive = iota // Link is active. + BondStateBackup // Link is backup. +) + +func (s BondSlaveState) String() string { + switch s { + case BondStateActive: + return "ACTIVE" + case BondStateBackup: + return "BACKUP" + default: + return strconv.Itoa(int(s)) + } +} + +// BondSlaveState represents the values of the IFLA_BOND_SLAVE_MII_STATUS bond slave +// attribute, which contains the status of MII link monitoring +type BondSlaveMiiStatus uint8 + +const ( + BondLinkUp = iota // link is up and running. + BondLinkFail // link has just gone down. + BondLinkDown // link has been down for too long time. + BondLinkBack // link is going back. +) + +func (s BondSlaveMiiStatus) String() string { + switch s { + case BondLinkUp: + return "UP" + case BondLinkFail: + return "GOING_DOWN" + case BondLinkDown: + return "DOWN" + case BondLinkBack: + return "GOING_BACK" + default: + return strconv.Itoa(int(s)) + } +} + +type BondSlave struct { + State BondSlaveState + MiiStatus BondSlaveMiiStatus + LinkFailureCount uint32 + PermHardwareAddr net.HardwareAddr + QueueId uint16 + AggregatorId uint16 + AdActorOperPortState uint8 + AdPartnerOperPortState uint16 +} + +func (b *BondSlave) SlaveType() string { + return "bond" +} + // Gretap devices must specify LocalIP and RemoteIP on create type Gretap struct { LinkAttrs @@ -734,6 +871,27 @@ func (iptun *Iptun) Type() string { return "ipip" } +type Ip6tnl struct { + LinkAttrs + Link uint32 + Local net.IP + Remote net.IP + Ttl uint8 + Tos uint8 + EncapLimit uint8 + Flags uint32 + Proto uint8 + FlowInfo uint32 +} + +func (ip6tnl *Ip6tnl) Attrs() *LinkAttrs { + return &ip6tnl.LinkAttrs +} + +func (ip6tnl *Ip6tnl) Type() string { + return "ip6tnl" +} + type Sittun struct { LinkAttrs Link uint32 @@ -769,7 +927,10 @@ func (vti *Vti) Attrs() *LinkAttrs { return &vti.LinkAttrs } -func (iptun *Vti) Type() string { +func (vti *Vti) Type() string { + if vti.Local.To4() == nil { + return "vti6" + } return "vti" } @@ -831,11 +992,68 @@ func (gtp *GTP) Type() string { return "gtp" } +// Virtual XFRM Interfaces +// Named "xfrmi" to prevent confusion with XFRM objects +type Xfrmi struct { + LinkAttrs + Ifid uint32 +} + +func (xfrm *Xfrmi) Attrs() *LinkAttrs { + return &xfrm.LinkAttrs +} + +func (xfrm *Xfrmi) Type() string { + return "xfrm" +} + +// IPoIB interface + +type IPoIBMode uint16 + +func (m *IPoIBMode) String() string { + str, ok := iPoIBModeToString[*m] + if !ok { + return fmt.Sprintf("mode(%d)", *m) + } + return str +} + +const ( + IPOIB_MODE_DATAGRAM = iota + IPOIB_MODE_CONNECTED +) + +var iPoIBModeToString = map[IPoIBMode]string{ + IPOIB_MODE_DATAGRAM: "datagram", + IPOIB_MODE_CONNECTED: "connected", +} + +var StringToIPoIBMode = map[string]IPoIBMode{ + "datagram": IPOIB_MODE_DATAGRAM, + "connected": IPOIB_MODE_CONNECTED, +} + +type IPoIB struct { + LinkAttrs + Pkey uint16 + 
Mode IPoIBMode + Umcast uint16 +} + +func (ipoib *IPoIB) Attrs() *LinkAttrs { + return &ipoib.LinkAttrs +} + +func (ipoib *IPoIB) Type() string { + return "ipoib" +} + // iproute2 supported devices; // vlan | veth | vcan | dummy | ifb | macvlan | macvtap | // bridge | bond | ipoib | ip6tnl | ipip | sit | vxlan | -// gre | gretap | ip6gre | ip6gretap | vti | nlmon | -// bond_slave | ipvlan +// gre | gretap | ip6gre | ip6gretap | vti | vti6 | nlmon | +// bond_slave | ipvlan | xfrm // LinkNotFoundError wraps the various not found errors when // getting/reading links. This is intended for better error diff --git a/vendor/github.com/vishvananda/netlink/link_linux.go b/vendor/github.com/vishvananda/netlink/link_linux.go index 540191ed843c..ec915a0b9790 100644 --- a/vendor/github.com/vishvananda/netlink/link_linux.go +++ b/vendor/github.com/vishvananda/netlink/link_linux.go @@ -4,8 +4,11 @@ import ( "bytes" "encoding/binary" "fmt" + "io/ioutil" "net" "os" + "strconv" + "strings" "syscall" "unsafe" @@ -16,7 +19,7 @@ import ( const ( SizeofLinkStats32 = 0x5c - SizeofLinkStats64 = 0xd8 + SizeofLinkStats64 = 0xb8 ) const ( @@ -31,6 +34,12 @@ const ( TUNTAP_MULTI_QUEUE_DEFAULTS TuntapFlag = TUNTAP_MULTI_QUEUE | TUNTAP_NO_PI ) +const ( + VF_LINK_STATE_AUTO uint32 = 0 + VF_LINK_STATE_ENABLE uint32 = 1 + VF_LINK_STATE_DISABLE uint32 = 2 +) + var lookupByDump = false var macvlanModes = [...]uint32{ @@ -113,6 +122,52 @@ func (h *Handle) SetPromiscOn(link Link) error { return err } +// LinkSetAllmulticastOn enables the reception of all hardware multicast packets for the link device. +// Equivalent to: `ip link set $link allmulticast on` +func LinkSetAllmulticastOn(link Link) error { + return pkgHandle.LinkSetAllmulticastOn(link) +} + +// LinkSetAllmulticastOn enables the reception of all hardware multicast packets for the link device. +// Equivalent to: `ip link set $link allmulticast on` +func (h *Handle) LinkSetAllmulticastOn(link Link) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_NEWLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Change = unix.IFF_ALLMULTI + msg.Flags = unix.IFF_ALLMULTI + + msg.Index = int32(base.Index) + req.AddData(msg) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetAllmulticastOff disables the reception of all hardware multicast packets for the link device. +// Equivalent to: `ip link set $link allmulticast off` +func LinkSetAllmulticastOff(link Link) error { + return pkgHandle.LinkSetAllmulticastOff(link) +} + +// LinkSetAllmulticastOff disables the reception of all hardware multicast packets for the link device. 
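
The allmulticast helpers follow the promisc setters above: IFF_ALLMULTI goes
into Change, and into Flags only when enabling. A minimal usage sketch (the
interface name is a placeholder):

package main

import "github.com/vishvananda/netlink"

func main() {
	link, err := netlink.LinkByName("eth0") // placeholder interface
	if err != nil {
		panic(err)
	}
	// Equivalent to `ip link set eth0 allmulticast on`.
	if err := netlink.LinkSetAllmulticastOn(link); err != nil {
		panic(err)
	}
}
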
+// Equivalent to: `ip link set $link allmulticast off` +func (h *Handle) LinkSetAllmulticastOff(link Link) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_NEWLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Change = unix.IFF_ALLMULTI + msg.Index = int32(base.Index) + req.AddData(msg) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + func MacvlanMACAddrAdd(link Link, addr net.HardwareAddr) error { return pkgHandle.MacvlanMACAddrAdd(link, addr) } @@ -155,24 +210,24 @@ func (h *Handle) macvlanMACAddrChange(link Link, addrs []net.HardwareAddr, mode req.AddData(msg) linkInfo := nl.NewRtAttr(unix.IFLA_LINKINFO, nil) - nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_KIND, nl.NonZeroTerminated(link.Type())) - inner := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) + linkInfo.AddRtAttr(nl.IFLA_INFO_KIND, nl.NonZeroTerminated(link.Type())) + inner := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) // IFLA_MACVLAN_MACADDR_MODE = mode b := make([]byte, 4) native.PutUint32(b, mode) - nl.NewRtAttrChild(inner, nl.IFLA_MACVLAN_MACADDR_MODE, b) + inner.AddRtAttr(nl.IFLA_MACVLAN_MACADDR_MODE, b) // populate message with MAC addrs, if necessary switch mode { case nl.MACVLAN_MACADDR_ADD, nl.MACVLAN_MACADDR_DEL: if len(addrs) == 1 { - nl.NewRtAttrChild(inner, nl.IFLA_MACVLAN_MACADDR, []byte(addrs[0])) + inner.AddRtAttr(nl.IFLA_MACVLAN_MACADDR, []byte(addrs[0])) } case nl.MACVLAN_MACADDR_SET: - mad := nl.NewRtAttrChild(inner, nl.IFLA_MACVLAN_MACADDR_DATA, nil) + mad := inner.AddRtAttr(nl.IFLA_MACVLAN_MACADDR_DATA, nil) for _, addr := range addrs { - nl.NewRtAttrChild(mad, nl.IFLA_MACVLAN_MACADDR, []byte(addr)) + mad.AddRtAttr(nl.IFLA_MACVLAN_MACADDR, []byte(addr)) } } @@ -203,7 +258,6 @@ func (h *Handle) SetPromiscOff(link Link) error { msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Change = unix.IFF_PROMISC - msg.Flags = 0 & ^unix.IFF_PROMISC msg.Index = int32(base.Index) req.AddData(msg) @@ -253,7 +307,6 @@ func (h *Handle) LinkSetDown(link Link) error { msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Change = unix.IFF_UP - msg.Flags = 0 & ^unix.IFF_UP msg.Index = int32(base.Index) req.AddData(msg) @@ -378,12 +431,12 @@ func (h *Handle) LinkSetVfHardwareAddr(link Link, vf int, hwaddr net.HardwareAdd req.AddData(msg) data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil) - info := nl.NewRtAttrChild(data, nl.IFLA_VF_INFO, nil) + info := data.AddRtAttr(nl.IFLA_VF_INFO, nil) vfmsg := nl.VfMac{ Vf: uint32(vf), } copy(vfmsg.Mac[:], []byte(hwaddr)) - nl.NewRtAttrChild(info, nl.IFLA_VF_MAC, vfmsg.Serialize()) + info.AddRtAttr(nl.IFLA_VF_MAC, vfmsg.Serialize()) req.AddData(data) _, err := req.Execute(unix.NETLINK_ROUTE, 0) @@ -407,11 +460,42 @@ func (h *Handle) LinkSetVfVlan(link Link, vf, vlan int) error { msg.Index = int32(base.Index) req.AddData(msg) + data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil) + info := data.AddRtAttr(nl.IFLA_VF_INFO, nil) + vfmsg := nl.VfVlan{ + Vf: uint32(vf), + Vlan: uint32(vlan), + } + info.AddRtAttr(nl.IFLA_VF_VLAN, vfmsg.Serialize()) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetVfVlanQos sets the vlan and qos priority of a vf for the link. +// Equivalent to: `ip link set $link vf $vf vlan $vlan qos $qos` +func LinkSetVfVlanQos(link Link, vf, vlan, qos int) error { + return pkgHandle.LinkSetVfVlanQos(link, vf, vlan, qos) +} + +// LinkSetVfVlanQos sets the vlan and qos priority of a vf for the link. 
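
All the VF setters in this file share the shape shown above: an
IFLA_VFINFO_LIST attribute wrapping an IFLA_VF_INFO entry whose payload is a
serialized nl.Vf* struct. A hedged sketch combining two of the setters
(device name, VF index, and values are placeholders):

package main

import (
	"net"

	"github.com/vishvananda/netlink"
)

func main() {
	link, err := netlink.LinkByName("enp3s0f0") // placeholder PF
	if err != nil {
		panic(err)
	}
	mac, err := net.ParseMAC("02:00:00:00:00:01")
	if err != nil {
		panic(err)
	}
	// Equivalent to `ip link set enp3s0f0 vf 0 mac 02:00:00:00:00:01`.
	if err := netlink.LinkSetVfHardwareAddr(link, 0, mac); err != nil {
		panic(err)
	}
	// Equivalent to `ip link set enp3s0f0 vf 0 vlan 100`.
	if err := netlink.LinkSetVfVlan(link, 0, 100); err != nil {
		panic(err)
	}
}
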
+// Equivalent to: `ip link set $link vf $vf vlan $vlan qos $qos` +func (h *Handle) LinkSetVfVlanQos(link Link, vf, vlan, qos int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil) info := nl.NewRtAttrChild(data, nl.IFLA_VF_INFO, nil) vfmsg := nl.VfVlan{ Vf: uint32(vf), Vlan: uint32(vlan), + Qos: uint32(qos), } nl.NewRtAttrChild(info, nl.IFLA_VF_VLAN, vfmsg.Serialize()) req.AddData(data) @@ -438,12 +522,73 @@ func (h *Handle) LinkSetVfTxRate(link Link, vf, rate int) error { req.AddData(msg) data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil) - info := nl.NewRtAttrChild(data, nl.IFLA_VF_INFO, nil) + info := data.AddRtAttr(nl.IFLA_VF_INFO, nil) vfmsg := nl.VfTxRate{ Vf: uint32(vf), Rate: uint32(rate), } - nl.NewRtAttrChild(info, nl.IFLA_VF_TX_RATE, vfmsg.Serialize()) + info.AddRtAttr(nl.IFLA_VF_TX_RATE, vfmsg.Serialize()) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetVfRate sets the min and max tx rate of a vf for the link. +// Equivalent to: `ip link set $link vf $vf min_tx_rate $min_rate max_tx_rate $max_rate` +func LinkSetVfRate(link Link, vf, minRate, maxRate int) error { + return pkgHandle.LinkSetVfRate(link, vf, minRate, maxRate) +} + +// LinkSetVfRate sets the min and max tx rate of a vf for the link. +// Equivalent to: `ip link set $link vf $vf min_tx_rate $min_rate max_tx_rate $max_rate` +func (h *Handle) LinkSetVfRate(link Link, vf, minRate, maxRate int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil) + info := data.AddRtAttr(nl.IFLA_VF_INFO, nil) + vfmsg := nl.VfRate{ + Vf: uint32(vf), + MinTxRate: uint32(minRate), + MaxTxRate: uint32(maxRate), + } + info.AddRtAttr(nl.IFLA_VF_RATE, vfmsg.Serialize()) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetVfState enables/disables virtual link state on a vf. +// Equivalent to: `ip link set $link vf $vf state $state` +func LinkSetVfState(link Link, vf int, state uint32) error { + return pkgHandle.LinkSetVfState(link, vf, state) +} + +// LinkSetVfState enables/disables virtual link state on a vf. +// Equivalent to: `ip link set $link vf $vf state $state` +func (h *Handle) LinkSetVfState(link Link, vf int, state uint32) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil) + info := data.AddRtAttr(nl.IFLA_VF_INFO, nil) + vfmsg := nl.VfLinkState{ + Vf: uint32(vf), + LinkState: state, + } + info.AddRtAttr(nl.IFLA_VF_LINK_STATE, vfmsg.Serialize()) req.AddData(data) _, err := req.Execute(unix.NETLINK_ROUTE, 0) @@ -456,7 +601,7 @@ func LinkSetVfSpoofchk(link Link, vf int, check bool) error { return pkgHandle.LinkSetVfSpoofchk(link, vf, check) } -// LinkSetVfSpookfchk enables/disables spoof check on a vf for the link. +// LinkSetVfSpoofchk enables/disables spoof check on a vf for the link. 
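
LinkSetVfRate carries both bounds in a single nl.VfRate message, unlike the
older single-value LinkSetVfTxRate, and LinkSetVfState takes the
VF_LINK_STATE_* constants defined earlier in this patch. A usage sketch
(device name, VF index, and rates are placeholders):

package main

import "github.com/vishvananda/netlink"

func main() {
	link, err := netlink.LinkByName("enp3s0f0") // placeholder PF
	if err != nil {
		panic(err)
	}
	// Equivalent to `ip link set enp3s0f0 vf 0 min_tx_rate 100 max_tx_rate 1000`.
	if err := netlink.LinkSetVfRate(link, 0, 100, 1000); err != nil {
		panic(err)
	}
	// Keep the VF link up regardless of the PF's state.
	if err := netlink.LinkSetVfState(link, 0, netlink.VF_LINK_STATE_ENABLE); err != nil {
		panic(err)
	}
}
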
// Equivalent to: `ip link set $link vf $vf spoofchk $check` func (h *Handle) LinkSetVfSpoofchk(link Link, vf int, check bool) error { var setting uint32 @@ -469,7 +614,7 @@ func (h *Handle) LinkSetVfSpoofchk(link Link, vf int, check bool) error { req.AddData(msg) data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil) - info := nl.NewRtAttrChild(data, nl.IFLA_VF_INFO, nil) + info := data.AddRtAttr(nl.IFLA_VF_INFO, nil) if check { setting = 1 } @@ -477,7 +622,7 @@ func (h *Handle) LinkSetVfSpoofchk(link Link, vf int, check bool) error { Vf: uint32(vf), Setting: setting, } - nl.NewRtAttrChild(info, nl.IFLA_VF_SPOOFCHK, vfmsg.Serialize()) + info.AddRtAttr(nl.IFLA_VF_SPOOFCHK, vfmsg.Serialize()) req.AddData(data) _, err := req.Execute(unix.NETLINK_ROUTE, 0) @@ -503,7 +648,7 @@ func (h *Handle) LinkSetVfTrust(link Link, vf int, state bool) error { req.AddData(msg) data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil) - info := nl.NewRtAttrChild(data, nl.IFLA_VF_INFO, nil) + info := data.AddRtAttr(nl.IFLA_VF_INFO, nil) if state { setting = 1 } @@ -511,22 +656,66 @@ func (h *Handle) LinkSetVfTrust(link Link, vf int, state bool) error { Vf: uint32(vf), Setting: setting, } - nl.NewRtAttrChild(info, nl.IFLA_VF_TRUST, vfmsg.Serialize()) + info.AddRtAttr(nl.IFLA_VF_TRUST, vfmsg.Serialize()) req.AddData(data) _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } +// LinkSetVfNodeGUID sets the node GUID of a vf for the link. +// Equivalent to: `ip link set dev $link vf $vf node_guid $nodeguid` +func LinkSetVfNodeGUID(link Link, vf int, nodeguid net.HardwareAddr) error { + return pkgHandle.LinkSetVfGUID(link, vf, nodeguid, nl.IFLA_VF_IB_NODE_GUID) +} + +// LinkSetVfPortGUID sets the port GUID of a vf for the link. +// Equivalent to: `ip link set dev $link vf $vf port_guid $portguid` +func LinkSetVfPortGUID(link Link, vf int, portguid net.HardwareAddr) error { + return pkgHandle.LinkSetVfGUID(link, vf, portguid, nl.IFLA_VF_IB_PORT_GUID) +} + +// LinkSetVfGUID sets the node or port GUID of a vf for the link. +func (h *Handle) LinkSetVfGUID(link Link, vf int, vfGuid net.HardwareAddr, guidType int) error { + var err error + var guid uint64 + + buf := bytes.NewBuffer(vfGuid) + err = binary.Read(buf, binary.BigEndian, &guid) + if err != nil { + return err + } + + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil) + info := data.AddRtAttr(nl.IFLA_VF_INFO, nil) + vfmsg := nl.VfGUID{ + Vf: uint32(vf), + GUID: guid, + } + info.AddRtAttr(guidType, vfmsg.Serialize()) + req.AddData(data) + + _, err = req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + // LinkSetMaster sets the master of the link device. // Equivalent to: `ip link set $link master $master` -func LinkSetMaster(link Link, master *Bridge) error { +func LinkSetMaster(link Link, master Link) error { return pkgHandle.LinkSetMaster(link, master) } // LinkSetMaster sets the master of the link device. 
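
Widening LinkSetMaster from *Bridge to the Link interface is the one breaking
API change in this hunk; any master-capable device can now be passed directly
instead of going through LinkSetMasterByIndex. A sketch of enslaving an
interface to a bond (device names are placeholders):

package main

import "github.com/vishvananda/netlink"

func main() {
	slave, err := netlink.LinkByName("eth0") // placeholder slave
	if err != nil {
		panic(err)
	}
	master, err := netlink.LinkByName("bond0") // placeholder bond master
	if err != nil {
		panic(err)
	}
	// Equivalent to `ip link set eth0 master bond0`.
	if err := netlink.LinkSetMaster(slave, master); err != nil {
		panic(err)
	}
}
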
// Equivalent to: `ip link set $link master $master` -func (h *Handle) LinkSetMaster(link Link, master *Bridge) error { +func (h *Handle) LinkSetMaster(link Link, master Link) error { index := 0 if master != nil { masterBase := master.Attrs() @@ -672,69 +861,69 @@ type vxlanPortRange struct { } func addVxlanAttrs(vxlan *Vxlan, linkInfo *nl.RtAttr) { - data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) if vxlan.FlowBased { vxlan.VxlanId = 0 } - nl.NewRtAttrChild(data, nl.IFLA_VXLAN_ID, nl.Uint32Attr(uint32(vxlan.VxlanId))) + data.AddRtAttr(nl.IFLA_VXLAN_ID, nl.Uint32Attr(uint32(vxlan.VxlanId))) if vxlan.VtepDevIndex != 0 { - nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LINK, nl.Uint32Attr(uint32(vxlan.VtepDevIndex))) + data.AddRtAttr(nl.IFLA_VXLAN_LINK, nl.Uint32Attr(uint32(vxlan.VtepDevIndex))) } if vxlan.SrcAddr != nil { ip := vxlan.SrcAddr.To4() if ip != nil { - nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LOCAL, []byte(ip)) + data.AddRtAttr(nl.IFLA_VXLAN_LOCAL, []byte(ip)) } else { ip = vxlan.SrcAddr.To16() if ip != nil { - nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LOCAL6, []byte(ip)) + data.AddRtAttr(nl.IFLA_VXLAN_LOCAL6, []byte(ip)) } } } if vxlan.Group != nil { group := vxlan.Group.To4() if group != nil { - nl.NewRtAttrChild(data, nl.IFLA_VXLAN_GROUP, []byte(group)) + data.AddRtAttr(nl.IFLA_VXLAN_GROUP, []byte(group)) } else { group = vxlan.Group.To16() if group != nil { - nl.NewRtAttrChild(data, nl.IFLA_VXLAN_GROUP6, []byte(group)) + data.AddRtAttr(nl.IFLA_VXLAN_GROUP6, []byte(group)) } } } - nl.NewRtAttrChild(data, nl.IFLA_VXLAN_TTL, nl.Uint8Attr(uint8(vxlan.TTL))) - nl.NewRtAttrChild(data, nl.IFLA_VXLAN_TOS, nl.Uint8Attr(uint8(vxlan.TOS))) - nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LEARNING, boolAttr(vxlan.Learning)) - nl.NewRtAttrChild(data, nl.IFLA_VXLAN_PROXY, boolAttr(vxlan.Proxy)) - nl.NewRtAttrChild(data, nl.IFLA_VXLAN_RSC, boolAttr(vxlan.RSC)) - nl.NewRtAttrChild(data, nl.IFLA_VXLAN_L2MISS, boolAttr(vxlan.L2miss)) - nl.NewRtAttrChild(data, nl.IFLA_VXLAN_L3MISS, boolAttr(vxlan.L3miss)) - nl.NewRtAttrChild(data, nl.IFLA_VXLAN_UDP_ZERO_CSUM6_TX, boolAttr(vxlan.UDP6ZeroCSumTx)) - nl.NewRtAttrChild(data, nl.IFLA_VXLAN_UDP_ZERO_CSUM6_RX, boolAttr(vxlan.UDP6ZeroCSumRx)) + data.AddRtAttr(nl.IFLA_VXLAN_TTL, nl.Uint8Attr(uint8(vxlan.TTL))) + data.AddRtAttr(nl.IFLA_VXLAN_TOS, nl.Uint8Attr(uint8(vxlan.TOS))) + data.AddRtAttr(nl.IFLA_VXLAN_LEARNING, boolAttr(vxlan.Learning)) + data.AddRtAttr(nl.IFLA_VXLAN_PROXY, boolAttr(vxlan.Proxy)) + data.AddRtAttr(nl.IFLA_VXLAN_RSC, boolAttr(vxlan.RSC)) + data.AddRtAttr(nl.IFLA_VXLAN_L2MISS, boolAttr(vxlan.L2miss)) + data.AddRtAttr(nl.IFLA_VXLAN_L3MISS, boolAttr(vxlan.L3miss)) + data.AddRtAttr(nl.IFLA_VXLAN_UDP_ZERO_CSUM6_TX, boolAttr(vxlan.UDP6ZeroCSumTx)) + data.AddRtAttr(nl.IFLA_VXLAN_UDP_ZERO_CSUM6_RX, boolAttr(vxlan.UDP6ZeroCSumRx)) if vxlan.UDPCSum { - nl.NewRtAttrChild(data, nl.IFLA_VXLAN_UDP_CSUM, boolAttr(vxlan.UDPCSum)) + data.AddRtAttr(nl.IFLA_VXLAN_UDP_CSUM, boolAttr(vxlan.UDPCSum)) } if vxlan.GBP { - nl.NewRtAttrChild(data, nl.IFLA_VXLAN_GBP, []byte{}) + data.AddRtAttr(nl.IFLA_VXLAN_GBP, []byte{}) } if vxlan.FlowBased { - nl.NewRtAttrChild(data, nl.IFLA_VXLAN_FLOWBASED, boolAttr(vxlan.FlowBased)) + data.AddRtAttr(nl.IFLA_VXLAN_FLOWBASED, boolAttr(vxlan.FlowBased)) } if vxlan.NoAge { - nl.NewRtAttrChild(data, nl.IFLA_VXLAN_AGEING, nl.Uint32Attr(0)) + data.AddRtAttr(nl.IFLA_VXLAN_AGEING, nl.Uint32Attr(0)) } else if vxlan.Age > 0 { - nl.NewRtAttrChild(data, nl.IFLA_VXLAN_AGEING, 
nl.Uint32Attr(uint32(vxlan.Age))) + data.AddRtAttr(nl.IFLA_VXLAN_AGEING, nl.Uint32Attr(uint32(vxlan.Age))) } if vxlan.Limit > 0 { - nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LIMIT, nl.Uint32Attr(uint32(vxlan.Limit))) + data.AddRtAttr(nl.IFLA_VXLAN_LIMIT, nl.Uint32Attr(uint32(vxlan.Limit))) } if vxlan.Port > 0 { - nl.NewRtAttrChild(data, nl.IFLA_VXLAN_PORT, htons(uint16(vxlan.Port))) + data.AddRtAttr(nl.IFLA_VXLAN_PORT, htons(uint16(vxlan.Port))) } if vxlan.PortLow > 0 || vxlan.PortHigh > 0 { pr := vxlanPortRange{uint16(vxlan.PortLow), uint16(vxlan.PortHigh)} @@ -742,100 +931,100 @@ func addVxlanAttrs(vxlan *Vxlan, linkInfo *nl.RtAttr) { buf := new(bytes.Buffer) binary.Write(buf, binary.BigEndian, &pr) - nl.NewRtAttrChild(data, nl.IFLA_VXLAN_PORT_RANGE, buf.Bytes()) + data.AddRtAttr(nl.IFLA_VXLAN_PORT_RANGE, buf.Bytes()) } } func addBondAttrs(bond *Bond, linkInfo *nl.RtAttr) { - data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) if bond.Mode >= 0 { - nl.NewRtAttrChild(data, nl.IFLA_BOND_MODE, nl.Uint8Attr(uint8(bond.Mode))) + data.AddRtAttr(nl.IFLA_BOND_MODE, nl.Uint8Attr(uint8(bond.Mode))) } if bond.ActiveSlave >= 0 { - nl.NewRtAttrChild(data, nl.IFLA_BOND_ACTIVE_SLAVE, nl.Uint32Attr(uint32(bond.ActiveSlave))) + data.AddRtAttr(nl.IFLA_BOND_ACTIVE_SLAVE, nl.Uint32Attr(uint32(bond.ActiveSlave))) } if bond.Miimon >= 0 { - nl.NewRtAttrChild(data, nl.IFLA_BOND_MIIMON, nl.Uint32Attr(uint32(bond.Miimon))) + data.AddRtAttr(nl.IFLA_BOND_MIIMON, nl.Uint32Attr(uint32(bond.Miimon))) } if bond.UpDelay >= 0 { - nl.NewRtAttrChild(data, nl.IFLA_BOND_UPDELAY, nl.Uint32Attr(uint32(bond.UpDelay))) + data.AddRtAttr(nl.IFLA_BOND_UPDELAY, nl.Uint32Attr(uint32(bond.UpDelay))) } if bond.DownDelay >= 0 { - nl.NewRtAttrChild(data, nl.IFLA_BOND_DOWNDELAY, nl.Uint32Attr(uint32(bond.DownDelay))) + data.AddRtAttr(nl.IFLA_BOND_DOWNDELAY, nl.Uint32Attr(uint32(bond.DownDelay))) } if bond.UseCarrier >= 0 { - nl.NewRtAttrChild(data, nl.IFLA_BOND_USE_CARRIER, nl.Uint8Attr(uint8(bond.UseCarrier))) + data.AddRtAttr(nl.IFLA_BOND_USE_CARRIER, nl.Uint8Attr(uint8(bond.UseCarrier))) } if bond.ArpInterval >= 0 { - nl.NewRtAttrChild(data, nl.IFLA_BOND_ARP_INTERVAL, nl.Uint32Attr(uint32(bond.ArpInterval))) + data.AddRtAttr(nl.IFLA_BOND_ARP_INTERVAL, nl.Uint32Attr(uint32(bond.ArpInterval))) } if bond.ArpIpTargets != nil { - msg := nl.NewRtAttrChild(data, nl.IFLA_BOND_ARP_IP_TARGET, nil) + msg := data.AddRtAttr(nl.IFLA_BOND_ARP_IP_TARGET, nil) for i := range bond.ArpIpTargets { ip := bond.ArpIpTargets[i].To4() if ip != nil { - nl.NewRtAttrChild(msg, i, []byte(ip)) + msg.AddRtAttr(i, []byte(ip)) continue } ip = bond.ArpIpTargets[i].To16() if ip != nil { - nl.NewRtAttrChild(msg, i, []byte(ip)) + msg.AddRtAttr(i, []byte(ip)) } } } if bond.ArpValidate >= 0 { - nl.NewRtAttrChild(data, nl.IFLA_BOND_ARP_VALIDATE, nl.Uint32Attr(uint32(bond.ArpValidate))) + data.AddRtAttr(nl.IFLA_BOND_ARP_VALIDATE, nl.Uint32Attr(uint32(bond.ArpValidate))) } if bond.ArpAllTargets >= 0 { - nl.NewRtAttrChild(data, nl.IFLA_BOND_ARP_ALL_TARGETS, nl.Uint32Attr(uint32(bond.ArpAllTargets))) + data.AddRtAttr(nl.IFLA_BOND_ARP_ALL_TARGETS, nl.Uint32Attr(uint32(bond.ArpAllTargets))) } if bond.Primary >= 0 { - nl.NewRtAttrChild(data, nl.IFLA_BOND_PRIMARY, nl.Uint32Attr(uint32(bond.Primary))) + data.AddRtAttr(nl.IFLA_BOND_PRIMARY, nl.Uint32Attr(uint32(bond.Primary))) } if bond.PrimaryReselect >= 0 { - nl.NewRtAttrChild(data, nl.IFLA_BOND_PRIMARY_RESELECT, nl.Uint8Attr(uint8(bond.PrimaryReselect))) + 
data.AddRtAttr(nl.IFLA_BOND_PRIMARY_RESELECT, nl.Uint8Attr(uint8(bond.PrimaryReselect))) } if bond.FailOverMac >= 0 { - nl.NewRtAttrChild(data, nl.IFLA_BOND_FAIL_OVER_MAC, nl.Uint8Attr(uint8(bond.FailOverMac))) + data.AddRtAttr(nl.IFLA_BOND_FAIL_OVER_MAC, nl.Uint8Attr(uint8(bond.FailOverMac))) } if bond.XmitHashPolicy >= 0 { - nl.NewRtAttrChild(data, nl.IFLA_BOND_XMIT_HASH_POLICY, nl.Uint8Attr(uint8(bond.XmitHashPolicy))) + data.AddRtAttr(nl.IFLA_BOND_XMIT_HASH_POLICY, nl.Uint8Attr(uint8(bond.XmitHashPolicy))) } if bond.ResendIgmp >= 0 { - nl.NewRtAttrChild(data, nl.IFLA_BOND_RESEND_IGMP, nl.Uint32Attr(uint32(bond.ResendIgmp))) + data.AddRtAttr(nl.IFLA_BOND_RESEND_IGMP, nl.Uint32Attr(uint32(bond.ResendIgmp))) } if bond.NumPeerNotif >= 0 { - nl.NewRtAttrChild(data, nl.IFLA_BOND_NUM_PEER_NOTIF, nl.Uint8Attr(uint8(bond.NumPeerNotif))) + data.AddRtAttr(nl.IFLA_BOND_NUM_PEER_NOTIF, nl.Uint8Attr(uint8(bond.NumPeerNotif))) } if bond.AllSlavesActive >= 0 { - nl.NewRtAttrChild(data, nl.IFLA_BOND_ALL_SLAVES_ACTIVE, nl.Uint8Attr(uint8(bond.AllSlavesActive))) + data.AddRtAttr(nl.IFLA_BOND_ALL_SLAVES_ACTIVE, nl.Uint8Attr(uint8(bond.AllSlavesActive))) } if bond.MinLinks >= 0 { - nl.NewRtAttrChild(data, nl.IFLA_BOND_MIN_LINKS, nl.Uint32Attr(uint32(bond.MinLinks))) + data.AddRtAttr(nl.IFLA_BOND_MIN_LINKS, nl.Uint32Attr(uint32(bond.MinLinks))) } if bond.LpInterval >= 0 { - nl.NewRtAttrChild(data, nl.IFLA_BOND_LP_INTERVAL, nl.Uint32Attr(uint32(bond.LpInterval))) + data.AddRtAttr(nl.IFLA_BOND_LP_INTERVAL, nl.Uint32Attr(uint32(bond.LpInterval))) } if bond.PackersPerSlave >= 0 { - nl.NewRtAttrChild(data, nl.IFLA_BOND_PACKETS_PER_SLAVE, nl.Uint32Attr(uint32(bond.PackersPerSlave))) + data.AddRtAttr(nl.IFLA_BOND_PACKETS_PER_SLAVE, nl.Uint32Attr(uint32(bond.PackersPerSlave))) } if bond.LacpRate >= 0 { - nl.NewRtAttrChild(data, nl.IFLA_BOND_AD_LACP_RATE, nl.Uint8Attr(uint8(bond.LacpRate))) + data.AddRtAttr(nl.IFLA_BOND_AD_LACP_RATE, nl.Uint8Attr(uint8(bond.LacpRate))) } if bond.AdSelect >= 0 { - nl.NewRtAttrChild(data, nl.IFLA_BOND_AD_SELECT, nl.Uint8Attr(uint8(bond.AdSelect))) + data.AddRtAttr(nl.IFLA_BOND_AD_SELECT, nl.Uint8Attr(uint8(bond.AdSelect))) } if bond.AdActorSysPrio >= 0 { - nl.NewRtAttrChild(data, nl.IFLA_BOND_AD_ACTOR_SYS_PRIO, nl.Uint16Attr(uint16(bond.AdActorSysPrio))) + data.AddRtAttr(nl.IFLA_BOND_AD_ACTOR_SYS_PRIO, nl.Uint16Attr(uint16(bond.AdActorSysPrio))) } if bond.AdUserPortKey >= 0 { - nl.NewRtAttrChild(data, nl.IFLA_BOND_AD_USER_PORT_KEY, nl.Uint16Attr(uint16(bond.AdUserPortKey))) + data.AddRtAttr(nl.IFLA_BOND_AD_USER_PORT_KEY, nl.Uint16Attr(uint16(bond.AdUserPortKey))) } if bond.AdActorSystem != nil { - nl.NewRtAttrChild(data, nl.IFLA_BOND_AD_ACTOR_SYSTEM, []byte(bond.AdActorSystem)) + data.AddRtAttr(nl.IFLA_BOND_AD_ACTOR_SYSTEM, []byte(bond.AdActorSystem)) } if bond.TlbDynamicLb >= 0 { - nl.NewRtAttrChild(data, nl.IFLA_BOND_TLB_DYNAMIC_LB, nl.Uint8Attr(uint8(bond.TlbDynamicLb))) + data.AddRtAttr(nl.IFLA_BOND_TLB_DYNAMIC_LB, nl.Uint8Attr(uint8(bond.TlbDynamicLb))) } } @@ -853,7 +1042,7 @@ func LinkAdd(link Link) error { } // LinkAdd adds a new link device. The type and features of the device -// are taken fromt the parameters in the link object. +// are taken from the parameters in the link object. 
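
LinkAdd consumes everything the attribute builders above prepare. A minimal
sketch that also exercises the new Group attribute; the Dummy link type is
assumed from the package's supported-device list rather than shown in this
diff:

package main

import "github.com/vishvananda/netlink"

func main() {
	link := &netlink.Dummy{
		LinkAttrs: netlink.LinkAttrs{
			Name:  "dummy0",
			Group: 7, // serialized as IFLA_GROUP by a later hunk
		},
	}
	// Equivalent to `ip link add dummy0 type dummy group 7`.
	if err := netlink.LinkAdd(link); err != nil {
		panic(err)
	}
}
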
// Equivalent to: `ip link add $link` func (h *Handle) LinkAdd(link Link) error { return h.linkModify(link, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) @@ -863,16 +1052,18 @@ func (h *Handle) linkModify(link Link, flags int) error { // TODO: support extra data for macvlan base := link.Attrs() - if base.Name == "" { - return fmt.Errorf("LinkAttrs.Name cannot be empty!") + // if tuntap, then the name can be empty, OS will provide a name + tuntap, isTuntap := link.(*Tuntap) + + if base.Name == "" && !isTuntap { + return fmt.Errorf("LinkAttrs.Name cannot be empty") } - if tuntap, ok := link.(*Tuntap); ok { + if isTuntap { // TODO: support user // TODO: support group - // TODO: support non- persistent if tuntap.Mode < unix.IFF_TUN || tuntap.Mode > unix.IFF_TAP { - return fmt.Errorf("Tuntap.Mode %v unknown!", tuntap.Mode) + return fmt.Errorf("Tuntap.Mode %v unknown", tuntap.Mode) } queues := tuntap.Queues @@ -913,12 +1104,25 @@ func (h *Handle) linkModify(link Link, flags int) error { cleanupFds(fds) return fmt.Errorf("Tuntap IOCTL TUNSETIFF failed [%d], errno %v", i, errno) } + // 1) we only care for the name of the first tap in the multi queue set + // 2) if the original name was empty, the localReq has now the actual name + // + // In addition: + // This ensures that the link name is always identical to what the kernel returns. + // Not only in case of an empty name, but also when using name templates. + // e.g. when the provided name is "tap%d", the kernel replaces %d with the next available number. + if i == 0 { + link.Attrs().Name = strings.Trim(string(localReq.Name[:]), "\x00") + } } - _, _, errno := unix.Syscall(unix.SYS_IOCTL, fds[0].Fd(), uintptr(unix.TUNSETPERSIST), 1) - if errno != 0 { - cleanupFds(fds) - return fmt.Errorf("Tuntap IOCTL TUNSETPERSIST failed, errno %v", errno) + // only persist interface if NonPersist is NOT set + if !tuntap.NonPersist { + _, _, errno := unix.Syscall(unix.SYS_IOCTL, fds[0].Fd(), uintptr(unix.TUNSETPERSIST), 1) + if errno != 0 { + cleanupFds(fds) + return fmt.Errorf("Tuntap IOCTL TUNSETPERSIST failed, errno %v", errno) + } } h.ensureIndex(base) @@ -928,7 +1132,11 @@ func (h *Handle) linkModify(link Link, flags int) error { // TODO: verify MasterIndex is actually a bridge? err := h.LinkSetMasterByIndex(link, base.MasterIndex) if err != nil { - _, _, _ = unix.Syscall(unix.SYS_IOCTL, fds[0].Fd(), uintptr(unix.TUNSETPERSIST), 0) + // un-persist (e.g. 
allow the interface to be removed) the tuntap + // should not hurt if not set prior, condition might be not needed + if !tuntap.NonPersist { + _, _, _ = unix.Syscall(unix.SYS_IOCTL, fds[0].Fd(), uintptr(unix.TUNSETPERSIST), 0) + } cleanupFds(fds) return err } @@ -978,8 +1186,8 @@ func (h *Handle) linkModify(link Link, flags int) error { native.PutUint32(b, uint32(base.ParentIndex)) data := nl.NewRtAttr(unix.IFLA_LINK, b) req.AddData(data) - } else if link.Type() == "ipvlan" { - return fmt.Errorf("Can't create ipvlan link without ParentIndex") + } else if link.Type() == "ipvlan" || link.Type() == "ipoib" { + return fmt.Errorf("Can't create %s link without ParentIndex", link.Type()) } nameData := nl.NewRtAttr(unix.IFLA_IFNAME, nl.ZeroTerminated(base.Name)) @@ -1010,14 +1218,29 @@ func (h *Handle) linkModify(link Link, flags int) error { req.AddData(rxqueues) } + if base.GSOMaxSegs > 0 { + gsoAttr := nl.NewRtAttr(unix.IFLA_GSO_MAX_SEGS, nl.Uint32Attr(base.GSOMaxSegs)) + req.AddData(gsoAttr) + } + + if base.GSOMaxSize > 0 { + gsoAttr := nl.NewRtAttr(unix.IFLA_GSO_MAX_SIZE, nl.Uint32Attr(base.GSOMaxSize)) + req.AddData(gsoAttr) + } + + if base.Group > 0 { + groupAttr := nl.NewRtAttr(unix.IFLA_GROUP, nl.Uint32Attr(base.Group)) + req.AddData(groupAttr) + } + if base.Namespace != nil { var attr *nl.RtAttr - switch base.Namespace.(type) { + switch ns := base.Namespace.(type) { case NsPid: - val := nl.Uint32Attr(uint32(base.Namespace.(NsPid))) + val := nl.Uint32Attr(uint32(ns)) attr = nl.NewRtAttr(unix.IFLA_NET_NS_PID, val) case NsFd: - val := nl.Uint32Attr(uint32(base.Namespace.(NsFd))) + val := nl.Uint32Attr(uint32(ns)) attr = nl.NewRtAttr(unix.IFLA_NET_NS_FD, val) } @@ -1029,47 +1252,56 @@ func (h *Handle) linkModify(link Link, flags int) error { } linkInfo := nl.NewRtAttr(unix.IFLA_LINKINFO, nil) - nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_KIND, nl.NonZeroTerminated(link.Type())) + linkInfo.AddRtAttr(nl.IFLA_INFO_KIND, nl.NonZeroTerminated(link.Type())) switch link := link.(type) { case *Vlan: b := make([]byte, 2) native.PutUint16(b, uint16(link.VlanId)) - data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) - nl.NewRtAttrChild(data, nl.IFLA_VLAN_ID, b) + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + data.AddRtAttr(nl.IFLA_VLAN_ID, b) + + if link.VlanProtocol != VLAN_PROTOCOL_UNKNOWN { + data.AddRtAttr(nl.IFLA_VLAN_PROTOCOL, htons(uint16(link.VlanProtocol))) + } case *Veth: - data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) - peer := nl.NewRtAttrChild(data, nl.VETH_INFO_PEER, nil) + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + peer := data.AddRtAttr(nl.VETH_INFO_PEER, nil) nl.NewIfInfomsgChild(peer, unix.AF_UNSPEC) - nl.NewRtAttrChild(peer, unix.IFLA_IFNAME, nl.ZeroTerminated(link.PeerName)) + peer.AddRtAttr(unix.IFLA_IFNAME, nl.ZeroTerminated(link.PeerName)) if base.TxQLen >= 0 { - nl.NewRtAttrChild(peer, unix.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen))) + peer.AddRtAttr(unix.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen))) } if base.MTU > 0 { - nl.NewRtAttrChild(peer, unix.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU))) + peer.AddRtAttr(unix.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU))) + } + if link.PeerHardwareAddr != nil { + peer.AddRtAttr(unix.IFLA_ADDRESS, []byte(link.PeerHardwareAddr)) } - case *Vxlan: addVxlanAttrs(link, linkInfo) case *Bond: addBondAttrs(link, linkInfo) case *IPVlan: - data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) - nl.NewRtAttrChild(data, nl.IFLA_IPVLAN_MODE, nl.Uint16Attr(uint16(link.Mode))) + data := 
linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + data.AddRtAttr(nl.IFLA_IPVLAN_MODE, nl.Uint16Attr(uint16(link.Mode))) + data.AddRtAttr(nl.IFLA_IPVLAN_FLAG, nl.Uint16Attr(uint16(link.Flag))) case *Macvlan: if link.Mode != MACVLAN_MODE_DEFAULT { - data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) - nl.NewRtAttrChild(data, nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[link.Mode])) + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + data.AddRtAttr(nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[link.Mode])) } case *Macvtap: if link.Mode != MACVLAN_MODE_DEFAULT { - data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) - nl.NewRtAttrChild(data, nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[link.Mode])) + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + data.AddRtAttr(nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[link.Mode])) } case *Gretap: addGretapAttrs(link, linkInfo) case *Iptun: addIptunAttrs(link, linkInfo) + case *Ip6tnl: + addIp6tnlAttrs(link, linkInfo) case *Sittun: addSittunAttrs(link, linkInfo) case *Gretun: @@ -1082,6 +1314,10 @@ func (h *Handle) linkModify(link Link, flags int) error { addBridgeAttrs(link, linkInfo) case *GTP: addGTPAttrs(link, linkInfo) + case *Xfrmi: + addXfrmiAttrs(link, linkInfo) + case *IPoIB: + addIPoIBAttrs(link, linkInfo) } req.AddData(linkInfo) @@ -1170,6 +1406,9 @@ func (h *Handle) LinkByName(name string) (Link, error) { msg := nl.NewIfInfomsg(unix.AF_UNSPEC) req.AddData(msg) + attr := nl.NewRtAttr(unix.IFLA_EXT_MASK, nl.Uint32Attr(nl.RTEXT_FILTER_VF)) + req.AddData(attr) + nameData := nl.NewRtAttr(unix.IFLA_IFNAME, nl.ZeroTerminated(name)) req.AddData(nameData) @@ -1202,6 +1441,9 @@ func (h *Handle) LinkByAlias(alias string) (Link, error) { msg := nl.NewIfInfomsg(unix.AF_UNSPEC) req.AddData(msg) + attr := nl.NewRtAttr(unix.IFLA_EXT_MASK, nl.Uint32Attr(nl.RTEXT_FILTER_VF)) + req.AddData(attr) + nameData := nl.NewRtAttr(unix.IFLA_IFALIAS, nl.ZeroTerminated(alias)) req.AddData(nameData) @@ -1228,6 +1470,8 @@ func (h *Handle) LinkByIndex(index int) (Link, error) { msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Index = int32(index) req.AddData(msg) + attr := nl.NewRtAttr(unix.IFLA_EXT_MASK, nl.Uint32Attr(nl.RTEXT_FILTER_VF)) + req.AddData(attr) return execGetLink(req) } @@ -1270,10 +1514,12 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { base.Promisc = 1 } var ( - link Link - stats32 []byte - stats64 []byte - linkType string + link Link + stats32 *LinkStatistics32 + stats64 *LinkStatistics64 + linkType string + linkSlave LinkSlave + slaveType string ) for _, attr := range attrs { switch attr.Attr.Type { @@ -1313,18 +1559,26 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { link = &Gretap{} case "ipip": link = &Iptun{} + case "ip6tnl": + link = &Ip6tnl{} case "sit": link = &Sittun{} case "gre": link = &Gretun{} case "ip6gre": link = &Gretun{} - case "vti": + case "vti", "vti6": link = &Vti{} case "vrf": link = &Vrf{} case "gtp": link = >P{} + case "xfrm": + link = &Xfrmi{} + case "tun": + link = &Tuntap{} + case "ipoib": + link = &IPoIB{} default: link = &GenericLink{LinkType: linkType} } @@ -1352,13 +1606,15 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { parseGretapData(link, data) case "ipip": parseIptunData(link, data) + case "ip6tnl": + parseIp6tnlData(link, data) case "sit": parseSittunData(link, data) case "gre": parseGretunData(link, data) case "ip6gre": parseGretunData(link, data) - case "vti": + case "vti", "vti6": parseVtiData(link, data) case "vrf": 
parseVrfData(link, data) @@ -1366,6 +1622,27 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { parseBridgeData(link, data) case "gtp": parseGTPData(link, data) + case "xfrm": + parseXfrmiData(link, data) + case "tun": + parseTuntapData(link, data) + case "ipoib": + parseIPoIBData(link, data) + } + case nl.IFLA_INFO_SLAVE_KIND: + slaveType = string(info.Value[:len(info.Value)-1]) + switch slaveType { + case "bond": + linkSlave = &BondSlave{} + } + case nl.IFLA_INFO_SLAVE_DATA: + switch slaveType { + case "bond": + data, err := nl.ParseRouteAttr(info.Value) + if err != nil { + return nil, err + } + parseBondSlaveData(linkSlave, data) } } } @@ -1392,9 +1669,15 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { case unix.IFLA_IFALIAS: base.Alias = string(attr.Value[:len(attr.Value)-1]) case unix.IFLA_STATS: - stats32 = attr.Value[:] + stats32 = new(LinkStatistics32) + if err := binary.Read(bytes.NewBuffer(attr.Value[:]), nl.NativeEndian(), stats32); err != nil { + return nil, err + } case unix.IFLA_STATS64: - stats64 = attr.Value[:] + stats64 = new(LinkStatistics64) + if err := binary.Read(bytes.NewBuffer(attr.Value[:]), nl.NativeEndian(), stats64); err != nil { + return nil, err + } case unix.IFLA_XDP: xdp, err := parseLinkXdp(attr.Value[:]) if err != nil { @@ -1408,19 +1691,40 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { if err != nil { return nil, err } - base.Protinfo = parseProtinfo(attrs) + protinfo := parseProtinfo(attrs) + base.Protinfo = &protinfo } case unix.IFLA_OPERSTATE: base.OperState = LinkOperState(uint8(attr.Value[0])) case unix.IFLA_LINK_NETNSID: base.NetNsID = int(native.Uint32(attr.Value[0:4])) + case unix.IFLA_GSO_MAX_SIZE: + base.GSOMaxSize = native.Uint32(attr.Value[0:4]) + case unix.IFLA_GSO_MAX_SEGS: + base.GSOMaxSegs = native.Uint32(attr.Value[0:4]) + case unix.IFLA_VFINFO_LIST: + data, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return nil, err + } + vfs, err := parseVfInfoList(data) + if err != nil { + return nil, err + } + base.Vfs = vfs + case unix.IFLA_NUM_TX_QUEUES: + base.NumTxQueues = int(native.Uint32(attr.Value[0:4])) + case unix.IFLA_NUM_RX_QUEUES: + base.NumRxQueues = int(native.Uint32(attr.Value[0:4])) + case unix.IFLA_GROUP: + base.Group = native.Uint32(attr.Value[0:4]) } } if stats64 != nil { - base.Statistics = parseLinkStats64(stats64) + base.Statistics = (*LinkStatistics)(stats64) } else if stats32 != nil { - base.Statistics = parseLinkStats32(stats32) + base.Statistics = (*LinkStatistics)(stats32.to64()) } // Links that don't have IFLA_INFO_KIND are hardware devices @@ -1428,10 +1732,59 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { link = &Device{} } *link.Attrs() = base + link.Attrs().Slave = linkSlave + + // If the tuntap attributes are not updated by netlink due to + // an older driver, use sysfs + if link != nil && linkType == "tun" { + tuntap := link.(*Tuntap) + + if tuntap.Mode == 0 { + ifname := tuntap.Attrs().Name + if flags, err := readSysPropAsInt64(ifname, "tun_flags"); err == nil { + + if flags&unix.IFF_TUN != 0 { + tuntap.Mode = unix.IFF_TUN + } else if flags&unix.IFF_TAP != 0 { + tuntap.Mode = unix.IFF_TAP + } + + tuntap.NonPersist = false + if flags&unix.IFF_PERSIST == 0 { + tuntap.NonPersist = true + } + } + + // The sysfs interface for owner/group returns -1 for root user, instead of returning 0. + // So explicitly check for negative value, before assigning the owner uid/gid. 
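
With IFLA_INFO_SLAVE_KIND and IFLA_INFO_SLAVE_DATA parsed above, a bond
member now exposes its slave state through the new Attrs().Slave field as a
*BondSlave. A read-side sketch (the member name is a placeholder):

package main

import (
	"fmt"

	"github.com/vishvananda/netlink"
)

func main() {
	link, err := netlink.LinkByName("eth0") // placeholder bond member
	if err != nil {
		panic(err)
	}
	// Slave is nil for links that are not enslaved.
	if slave, ok := link.Attrs().Slave.(*netlink.BondSlave); ok {
		fmt.Printf("state=%s mii=%s failures=%d\n",
			slave.State, slave.MiiStatus, slave.LinkFailureCount)
	}
}
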
+ if owner, err := readSysPropAsInt64(ifname, "owner"); err == nil && owner > 0 { + tuntap.Owner = uint32(owner) + } + + if group, err := readSysPropAsInt64(ifname, "group"); err == nil && group > 0 { + tuntap.Group = uint32(group) + } + } + } return link, nil } +func readSysPropAsInt64(ifname, prop string) (int64, error) { + fname := fmt.Sprintf("/sys/class/net/%s/%s", ifname, prop) + contents, err := ioutil.ReadFile(fname) + if err != nil { + return 0, err + } + + num, err := strconv.ParseInt(strings.TrimSpace(string(contents)), 0, 64) + if err == nil { + return num, nil + } + + return 0, err +} + // LinkList gets a list of link devices. // Equivalent to: `ip link show` func LinkList() ([]Link, error) { @@ -1447,6 +1800,8 @@ func (h *Handle) LinkList() ([]Link, error) { msg := nl.NewIfInfomsg(unix.AF_UNSPEC) req.AddData(msg) + attr := nl.NewRtAttr(unix.IFLA_EXT_MASK, nl.Uint32Attr(nl.RTEXT_FILTER_VF)) + req.AddData(attr) msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWLINK) if err != nil { @@ -1526,13 +1881,19 @@ func linkSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-c go func() { defer close(ch) for { - msgs, err := s.Receive() + msgs, from, err := s.Receive() if err != nil { if cberr != nil { cberr(err) } return } + if from.Pid != nl.PidKernel { + if cberr != nil { + cberr(fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel)) + } + continue + } for _, m := range msgs { if m.Header.Type == unix.NLMSG_DONE { continue @@ -1639,7 +2000,7 @@ func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error { req.AddData(msg) br := nl.NewRtAttr(unix.IFLA_PROTINFO|unix.NLA_F_NESTED, nil) - nl.NewRtAttrChild(br, attr, boolToByte(mode)) + br.AddRtAttr(attr, boolToByte(mode)) req.AddData(br) _, err := req.Execute(unix.NETLINK_ROUTE, 0) if err != nil { @@ -1675,12 +2036,43 @@ func (h *Handle) LinkSetTxQLen(link Link, qlen int) error { return err } +// LinkSetGroup sets the link group id which can be used to perform mass actions +// with iproute2 as well use it as a reference in nft filters. +// Equivalent to: `ip link set $link group $id` +func LinkSetGroup(link Link, group int) error { + return pkgHandle.LinkSetGroup(link, group) +} + +// LinkSetGroup sets the link group id which can be used to perform mass actions +// with iproute2 as well use it as a reference in nft filters. 
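
A usage sketch for LinkSetGroup, defined just below; the group id then works
with iproute2 mass actions such as `ip link set group 7 down` (interface name
and id are placeholders):

package main

import "github.com/vishvananda/netlink"

func main() {
	link, err := netlink.LinkByName("eth0") // placeholder interface
	if err != nil {
		panic(err)
	}
	// Equivalent to `ip link set eth0 group 7`.
	if err := netlink.LinkSetGroup(link, 7); err != nil {
		panic(err)
	}
}
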
+// Equivalent to: `ip link set $link group $id` +func (h *Handle) LinkSetGroup(link Link, group int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + b := make([]byte, 4) + native.PutUint32(b, uint32(group)) + + data := nl.NewRtAttr(unix.IFLA_GROUP, b) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + func parseVlanData(link Link, data []syscall.NetlinkRouteAttr) { vlan := link.(*Vlan) for _, datum := range data { switch datum.Attr.Type { case nl.IFLA_VLAN_ID: vlan.VlanId = int(native.Uint16(datum.Value[0:2])) + case nl.IFLA_VLAN_PROTOCOL: + vlan.VlanProtocol = VlanProtocol(int(ntohs(datum.Value[0:2]))) } } } @@ -1762,7 +2154,7 @@ func parseBondData(link Link, data []syscall.NetlinkRouteAttr) { case nl.IFLA_BOND_ARP_INTERVAL: bond.ArpInterval = int(native.Uint32(data[i].Value[0:4])) case nl.IFLA_BOND_ARP_IP_TARGET: - // TODO: implement + bond.ArpIpTargets = parseBondArpIpTargets(data[i].Value) case nl.IFLA_BOND_ARP_VALIDATE: bond.ArpValidate = BondArpValidate(native.Uint32(data[i].Value[0:4])) case nl.IFLA_BOND_ARP_ALL_TARGETS: @@ -1805,12 +2197,75 @@ func parseBondData(link Link, data []syscall.NetlinkRouteAttr) { } } +func parseBondArpIpTargets(value []byte) []net.IP { + data, err := nl.ParseRouteAttr(value) + if err != nil { + return nil + } + + targets := []net.IP{} + for i := range data { + target := net.IP(data[i].Value) + if ip := target.To4(); ip != nil { + targets = append(targets, ip) + continue + } + if ip := target.To16(); ip != nil { + targets = append(targets, ip) + } + } + + return targets +} + +func addBondSlaveAttrs(bondSlave *BondSlave, linkInfo *nl.RtAttr) { + data := linkInfo.AddRtAttr(nl.IFLA_INFO_SLAVE_DATA, nil) + + data.AddRtAttr(nl.IFLA_BOND_SLAVE_STATE, nl.Uint8Attr(uint8(bondSlave.State))) + data.AddRtAttr(nl.IFLA_BOND_SLAVE_MII_STATUS, nl.Uint8Attr(uint8(bondSlave.MiiStatus))) + data.AddRtAttr(nl.IFLA_BOND_SLAVE_LINK_FAILURE_COUNT, nl.Uint32Attr(bondSlave.LinkFailureCount)) + data.AddRtAttr(nl.IFLA_BOND_SLAVE_QUEUE_ID, nl.Uint16Attr(bondSlave.QueueId)) + data.AddRtAttr(nl.IFLA_BOND_SLAVE_AD_AGGREGATOR_ID, nl.Uint16Attr(bondSlave.AggregatorId)) + data.AddRtAttr(nl.IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE, nl.Uint8Attr(bondSlave.AdActorOperPortState)) + data.AddRtAttr(nl.IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE, nl.Uint16Attr(bondSlave.AdPartnerOperPortState)) + + if mac := bondSlave.PermHardwareAddr; mac != nil { + data.AddRtAttr(nl.IFLA_BOND_SLAVE_PERM_HWADDR, []byte(mac)) + } +} + +func parseBondSlaveData(slave LinkSlave, data []syscall.NetlinkRouteAttr) { + bondSlave := slave.(*BondSlave) + for i := range data { + switch data[i].Attr.Type { + case nl.IFLA_BOND_SLAVE_STATE: + bondSlave.State = BondSlaveState(data[i].Value[0]) + case nl.IFLA_BOND_SLAVE_MII_STATUS: + bondSlave.MiiStatus = BondSlaveMiiStatus(data[i].Value[0]) + case nl.IFLA_BOND_SLAVE_LINK_FAILURE_COUNT: + bondSlave.LinkFailureCount = native.Uint32(data[i].Value[0:4]) + case nl.IFLA_BOND_SLAVE_PERM_HWADDR: + bondSlave.PermHardwareAddr = net.HardwareAddr(data[i].Value[0:6]) + case nl.IFLA_BOND_SLAVE_QUEUE_ID: + bondSlave.QueueId = native.Uint16(data[i].Value[0:2]) + case nl.IFLA_BOND_SLAVE_AD_AGGREGATOR_ID: + bondSlave.AggregatorId = native.Uint16(data[i].Value[0:2]) + case nl.IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE: + bondSlave.AdActorOperPortState = uint8(data[i].Value[0]) + case 
nl.IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE: + bondSlave.AdPartnerOperPortState = native.Uint16(data[i].Value[0:2]) + } + } +} + func parseIPVlanData(link Link, data []syscall.NetlinkRouteAttr) { ipv := link.(*IPVlan) for _, datum := range data { - if datum.Attr.Type == nl.IFLA_IPVLAN_MODE { + switch datum.Attr.Type { + case nl.IFLA_IPVLAN_MODE: ipv.Mode = IPVlanMode(native.Uint32(datum.Value[0:4])) - return + case nl.IFLA_IPVLAN_FLAG: + ipv.Flag = IPVlanFlag(native.Uint32(datum.Value[0:4])) } } } @@ -1873,11 +2328,11 @@ func linkFlags(rawFlags uint32) net.Flags { } func addGretapAttrs(gretap *Gretap, linkInfo *nl.RtAttr) { - data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) if gretap.FlowBased { // In flow based mode, no other attributes need to be configured - nl.NewRtAttrChild(data, nl.IFLA_GRE_COLLECT_METADATA, boolAttr(gretap.FlowBased)) + data.AddRtAttr(nl.IFLA_GRE_COLLECT_METADATA, boolAttr(gretap.FlowBased)) return } @@ -1885,40 +2340,40 @@ func addGretapAttrs(gretap *Gretap, linkInfo *nl.RtAttr) { if ip.To4() != nil { ip = ip.To4() } - nl.NewRtAttrChild(data, nl.IFLA_GRE_LOCAL, []byte(ip)) + data.AddRtAttr(nl.IFLA_GRE_LOCAL, []byte(ip)) } if ip := gretap.Remote; ip != nil { if ip.To4() != nil { ip = ip.To4() } - nl.NewRtAttrChild(data, nl.IFLA_GRE_REMOTE, []byte(ip)) + data.AddRtAttr(nl.IFLA_GRE_REMOTE, []byte(ip)) } if gretap.IKey != 0 { - nl.NewRtAttrChild(data, nl.IFLA_GRE_IKEY, htonl(gretap.IKey)) + data.AddRtAttr(nl.IFLA_GRE_IKEY, htonl(gretap.IKey)) gretap.IFlags |= uint16(nl.GRE_KEY) } if gretap.OKey != 0 { - nl.NewRtAttrChild(data, nl.IFLA_GRE_OKEY, htonl(gretap.OKey)) + data.AddRtAttr(nl.IFLA_GRE_OKEY, htonl(gretap.OKey)) gretap.OFlags |= uint16(nl.GRE_KEY) } - nl.NewRtAttrChild(data, nl.IFLA_GRE_IFLAGS, htons(gretap.IFlags)) - nl.NewRtAttrChild(data, nl.IFLA_GRE_OFLAGS, htons(gretap.OFlags)) + data.AddRtAttr(nl.IFLA_GRE_IFLAGS, htons(gretap.IFlags)) + data.AddRtAttr(nl.IFLA_GRE_OFLAGS, htons(gretap.OFlags)) if gretap.Link != 0 { - nl.NewRtAttrChild(data, nl.IFLA_GRE_LINK, nl.Uint32Attr(gretap.Link)) + data.AddRtAttr(nl.IFLA_GRE_LINK, nl.Uint32Attr(gretap.Link)) } - nl.NewRtAttrChild(data, nl.IFLA_GRE_PMTUDISC, nl.Uint8Attr(gretap.PMtuDisc)) - nl.NewRtAttrChild(data, nl.IFLA_GRE_TTL, nl.Uint8Attr(gretap.Ttl)) - nl.NewRtAttrChild(data, nl.IFLA_GRE_TOS, nl.Uint8Attr(gretap.Tos)) - nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_TYPE, nl.Uint16Attr(gretap.EncapType)) - nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_FLAGS, nl.Uint16Attr(gretap.EncapFlags)) - nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_SPORT, htons(gretap.EncapSport)) - nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_DPORT, htons(gretap.EncapDport)) + data.AddRtAttr(nl.IFLA_GRE_PMTUDISC, nl.Uint8Attr(gretap.PMtuDisc)) + data.AddRtAttr(nl.IFLA_GRE_TTL, nl.Uint8Attr(gretap.Ttl)) + data.AddRtAttr(nl.IFLA_GRE_TOS, nl.Uint8Attr(gretap.Tos)) + data.AddRtAttr(nl.IFLA_GRE_ENCAP_TYPE, nl.Uint16Attr(gretap.EncapType)) + data.AddRtAttr(nl.IFLA_GRE_ENCAP_FLAGS, nl.Uint16Attr(gretap.EncapFlags)) + data.AddRtAttr(nl.IFLA_GRE_ENCAP_SPORT, htons(gretap.EncapSport)) + data.AddRtAttr(nl.IFLA_GRE_ENCAP_DPORT, htons(gretap.EncapDport)) } func parseGretapData(link Link, data []syscall.NetlinkRouteAttr) { @@ -1930,9 +2385,9 @@ func parseGretapData(link Link, data []syscall.NetlinkRouteAttr) { case nl.IFLA_GRE_IKEY: gre.OKey = ntohl(datum.Value[0:4]) case nl.IFLA_GRE_LOCAL: - gre.Local = net.IP(datum.Value[0:16]) + gre.Local = net.IP(datum.Value) case nl.IFLA_GRE_REMOTE: - gre.Remote = 
net.IP(datum.Value[0:16]) + gre.Remote = net.IP(datum.Value) case nl.IFLA_GRE_ENCAP_SPORT: gre.EncapSport = ntohs(datum.Value[0:2]) case nl.IFLA_GRE_ENCAP_DPORT: @@ -1941,7 +2396,6 @@ func parseGretapData(link Link, data []syscall.NetlinkRouteAttr) { gre.IFlags = ntohs(datum.Value[0:2]) case nl.IFLA_GRE_OFLAGS: gre.OFlags = ntohs(datum.Value[0:2]) - case nl.IFLA_GRE_TTL: gre.Ttl = uint8(datum.Value[0]) case nl.IFLA_GRE_TOS: @@ -1953,73 +2407,70 @@ func parseGretapData(link Link, data []syscall.NetlinkRouteAttr) { case nl.IFLA_GRE_ENCAP_FLAGS: gre.EncapFlags = native.Uint16(datum.Value[0:2]) case nl.IFLA_GRE_COLLECT_METADATA: - if len(datum.Value) > 0 { - gre.FlowBased = int8(datum.Value[0]) != 0 - } + gre.FlowBased = true } } } func addGretunAttrs(gre *Gretun, linkInfo *nl.RtAttr) { - data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) if ip := gre.Local; ip != nil { if ip.To4() != nil { ip = ip.To4() } - nl.NewRtAttrChild(data, nl.IFLA_GRE_LOCAL, []byte(ip)) + data.AddRtAttr(nl.IFLA_GRE_LOCAL, []byte(ip)) } if ip := gre.Remote; ip != nil { if ip.To4() != nil { ip = ip.To4() } - nl.NewRtAttrChild(data, nl.IFLA_GRE_REMOTE, []byte(ip)) + data.AddRtAttr(nl.IFLA_GRE_REMOTE, []byte(ip)) } if gre.IKey != 0 { - nl.NewRtAttrChild(data, nl.IFLA_GRE_IKEY, htonl(gre.IKey)) + data.AddRtAttr(nl.IFLA_GRE_IKEY, htonl(gre.IKey)) gre.IFlags |= uint16(nl.GRE_KEY) } if gre.OKey != 0 { - nl.NewRtAttrChild(data, nl.IFLA_GRE_OKEY, htonl(gre.OKey)) + data.AddRtAttr(nl.IFLA_GRE_OKEY, htonl(gre.OKey)) gre.OFlags |= uint16(nl.GRE_KEY) } - nl.NewRtAttrChild(data, nl.IFLA_GRE_IFLAGS, htons(gre.IFlags)) - nl.NewRtAttrChild(data, nl.IFLA_GRE_OFLAGS, htons(gre.OFlags)) + data.AddRtAttr(nl.IFLA_GRE_IFLAGS, htons(gre.IFlags)) + data.AddRtAttr(nl.IFLA_GRE_OFLAGS, htons(gre.OFlags)) if gre.Link != 0 { - nl.NewRtAttrChild(data, nl.IFLA_GRE_LINK, nl.Uint32Attr(gre.Link)) + data.AddRtAttr(nl.IFLA_GRE_LINK, nl.Uint32Attr(gre.Link)) } - nl.NewRtAttrChild(data, nl.IFLA_GRE_PMTUDISC, nl.Uint8Attr(gre.PMtuDisc)) - nl.NewRtAttrChild(data, nl.IFLA_GRE_TTL, nl.Uint8Attr(gre.Ttl)) - nl.NewRtAttrChild(data, nl.IFLA_GRE_TOS, nl.Uint8Attr(gre.Tos)) - nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_TYPE, nl.Uint16Attr(gre.EncapType)) - nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_FLAGS, nl.Uint16Attr(gre.EncapFlags)) - nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_SPORT, htons(gre.EncapSport)) - nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_DPORT, htons(gre.EncapDport)) + data.AddRtAttr(nl.IFLA_GRE_PMTUDISC, nl.Uint8Attr(gre.PMtuDisc)) + data.AddRtAttr(nl.IFLA_GRE_TTL, nl.Uint8Attr(gre.Ttl)) + data.AddRtAttr(nl.IFLA_GRE_TOS, nl.Uint8Attr(gre.Tos)) + data.AddRtAttr(nl.IFLA_GRE_ENCAP_TYPE, nl.Uint16Attr(gre.EncapType)) + data.AddRtAttr(nl.IFLA_GRE_ENCAP_FLAGS, nl.Uint16Attr(gre.EncapFlags)) + data.AddRtAttr(nl.IFLA_GRE_ENCAP_SPORT, htons(gre.EncapSport)) + data.AddRtAttr(nl.IFLA_GRE_ENCAP_DPORT, htons(gre.EncapDport)) } func parseGretunData(link Link, data []syscall.NetlinkRouteAttr) { gre := link.(*Gretun) for _, datum := range data { switch datum.Attr.Type { - case nl.IFLA_GRE_OKEY: - gre.IKey = ntohl(datum.Value[0:4]) case nl.IFLA_GRE_IKEY: + gre.IKey = ntohl(datum.Value[0:4]) + case nl.IFLA_GRE_OKEY: gre.OKey = ntohl(datum.Value[0:4]) case nl.IFLA_GRE_LOCAL: - gre.Local = net.IP(datum.Value[0:16]) + gre.Local = net.IP(datum.Value) case nl.IFLA_GRE_REMOTE: - gre.Remote = net.IP(datum.Value[0:16]) + gre.Remote = net.IP(datum.Value) case nl.IFLA_GRE_IFLAGS: gre.IFlags = ntohs(datum.Value[0:2]) 
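+		// Note: the kernel reports IFLA_GRE_LOCAL/IFLA_GRE_REMOTE as 4 bytes
+		// for IPv4 and 16 bytes for IPv6, so the cases above keep the whole
+		// attribute value instead of over-slicing to a fixed 16 bytes.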
case nl.IFLA_GRE_OFLAGS: gre.OFlags = ntohs(datum.Value[0:2]) - case nl.IFLA_GRE_TTL: gre.Ttl = uint8(datum.Value[0]) case nl.IFLA_GRE_TOS: @@ -2038,23 +2489,15 @@ func parseGretunData(link Link, data []syscall.NetlinkRouteAttr) { } } -func parseLinkStats32(data []byte) *LinkStatistics { - return (*LinkStatistics)((*LinkStatistics32)(unsafe.Pointer(&data[0:SizeofLinkStats32][0])).to64()) -} - -func parseLinkStats64(data []byte) *LinkStatistics { - return (*LinkStatistics)((*LinkStatistics64)(unsafe.Pointer(&data[0:SizeofLinkStats64][0]))) -} - func addXdpAttrs(xdp *LinkXdp, req *nl.NetlinkRequest) { attrs := nl.NewRtAttr(unix.IFLA_XDP|unix.NLA_F_NESTED, nil) b := make([]byte, 4) native.PutUint32(b, uint32(xdp.Fd)) - nl.NewRtAttrChild(attrs, nl.IFLA_XDP_FD, b) + attrs.AddRtAttr(nl.IFLA_XDP_FD, b) if xdp.Flags != 0 { b := make([]byte, 4) native.PutUint32(b, xdp.Flags) - nl.NewRtAttrChild(attrs, nl.IFLA_XDP_FLAGS, b) + attrs.AddRtAttr(nl.IFLA_XDP_FLAGS, b) } req.AddData(attrs) } @@ -2083,32 +2526,32 @@ func parseLinkXdp(data []byte) (*LinkXdp, error) { func addIptunAttrs(iptun *Iptun, linkInfo *nl.RtAttr) { if iptun.FlowBased { // In flow based mode, no other attributes need to be configured - nl.NewRtAttrChild(linkInfo, nl.IFLA_IPTUN_COLLECT_METADATA, boolAttr(iptun.FlowBased)) + linkInfo.AddRtAttr(nl.IFLA_IPTUN_COLLECT_METADATA, boolAttr(iptun.FlowBased)) return } - data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) ip := iptun.Local.To4() if ip != nil { - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_LOCAL, []byte(ip)) + data.AddRtAttr(nl.IFLA_IPTUN_LOCAL, []byte(ip)) } ip = iptun.Remote.To4() if ip != nil { - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_REMOTE, []byte(ip)) + data.AddRtAttr(nl.IFLA_IPTUN_REMOTE, []byte(ip)) } if iptun.Link != 0 { - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_LINK, nl.Uint32Attr(iptun.Link)) + data.AddRtAttr(nl.IFLA_IPTUN_LINK, nl.Uint32Attr(iptun.Link)) } - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_PMTUDISC, nl.Uint8Attr(iptun.PMtuDisc)) - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_TTL, nl.Uint8Attr(iptun.Ttl)) - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_TOS, nl.Uint8Attr(iptun.Tos)) - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_TYPE, nl.Uint16Attr(iptun.EncapType)) - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_FLAGS, nl.Uint16Attr(iptun.EncapFlags)) - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_SPORT, htons(iptun.EncapSport)) - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_DPORT, htons(iptun.EncapDport)) + data.AddRtAttr(nl.IFLA_IPTUN_PMTUDISC, nl.Uint8Attr(iptun.PMtuDisc)) + data.AddRtAttr(nl.IFLA_IPTUN_TTL, nl.Uint8Attr(iptun.Ttl)) + data.AddRtAttr(nl.IFLA_IPTUN_TOS, nl.Uint8Attr(iptun.Tos)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_TYPE, nl.Uint16Attr(iptun.EncapType)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_FLAGS, nl.Uint16Attr(iptun.EncapFlags)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_SPORT, htons(iptun.EncapSport)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_DPORT, htons(iptun.EncapDport)) } func parseIptunData(link Link, data []syscall.NetlinkRouteAttr) { @@ -2139,34 +2582,83 @@ func parseIptunData(link Link, data []syscall.NetlinkRouteAttr) { } } +func addIp6tnlAttrs(ip6tnl *Ip6tnl, linkInfo *nl.RtAttr) { + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + + if ip6tnl.Link != 0 { + data.AddRtAttr(nl.IFLA_IPTUN_LINK, nl.Uint32Attr(ip6tnl.Link)) + } + + ip := ip6tnl.Local.To16() + if ip != nil { + data.AddRtAttr(nl.IFLA_IPTUN_LOCAL, []byte(ip)) + } + + ip = ip6tnl.Remote.To16() + if ip != nil { + data.AddRtAttr(nl.IFLA_IPTUN_REMOTE, 
[]byte(ip)) + } + + data.AddRtAttr(nl.IFLA_IPTUN_TTL, nl.Uint8Attr(ip6tnl.Ttl)) + data.AddRtAttr(nl.IFLA_IPTUN_TOS, nl.Uint8Attr(ip6tnl.Tos)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_LIMIT, nl.Uint8Attr(ip6tnl.EncapLimit)) + data.AddRtAttr(nl.IFLA_IPTUN_FLAGS, nl.Uint32Attr(ip6tnl.Flags)) + data.AddRtAttr(nl.IFLA_IPTUN_PROTO, nl.Uint8Attr(ip6tnl.Proto)) + data.AddRtAttr(nl.IFLA_IPTUN_FLOWINFO, nl.Uint32Attr(ip6tnl.FlowInfo)) +} + +func parseIp6tnlData(link Link, data []syscall.NetlinkRouteAttr) { + ip6tnl := link.(*Ip6tnl) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_IPTUN_LOCAL: + ip6tnl.Local = net.IP(datum.Value[:16]) + case nl.IFLA_IPTUN_REMOTE: + ip6tnl.Remote = net.IP(datum.Value[:16]) + case nl.IFLA_IPTUN_TTL: + ip6tnl.Ttl = uint8(datum.Value[0]) + case nl.IFLA_IPTUN_TOS: + ip6tnl.Tos = uint8(datum.Value[0]) + case nl.IFLA_IPTUN_ENCAP_LIMIT: + ip6tnl.EncapLimit = uint8(datum.Value[0]) + case nl.IFLA_IPTUN_FLAGS: + ip6tnl.Flags = native.Uint32(datum.Value[:4]) + case nl.IFLA_IPTUN_PROTO: + ip6tnl.Proto = uint8(datum.Value[0]) + case nl.IFLA_IPTUN_FLOWINFO: + ip6tnl.FlowInfo = native.Uint32(datum.Value[:4]) + } + } +} + func addSittunAttrs(sittun *Sittun, linkInfo *nl.RtAttr) { - data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) if sittun.Link != 0 { - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_LINK, nl.Uint32Attr(sittun.Link)) + data.AddRtAttr(nl.IFLA_IPTUN_LINK, nl.Uint32Attr(sittun.Link)) } ip := sittun.Local.To4() if ip != nil { - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_LOCAL, []byte(ip)) + data.AddRtAttr(nl.IFLA_IPTUN_LOCAL, []byte(ip)) } ip = sittun.Remote.To4() if ip != nil { - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_REMOTE, []byte(ip)) + data.AddRtAttr(nl.IFLA_IPTUN_REMOTE, []byte(ip)) } if sittun.Ttl > 0 { // Would otherwise fail on 3.10 kernel - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_TTL, nl.Uint8Attr(sittun.Ttl)) + data.AddRtAttr(nl.IFLA_IPTUN_TTL, nl.Uint8Attr(sittun.Ttl)) } - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_TOS, nl.Uint8Attr(sittun.Tos)) - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_PMTUDISC, nl.Uint8Attr(sittun.PMtuDisc)) - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_TYPE, nl.Uint16Attr(sittun.EncapType)) - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_FLAGS, nl.Uint16Attr(sittun.EncapFlags)) - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_SPORT, htons(sittun.EncapSport)) - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_DPORT, htons(sittun.EncapDport)) + data.AddRtAttr(nl.IFLA_IPTUN_TOS, nl.Uint8Attr(sittun.Tos)) + data.AddRtAttr(nl.IFLA_IPTUN_PMTUDISC, nl.Uint8Attr(sittun.PMtuDisc)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_TYPE, nl.Uint16Attr(sittun.EncapType)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_FLAGS, nl.Uint16Attr(sittun.EncapFlags)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_SPORT, htons(sittun.EncapSport)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_DPORT, htons(sittun.EncapDport)) } func parseSittunData(link Link, data []syscall.NetlinkRouteAttr) { @@ -2196,24 +2688,39 @@ func parseSittunData(link Link, data []syscall.NetlinkRouteAttr) { } func addVtiAttrs(vti *Vti, linkInfo *nl.RtAttr) { - data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + + family := FAMILY_V4 + if vti.Local.To4() == nil { + family = FAMILY_V6 + } - ip := vti.Local.To4() + var ip net.IP + + if family == FAMILY_V4 { + ip = vti.Local.To4() + } else { + ip = vti.Local + } if ip != nil { - nl.NewRtAttrChild(data, nl.IFLA_VTI_LOCAL, []byte(ip)) + data.AddRtAttr(nl.IFLA_VTI_LOCAL, 
[]byte(ip)) } - ip = vti.Remote.To4() + if family == FAMILY_V4 { + ip = vti.Remote.To4() + } else { + ip = vti.Remote + } if ip != nil { - nl.NewRtAttrChild(data, nl.IFLA_VTI_REMOTE, []byte(ip)) + data.AddRtAttr(nl.IFLA_VTI_REMOTE, []byte(ip)) } if vti.Link != 0 { - nl.NewRtAttrChild(data, nl.IFLA_VTI_LINK, nl.Uint32Attr(vti.Link)) + data.AddRtAttr(nl.IFLA_VTI_LINK, nl.Uint32Attr(vti.Link)) } - nl.NewRtAttrChild(data, nl.IFLA_VTI_IKEY, htonl(vti.IKey)) - nl.NewRtAttrChild(data, nl.IFLA_VTI_OKEY, htonl(vti.OKey)) + data.AddRtAttr(nl.IFLA_VTI_IKEY, htonl(vti.IKey)) + data.AddRtAttr(nl.IFLA_VTI_OKEY, htonl(vti.OKey)) } func parseVtiData(link Link, data []syscall.NetlinkRouteAttr) { @@ -2221,9 +2728,9 @@ func parseVtiData(link Link, data []syscall.NetlinkRouteAttr) { for _, datum := range data { switch datum.Attr.Type { case nl.IFLA_VTI_LOCAL: - vti.Local = net.IP(datum.Value[0:4]) + vti.Local = net.IP(datum.Value) case nl.IFLA_VTI_REMOTE: - vti.Remote = net.IP(datum.Value[0:4]) + vti.Remote = net.IP(datum.Value) case nl.IFLA_VTI_IKEY: vti.IKey = ntohl(datum.Value[0:4]) case nl.IFLA_VTI_OKEY: @@ -2233,10 +2740,10 @@ func parseVtiData(link Link, data []syscall.NetlinkRouteAttr) { } func addVrfAttrs(vrf *Vrf, linkInfo *nl.RtAttr) { - data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) b := make([]byte, 4) native.PutUint32(b, uint32(vrf.Table)) - nl.NewRtAttrChild(data, nl.IFLA_VRF_TABLE, b) + data.AddRtAttr(nl.IFLA_VRF_TABLE, b) } func parseVrfData(link Link, data []syscall.NetlinkRouteAttr) { @@ -2250,12 +2757,15 @@ func parseVrfData(link Link, data []syscall.NetlinkRouteAttr) { } func addBridgeAttrs(bridge *Bridge, linkInfo *nl.RtAttr) { - data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) if bridge.MulticastSnooping != nil { - nl.NewRtAttrChild(data, nl.IFLA_BR_MCAST_SNOOPING, boolToByte(*bridge.MulticastSnooping)) + data.AddRtAttr(nl.IFLA_BR_MCAST_SNOOPING, boolToByte(*bridge.MulticastSnooping)) } if bridge.HelloTime != nil { - nl.NewRtAttrChild(data, nl.IFLA_BR_HELLO_TIME, nl.Uint32Attr(*bridge.HelloTime)) + data.AddRtAttr(nl.IFLA_BR_HELLO_TIME, nl.Uint32Attr(*bridge.HelloTime)) + } + if bridge.VlanFiltering != nil { + data.AddRtAttr(nl.IFLA_BR_VLAN_FILTERING, boolToByte(*bridge.VlanFiltering)) } } @@ -2269,17 +2779,20 @@ func parseBridgeData(bridge Link, data []syscall.NetlinkRouteAttr) { case nl.IFLA_BR_MCAST_SNOOPING: mcastSnooping := datum.Value[0] == 1 br.MulticastSnooping = &mcastSnooping + case nl.IFLA_BR_VLAN_FILTERING: + vlanFiltering := datum.Value[0] == 1 + br.VlanFiltering = &vlanFiltering } } } func addGTPAttrs(gtp *GTP, linkInfo *nl.RtAttr) { - data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) - nl.NewRtAttrChild(data, nl.IFLA_GTP_FD0, nl.Uint32Attr(uint32(gtp.FD0))) - nl.NewRtAttrChild(data, nl.IFLA_GTP_FD1, nl.Uint32Attr(uint32(gtp.FD1))) - nl.NewRtAttrChild(data, nl.IFLA_GTP_PDP_HASHSIZE, nl.Uint32Attr(131072)) + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + data.AddRtAttr(nl.IFLA_GTP_FD0, nl.Uint32Attr(uint32(gtp.FD0))) + data.AddRtAttr(nl.IFLA_GTP_FD1, nl.Uint32Attr(uint32(gtp.FD1))) + data.AddRtAttr(nl.IFLA_GTP_PDP_HASHSIZE, nl.Uint32Attr(131072)) if gtp.Role != nl.GTP_ROLE_GGSN { - nl.NewRtAttrChild(data, nl.IFLA_GTP_ROLE, nl.Uint32Attr(uint32(gtp.Role))) + data.AddRtAttr(nl.IFLA_GTP_ROLE, nl.Uint32Attr(uint32(gtp.Role))) } } @@ -2299,6 +2812,70 @@ func parseGTPData(link Link, data []syscall.NetlinkRouteAttr) { } } +func 
parseVfInfoList(data []syscall.NetlinkRouteAttr) ([]VfInfo, error) { + var vfs []VfInfo + + for i, element := range data { + if element.Attr.Type != nl.IFLA_VF_INFO { + return nil, fmt.Errorf("Incorrect element type in vf info list: %d", element.Attr.Type) + } + vfAttrs, err := nl.ParseRouteAttr(element.Value) + if err != nil { + return nil, err + } + vfs = append(vfs, parseVfInfo(vfAttrs, i)) + } + return vfs, nil +} + +func parseVfInfo(data []syscall.NetlinkRouteAttr, id int) VfInfo { + vf := VfInfo{ID: id} + for _, element := range data { + switch element.Attr.Type { + case nl.IFLA_VF_MAC: + mac := nl.DeserializeVfMac(element.Value[:]) + vf.Mac = mac.Mac[:6] + case nl.IFLA_VF_VLAN: + vl := nl.DeserializeVfVlan(element.Value[:]) + vf.Vlan = int(vl.Vlan) + vf.Qos = int(vl.Qos) + case nl.IFLA_VF_TX_RATE: + txr := nl.DeserializeVfTxRate(element.Value[:]) + vf.TxRate = int(txr.Rate) + case nl.IFLA_VF_SPOOFCHK: + sp := nl.DeserializeVfSpoofchk(element.Value[:]) + vf.Spoofchk = sp.Setting != 0 + case nl.IFLA_VF_LINK_STATE: + ls := nl.DeserializeVfLinkState(element.Value[:]) + vf.LinkState = ls.LinkState + case nl.IFLA_VF_RATE: + vfr := nl.DeserializeVfRate(element.Value[:]) + vf.MaxTxRate = vfr.MaxTxRate + vf.MinTxRate = vfr.MinTxRate + } + } + return vf +} + +func addXfrmiAttrs(xfrmi *Xfrmi, linkInfo *nl.RtAttr) { + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + data.AddRtAttr(nl.IFLA_XFRM_LINK, nl.Uint32Attr(uint32(xfrmi.ParentIndex))) + data.AddRtAttr(nl.IFLA_XFRM_IF_ID, nl.Uint32Attr(xfrmi.Ifid)) + +} + +func parseXfrmiData(link Link, data []syscall.NetlinkRouteAttr) { + xfrmi := link.(*Xfrmi) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_XFRM_LINK: + xfrmi.ParentIndex = int(native.Uint32(datum.Value)) + case nl.IFLA_XFRM_IF_ID: + xfrmi.Ifid = native.Uint32(datum.Value) + } + } +} + // LinkSetBondSlave add slave to bond link via ioctl interface. func LinkSetBondSlave(link Link, master *Bond) error { fd, err := getSocketUDP() @@ -2316,6 +2893,52 @@ func LinkSetBondSlave(link Link, master *Bond) error { return nil } +// LinkSetBondSlaveQueueId modify bond slave queue-id. +func (h *Handle) LinkSetBondSlaveQueueId(link Link, queueId uint16) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + linkInfo := nl.NewRtAttr(unix.IFLA_LINKINFO, nil) + data := linkInfo.AddRtAttr(nl.IFLA_INFO_SLAVE_DATA, nil) + data.AddRtAttr(nl.IFLA_BOND_SLAVE_QUEUE_ID, nl.Uint16Attr(queueId)) + + req.AddData(linkInfo) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetBondSlaveQueueId modify bond slave queue-id. +func LinkSetBondSlaveQueueId(link Link, queueId uint16) error { + return pkgHandle.LinkSetBondSlaveQueueId(link, queueId) +} + +func vethStatsSerialize(stats ethtoolStats) ([]byte, error) { + statsSize := int(unsafe.Sizeof(stats)) + int(stats.nStats)*int(unsafe.Sizeof(uint64(0))) + b := make([]byte, 0, statsSize) + buf := bytes.NewBuffer(b) + err := binary.Write(buf, nl.NativeEndian(), stats) + return buf.Bytes()[:statsSize], err +} + +type vethEthtoolStats struct { + Cmd uint32 + NStats uint32 + Peer uint64 + // Newer kernels have XDP stats in here, but we only care + // to extract the peer ifindex here. 
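+	// Field order mirrors struct ethtool_stats in linux/ethtool.h: cmd,
+	// n_stats, then n_stats 64-bit counters; veth reports the peer ifindex
+	// in the first counter, which Peer captures.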
+} + +func vethStatsDeserialize(b []byte) (vethEthtoolStats, error) { + var stats = vethEthtoolStats{} + err := binary.Read(bytes.NewReader(b), nl.NativeEndian(), &stats) + return stats, err +} + // VethPeerIndex get veth peer index. func VethPeerIndex(link *Veth) (int, error) { fd, err := getSocketUDP() @@ -2330,25 +2953,66 @@ func VethPeerIndex(link *Veth) (int, error) { return -1, fmt.Errorf("SIOCETHTOOL request for %q failed, errno=%v", link.Attrs().Name, errno) } - gstrings := ðtoolGstrings{ - cmd: ETHTOOL_GSTRINGS, - stringSet: ETH_SS_STATS, - length: sSet.data[0], + stats := ethtoolStats{ + cmd: ETHTOOL_GSTATS, + nStats: sSet.data[0], + } + + buffer, err := vethStatsSerialize(stats) + if err != nil { + return -1, err } - ifreq.Data = uintptr(unsafe.Pointer(gstrings)) + + ifreq.Data = uintptr(unsafe.Pointer(&buffer[0])) _, _, errno = syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), SIOCETHTOOL, uintptr(unsafe.Pointer(ifreq))) if errno != 0 { return -1, fmt.Errorf("SIOCETHTOOL request for %q failed, errno=%v", link.Attrs().Name, errno) } - stats := ðtoolStats{ - cmd: ETHTOOL_GSTATS, - nStats: gstrings.length, + vstats, err := vethStatsDeserialize(buffer) + if err != nil { + return -1, err } - ifreq.Data = uintptr(unsafe.Pointer(stats)) - _, _, errno = syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), SIOCETHTOOL, uintptr(unsafe.Pointer(ifreq))) - if errno != 0 { - return -1, fmt.Errorf("SIOCETHTOOL request for %q failed, errno=%v", link.Attrs().Name, errno) + + return int(vstats.Peer), nil +} + +func parseTuntapData(link Link, data []syscall.NetlinkRouteAttr) { + tuntap := link.(*Tuntap) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_TUN_OWNER: + tuntap.Owner = native.Uint32(datum.Value) + case nl.IFLA_TUN_GROUP: + tuntap.Group = native.Uint32(datum.Value) + case nl.IFLA_TUN_TYPE: + tuntap.Mode = TuntapMode(uint8(datum.Value[0])) + case nl.IFLA_TUN_PERSIST: + tuntap.NonPersist = false + if uint8(datum.Value[0]) == 0 { + tuntap.NonPersist = true + } + } + } +} + +func parseIPoIBData(link Link, data []syscall.NetlinkRouteAttr) { + ipoib := link.(*IPoIB) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_IPOIB_PKEY: + ipoib.Pkey = uint16(native.Uint16(datum.Value)) + case nl.IFLA_IPOIB_MODE: + ipoib.Mode = IPoIBMode(native.Uint16(datum.Value)) + case nl.IFLA_IPOIB_UMCAST: + ipoib.Umcast = uint16(native.Uint16(datum.Value)) + } } - return int(stats.data[0]), nil +} + +func addIPoIBAttrs(ipoib *IPoIB, linkInfo *nl.RtAttr) { + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + data.AddRtAttr(nl.IFLA_IPOIB_PKEY, nl.Uint16Attr(uint16(ipoib.Pkey))) + data.AddRtAttr(nl.IFLA_IPOIB_MODE, nl.Uint16Attr(uint16(ipoib.Mode))) + data.AddRtAttr(nl.IFLA_IPOIB_UMCAST, nl.Uint16Attr(uint16(ipoib.Umcast))) } diff --git a/vendor/github.com/vishvananda/netlink/neigh.go b/vendor/github.com/vishvananda/netlink/neigh.go index 3f5cd497a739..379e5655f730 100644 --- a/vendor/github.com/vishvananda/netlink/neigh.go +++ b/vendor/github.com/vishvananda/netlink/neigh.go @@ -17,9 +17,16 @@ type Neigh struct { LLIPAddr net.IP //Used in the case of NHRP Vlan int VNI int + MasterIndex int } // String returns $ip/$hwaddr $label func (neigh *Neigh) String() string { return fmt.Sprintf("%s %s", neigh.IP, neigh.HardwareAddr) } + +// NeighUpdate is sent when a neighbor changes - type is RTM_NEWNEIGH or RTM_DELNEIGH. 
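+//
+// A minimal consumer sketch for the type declared below (channel setup and
+// error handling are illustrative, not part of this API):
+//
+//	ch := make(chan netlink.NeighUpdate)
+//	done := make(chan struct{})
+//	if err := netlink.NeighSubscribe(ch, done); err != nil {
+//		return err
+//	}
+//	for u := range ch {
+//		switch u.Type {
+//		case unix.RTM_NEWNEIGH:
+//			fmt.Println("add/change:", u.Neigh.String())
+//		case unix.RTM_DELNEIGH:
+//			fmt.Println("delete:", u.Neigh.String())
+//		}
+//	}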
+type NeighUpdate struct { + Type uint16 + Neigh +} diff --git a/vendor/github.com/vishvananda/netlink/neigh_linux.go b/vendor/github.com/vishvananda/netlink/neigh_linux.go index f75c22649f90..cb3b55d3508d 100644 --- a/vendor/github.com/vishvananda/netlink/neigh_linux.go +++ b/vendor/github.com/vishvananda/netlink/neigh_linux.go @@ -1,10 +1,13 @@ package netlink import ( + "fmt" "net" + "syscall" "unsafe" "github.com/vishvananda/netlink/nl" + "github.com/vishvananda/netns" "golang.org/x/sys/unix" ) @@ -18,7 +21,10 @@ const ( NDA_PORT NDA_VNI NDA_IFINDEX - NDA_MAX = NDA_IFINDEX + NDA_MASTER + NDA_LINK_NETNSID + NDA_SRC_VNI + NDA_MAX = NDA_SRC_VNI ) // Neighbor Cache Entry States. @@ -43,6 +49,7 @@ const ( NTF_ROUTER = 0x80 ) +// Ndmsg is for adding, removing or receiving information about a neighbor table entry type Ndmsg struct { Family uint8 Index uint32 @@ -170,45 +177,58 @@ func neighHandle(neigh *Neigh, req *nl.NetlinkRequest) error { req.AddData(vniData) } + if neigh.MasterIndex != 0 { + masterData := nl.NewRtAttr(NDA_MASTER, nl.Uint32Attr(uint32(neigh.MasterIndex))) + req.AddData(masterData) + } + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } -// NeighList gets a list of IP-MAC mappings in the system (ARP table). +// NeighList returns a list of IP-MAC mappings in the system (ARP table). // Equivalent to: `ip neighbor show`. // The list can be filtered by link and ip family. func NeighList(linkIndex, family int) ([]Neigh, error) { return pkgHandle.NeighList(linkIndex, family) } -// NeighProxyList gets a list of neighbor proxies in the system. +// NeighProxyList returns a list of neighbor proxies in the system. // Equivalent to: `ip neighbor show proxy`. // The list can be filtered by link and ip family. func NeighProxyList(linkIndex, family int) ([]Neigh, error) { return pkgHandle.NeighProxyList(linkIndex, family) } -// NeighList gets a list of IP-MAC mappings in the system (ARP table). +// NeighList returns a list of IP-MAC mappings in the system (ARP table). // Equivalent to: `ip neighbor show`. // The list can be filtered by link and ip family. func (h *Handle) NeighList(linkIndex, family int) ([]Neigh, error) { - return h.neighList(linkIndex, family, 0) + return h.NeighListExecute(Ndmsg{ + Family: uint8(family), + Index: uint32(linkIndex), + }) } -// NeighProxyList gets a list of neighbor proxies in the system. +// NeighProxyList returns a list of neighbor proxies in the system. // Equivalent to: `ip neighbor show proxy`. // The list can be filtered by link, ip family. func (h *Handle) NeighProxyList(linkIndex, family int) ([]Neigh, error) { - return h.neighList(linkIndex, family, NTF_PROXY) + return h.NeighListExecute(Ndmsg{ + Family: uint8(family), + Index: uint32(linkIndex), + Flags: NTF_PROXY, + }) +} + +// NeighListExecute returns a list of neighbour entries filtered by link, ip family, flag and state. +func NeighListExecute(msg Ndmsg) ([]Neigh, error) { + return pkgHandle.NeighListExecute(msg) } -func (h *Handle) neighList(linkIndex, family, flags int) ([]Neigh, error) { +// NeighListExecute returns a list of neighbour entries filtered by link, ip family, flag and state. 
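+//
+// Usage sketch (the ifindex variable is illustrative): dump the bridge FDB
+// for one interface by filling Ndmsg directly, e.g.
+//
+//	entries, err := netlink.NeighListExecute(netlink.Ndmsg{
+//		Family: unix.AF_BRIDGE,
+//		Index:  uint32(ifindex),
+//	})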
+func (h *Handle) NeighListExecute(msg Ndmsg) ([]Neigh, error) { req := h.newNetlinkRequest(unix.RTM_GETNEIGH, unix.NLM_F_DUMP) - msg := Ndmsg{ - Family: uint8(family), - Index: uint32(linkIndex), - Flags: uint8(flags), - } req.AddData(&msg) msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWNEIGH) @@ -219,7 +239,7 @@ func (h *Handle) neighList(linkIndex, family, flags int) ([]Neigh, error) { var res []Neigh for _, m := range msgs { ndm := deserializeNdmsg(m) - if linkIndex != 0 && int(ndm.Index) != linkIndex { + if msg.Index != 0 && ndm.Index != msg.Index { // Ignore messages from other interfaces continue } @@ -251,14 +271,6 @@ func NeighDeserialize(m []byte) (*Neigh, error) { return nil, err } - // This should be cached for perfomance - // once per table dump - link, err := LinkByIndex(neigh.LinkIndex) - if err != nil { - return nil, err - } - encapType := link.Attrs().EncapType - for _, attr := range attrs { switch attr.Attr.Type { case NDA_DST: @@ -268,13 +280,16 @@ func NeighDeserialize(m []byte) (*Neigh, error) { // #define RTA_LENGTH(len) (RTA_ALIGN(sizeof(struct rtattr)) + (len)) // #define RTA_PAYLOAD(rta) ((int)((rta)->rta_len) - RTA_LENGTH(0)) attrLen := attr.Attr.Len - unix.SizeofRtAttr - if attrLen == 4 && (encapType == "ipip" || - encapType == "sit" || - encapType == "gre") { + if attrLen == 4 { neigh.LLIPAddr = net.IP(attr.Value) - } else if attrLen == 16 && - encapType == "tunnel6" { - neigh.IP = net.IP(attr.Value) + } else if attrLen == 16 { + // Can be IPv6 or FireWire HWAddr + link, err := LinkByIndex(neigh.LinkIndex) + if err == nil && link.Attrs().EncapType == "tunnel6" { + neigh.IP = net.IP(attr.Value) + } else { + neigh.HardwareAddr = net.HardwareAddr(attr.Value) + } } else { neigh.HardwareAddr = net.HardwareAddr(attr.Value) } @@ -282,8 +297,126 @@ func NeighDeserialize(m []byte) (*Neigh, error) { neigh.Vlan = int(native.Uint16(attr.Value[0:2])) case NDA_VNI: neigh.VNI = int(native.Uint32(attr.Value[0:4])) + case NDA_MASTER: + neigh.MasterIndex = int(native.Uint32(attr.Value[0:4])) } } return &neigh, nil } + +// NeighSubscribe takes a chan down which notifications will be sent +// when neighbors are added or deleted. Close the 'done' chan to stop subscription. +func NeighSubscribe(ch chan<- NeighUpdate, done <-chan struct{}) error { + return neighSubscribeAt(netns.None(), netns.None(), ch, done, nil, false) +} + +// NeighSubscribeAt works like NeighSubscribe plus it allows the caller +// to choose the network namespace in which to subscribe (ns). +func NeighSubscribeAt(ns netns.NsHandle, ch chan<- NeighUpdate, done <-chan struct{}) error { + return neighSubscribeAt(ns, netns.None(), ch, done, nil, false) +} + +// NeighSubscribeOptions contains a set of options to use with +// NeighSubscribeWithOptions. +type NeighSubscribeOptions struct { + Namespace *netns.NsHandle + ErrorCallback func(error) + ListExisting bool +} + +// NeighSubscribeWithOptions work like NeighSubscribe but enable to +// provide additional options to modify the behavior. Currently, the +// namespace can be provided as well as an error callback. 
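+//
+// Sketch (names illustrative): subscribe with an error callback and replay
+// the existing table before streaming changes:
+//
+//	ch := make(chan netlink.NeighUpdate)
+//	done := make(chan struct{})
+//	opts := netlink.NeighSubscribeOptions{
+//		ErrorCallback: func(err error) { log.Printf("neigh: %v", err) },
+//		ListExisting:  true,
+//	}
+//	if err := netlink.NeighSubscribeWithOptions(ch, done, opts); err != nil {
+//		return err
+//	}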
+func NeighSubscribeWithOptions(ch chan<- NeighUpdate, done <-chan struct{}, options NeighSubscribeOptions) error { + if options.Namespace == nil { + none := netns.None() + options.Namespace = &none + } + return neighSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting) +} + +func neighSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- NeighUpdate, done <-chan struct{}, cberr func(error), listExisting bool) error { + s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_NEIGH) + makeRequest := func(family int) error { + req := pkgHandle.newNetlinkRequest(unix.RTM_GETNEIGH, + unix.NLM_F_DUMP) + infmsg := nl.NewIfInfomsg(family) + req.AddData(infmsg) + if err := s.Send(req); err != nil { + return err + } + return nil + } + if err != nil { + return err + } + if done != nil { + go func() { + <-done + s.Close() + }() + } + if listExisting { + if err := makeRequest(unix.AF_UNSPEC); err != nil { + return err + } + // We have to wait for NLMSG_DONE before making AF_BRIDGE request + } + go func() { + defer close(ch) + for { + msgs, from, err := s.Receive() + if err != nil { + if cberr != nil { + cberr(err) + } + return + } + if from.Pid != nl.PidKernel { + if cberr != nil { + cberr(fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel)) + } + continue + } + for _, m := range msgs { + if m.Header.Type == unix.NLMSG_DONE { + if listExisting { + // This will be called after handling AF_UNSPEC + // list request, we have to wait for NLMSG_DONE + // before making another request + if err := makeRequest(unix.AF_BRIDGE); err != nil { + if cberr != nil { + cberr(err) + } + return + } + listExisting = false + } + continue + } + if m.Header.Type == unix.NLMSG_ERROR { + native := nl.NativeEndian() + error := int32(native.Uint32(m.Data[0:4])) + if error == 0 { + continue + } + if cberr != nil { + cberr(syscall.Errno(-error)) + } + return + } + neigh, err := NeighDeserialize(m.Data) + if err != nil { + if cberr != nil { + cberr(err) + } + return + } + ch <- NeighUpdate{Type: m.Header.Type, Neigh: *neigh} + } + } + }() + + return nil +} diff --git a/vendor/github.com/vishvananda/netlink/netlink.go b/vendor/github.com/vishvananda/netlink/netlink.go index fb159526e316..9cb685dc818f 100644 --- a/vendor/github.com/vishvananda/netlink/netlink.go +++ b/vendor/github.com/vishvananda/netlink/netlink.go @@ -27,7 +27,8 @@ func ParseIPNet(s string) (*net.IPNet, error) { if err != nil { return nil, err } - return &net.IPNet{IP: ip, Mask: ipNet.Mask}, nil + ipNet.IP = ip + return ipNet, nil } // NewIPNet generates an IPNet from an ip address using a netmask of 32 or 128. 
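For reference, ParseIPNet exists because a bare net.ParseCIDR masks the address down to the network: the *net.IPNet it returns holds 10.0.0.0 for input "10.0.0.5/8", while ParseIPNet keeps the host address with the mask attached. A runnable sketch (the address is an arbitrary example):

	package main

	import (
		"fmt"

		"github.com/vishvananda/netlink"
	)

	func main() {
		// ParseIPNet preserves the host bits: IP 10.0.0.5, mask /8.
		addr, err := netlink.ParseIPNet("10.0.0.5/8")
		if err != nil {
			panic(err)
		}
		fmt.Println(addr.IP, addr.Mask) // 10.0.0.5 ff000000
	}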
diff --git a/vendor/github.com/vishvananda/netlink/netlink_unspecified.go b/vendor/github.com/vishvananda/netlink/netlink_unspecified.go index 86111b92ce16..42d3acf9180a 100644 --- a/vendor/github.com/vishvananda/netlink/netlink_unspecified.go +++ b/vendor/github.com/vishvananda/netlink/netlink_unspecified.go @@ -48,10 +48,18 @@ func LinkSetVfVlan(link Link, vf, vlan int) error { return ErrNotImplemented } +func LinkSetVfVlanQos(link Link, vf, vlan, qos int) error { + return ErrNotImplemented +} + func LinkSetVfTxRate(link Link, vf, rate int) error { return ErrNotImplemented } +func LinkSetVfRate(link Link, vf, minRate, maxRate int) error { + return ErrNotImplemented +} + func LinkSetNoMaster(link Link) error { return ErrNotImplemented } @@ -152,6 +160,10 @@ func AddrAdd(link Link, addr *Addr) error { return ErrNotImplemented } +func AddrReplace(link Link, addr *Addr) error { + return ErrNotImplemented +} + func AddrDel(link Link, addr *Addr) error { return ErrNotImplemented } diff --git a/vendor/github.com/vishvananda/netlink/netns_linux.go b/vendor/github.com/vishvananda/netlink/netns_linux.go new file mode 100644 index 000000000000..77cf6f46901b --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/netns_linux.go @@ -0,0 +1,141 @@ +package netlink + +// Network namespace ID functions +// +// The kernel has a weird concept called the network namespace ID. +// This is different from the file reference in proc (and any bind-mounted +// namespaces, etc.) +// +// Instead, namespaces can be assigned a numeric ID at any time. Once set, +// the ID is fixed. The ID can either be set manually by the user, or +// automatically, triggered by certain kernel actions. The most common kernel +// action that triggers namespace ID creation is moving one end of a veth pair +// in to that namespace. + +import ( + "fmt" + + "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" +) + +// These can be replaced by the values from sys/unix when it is next released. +const ( + _ = iota + NETNSA_NSID + NETNSA_PID + NETNSA_FD +) + +// GetNetNsIdByPid looks up the network namespace ID for a given pid (really thread id). +// Returns -1 if the namespace does not have an ID set. +func (h *Handle) GetNetNsIdByPid(pid int) (int, error) { + return h.getNetNsId(NETNSA_PID, uint32(pid)) +} + +// GetNetNsIdByPid looks up the network namespace ID for a given pid (really thread id). +// Returns -1 if the namespace does not have an ID set. +func GetNetNsIdByPid(pid int) (int, error) { + return pkgHandle.GetNetNsIdByPid(pid) +} + +// SetNetNSIdByPid sets the ID of the network namespace for a given pid (really thread id). +// The ID can only be set for namespaces without an ID already set. +func (h *Handle) SetNetNsIdByPid(pid, nsid int) error { + return h.setNetNsId(NETNSA_PID, uint32(pid), uint32(nsid)) +} + +// SetNetNSIdByPid sets the ID of the network namespace for a given pid (really thread id). +// The ID can only be set for namespaces without an ID already set. +func SetNetNsIdByPid(pid, nsid int) error { + return pkgHandle.SetNetNsIdByPid(pid, nsid) +} + +// GetNetNsIdByFd looks up the network namespace ID for a given fd. +// fd must be an open file descriptor to a namespace file. +// Returns -1 if the namespace does not have an ID set. +func (h *Handle) GetNetNsIdByFd(fd int) (int, error) { + return h.getNetNsId(NETNSA_FD, uint32(fd)) +} + +// GetNetNsIdByFd looks up the network namespace ID for a given fd. +// fd must be an open file descriptor to a namespace file. 
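+//
+// Sketch, assuming the companion vishvananda/netns package for the handle
+// (error handling elided for brevity):
+//
+//	ns, _ := netns.Get() // open handle to the current thread's netns
+//	defer ns.Close()
+//	id, _ := netlink.GetNetNsIdByFd(int(ns))
+//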
+// Returns -1 if the namespace does not have an ID set. +func GetNetNsIdByFd(fd int) (int, error) { + return pkgHandle.GetNetNsIdByFd(fd) +} + +// SetNetNSIdByFd sets the ID of the network namespace for a given fd. +// fd must be an open file descriptor to a namespace file. +// The ID can only be set for namespaces without an ID already set. +func (h *Handle) SetNetNsIdByFd(fd, nsid int) error { + return h.setNetNsId(NETNSA_FD, uint32(fd), uint32(nsid)) +} + +// SetNetNSIdByFd sets the ID of the network namespace for a given fd. +// fd must be an open file descriptor to a namespace file. +// The ID can only be set for namespaces without an ID already set. +func SetNetNsIdByFd(fd, nsid int) error { + return pkgHandle.SetNetNsIdByFd(fd, nsid) +} + +// getNetNsId requests the netnsid for a given type-val pair +// type should be either NETNSA_PID or NETNSA_FD +func (h *Handle) getNetNsId(attrType int, val uint32) (int, error) { + req := h.newNetlinkRequest(unix.RTM_GETNSID, unix.NLM_F_REQUEST) + + rtgen := nl.NewRtGenMsg() + req.AddData(rtgen) + + b := make([]byte, 4, 4) + native.PutUint32(b, val) + attr := nl.NewRtAttr(attrType, b) + req.AddData(attr) + + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWNSID) + + if err != nil { + return 0, err + } + + for _, m := range msgs { + msg := nl.DeserializeRtGenMsg(m) + + attrs, err := nl.ParseRouteAttr(m[msg.Len():]) + if err != nil { + return 0, err + } + + for _, attr := range attrs { + switch attr.Attr.Type { + case NETNSA_NSID: + return int(int32(native.Uint32(attr.Value))), nil + } + } + } + + return 0, fmt.Errorf("unexpected empty result") +} + +// setNetNsId sets the netnsid for a given type-val pair +// type should be either NETNSA_PID or NETNSA_FD +// The ID can only be set for namespaces without an ID already set +func (h *Handle) setNetNsId(attrType int, val uint32, newnsid uint32) error { + req := h.newNetlinkRequest(unix.RTM_NEWNSID, unix.NLM_F_REQUEST|unix.NLM_F_ACK) + + rtgen := nl.NewRtGenMsg() + req.AddData(rtgen) + + b := make([]byte, 4, 4) + native.PutUint32(b, val) + attr := nl.NewRtAttr(attrType, b) + req.AddData(attr) + + b1 := make([]byte, 4, 4) + native.PutUint32(b1, newnsid) + attr1 := nl.NewRtAttr(NETNSA_NSID, b1) + req.AddData(attr1) + + _, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWNSID) + return err +} diff --git a/vendor/github.com/vishvananda/netlink/netns_unspecified.go b/vendor/github.com/vishvananda/netlink/netns_unspecified.go new file mode 100644 index 000000000000..5c5899e3628f --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/netns_unspecified.go @@ -0,0 +1,19 @@ +// +build !linux + +package netlink + +func GetNetNsIdByPid(pid int) (int, error) { + return 0, ErrNotImplemented +} + +func SetNetNsIdByPid(pid, nsid int) error { + return ErrNotImplemented +} + +func GetNetNsIdByFd(fd int) (int, error) { + return 0, ErrNotImplemented +} + +func SetNetNsIdByFd(fd, nsid int) error { + return ErrNotImplemented +} diff --git a/vendor/github.com/vishvananda/netlink/nl/BUILD b/vendor/github.com/vishvananda/netlink/nl/BUILD index 87e2c5f55ecb..cdf3a1e41ffb 100644 --- a/vendor/github.com/vishvananda/netlink/nl/BUILD +++ b/vendor/github.com/vishvananda/netlink/nl/BUILD @@ -6,13 +6,16 @@ go_library( "addr_linux.go", "bridge_linux.go", "conntrack_linux.go", + "devlink_linux.go", "genetlink_linux.go", "link_linux.go", "mpls_linux.go", "nl_linux.go", "nl_unspecified.go", + "rdma_link_linux.go", "route_linux.go", "seg6_linux.go", + "seg6local_linux.go", "syscall.go", "tc_linux.go", "xfrm_linux.go", diff 
--git a/vendor/github.com/vishvananda/netlink/nl/bridge_linux.go b/vendor/github.com/vishvananda/netlink/nl/bridge_linux.go index 6c0d333387d6..34e78ba8dafc 100644 --- a/vendor/github.com/vishvananda/netlink/nl/bridge_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/bridge_linux.go @@ -11,8 +11,8 @@ const ( /* Bridge Flags */ const ( - BRIDGE_FLAGS_MASTER = iota /* Bridge command to/from master */ - BRIDGE_FLAGS_SELF /* Bridge command to/from lowerdev */ + BRIDGE_FLAGS_MASTER = iota + 1 /* Bridge command to/from master */ + BRIDGE_FLAGS_SELF /* Bridge command to/from lowerdev */ ) /* Bridge management nested attributes diff --git a/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go b/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go index 380cc5967bfa..79d2b6b8909f 100644 --- a/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go @@ -76,12 +76,17 @@ const ( // __CTA_MAX // }; const ( - CTA_TUPLE_ORIG = 1 - CTA_TUPLE_REPLY = 2 - CTA_STATUS = 3 - CTA_TIMEOUT = 7 - CTA_MARK = 8 - CTA_PROTOINFO = 4 + CTA_TUPLE_ORIG = 1 + CTA_TUPLE_REPLY = 2 + CTA_STATUS = 3 + CTA_PROTOINFO = 4 + CTA_TIMEOUT = 7 + CTA_MARK = 8 + CTA_COUNTERS_ORIG = 9 + CTA_COUNTERS_REPLY = 10 + CTA_USE = 11 + CTA_ID = 12 + CTA_TIMESTAMP = 20 ) // enum ctattr_tuple { @@ -163,6 +168,29 @@ const ( CTA_PROTOINFO_TCP_FLAGS_REPLY = 5 ) +// enum ctattr_counters { +// CTA_COUNTERS_UNSPEC, +// CTA_COUNTERS_PACKETS, /* 64bit counters */ +// CTA_COUNTERS_BYTES, /* 64bit counters */ +// CTA_COUNTERS32_PACKETS, /* old 32bit counters, unused */ +// CTA_COUNTERS32_BYTES, /* old 32bit counters, unused */ +// CTA_COUNTERS_PAD, +// __CTA_COUNTERS_M +// }; +// #define CTA_COUNTERS_MAX (__CTA_COUNTERS_MAX - 1) +const ( + CTA_COUNTERS_PACKETS = 1 + CTA_COUNTERS_BYTES = 2 +) + +// enum CTA TIMESTAMP TLVs +// CTA_TIMESTAMP_START /* 64bit value */ +// CTA_TIMESTAMP_STOP /* 64bit value */ +const ( + CTA_TIMESTAMP_START = 1 + CTA_TIMESTAMP_STOP = 2 +) + // /* General form of address family dependent message. 
// */ // struct nfgenmsg { diff --git a/vendor/github.com/vishvananda/netlink/nl/devlink_linux.go b/vendor/github.com/vishvananda/netlink/nl/devlink_linux.go new file mode 100644 index 000000000000..db66faaad30c --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/devlink_linux.go @@ -0,0 +1,40 @@ +package nl + +// All the following constants are coming from: +// https://github.com/torvalds/linux/blob/master/include/uapi/linux/devlink.h + +const ( + GENL_DEVLINK_VERSION = 1 + GENL_DEVLINK_NAME = "devlink" +) + +const ( + DEVLINK_CMD_GET = 1 + DEVLINK_CMD_ESWITCH_GET = 29 + DEVLINK_CMD_ESWITCH_SET = 30 +) + +const ( + DEVLINK_ATTR_BUS_NAME = 1 + DEVLINK_ATTR_DEV_NAME = 2 + DEVLINK_ATTR_ESWITCH_MODE = 25 + DEVLINK_ATTR_ESWITCH_INLINE_MODE = 26 + DEVLINK_ATTR_ESWITCH_ENCAP_MODE = 62 +) + +const ( + DEVLINK_ESWITCH_MODE_LEGACY = 0 + DEVLINK_ESWITCH_MODE_SWITCHDEV = 1 +) + +const ( + DEVLINK_ESWITCH_INLINE_MODE_NONE = 0 + DEVLINK_ESWITCH_INLINE_MODE_LINK = 1 + DEVLINK_ESWITCH_INLINE_MODE_NETWORK = 2 + DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT = 3 +) + +const ( + DEVLINK_ESWITCH_ENCAP_MODE_NONE = 0 + DEVLINK_ESWITCH_ENCAP_MODE_BASIC = 1 +) diff --git a/vendor/github.com/vishvananda/netlink/nl/link_linux.go b/vendor/github.com/vishvananda/netlink/nl/link_linux.go index 84a3498dd3d3..afb16a9c18d2 100644 --- a/vendor/github.com/vishvananda/netlink/nl/link_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/link_linux.go @@ -13,7 +13,9 @@ const ( IFLA_INFO_KIND IFLA_INFO_DATA IFLA_INFO_XSTATS - IFLA_INFO_MAX = IFLA_INFO_XSTATS + IFLA_INFO_SLAVE_KIND + IFLA_INFO_SLAVE_DATA + IFLA_INFO_MAX = IFLA_INFO_SLAVE_DATA ) const ( @@ -87,7 +89,8 @@ const ( const ( IFLA_IPVLAN_UNSPEC = iota IFLA_IPVLAN_MODE - IFLA_IPVLAN_MAX = IFLA_IPVLAN_MODE + IFLA_IPVLAN_FLAG + IFLA_IPVLAN_MAX = IFLA_IPVLAN_FLAG ) const ( @@ -164,6 +167,8 @@ const ( IFLA_BOND_SLAVE_PERM_HWADDR IFLA_BOND_SLAVE_QUEUE_ID IFLA_BOND_SLAVE_AD_AGGREGATOR_ID + IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE + IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE ) const ( @@ -217,9 +222,11 @@ const ( IFLA_VF_RSS_QUERY_EN /* RSS Redirection Table and Hash Key query * on/off switch */ - IFLA_VF_STATS /* network device statistics */ - IFLA_VF_TRUST /* Trust state of VF */ - IFLA_VF_MAX = IFLA_VF_TRUST + IFLA_VF_STATS /* network device statistics */ + IFLA_VF_TRUST /* Trust state of VF */ + IFLA_VF_IB_NODE_GUID /* VF Infiniband node GUID */ + IFLA_VF_IB_PORT_GUID /* VF Infiniband port GUID */ + IFLA_VF_MAX = IFLA_VF_IB_PORT_GUID ) const ( @@ -248,6 +255,7 @@ const ( SizeofVfLinkState = 0x08 SizeofVfRssQueryEn = 0x08 SizeofVfTrust = 0x08 + SizeofVfGUID = 0x10 ) // struct ifla_vf_mac { @@ -430,6 +438,30 @@ func (msg *VfTrust) Serialize() []byte { return (*(*[SizeofVfTrust]byte)(unsafe.Pointer(msg)))[:] } +// struct ifla_vf_guid { +// __u32 vf; +// __u32 rsvd; +// __u64 guid; +// }; + +type VfGUID struct { + Vf uint32 + Rsvd uint32 + GUID uint64 +} + +func (msg *VfGUID) Len() int { + return SizeofVfGUID +} + +func DeserializeVfGUID(b []byte) *VfGUID { + return (*VfGUID)(unsafe.Pointer(&b[0:SizeofVfGUID][0])) +} + +func (msg *VfGUID) Serialize() []byte { + return (*(*[SizeofVfGUID]byte)(unsafe.Pointer(msg)))[:] +} + const ( XDP_FLAGS_UPDATE_IF_NOEXIST = 1 << iota XDP_FLAGS_SKB_MODE @@ -546,3 +578,33 @@ const ( GTP_ROLE_GGSN = iota GTP_ROLE_SGSN ) + +const ( + IFLA_XFRM_UNSPEC = iota + IFLA_XFRM_LINK + IFLA_XFRM_IF_ID + + IFLA_XFRM_MAX = iota - 1 +) + +const ( + IFLA_TUN_UNSPEC = iota + IFLA_TUN_OWNER + IFLA_TUN_GROUP + IFLA_TUN_TYPE + IFLA_TUN_PI + IFLA_TUN_VNET_HDR 
+ IFLA_TUN_PERSIST + IFLA_TUN_MULTI_QUEUE + IFLA_TUN_NUM_QUEUES + IFLA_TUN_NUM_DISABLED_QUEUES + IFLA_TUN_MAX = IFLA_TUN_NUM_DISABLED_QUEUES +) + +const ( + IFLA_IPOIB_UNSPEC = iota + IFLA_IPOIB_PKEY + IFLA_IPOIB_MODE + IFLA_IPOIB_UMCAST + IFLA_IPOIB_MAX = IFLA_IPOIB_UMCAST +) diff --git a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go index bc8e82c2cc4d..aaf56c6715ff 100644 --- a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go @@ -21,7 +21,13 @@ const ( FAMILY_ALL = unix.AF_UNSPEC FAMILY_V4 = unix.AF_INET FAMILY_V6 = unix.AF_INET6 - FAMILY_MPLS = AF_MPLS + FAMILY_MPLS = unix.AF_MPLS + // Arbitrary set value (greater than default 4k) to allow receiving + // from kernel more verbose messages e.g. for statistics, + // tc rules or filters, or other more memory requiring data. + RECEIVE_BUFFER_SIZE = 65536 + // Kernel netlink pid + PidKernel uint32 = 0 ) // SupportedNlFamilies contains the list of netlink families this netlink package supports @@ -42,7 +48,7 @@ func GetIPFamily(ip net.IP) int { var nativeEndian binary.ByteOrder -// Get native endianness for the system +// NativeEndian gets native endianness for the system func NativeEndian() binary.ByteOrder { if nativeEndian == nil { var x uint32 = 0x01020304 @@ -271,15 +277,22 @@ func NewRtAttr(attrType int, data []byte) *RtAttr { } } -// Create a new RtAttr obj anc add it as a child of an existing object +// NewRtAttrChild adds an RtAttr as a child to the parent and returns the new attribute +// +// Deprecated: Use AddRtAttr() on the parent object func NewRtAttrChild(parent *RtAttr, attrType int, data []byte) *RtAttr { + return parent.AddRtAttr(attrType, data) +} + +// AddRtAttr adds an RtAttr as a child and returns the new attribute +func (a *RtAttr) AddRtAttr(attrType int, data []byte) *RtAttr { attr := NewRtAttr(attrType, data) - parent.children = append(parent.children, attr) + a.children = append(a.children, attr) return attr } -// AddChild adds an existing RtAttr as a child. -func (a *RtAttr) AddChild(attr *RtAttr) { +// AddChild adds an existing NetlinkRequestData as a child. +func (a *RtAttr) AddChild(attr NetlinkRequestData) { a.children = append(a.children, attr) } @@ -360,16 +373,12 @@ func (req *NetlinkRequest) Serialize() []byte { } func (req *NetlinkRequest) AddData(data NetlinkRequestData) { - if data != nil { - req.Data = append(req.Data, data) - } + req.Data = append(req.Data, data) } // AddRawData adds raw bytes to the end of the NetlinkRequest object during serialization func (req *NetlinkRequest) AddRawData(data []byte) { - if data != nil { - req.RawData = append(req.RawData, data...) - } + req.RawData = append(req.RawData, data...) } // Execute the request against a the given sockType. 
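To make the attribute-builder change above concrete: nested trees are now composed by calling AddRtAttr on the parent (each call appends a child and returns it), rather than going through the deprecated NewRtAttrChild helper. A minimal sketch, assuming the exported nl helpers shown in this patch plus the package's existing NonZeroTerminated string helper:

	import (
		"github.com/vishvananda/netlink/nl"
		"golang.org/x/sys/unix"
	)

	// buildLinkInfo composes IFLA_LINKINFO{ IFLA_INFO_KIND = kind } for an
	// RTM_NEWLINK request; req.AddData(buildLinkInfo("dummy")) would attach it.
	func buildLinkInfo(kind string) *nl.RtAttr {
		linkInfo := nl.NewRtAttr(unix.IFLA_LINKINFO, nil)
		linkInfo.AddRtAttr(nl.IFLA_INFO_KIND, nl.NonZeroTerminated(kind))
		return linkInfo
	}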
@@ -413,10 +422,13 @@ func (req *NetlinkRequest) Execute(sockType int, resType uint16) ([][]byte, erro done: for { - msgs, err := s.Receive() + msgs, from, err := s.Receive() if err != nil { return nil, err } + if from.Pid != PidKernel { + return nil, fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, PidKernel) + } for _, m := range msgs { if m.Header.Seq != req.Seq { if sharedSocket { @@ -425,7 +437,7 @@ done: return nil, fmt.Errorf("Wrong Seq nr %d, expected %d", m.Header.Seq, req.Seq) } if m.Header.Pid != pid { - return nil, fmt.Errorf("Wrong pid %d, expected %d", m.Header.Pid, pid) + continue } if m.Header.Type == unix.NLMSG_DONE { break done @@ -610,21 +622,31 @@ func (s *NetlinkSocket) Send(request *NetlinkRequest) error { return nil } -func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, error) { +func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, *unix.SockaddrNetlink, error) { fd := int(atomic.LoadInt32(&s.fd)) if fd < 0 { - return nil, fmt.Errorf("Receive called on a closed socket") + return nil, nil, fmt.Errorf("Receive called on a closed socket") } - rb := make([]byte, unix.Getpagesize()) - nr, _, err := unix.Recvfrom(fd, rb, 0) + var fromAddr *unix.SockaddrNetlink + var rb [RECEIVE_BUFFER_SIZE]byte + nr, from, err := unix.Recvfrom(fd, rb[:], 0) if err != nil { - return nil, err + return nil, nil, err + } + fromAddr, ok := from.(*unix.SockaddrNetlink) + if !ok { + return nil, nil, fmt.Errorf("Error converting to netlink sockaddr") } if nr < unix.NLMSG_HDRLEN { - return nil, fmt.Errorf("Got short response from netlink") + return nil, nil, fmt.Errorf("Got short response from netlink") + } + rb2 := make([]byte, nr) + copy(rb2, rb[:nr]) + nl, err := syscall.ParseNetlinkMessage(rb2) + if err != nil { + return nil, nil, err } - rb = rb[:nr] - return syscall.ParseNetlinkMessage(rb) + return nl, fromAddr, nil } // SetSendTimeout allows to set a send timeout on the socket diff --git a/vendor/github.com/vishvananda/netlink/nl/rdma_link_linux.go b/vendor/github.com/vishvananda/netlink/nl/rdma_link_linux.go new file mode 100644 index 000000000000..1224b747def4 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/rdma_link_linux.go @@ -0,0 +1,35 @@ +package nl + +const ( + RDMA_NL_GET_CLIENT_SHIFT = 10 +) + +const ( + RDMA_NL_NLDEV = 5 +) + +const ( + RDMA_NLDEV_CMD_GET = 1 + RDMA_NLDEV_CMD_SET = 2 + RDMA_NLDEV_CMD_SYS_GET = 6 + RDMA_NLDEV_CMD_SYS_SET = 7 +) + +const ( + RDMA_NLDEV_ATTR_DEV_INDEX = 1 + RDMA_NLDEV_ATTR_DEV_NAME = 2 + RDMA_NLDEV_ATTR_PORT_INDEX = 3 + RDMA_NLDEV_ATTR_CAP_FLAGS = 4 + RDMA_NLDEV_ATTR_FW_VERSION = 5 + RDMA_NLDEV_ATTR_NODE_GUID = 6 + RDMA_NLDEV_ATTR_SYS_IMAGE_GUID = 7 + RDMA_NLDEV_ATTR_SUBNET_PREFIX = 8 + RDMA_NLDEV_ATTR_LID = 9 + RDMA_NLDEV_ATTR_SM_LID = 10 + RDMA_NLDEV_ATTR_LMC = 11 + RDMA_NLDEV_ATTR_PORT_STATE = 12 + RDMA_NLDEV_ATTR_PORT_PHYS_STATE = 13 + RDMA_NLDEV_ATTR_DEV_NODE_TYPE = 14 + RDMA_NLDEV_SYS_ATTR_NETNS_MODE = 66 + RDMA_NLDEV_NET_NS_FD = 68 +) diff --git a/vendor/github.com/vishvananda/netlink/nl/route_linux.go b/vendor/github.com/vishvananda/netlink/nl/route_linux.go index f6906fcaf7e0..03c1900ffa89 100644 --- a/vendor/github.com/vishvananda/netlink/nl/route_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/route_linux.go @@ -79,3 +79,29 @@ func (msg *RtNexthop) Serialize() []byte { } return buf } + +type RtGenMsg struct { + unix.RtGenmsg +} + +func NewRtGenMsg() *RtGenMsg { + return &RtGenMsg{ + RtGenmsg: unix.RtGenmsg{ + Family: unix.AF_UNSPEC, + }, + } +} + +func (msg *RtGenMsg) Len() int { + return 
rtaAlignOf(unix.SizeofRtGenmsg) +} + +func DeserializeRtGenMsg(b []byte) *RtGenMsg { + return &RtGenMsg{RtGenmsg: unix.RtGenmsg{Family: b[0]}} +} + +func (msg *RtGenMsg) Serialize() []byte { + out := make([]byte, msg.Len()) + out[0] = msg.Family + return out +} diff --git a/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go b/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go index b3425f6b0ecc..5774cbb15aef 100644 --- a/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go @@ -99,6 +99,49 @@ func DecodeSEG6Encap(buf []byte) (int, []net.IP, error) { return mode, srh.Segments, nil } +func DecodeSEG6Srh(buf []byte) ([]net.IP, error) { + native := NativeEndian() + srh := IPv6SrHdr{ + nextHdr: buf[0], + hdrLen: buf[1], + routingType: buf[2], + segmentsLeft: buf[3], + firstSegment: buf[4], + flags: buf[5], + reserved: native.Uint16(buf[6:8]), + } + buf = buf[8:] + if len(buf)%16 != 0 { + err := fmt.Errorf("DecodeSEG6Srh: error parsing Segment List (buf len: %d)", len(buf)) + return nil, err + } + for len(buf) > 0 { + srh.Segments = append(srh.Segments, net.IP(buf[:16])) + buf = buf[16:] + } + return srh.Segments, nil +} +func EncodeSEG6Srh(segments []net.IP) ([]byte, error) { + nsegs := len(segments) // nsegs: number of segments + if nsegs == 0 { + return nil, errors.New("EncodeSEG6Srh: No Segments") + } + b := make([]byte, 8, 8+len(segments)*16) + native := NativeEndian() + b[0] = 0 // srh.nextHdr (0 when calling netlink) + b[1] = uint8(16 * nsegs >> 3) // srh.hdrLen (in 8-octets unit) + b[2] = IPV6_SRCRT_TYPE_4 // srh.routingType (assigned by IANA) + b[3] = uint8(nsegs - 1) // srh.segmentsLeft + b[4] = uint8(nsegs - 1) // srh.firstSegment + b[5] = 0 // srh.flags (SR6_FLAG1_HMAC for srh_hmac) + // srh.reserved: Defined as "Tag" in draft-ietf-6man-segment-routing-header-07 + native.PutUint16(b[6:], 0) // srh.reserved + for _, netIP := range segments { + b = append(b, netIP...) 
// srh.Segments + } + return b, nil +} + // Helper functions func SEG6EncapModeString(mode int) string { switch mode { diff --git a/vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go b/vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go new file mode 100644 index 000000000000..1500177267af --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go @@ -0,0 +1,76 @@ +package nl + +import () + +// seg6local parameters +const ( + SEG6_LOCAL_UNSPEC = iota + SEG6_LOCAL_ACTION + SEG6_LOCAL_SRH + SEG6_LOCAL_TABLE + SEG6_LOCAL_NH4 + SEG6_LOCAL_NH6 + SEG6_LOCAL_IIF + SEG6_LOCAL_OIF + __SEG6_LOCAL_MAX +) +const ( + SEG6_LOCAL_MAX = __SEG6_LOCAL_MAX +) + +// seg6local actions +const ( + SEG6_LOCAL_ACTION_END = iota + 1 // 1 + SEG6_LOCAL_ACTION_END_X // 2 + SEG6_LOCAL_ACTION_END_T // 3 + SEG6_LOCAL_ACTION_END_DX2 // 4 + SEG6_LOCAL_ACTION_END_DX6 // 5 + SEG6_LOCAL_ACTION_END_DX4 // 6 + SEG6_LOCAL_ACTION_END_DT6 // 7 + SEG6_LOCAL_ACTION_END_DT4 // 8 + SEG6_LOCAL_ACTION_END_B6 // 9 + SEG6_LOCAL_ACTION_END_B6_ENCAPS // 10 + SEG6_LOCAL_ACTION_END_BM // 11 + SEG6_LOCAL_ACTION_END_S // 12 + SEG6_LOCAL_ACTION_END_AS // 13 + SEG6_LOCAL_ACTION_END_AM // 14 + __SEG6_LOCAL_ACTION_MAX +) +const ( + SEG6_LOCAL_ACTION_MAX = __SEG6_LOCAL_ACTION_MAX - 1 +) + +// Helper functions +func SEG6LocalActionString(action int) string { + switch action { + case SEG6_LOCAL_ACTION_END: + return "End" + case SEG6_LOCAL_ACTION_END_X: + return "End.X" + case SEG6_LOCAL_ACTION_END_T: + return "End.T" + case SEG6_LOCAL_ACTION_END_DX2: + return "End.DX2" + case SEG6_LOCAL_ACTION_END_DX6: + return "End.DX6" + case SEG6_LOCAL_ACTION_END_DX4: + return "End.DX4" + case SEG6_LOCAL_ACTION_END_DT6: + return "End.DT6" + case SEG6_LOCAL_ACTION_END_DT4: + return "End.DT4" + case SEG6_LOCAL_ACTION_END_B6: + return "End.B6" + case SEG6_LOCAL_ACTION_END_B6_ENCAPS: + return "End.B6.Encaps" + case SEG6_LOCAL_ACTION_END_BM: + return "End.BM" + case SEG6_LOCAL_ACTION_END_S: + return "End.S" + case SEG6_LOCAL_ACTION_END_AS: + return "End.AS" + case SEG6_LOCAL_ACTION_END_AM: + return "End.AM" + } + return "unknown" +} diff --git a/vendor/github.com/vishvananda/netlink/nl/syscall.go b/vendor/github.com/vishvananda/netlink/nl/syscall.go index fc631e0e505f..f7f7f92e6fa1 100644 --- a/vendor/github.com/vishvananda/netlink/nl/syscall.go +++ b/vendor/github.com/vishvananda/netlink/nl/syscall.go @@ -42,16 +42,6 @@ const ( TCPDIAG_NOCOOKIE = 0xFFFFFFFF /* TCPDIAG_NOCOOKIE in net/ipv4/tcp_diag.h*/ ) -const ( - AF_MPLS = 28 -) - -const ( - RTA_NEWDST = 0x13 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 -) - // RTA_ENCAP subtype const ( MPLS_IPTUNNEL_UNSPEC = iota @@ -67,6 +57,7 @@ const ( LWTUNNEL_ENCAP_IP6 LWTUNNEL_ENCAP_SEG6 LWTUNNEL_ENCAP_BPF + LWTUNNEL_ENCAP_SEG6_LOCAL ) // routing header types diff --git a/vendor/github.com/vishvananda/netlink/nl/tc_linux.go b/vendor/github.com/vishvananda/netlink/nl/tc_linux.go index 94ebc290a9e3..501f554b216c 100644 --- a/vendor/github.com/vishvananda/netlink/nl/tc_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/tc_linux.go @@ -1,6 +1,7 @@ package nl import ( + "encoding/binary" "unsafe" ) @@ -64,6 +65,15 @@ const ( TCA_PRIO_MAX = TCA_PRIO_MQ ) +const ( + TCA_STATS_UNSPEC = iota + TCA_STATS_BASIC + TCA_STATS_RATE_EST + TCA_STATS_QUEUE + TCA_STATS_APP + TCA_STATS_MAX = TCA_STATS_APP +) + const ( SizeofTcMsg = 0x14 SizeofTcActionMsg = 0x04 @@ -79,7 +89,10 @@ const ( SizeofTcU32Key = 0x10 SizeofTcU32Sel = 0x10 // without keys SizeofTcGen = 0x14 + SizeofTcConnmark = SizeofTcGen + 0x04 
SizeofTcMirred = SizeofTcGen + 0x08 + SizeofTcTunnelKey = SizeofTcGen + 0x04 + SizeofTcSkbEdit = SizeofTcGen SizeofTcPolice = 2*SizeofTcRateSpec + 0x20 ) @@ -412,6 +425,57 @@ func (x *TcHtbGlob) Serialize() []byte { return (*(*[SizeofTcHtbGlob]byte)(unsafe.Pointer(x)))[:] } +// HFSC + +type Curve struct { + m1 uint32 + d uint32 + m2 uint32 +} + +type HfscCopt struct { + Rsc Curve + Fsc Curve + Usc Curve +} + +func (c *Curve) Attrs() (uint32, uint32, uint32) { + return c.m1, c.d, c.m2 +} + +func (c *Curve) Set(m1 uint32, d uint32, m2 uint32) { + c.m1 = m1 + c.d = d + c.m2 = m2 +} + +func DeserializeHfscCurve(b []byte) *Curve { + return &Curve{ + m1: binary.LittleEndian.Uint32(b[0:4]), + d: binary.LittleEndian.Uint32(b[4:8]), + m2: binary.LittleEndian.Uint32(b[8:12]), + } +} + +func SerializeHfscCurve(c *Curve) (b []byte) { + t := make([]byte, binary.MaxVarintLen32) + binary.LittleEndian.PutUint32(t, c.m1) + b = append(b, t[:4]...) + binary.LittleEndian.PutUint32(t, c.d) + b = append(b, t[:4]...) + binary.LittleEndian.PutUint32(t, c.m2) + b = append(b, t[:4]...) + return b +} + +type TcHfscOpt struct { + Defcls uint16 +} + +func (x *TcHfscOpt) Serialize() []byte { + return (*(*[2]byte)(unsafe.Pointer(x)))[:] +} + const ( TCA_U32_UNSPEC = iota TCA_U32_CLASSID @@ -586,11 +650,47 @@ const ( TCA_BPF_FD TCA_BPF_NAME TCA_BPF_FLAGS - TCA_BPF_MAX = TCA_BPF_FLAGS + TCA_BPF_FLAGS_GEN + TCA_BPF_TAG + TCA_BPF_ID + TCA_BPF_MAX = TCA_BPF_ID ) type TcBpf TcGen +const ( + TCA_ACT_CONNMARK = 14 +) + +const ( + TCA_CONNMARK_UNSPEC = iota + TCA_CONNMARK_PARMS + TCA_CONNMARK_TM + TCA_CONNMARK_MAX = TCA_CONNMARK_TM +) + +// struct tc_connmark { +// tc_gen; +// __u16 zone; +// }; + +type TcConnmark struct { + TcGen + Zone uint16 +} + +func (msg *TcConnmark) Len() int { + return SizeofTcConnmark +} + +func DeserializeTcConnmark(b []byte) *TcConnmark { + return (*TcConnmark)(unsafe.Pointer(&b[0:SizeofTcConnmark][0])) +} + +func (x *TcConnmark) Serialize() []byte { + return (*(*[SizeofTcConnmark]byte)(unsafe.Pointer(x)))[:] +} + const ( TCA_ACT_MIRRED = 8 ) @@ -626,6 +726,63 @@ func (x *TcMirred) Serialize() []byte { return (*(*[SizeofTcMirred]byte)(unsafe.Pointer(x)))[:] } +const ( + TCA_TUNNEL_KEY_UNSPEC = iota + TCA_TUNNEL_KEY_TM + TCA_TUNNEL_KEY_PARMS + TCA_TUNNEL_KEY_ENC_IPV4_SRC + TCA_TUNNEL_KEY_ENC_IPV4_DST + TCA_TUNNEL_KEY_ENC_IPV6_SRC + TCA_TUNNEL_KEY_ENC_IPV6_DST + TCA_TUNNEL_KEY_ENC_KEY_ID + TCA_TUNNEL_KEY_MAX = TCA_TUNNEL_KEY_ENC_KEY_ID +) + +type TcTunnelKey struct { + TcGen + Action int32 +} + +func (x *TcTunnelKey) Len() int { + return SizeofTcTunnelKey +} + +func DeserializeTunnelKey(b []byte) *TcTunnelKey { + return (*TcTunnelKey)(unsafe.Pointer(&b[0:SizeofTcTunnelKey][0])) +} + +func (x *TcTunnelKey) Serialize() []byte { + return (*(*[SizeofTcTunnelKey]byte)(unsafe.Pointer(x)))[:] +} + +const ( + TCA_SKBEDIT_UNSPEC = iota + TCA_SKBEDIT_TM + TCA_SKBEDIT_PARMS + TCA_SKBEDIT_PRIORITY + TCA_SKBEDIT_QUEUE_MAPPING + TCA_SKBEDIT_MARK + TCA_SKBEDIT_PAD + TCA_SKBEDIT_PTYPE + TCA_SKBEDIT_MAX = TCA_SKBEDIT_MARK +) + +type TcSkbEdit struct { + TcGen +} + +func (x *TcSkbEdit) Len() int { + return SizeofTcSkbEdit +} + +func DeserializeSkbEdit(b []byte) *TcSkbEdit { + return (*TcSkbEdit)(unsafe.Pointer(&b[0:SizeofTcSkbEdit][0])) +} + +func (x *TcSkbEdit) Serialize() []byte { + return (*(*[SizeofTcSkbEdit]byte)(unsafe.Pointer(x)))[:] +} + // struct tc_police { // __u32 index; // int action; @@ -708,3 +865,10 @@ const ( TCA_FQ_CODEL_DROP_BATCH_SIZE TCA_FQ_CODEL_MEMORY_LIMIT ) + +const ( + TCA_HFSC_UNSPEC = iota + 
TCA_HFSC_RSC + TCA_HFSC_FSC + TCA_HFSC_USC +) diff --git a/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go b/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go index 09a2ffa10ef7..dce9073f7b58 100644 --- a/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go @@ -50,34 +50,44 @@ const ( // Attribute types const ( /* Netlink message attributes. */ - XFRMA_UNSPEC = 0x00 - XFRMA_ALG_AUTH = 0x01 /* struct xfrm_algo */ - XFRMA_ALG_CRYPT = 0x02 /* struct xfrm_algo */ - XFRMA_ALG_COMP = 0x03 /* struct xfrm_algo */ - XFRMA_ENCAP = 0x04 /* struct xfrm_algo + struct xfrm_encap_tmpl */ - XFRMA_TMPL = 0x05 /* 1 or more struct xfrm_user_tmpl */ - XFRMA_SA = 0x06 /* struct xfrm_usersa_info */ - XFRMA_POLICY = 0x07 /* struct xfrm_userpolicy_info */ - XFRMA_SEC_CTX = 0x08 /* struct xfrm_sec_ctx */ - XFRMA_LTIME_VAL = 0x09 - XFRMA_REPLAY_VAL = 0x0a - XFRMA_REPLAY_THRESH = 0x0b - XFRMA_ETIMER_THRESH = 0x0c - XFRMA_SRCADDR = 0x0d /* xfrm_address_t */ - XFRMA_COADDR = 0x0e /* xfrm_address_t */ - XFRMA_LASTUSED = 0x0f /* unsigned long */ - XFRMA_POLICY_TYPE = 0x10 /* struct xfrm_userpolicy_type */ - XFRMA_MIGRATE = 0x11 - XFRMA_ALG_AEAD = 0x12 /* struct xfrm_algo_aead */ - XFRMA_KMADDRESS = 0x13 /* struct xfrm_user_kmaddress */ - XFRMA_ALG_AUTH_TRUNC = 0x14 /* struct xfrm_algo_auth */ - XFRMA_MARK = 0x15 /* struct xfrm_mark */ - XFRMA_TFCPAD = 0x16 /* __u32 */ - XFRMA_REPLAY_ESN_VAL = 0x17 /* struct xfrm_replay_esn */ - XFRMA_SA_EXTRA_FLAGS = 0x18 /* __u32 */ - XFRMA_MAX = 0x18 + XFRMA_UNSPEC = iota + XFRMA_ALG_AUTH /* struct xfrm_algo */ + XFRMA_ALG_CRYPT /* struct xfrm_algo */ + XFRMA_ALG_COMP /* struct xfrm_algo */ + XFRMA_ENCAP /* struct xfrm_algo + struct xfrm_encap_tmpl */ + XFRMA_TMPL /* 1 or more struct xfrm_user_tmpl */ + XFRMA_SA /* struct xfrm_usersa_info */ + XFRMA_POLICY /* struct xfrm_userpolicy_info */ + XFRMA_SEC_CTX /* struct xfrm_sec_ctx */ + XFRMA_LTIME_VAL + XFRMA_REPLAY_VAL + XFRMA_REPLAY_THRESH + XFRMA_ETIMER_THRESH + XFRMA_SRCADDR /* xfrm_address_t */ + XFRMA_COADDR /* xfrm_address_t */ + XFRMA_LASTUSED /* unsigned long */ + XFRMA_POLICY_TYPE /* struct xfrm_userpolicy_type */ + XFRMA_MIGRATE + XFRMA_ALG_AEAD /* struct xfrm_algo_aead */ + XFRMA_KMADDRESS /* struct xfrm_user_kmaddress */ + XFRMA_ALG_AUTH_TRUNC /* struct xfrm_algo_auth */ + XFRMA_MARK /* struct xfrm_mark */ + XFRMA_TFCPAD /* __u32 */ + XFRMA_REPLAY_ESN_VAL /* struct xfrm_replay_esn */ + XFRMA_SA_EXTRA_FLAGS /* __u32 */ + XFRMA_PROTO /* __u8 */ + XFRMA_ADDRESS_FILTER /* struct xfrm_address_filter */ + XFRMA_PAD + XFRMA_OFFLOAD_DEV /* struct xfrm_state_offload */ + XFRMA_SET_MARK /* __u32 */ + XFRMA_SET_MARK_MASK /* __u32 */ + XFRMA_IF_ID /* __u32 */ + + XFRMA_MAX = iota - 1 ) +const XFRMA_OUTPUT_MARK = XFRMA_SET_MARK + const ( SizeofXfrmAddress = 0x10 SizeofXfrmSelector = 0x38 diff --git a/vendor/github.com/vishvananda/netlink/protinfo.go b/vendor/github.com/vishvananda/netlink/protinfo.go index 0087c4438bc2..60b23b3742c9 100644 --- a/vendor/github.com/vishvananda/netlink/protinfo.go +++ b/vendor/github.com/vishvananda/netlink/protinfo.go @@ -18,6 +18,10 @@ type Protinfo struct { // String returns a list of enabled flags func (prot *Protinfo) String() string { + if prot == nil { + return "" + } + var boolStrings []string if prot.Hairpin { boolStrings = append(boolStrings, "Hairpin") diff --git a/vendor/github.com/vishvananda/netlink/protinfo_linux.go b/vendor/github.com/vishvananda/netlink/protinfo_linux.go index 43c465f05758..15b65123cef7 100644 --- 
a/vendor/github.com/vishvananda/netlink/protinfo_linux.go +++ b/vendor/github.com/vishvananda/netlink/protinfo_linux.go @@ -41,7 +41,7 @@ func (h *Handle) LinkGetProtinfo(link Link) (Protinfo, error) { if err != nil { return pi, err } - pi = *parseProtinfo(infos) + pi = parseProtinfo(infos) return pi, nil } @@ -49,8 +49,7 @@ func (h *Handle) LinkGetProtinfo(link Link) (Protinfo, error) { return pi, fmt.Errorf("Device with index %d not found", base.Index) } -func parseProtinfo(infos []syscall.NetlinkRouteAttr) *Protinfo { - var pi Protinfo +func parseProtinfo(infos []syscall.NetlinkRouteAttr) (pi Protinfo) { for _, info := range infos { switch info.Attr.Type { case nl.IFLA_BRPORT_MODE: @@ -71,5 +70,5 @@ func parseProtinfo(infos []syscall.NetlinkRouteAttr) *Protinfo { pi.ProxyArpWiFi = byteToBool(info.Value[0]) } } - return &pi + return } diff --git a/vendor/github.com/vishvananda/netlink/qdisc.go b/vendor/github.com/vishvananda/netlink/qdisc.go index 3df4b5c291cc..af78305ac26a 100644 --- a/vendor/github.com/vishvananda/netlink/qdisc.go +++ b/vendor/github.com/vishvananda/netlink/qdisc.go @@ -176,6 +176,13 @@ type Netem struct { CorruptCorr uint32 } +func (netem *Netem) String() string { + return fmt.Sprintf( + "{Latency: %v, Limit: %v, Loss: %v, Gap: %v, Duplicate: %v, Jitter: %v}", + netem.Latency, netem.Limit, netem.Loss, netem.Gap, netem.Duplicate, netem.Jitter, + ) +} + func (qdisc *Netem) Attrs() *QdiscAttrs { return &qdisc.QdiscAttrs } @@ -231,6 +238,33 @@ func (qdisc *GenericQdisc) Type() string { return qdisc.QdiscType } +type Hfsc struct { + QdiscAttrs + Defcls uint16 +} + +func NewHfsc(attrs QdiscAttrs) *Hfsc { + return &Hfsc{ + QdiscAttrs: attrs, + Defcls: 1, + } +} + +func (hfsc *Hfsc) Attrs() *QdiscAttrs { + return &hfsc.QdiscAttrs +} + +func (hfsc *Hfsc) Type() string { + return "hfsc" +} + +func (hfsc *Hfsc) String() string { + return fmt.Sprintf( + "{%v -- default: %d}", + hfsc.Attrs(), hfsc.Defcls, + ) +} + // Fq is a classless packet scheduler meant to be mostly used for locally generated traffic. 
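As an aside, the new Hfsc qdisc plugs into the library's existing Qdisc machinery (QdiscAdd, QdiscList). A minimal caller-side sketch, not part of this change — the interface name "eth0", the handle, and the Defcls value are all illustrative:

package main

import (
	"fmt"

	"github.com/vishvananda/netlink"
)

func main() {
	link, err := netlink.LinkByName("eth0") // illustrative interface name
	if err != nil {
		panic(err)
	}
	// NewHfsc defaults Defcls to 1; point unclassified traffic at
	// class :30 instead.
	hfsc := netlink.NewHfsc(netlink.QdiscAttrs{
		LinkIndex: link.Attrs().Index,
		Handle:    netlink.MakeHandle(1, 0),
		Parent:    netlink.HANDLE_ROOT,
	})
	hfsc.Defcls = 30
	if err := netlink.QdiscAdd(hfsc); err != nil {
		panic(err)
	}
	fmt.Println(hfsc) // formatted by the String() method defined above
}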
type Fq struct { QdiscAttrs @@ -249,6 +283,13 @@ type Fq struct { LowRateThreshold uint32 } +func (fq *Fq) String() string { + return fmt.Sprintf( + "{PacketLimit: %v, FlowPacketLimit: %v, Quantum: %v, InitialQuantum: %v, Pacing: %v, FlowDefaultRate: %v, FlowMaxRate: %v, Buckets: %v, FlowRefillDelay: %v, LowRateThreshold: %v}", + fq.PacketLimit, fq.FlowPacketLimit, fq.Quantum, fq.InitialQuantum, fq.Pacing, fq.FlowDefaultRate, fq.FlowMaxRate, fq.Buckets, fq.FlowRefillDelay, fq.LowRateThreshold, + ) +} + func NewFq(attrs QdiscAttrs) *Fq { return &Fq{ QdiscAttrs: attrs, @@ -276,6 +317,13 @@ type FqCodel struct { // There are some more attributes here, but support for them seems not ubiquitous } +func (fqcodel *FqCodel) String() string { + return fmt.Sprintf( + "{%v -- Target: %v, Limit: %v, Interval: %v, ECM: %v, Flows: %v, Quantum: %v}", + fqcodel.Attrs(), fqcodel.Target, fqcodel.Limit, fqcodel.Interval, fqcodel.ECN, fqcodel.Flows, fqcodel.Quantum, + ) +} + func NewFqCodel(attrs QdiscAttrs) *FqCodel { return &FqCodel{ QdiscAttrs: attrs, diff --git a/vendor/github.com/vishvananda/netlink/qdisc_linux.go b/vendor/github.com/vishvananda/netlink/qdisc_linux.go index 3794ac18a83f..e9eee59084fa 100644 --- a/vendor/github.com/vishvananda/netlink/qdisc_linux.go +++ b/vendor/github.com/vishvananda/netlink/qdisc_linux.go @@ -175,15 +175,15 @@ func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error { opt.Peakrate.Rate = uint32(qdisc.Peakrate) opt.Limit = qdisc.Limit opt.Buffer = qdisc.Buffer - nl.NewRtAttrChild(options, nl.TCA_TBF_PARMS, opt.Serialize()) + options.AddRtAttr(nl.TCA_TBF_PARMS, opt.Serialize()) if qdisc.Rate >= uint64(1<<32) { - nl.NewRtAttrChild(options, nl.TCA_TBF_RATE64, nl.Uint64Attr(qdisc.Rate)) + options.AddRtAttr(nl.TCA_TBF_RATE64, nl.Uint64Attr(qdisc.Rate)) } if qdisc.Peakrate >= uint64(1<<32) { - nl.NewRtAttrChild(options, nl.TCA_TBF_PRATE64, nl.Uint64Attr(qdisc.Peakrate)) + options.AddRtAttr(nl.TCA_TBF_PRATE64, nl.Uint64Attr(qdisc.Peakrate)) } if qdisc.Peakrate > 0 { - nl.NewRtAttrChild(options, nl.TCA_TBF_PBURST, nl.Uint32Attr(qdisc.Minburst)) + options.AddRtAttr(nl.TCA_TBF_PBURST, nl.Uint32Attr(qdisc.Minburst)) } case *Htb: opt := nl.TcHtbGlob{} @@ -193,8 +193,12 @@ func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error { // TODO: Handle Debug properly. 
For now default to 0 opt.Debug = qdisc.Debug opt.DirectPkts = qdisc.DirectPkts - nl.NewRtAttrChild(options, nl.TCA_HTB_INIT, opt.Serialize()) - // nl.NewRtAttrChild(options, nl.TCA_HTB_DIRECT_QLEN, opt.Serialize()) + options.AddRtAttr(nl.TCA_HTB_INIT, opt.Serialize()) + // options.AddRtAttr(nl.TCA_HTB_DIRECT_QLEN, opt.Serialize()) + case *Hfsc: + opt := nl.TcHfscOpt{} + opt.Defcls = qdisc.Defcls + options = nl.NewRtAttr(nl.TCA_OPTIONS, opt.Serialize()) case *Netem: opt := nl.TcNetemQopt{} opt.Latency = qdisc.Latency @@ -211,21 +215,21 @@ func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error { corr.DupCorr = qdisc.DuplicateCorr if corr.DelayCorr > 0 || corr.LossCorr > 0 || corr.DupCorr > 0 { - nl.NewRtAttrChild(options, nl.TCA_NETEM_CORR, corr.Serialize()) + options.AddRtAttr(nl.TCA_NETEM_CORR, corr.Serialize()) } // Corruption corruption := nl.TcNetemCorrupt{} corruption.Probability = qdisc.CorruptProb corruption.Correlation = qdisc.CorruptCorr if corruption.Probability > 0 { - nl.NewRtAttrChild(options, nl.TCA_NETEM_CORRUPT, corruption.Serialize()) + options.AddRtAttr(nl.TCA_NETEM_CORRUPT, corruption.Serialize()) } // Reorder reorder := nl.TcNetemReorder{} reorder.Probability = qdisc.ReorderProb reorder.Correlation = qdisc.ReorderCorr if reorder.Probability > 0 { - nl.NewRtAttrChild(options, nl.TCA_NETEM_REORDER, reorder.Serialize()) + options.AddRtAttr(nl.TCA_NETEM_REORDER, reorder.Serialize()) } case *Ingress: // ingress filters must use the proper handle @@ -233,50 +237,54 @@ func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error { return fmt.Errorf("Ingress filters must set Parent to HANDLE_INGRESS") } case *FqCodel: - nl.NewRtAttrChild(options, nl.TCA_FQ_CODEL_ECN, nl.Uint32Attr((uint32(qdisc.ECN)))) + options.AddRtAttr(nl.TCA_FQ_CODEL_ECN, nl.Uint32Attr((uint32(qdisc.ECN)))) if qdisc.Limit > 0 { - nl.NewRtAttrChild(options, nl.TCA_FQ_CODEL_LIMIT, nl.Uint32Attr((uint32(qdisc.Limit)))) + options.AddRtAttr(nl.TCA_FQ_CODEL_LIMIT, nl.Uint32Attr((uint32(qdisc.Limit)))) } if qdisc.Interval > 0 { - nl.NewRtAttrChild(options, nl.TCA_FQ_CODEL_INTERVAL, nl.Uint32Attr((uint32(qdisc.Interval)))) + options.AddRtAttr(nl.TCA_FQ_CODEL_INTERVAL, nl.Uint32Attr((uint32(qdisc.Interval)))) } if qdisc.Flows > 0 { - nl.NewRtAttrChild(options, nl.TCA_FQ_CODEL_FLOWS, nl.Uint32Attr((uint32(qdisc.Flows)))) + options.AddRtAttr(nl.TCA_FQ_CODEL_FLOWS, nl.Uint32Attr((uint32(qdisc.Flows)))) } if qdisc.Quantum > 0 { - nl.NewRtAttrChild(options, nl.TCA_FQ_CODEL_QUANTUM, nl.Uint32Attr((uint32(qdisc.Quantum)))) + options.AddRtAttr(nl.TCA_FQ_CODEL_QUANTUM, nl.Uint32Attr((uint32(qdisc.Quantum)))) } case *Fq: - nl.NewRtAttrChild(options, nl.TCA_FQ_RATE_ENABLE, nl.Uint32Attr((uint32(qdisc.Pacing)))) + options.AddRtAttr(nl.TCA_FQ_RATE_ENABLE, nl.Uint32Attr((uint32(qdisc.Pacing)))) if qdisc.Buckets > 0 { - nl.NewRtAttrChild(options, nl.TCA_FQ_BUCKETS_LOG, nl.Uint32Attr((uint32(qdisc.Buckets)))) + options.AddRtAttr(nl.TCA_FQ_BUCKETS_LOG, nl.Uint32Attr((uint32(qdisc.Buckets)))) } if qdisc.LowRateThreshold > 0 { - nl.NewRtAttrChild(options, nl.TCA_FQ_LOW_RATE_THRESHOLD, nl.Uint32Attr((uint32(qdisc.LowRateThreshold)))) + options.AddRtAttr(nl.TCA_FQ_LOW_RATE_THRESHOLD, nl.Uint32Attr((uint32(qdisc.LowRateThreshold)))) } if qdisc.Quantum > 0 { - nl.NewRtAttrChild(options, nl.TCA_FQ_QUANTUM, nl.Uint32Attr((uint32(qdisc.Quantum)))) + options.AddRtAttr(nl.TCA_FQ_QUANTUM, nl.Uint32Attr((uint32(qdisc.Quantum)))) } if qdisc.InitialQuantum > 0 { - nl.NewRtAttrChild(options, nl.TCA_FQ_INITIAL_QUANTUM, 
nl.Uint32Attr((uint32(qdisc.InitialQuantum)))) + options.AddRtAttr(nl.TCA_FQ_INITIAL_QUANTUM, nl.Uint32Attr((uint32(qdisc.InitialQuantum)))) } if qdisc.FlowRefillDelay > 0 { - nl.NewRtAttrChild(options, nl.TCA_FQ_FLOW_REFILL_DELAY, nl.Uint32Attr((uint32(qdisc.FlowRefillDelay)))) + options.AddRtAttr(nl.TCA_FQ_FLOW_REFILL_DELAY, nl.Uint32Attr((uint32(qdisc.FlowRefillDelay)))) } if qdisc.FlowPacketLimit > 0 { - nl.NewRtAttrChild(options, nl.TCA_FQ_FLOW_PLIMIT, nl.Uint32Attr((uint32(qdisc.FlowPacketLimit)))) + options.AddRtAttr(nl.TCA_FQ_FLOW_PLIMIT, nl.Uint32Attr((uint32(qdisc.FlowPacketLimit)))) } if qdisc.FlowMaxRate > 0 { - nl.NewRtAttrChild(options, nl.TCA_FQ_FLOW_MAX_RATE, nl.Uint32Attr((uint32(qdisc.FlowMaxRate)))) + options.AddRtAttr(nl.TCA_FQ_FLOW_MAX_RATE, nl.Uint32Attr((uint32(qdisc.FlowMaxRate)))) } if qdisc.FlowDefaultRate > 0 { - nl.NewRtAttrChild(options, nl.TCA_FQ_FLOW_DEFAULT_RATE, nl.Uint32Attr((uint32(qdisc.FlowDefaultRate)))) + options.AddRtAttr(nl.TCA_FQ_FLOW_DEFAULT_RATE, nl.Uint32Attr((uint32(qdisc.FlowDefaultRate)))) } + default: + options = nil } - req.AddData(options) + if options != nil { + req.AddData(options) + } return nil } @@ -348,6 +356,8 @@ func (h *Handle) QdiscList(link Link) ([]Qdisc, error) { qdisc = &Htb{} case "fq": qdisc = &Fq{} + case "hfsc": + qdisc = &Hfsc{} case "fq_codel": qdisc = &FqCodel{} case "netem": @@ -375,6 +385,10 @@ func (h *Handle) QdiscList(link Link) ([]Qdisc, error) { if err := parseTbfData(qdisc, data); err != nil { return nil, err } + case "hfsc": + if err := parseHfscData(qdisc, attr.Value); err != nil { + return nil, err + } case "htb": data, err := nl.ParseRouteAttr(attr.Value) if err != nil { @@ -474,6 +488,13 @@ func parseFqCodelData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { return nil } +func parseHfscData(qdisc Qdisc, data []byte) error { + Hfsc := qdisc.(*Hfsc) + native = nl.NativeEndian() + Hfsc.Defcls = native.Uint16(data) + return nil +} + func parseFqData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { native = nl.NativeEndian() fq := qdisc.(*Fq) diff --git a/vendor/github.com/vishvananda/netlink/rdma_link_linux.go b/vendor/github.com/vishvananda/netlink/rdma_link_linux.go new file mode 100644 index 000000000000..2d0bdc8c36f5 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/rdma_link_linux.go @@ -0,0 +1,264 @@ +package netlink + +import ( + "bytes" + "encoding/binary" + "fmt" + "net" + + "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" +) + +// LinkAttrs represents data shared by most link types +type RdmaLinkAttrs struct { + Index uint32 + Name string + FirmwareVersion string + NodeGuid string + SysImageGuid string +} + +// Link represents a rdma device from netlink. 
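The repeated NewRtAttrChild-to-AddRtAttr rewrites in qdiscPayload above are mechanical: netlink v1.1.0 moved child-attribute construction onto the parent RtAttr. A standalone sketch of the new pattern using the vendored nl package — the attribute choices here are arbitrary, for illustration only:

package main

import (
	"fmt"

	"github.com/vishvananda/netlink/nl"
)

func main() {
	// Parent TLV; previously children were attached via
	// nl.NewRtAttrChild(options, ...), now the parent owns the helper.
	options := nl.NewRtAttr(nl.TCA_OPTIONS, nil)
	options.AddRtAttr(nl.TCA_FQ_CODEL_ECN, nl.Uint32Attr(1))
	options.AddRtAttr(nl.TCA_FQ_CODEL_LIMIT, nl.Uint32Attr(10240))
	// Serialize flattens the nested attributes for a netlink request.
	fmt.Printf("% x\n", options.Serialize())
}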
+type RdmaLink struct { + Attrs RdmaLinkAttrs +} + +func getProtoField(clientType int, op int) int { + return ((clientType << nl.RDMA_NL_GET_CLIENT_SHIFT) | op) +} + +func uint64ToGuidString(guid uint64) string { + //Convert to byte array + sysGuidBytes := new(bytes.Buffer) + binary.Write(sysGuidBytes, binary.LittleEndian, guid) + + //Convert to HardwareAddr + sysGuidNet := net.HardwareAddr(sysGuidBytes.Bytes()) + + //Get the String + return sysGuidNet.String() +} + +func executeOneGetRdmaLink(data []byte) (*RdmaLink, error) { + + link := RdmaLink{} + + reader := bytes.NewReader(data) + for reader.Len() >= 4 { + _, attrType, len, value := parseNfAttrTLV(reader) + + switch attrType { + case nl.RDMA_NLDEV_ATTR_DEV_INDEX: + var Index uint32 + r := bytes.NewReader(value) + binary.Read(r, nl.NativeEndian(), &Index) + link.Attrs.Index = Index + case nl.RDMA_NLDEV_ATTR_DEV_NAME: + link.Attrs.Name = string(value[0 : len-1]) + case nl.RDMA_NLDEV_ATTR_FW_VERSION: + link.Attrs.FirmwareVersion = string(value[0 : len-1]) + case nl.RDMA_NLDEV_ATTR_NODE_GUID: + var guid uint64 + r := bytes.NewReader(value) + binary.Read(r, nl.NativeEndian(), &guid) + link.Attrs.NodeGuid = uint64ToGuidString(guid) + case nl.RDMA_NLDEV_ATTR_SYS_IMAGE_GUID: + var sysGuid uint64 + r := bytes.NewReader(value) + binary.Read(r, nl.NativeEndian(), &sysGuid) + link.Attrs.SysImageGuid = uint64ToGuidString(sysGuid) + } + if (len % 4) != 0 { + // Skip pad bytes + reader.Seek(int64(4-(len%4)), seekCurrent) + } + } + return &link, nil +} + +func execRdmaGetLink(req *nl.NetlinkRequest, name string) (*RdmaLink, error) { + + msgs, err := req.Execute(unix.NETLINK_RDMA, 0) + if err != nil { + return nil, err + } + for _, m := range msgs { + link, err := executeOneGetRdmaLink(m) + if err != nil { + return nil, err + } + if link.Attrs.Name == name { + return link, nil + } + } + return nil, fmt.Errorf("Rdma device %v not found", name) +} + +func execRdmaSetLink(req *nl.NetlinkRequest) error { + + _, err := req.Execute(unix.NETLINK_RDMA, 0) + return err +} + +// RdmaLinkByName finds a link by name and returns a pointer to the object if +// found and nil error, otherwise returns error code. +func RdmaLinkByName(name string) (*RdmaLink, error) { + return pkgHandle.RdmaLinkByName(name) +} + +// RdmaLinkByName finds a link by name and returns a pointer to the object if +// found and nil error, otherwise returns error code. +func (h *Handle) RdmaLinkByName(name string) (*RdmaLink, error) { + + proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_GET) + req := h.newNetlinkRequest(proto, unix.NLM_F_ACK|unix.NLM_F_DUMP) + + return execRdmaGetLink(req, name) +} + +// RdmaLinkSetName sets the name of the rdma link device. Return nil on success +// or error otherwise. +// Equivalent to: `rdma dev set $old_devname name $name` +func RdmaLinkSetName(link *RdmaLink, name string) error { + return pkgHandle.RdmaLinkSetName(link, name) +} + +// RdmaLinkSetName sets the name of the rdma link device. Return nil on success +// or error otherwise. 
+// Equivalent to: `rdma dev set $old_devname name $name` +func (h *Handle) RdmaLinkSetName(link *RdmaLink, name string) error { + proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_SET) + req := h.newNetlinkRequest(proto, unix.NLM_F_ACK) + + b := make([]byte, 4) + native.PutUint32(b, uint32(link.Attrs.Index)) + data := nl.NewRtAttr(nl.RDMA_NLDEV_ATTR_DEV_INDEX, b) + req.AddData(data) + + b = make([]byte, len(name)+1) + copy(b, name) + data = nl.NewRtAttr(nl.RDMA_NLDEV_ATTR_DEV_NAME, b) + req.AddData(data) + + return execRdmaSetLink(req) +} + +func netnsModeToString(mode uint8) string { + switch mode { + case 0: + return "exclusive" + case 1: + return "shared" + default: + return "unknown" + } +} + +func executeOneGetRdmaNetnsMode(data []byte) (string, error) { + reader := bytes.NewReader(data) + for reader.Len() >= 4 { + _, attrType, len, value := parseNfAttrTLV(reader) + + switch attrType { + case nl.RDMA_NLDEV_SYS_ATTR_NETNS_MODE: + var mode uint8 + r := bytes.NewReader(value) + binary.Read(r, nl.NativeEndian(), &mode) + return netnsModeToString(mode), nil + } + if (len % 4) != 0 { + // Skip pad bytes + reader.Seek(int64(4-(len%4)), seekCurrent) + } + } + return "", fmt.Errorf("Invalid netns mode") +} + +// RdmaSystemGetNetnsMode gets the net namespace mode for RDMA subsystem +// Returns mode string and error status as nil on success or returns error +// otherwise. +// Equivalent to: `rdma system show netns' +func RdmaSystemGetNetnsMode() (string, error) { + return pkgHandle.RdmaSystemGetNetnsMode() +} + +// RdmaSystemGetNetnsMode gets the net namespace mode for RDMA subsystem +// Returns mode string and error status as nil on success or returns error +// otherwise. +// Equivalent to: `rdma system show netns' +func (h *Handle) RdmaSystemGetNetnsMode() (string, error) { + + proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_SYS_GET) + req := h.newNetlinkRequest(proto, unix.NLM_F_ACK) + + msgs, err := req.Execute(unix.NETLINK_RDMA, 0) + if err != nil { + return "", err + } + if len(msgs) == 0 { + return "", fmt.Errorf("No valid response from kernel") + } + return executeOneGetRdmaNetnsMode(msgs[0]) +} + +func netnsModeStringToUint8(mode string) (uint8, error) { + switch mode { + case "exclusive": + return 0, nil + case "shared": + return 1, nil + default: + return 0, fmt.Errorf("Invalid mode; %q", mode) + } +} + +// RdmaSystemSetNetnsMode sets the net namespace mode for RDMA subsystem +// Returns nil on success or appropriate error code. +// Equivalent to: `rdma system set netns { shared | exclusive }' +func RdmaSystemSetNetnsMode(NewMode string) error { + return pkgHandle.RdmaSystemSetNetnsMode(NewMode) +} + +// RdmaSystemSetNetnsMode sets the net namespace mode for RDMA subsystem +// Returns nil on success or appropriate error code. +// Equivalent to: `rdma system set netns { shared | exclusive }' +func (h *Handle) RdmaSystemSetNetnsMode(NewMode string) error { + value, err := netnsModeStringToUint8(NewMode) + if err != nil { + return err + } + + proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_SYS_SET) + req := h.newNetlinkRequest(proto, unix.NLM_F_ACK) + + data := nl.NewRtAttr(nl.RDMA_NLDEV_SYS_ATTR_NETNS_MODE, []byte{value}) + req.AddData(data) + + _, err = req.Execute(unix.NETLINK_RDMA, 0) + return err +} + +// RdmaLinkSetNsFd puts the RDMA device into a new network namespace. The +// fd must be an open file descriptor to a network namespace. 
+// Similar to: `rdma dev set $dev netns $ns` +func RdmaLinkSetNsFd(link *RdmaLink, fd uint32) error { + return pkgHandle.RdmaLinkSetNsFd(link, fd) +} + +// RdmaLinkSetNsFd puts the RDMA device into a new network namespace. The +// fd must be an open file descriptor to a network namespace. +// Similar to: `rdma dev set $dev netns $ns` +func (h *Handle) RdmaLinkSetNsFd(link *RdmaLink, fd uint32) error { + proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_SET) + req := h.newNetlinkRequest(proto, unix.NLM_F_ACK) + + data := nl.NewRtAttr(nl.RDMA_NLDEV_ATTR_DEV_INDEX, + nl.Uint32Attr(link.Attrs.Index)) + req.AddData(data) + + data = nl.NewRtAttr(nl.RDMA_NLDEV_NET_NS_FD, nl.Uint32Attr(fd)) + req.AddData(data) + + return execRdmaSetLink(req) +} diff --git a/vendor/github.com/vishvananda/netlink/route.go b/vendor/github.com/vishvananda/netlink/route.go index 2cd58ee33424..58ff1af600fb 100644 --- a/vendor/github.com/vishvananda/netlink/route.go +++ b/vendor/github.com/vishvananda/netlink/route.go @@ -47,6 +47,7 @@ type Route struct { Encap Encap MTU int AdvMSS int + Hoplimit int } func (r Route) String() string { @@ -89,6 +90,7 @@ func (r Route) Equal(x Route) bool { r.Table == x.Table && r.Type == x.Type && r.Tos == x.Tos && + r.Hoplimit == x.Hoplimit && r.Flags == x.Flags && (r.MPLSDst == x.MPLSDst || (r.MPLSDst != nil && x.MPLSDst != nil && *r.MPLSDst == *x.MPLSDst)) && (r.NewDst == x.NewDst || (r.NewDst != nil && r.NewDst.Equal(x.NewDst))) && diff --git a/vendor/github.com/vishvananda/netlink/route_linux.go b/vendor/github.com/vishvananda/netlink/route_linux.go index 3f856711f3ce..c69c595ed3c8 100644 --- a/vendor/github.com/vishvananda/netlink/route_linux.go +++ b/vendor/github.com/vishvananda/netlink/route_linux.go @@ -32,6 +32,7 @@ const ( RT_FILTER_SRC RT_FILTER_GW RT_FILTER_TABLE + RT_FILTER_HOPLIMIT ) const ( @@ -207,6 +208,7 @@ func (e *SEG6Encap) Decode(buf []byte) error { } buf = buf[:l] // make sure buf size upper limit is Length typ := native.Uint16(buf[2:]) + // LWTUNNEL_ENCAP_SEG6 has only one attr type SEG6_IPTUNNEL_SRH if typ != nl.SEG6_IPTUNNEL_SRH { return fmt.Errorf("unknown SEG6 Type: %d", typ) } @@ -259,6 +261,188 @@ func (e *SEG6Encap) Equal(x Encap) bool { return true } +// SEG6LocalEncap definitions +type SEG6LocalEncap struct { + Flags [nl.SEG6_LOCAL_MAX]bool + Action int + Segments []net.IP // from SRH in seg6_local_lwt + Table int // table id for End.T and End.DT6 + InAddr net.IP + In6Addr net.IP + Iif int + Oif int +} + +func (e *SEG6LocalEncap) Type() int { + return nl.LWTUNNEL_ENCAP_SEG6_LOCAL +} +func (e *SEG6LocalEncap) Decode(buf []byte) error { + attrs, err := nl.ParseRouteAttr(buf) + if err != nil { + return err + } + native := nl.NativeEndian() + for _, attr := range attrs { + switch attr.Attr.Type { + case nl.SEG6_LOCAL_ACTION: + e.Action = int(native.Uint32(attr.Value[0:4])) + e.Flags[nl.SEG6_LOCAL_ACTION] = true + case nl.SEG6_LOCAL_SRH: + e.Segments, err = nl.DecodeSEG6Srh(attr.Value[:]) + e.Flags[nl.SEG6_LOCAL_SRH] = true + case nl.SEG6_LOCAL_TABLE: + e.Table = int(native.Uint32(attr.Value[0:4])) + e.Flags[nl.SEG6_LOCAL_TABLE] = true + case nl.SEG6_LOCAL_NH4: + e.InAddr = net.IP(attr.Value[0:4]) + e.Flags[nl.SEG6_LOCAL_NH4] = true + case nl.SEG6_LOCAL_NH6: + e.In6Addr = net.IP(attr.Value[0:16]) + e.Flags[nl.SEG6_LOCAL_NH6] = true + case nl.SEG6_LOCAL_IIF: + e.Iif = int(native.Uint32(attr.Value[0:4])) + e.Flags[nl.SEG6_LOCAL_IIF] = true + case nl.SEG6_LOCAL_OIF: + e.Oif = int(native.Uint32(attr.Value[0:4])) + e.Flags[nl.SEG6_LOCAL_OIF] = true + } + 
} + return err +} +func (e *SEG6LocalEncap) Encode() ([]byte, error) { + var err error + native := nl.NativeEndian() + res := make([]byte, 8) + native.PutUint16(res, 8) // length + native.PutUint16(res[2:], nl.SEG6_LOCAL_ACTION) + native.PutUint32(res[4:], uint32(e.Action)) + if e.Flags[nl.SEG6_LOCAL_SRH] { + srh, err := nl.EncodeSEG6Srh(e.Segments) + if err != nil { + return nil, err + } + attr := make([]byte, 4) + native.PutUint16(attr, uint16(len(srh)+4)) + native.PutUint16(attr[2:], nl.SEG6_LOCAL_SRH) + attr = append(attr, srh...) + res = append(res, attr...) + } + if e.Flags[nl.SEG6_LOCAL_TABLE] { + attr := make([]byte, 8) + native.PutUint16(attr, 8) + native.PutUint16(attr[2:], nl.SEG6_LOCAL_TABLE) + native.PutUint32(attr[4:], uint32(e.Table)) + res = append(res, attr...) + } + if e.Flags[nl.SEG6_LOCAL_NH4] { + attr := make([]byte, 4) + native.PutUint16(attr, 8) + native.PutUint16(attr[2:], nl.SEG6_LOCAL_NH4) + ipv4 := e.InAddr.To4() + if ipv4 == nil { + err = fmt.Errorf("SEG6_LOCAL_NH4 has invalid IPv4 address") + return nil, err + } + attr = append(attr, ipv4...) + res = append(res, attr...) + } + if e.Flags[nl.SEG6_LOCAL_NH6] { + attr := make([]byte, 4) + native.PutUint16(attr, 20) + native.PutUint16(attr[2:], nl.SEG6_LOCAL_NH6) + attr = append(attr, e.In6Addr...) + res = append(res, attr...) + } + if e.Flags[nl.SEG6_LOCAL_IIF] { + attr := make([]byte, 8) + native.PutUint16(attr, 8) + native.PutUint16(attr[2:], nl.SEG6_LOCAL_IIF) + native.PutUint32(attr[4:], uint32(e.Iif)) + res = append(res, attr...) + } + if e.Flags[nl.SEG6_LOCAL_OIF] { + attr := make([]byte, 8) + native.PutUint16(attr, 8) + native.PutUint16(attr[2:], nl.SEG6_LOCAL_OIF) + native.PutUint32(attr[4:], uint32(e.Oif)) + res = append(res, attr...) + } + return res, err +} +func (e *SEG6LocalEncap) String() string { + strs := make([]string, 0, nl.SEG6_LOCAL_MAX) + strs = append(strs, fmt.Sprintf("action %s", nl.SEG6LocalActionString(e.Action))) + + if e.Flags[nl.SEG6_LOCAL_TABLE] { + strs = append(strs, fmt.Sprintf("table %d", e.Table)) + } + if e.Flags[nl.SEG6_LOCAL_NH4] { + strs = append(strs, fmt.Sprintf("nh4 %s", e.InAddr)) + } + if e.Flags[nl.SEG6_LOCAL_NH6] { + strs = append(strs, fmt.Sprintf("nh6 %s", e.In6Addr)) + } + if e.Flags[nl.SEG6_LOCAL_IIF] { + link, err := LinkByIndex(e.Iif) + if err != nil { + strs = append(strs, fmt.Sprintf("iif %d", e.Iif)) + } else { + strs = append(strs, fmt.Sprintf("iif %s", link.Attrs().Name)) + } + } + if e.Flags[nl.SEG6_LOCAL_OIF] { + link, err := LinkByIndex(e.Oif) + if err != nil { + strs = append(strs, fmt.Sprintf("oif %d", e.Oif)) + } else { + strs = append(strs, fmt.Sprintf("oif %s", link.Attrs().Name)) + } + } + if e.Flags[nl.SEG6_LOCAL_SRH] { + segs := make([]string, 0, len(e.Segments)) + //append segment backwards (from n to 0) since seg#0 is the last segment. 
+ for i := len(e.Segments); i > 0; i-- { + segs = append(segs, fmt.Sprintf("%s", e.Segments[i-1])) + } + strs = append(strs, fmt.Sprintf("segs %d [ %s ]", len(e.Segments), strings.Join(segs, " "))) + } + return strings.Join(strs, " ") +} +func (e *SEG6LocalEncap) Equal(x Encap) bool { + o, ok := x.(*SEG6LocalEncap) + if !ok { + return false + } + if e == o { + return true + } + if e == nil || o == nil { + return false + } + // compare all arrays first + for i := range e.Flags { + if e.Flags[i] != o.Flags[i] { + return false + } + } + if len(e.Segments) != len(o.Segments) { + return false + } + for i := range e.Segments { + if !e.Segments[i].Equal(o.Segments[i]) { + return false + } + } + // compare values + if !e.InAddr.Equal(o.InAddr) || !e.In6Addr.Equal(o.In6Addr) { + return false + } + if e.Action != o.Action || e.Table != o.Table || e.Iif != o.Iif || e.Oif != o.Oif { + return false + } + return true +} + // RouteAdd will add a route to the system. // Equivalent to: `ip route add $route` func RouteAdd(route *Route) error { @@ -335,18 +519,18 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg if err != nil { return err } - rtAttrs = append(rtAttrs, nl.NewRtAttr(nl.RTA_NEWDST, buf)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_NEWDST, buf)) } if route.Encap != nil { buf := make([]byte, 2) native.PutUint16(buf, uint16(route.Encap.Type())) - rtAttrs = append(rtAttrs, nl.NewRtAttr(nl.RTA_ENCAP_TYPE, buf)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_ENCAP_TYPE, buf)) buf, err := route.Encap.Encode() if err != nil { return err } - rtAttrs = append(rtAttrs, nl.NewRtAttr(nl.RTA_ENCAP, buf)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_ENCAP, buf)) } if route.Src != nil { @@ -410,17 +594,17 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg if err != nil { return err } - children = append(children, nl.NewRtAttr(nl.RTA_NEWDST, buf)) + children = append(children, nl.NewRtAttr(unix.RTA_NEWDST, buf)) } if nh.Encap != nil { buf := make([]byte, 2) native.PutUint16(buf, uint16(nh.Encap.Type())) - rtAttrs = append(rtAttrs, nl.NewRtAttr(nl.RTA_ENCAP_TYPE, buf)) + children = append(children, nl.NewRtAttr(unix.RTA_ENCAP_TYPE, buf)) buf, err := nh.Encap.Encode() if err != nil { return err } - children = append(children, nl.NewRtAttr(nl.RTA_ENCAP, buf)) + children = append(children, nl.NewRtAttr(unix.RTA_ENCAP, buf)) } rtnh.Children = children buf = append(buf, rtnh.Serialize()...) 
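Taken together, SEG6LocalEncap and the RTA_ENCAP/RTA_ENCAP_TYPE plumbing let callers install seg6local routes. A hedged sketch of an End.DX6 route — the interface name, SID prefix, and next-hop address are placeholders, not values from this patch:

package main

import (
	"net"

	"github.com/vishvananda/netlink"
	"github.com/vishvananda/netlink/nl"
)

func main() {
	link, err := netlink.LinkByName("eth0") // placeholder interface
	if err != nil {
		panic(err)
	}
	_, dst, err := net.ParseCIDR("fc00:a::/64") // placeholder SID prefix
	if err != nil {
		panic(err)
	}
	// Decapsulate and forward matching packets to an IPv6 next hop.
	encap := &netlink.SEG6LocalEncap{Action: nl.SEG6_LOCAL_ACTION_END_DX6}
	encap.Flags[nl.SEG6_LOCAL_ACTION] = true
	encap.In6Addr = net.ParseIP("fc00:b::1") // placeholder next hop
	encap.Flags[nl.SEG6_LOCAL_NH6] = true
	route := &netlink.Route{
		LinkIndex: link.Attrs().Index,
		Dst:       dst,
		Encap:     encap,
	}
	if err := netlink.RouteAdd(route); err != nil {
		panic(err)
	}
}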
@@ -464,6 +648,10 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg b := nl.Uint32Attr(uint32(route.AdvMSS)) metrics = append(metrics, nl.NewRtAttr(unix.RTAX_ADVMSS, b)) } + if route.Hoplimit > 0 { + b := nl.Uint32Attr(uint32(route.Hoplimit)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_HOPLIMIT, b)) + } if metrics != nil { attr := nl.NewRtAttr(unix.RTA_METRICS, nil) @@ -574,6 +762,8 @@ func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) continue } } + case filterMask&RT_FILTER_HOPLIMIT != 0 && route.Hoplimit != filter.Hoplimit: + continue } } res = append(res, route) @@ -649,7 +839,7 @@ func deserializeRoute(m []byte) (Route, error) { switch attr.Attr.Type { case unix.RTA_GATEWAY: info.Gw = net.IP(attr.Value) - case nl.RTA_NEWDST: + case unix.RTA_NEWDST: var d Destination switch msg.Family { case nl.FAMILY_MPLS: @@ -659,9 +849,9 @@ func deserializeRoute(m []byte) (Route, error) { return nil, nil, err } info.NewDst = d - case nl.RTA_ENCAP_TYPE: + case unix.RTA_ENCAP_TYPE: encapType = attr - case nl.RTA_ENCAP: + case unix.RTA_ENCAP: encap = attr } } @@ -690,7 +880,7 @@ func deserializeRoute(m []byte) (Route, error) { route.MultiPath = append(route.MultiPath, info) rest = buf } - case nl.RTA_NEWDST: + case unix.RTA_NEWDST: var d Destination switch msg.Family { case nl.FAMILY_MPLS: @@ -700,9 +890,9 @@ func deserializeRoute(m []byte) (Route, error) { return route, err } route.NewDst = d - case nl.RTA_ENCAP_TYPE: + case unix.RTA_ENCAP_TYPE: encapType = attr - case nl.RTA_ENCAP: + case unix.RTA_ENCAP: encap = attr case unix.RTA_METRICS: metrics, err := nl.ParseRouteAttr(attr.Value) @@ -715,6 +905,8 @@ func deserializeRoute(m []byte) (Route, error) { route.MTU = int(native.Uint32(metric.Value[0:4])) case unix.RTAX_ADVMSS: route.AdvMSS = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_HOPLIMIT: + route.Hoplimit = int(native.Uint32(metric.Value[0:4])) } } } @@ -734,6 +926,11 @@ func deserializeRoute(m []byte) (Route, error) { if err := e.Decode(encap.Value); err != nil { return route, err } + case nl.LWTUNNEL_ENCAP_SEG6_LOCAL: + e = &SEG6LocalEncap{} + if err := e.Decode(encap.Value); err != nil { + return route, err + } } route.Encap = e } @@ -840,13 +1037,19 @@ func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done < go func() { defer close(ch) for { - msgs, err := s.Receive() + msgs, from, err := s.Receive() if err != nil { if cberr != nil { cberr(err) } return } + if from.Pid != nl.PidKernel { + if cberr != nil { + cberr(fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel)) + } + continue + } for _, m := range msgs { if m.Header.Type == unix.NLMSG_DONE { continue diff --git a/vendor/github.com/vishvananda/netlink/rule_linux.go b/vendor/github.com/vishvananda/netlink/rule_linux.go index 6238ae458642..e12569fe45c1 100644 --- a/vendor/github.com/vishvananda/netlink/rule_linux.go +++ b/vendor/github.com/vishvananda/netlink/rule_linux.go @@ -144,7 +144,7 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { req.AddData(nl.NewRtAttr(nl.FRA_OIFNAME, []byte(rule.OifName))) } if rule.Goto >= 0 { - msg.Type = nl.FR_ACT_NOP + msg.Type = nl.FR_ACT_GOTO b := make([]byte, 4) native.PutUint32(b, uint32(rule.Goto)) req.AddData(nl.NewRtAttr(nl.FRA_GOTO, b)) diff --git a/vendor/github.com/vishvananda/netlink/socket_linux.go b/vendor/github.com/vishvananda/netlink/socket_linux.go index 99e9fb4d8979..c4d89c17ed5a 100644 --- a/vendor/github.com/vishvananda/netlink/socket_linux.go +++ 
b/vendor/github.com/vishvananda/netlink/socket_linux.go @@ -141,10 +141,13 @@ func SocketGet(local, remote net.Addr) (*Socket, error) { }, }) s.Send(req) - msgs, err := s.Receive() + msgs, from, err := s.Receive() if err != nil { return nil, err } + if from.Pid != nl.PidKernel { + return nil, fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel) + } if len(msgs) == 0 { return nil, errors.New("no message nor error from netlink") } diff --git a/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go index efe72ddf29ce..985d3a915f27 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go @@ -54,11 +54,15 @@ func XfrmMonitor(ch chan<- XfrmMsg, done <-chan struct{}, errorChan chan<- error go func() { defer close(ch) for { - msgs, err := s.Receive() + msgs, from, err := s.Receive() if err != nil { errorChan <- err return } + if from.Pid != nl.PidKernel { + errorChan <- fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel) + return + } for _, m := range msgs { switch m.Header.Type { case nl.XFRM_MSG_EXPIRE: diff --git a/vendor/github.com/vishvananda/netlink/xfrm_policy.go b/vendor/github.com/vishvananda/netlink/xfrm_policy.go index c97ec43a2530..6219d277258a 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_policy.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_policy.go @@ -35,6 +35,25 @@ func (d Dir) String() string { return fmt.Sprintf("socket %d", d-XFRM_SOCKET_IN) } +// PolicyAction is an enum representing an ipsec policy action. +type PolicyAction uint8 + +const ( + XFRM_POLICY_ALLOW PolicyAction = 0 + XFRM_POLICY_BLOCK PolicyAction = 1 +) + +func (a PolicyAction) String() string { + switch a { + case XFRM_POLICY_ALLOW: + return "allow" + case XFRM_POLICY_BLOCK: + return "block" + default: + return fmt.Sprintf("action %d", a) + } +} + // XfrmPolicyTmpl encapsulates a rule for the base addresses of an ipsec // policy. These rules are matched with XfrmState to determine encryption // and authentication algorithms. @@ -64,11 +83,14 @@ type XfrmPolicy struct { Dir Dir Priority int Index int + Action PolicyAction + Ifindex int + Ifid int Mark *XfrmMark Tmpls []XfrmPolicyTmpl } func (p XfrmPolicy) String() string { - return fmt.Sprintf("{Dst: %v, Src: %v, Proto: %s, DstPort: %d, SrcPort: %d, Dir: %s, Priority: %d, Index: %d, Mark: %s, Tmpls: %s}", - p.Dst, p.Src, p.Proto, p.DstPort, p.SrcPort, p.Dir, p.Priority, p.Index, p.Mark, p.Tmpls) + return fmt.Sprintf("{Dst: %v, Src: %v, Proto: %s, DstPort: %d, SrcPort: %d, Dir: %s, Priority: %d, Index: %d, Action: %s, Ifindex: %d, Ifid: %d, Mark: %s, Tmpls: %s}", + p.Dst, p.Src, p.Proto, p.DstPort, p.SrcPort, p.Dir, p.Priority, p.Index, p.Action, p.Ifindex, p.Ifid, p.Mark, p.Tmpls) } diff --git a/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go index fde0c2ca5ad0..a4e132ef55f2 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go @@ -27,6 +27,7 @@ func selFromPolicy(sel *nl.XfrmSelector, policy *XfrmPolicy) { if sel.Sport != 0 { sel.SportMask = ^uint16(0) } + sel.Ifindex = int32(policy.Ifindex) } // XfrmPolicyAdd will add an xfrm policy to the system. 
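With the new Action field, an xfrm policy can now drop traffic outright instead of always allowing it through a transform. A minimal sketch — the subnet is illustrative:

package main

import (
	"net"

	"github.com/vishvananda/netlink"
)

func main() {
	_, dst, err := net.ParseCIDR("192.168.100.0/24") // illustrative subnet
	if err != nil {
		panic(err)
	}
	// XFRM_POLICY_ALLOW remains the zero-value default; set BLOCK to
	// discard matching outbound traffic.
	policy := &netlink.XfrmPolicy{
		Dst:    dst,
		Dir:    netlink.XFRM_DIR_OUT,
		Action: netlink.XFRM_POLICY_BLOCK,
	}
	if err := netlink.XfrmPolicyAdd(policy); err != nil {
		panic(err)
	}
}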
@@ -61,6 +62,7 @@ func (h *Handle) xfrmPolicyAddOrUpdate(policy *XfrmPolicy, nlProto int) error { msg.Priority = uint32(policy.Priority) msg.Index = uint32(policy.Index) msg.Dir = uint8(policy.Dir) + msg.Action = uint8(policy.Action) msg.Lft.SoftByteLimit = nl.XFRM_INF msg.Lft.HardByteLimit = nl.XFRM_INF msg.Lft.SoftPacketLimit = nl.XFRM_INF @@ -90,6 +92,9 @@ func (h *Handle) xfrmPolicyAddOrUpdate(policy *XfrmPolicy, nlProto int) error { req.AddData(out) } + ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(policy.Ifid))) + req.AddData(ifId) + _, err := req.Execute(unix.NETLINK_XFRM, 0) return err } @@ -183,6 +188,9 @@ func (h *Handle) xfrmPolicyGetOrDelete(policy *XfrmPolicy, nlProto int) (*XfrmPo req.AddData(out) } + ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(policy.Ifid))) + req.AddData(ifId) + resType := nl.XFRM_MSG_NEWPOLICY if nlProto == nl.XFRM_MSG_DELPOLICY { resType = 0 @@ -197,12 +205,7 @@ func (h *Handle) xfrmPolicyGetOrDelete(policy *XfrmPolicy, nlProto int) (*XfrmPo return nil, err } - p, err := parseXfrmPolicy(msgs[0], FAMILY_ALL) - if err != nil { - return nil, err - } - - return p, nil + return parseXfrmPolicy(msgs[0], FAMILY_ALL) } func parseXfrmPolicy(m []byte, family int) (*XfrmPolicy, error) { @@ -220,9 +223,11 @@ func parseXfrmPolicy(m []byte, family int) (*XfrmPolicy, error) { policy.Proto = Proto(msg.Sel.Proto) policy.DstPort = int(nl.Swap16(msg.Sel.Dport)) policy.SrcPort = int(nl.Swap16(msg.Sel.Sport)) + policy.Ifindex = int(msg.Sel.Ifindex) policy.Priority = int(msg.Priority) policy.Index = int(msg.Index) policy.Dir = Dir(msg.Dir) + policy.Action = PolicyAction(msg.Action) attrs, err := nl.ParseRouteAttr(m[msg.Len():]) if err != nil { @@ -249,6 +254,8 @@ func parseXfrmPolicy(m []byte, family int) (*XfrmPolicy, error) { policy.Mark = new(XfrmMark) policy.Mark.Value = mark.Value policy.Mark.Mask = mark.Mask + case nl.XFRMA_IF_ID: + policy.Ifid = int(native.Uint32(attr.Value)) } } diff --git a/vendor/github.com/vishvananda/netlink/xfrm_state.go b/vendor/github.com/vishvananda/netlink/xfrm_state.go index d14740dc55b3..483d8934a8fb 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_state.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_state.go @@ -94,6 +94,8 @@ type XfrmState struct { Limits XfrmStateLimits Statistics XfrmStateStats Mark *XfrmMark + OutputMark int + Ifid int Auth *XfrmStateAlgo Crypt *XfrmStateAlgo Aead *XfrmStateAlgo @@ -102,8 +104,8 @@ type XfrmState struct { } func (sa XfrmState) String() string { - return fmt.Sprintf("Dst: %v, Src: %v, Proto: %s, Mode: %s, SPI: 0x%x, ReqID: 0x%x, ReplayWindow: %d, Mark: %v, Auth: %v, Crypt: %v, Aead: %v, Encap: %v, ESN: %t", - sa.Dst, sa.Src, sa.Proto, sa.Mode, sa.Spi, sa.Reqid, sa.ReplayWindow, sa.Mark, sa.Auth, sa.Crypt, sa.Aead, sa.Encap, sa.ESN) + return fmt.Sprintf("Dst: %v, Src: %v, Proto: %s, Mode: %s, SPI: 0x%x, ReqID: 0x%x, ReplayWindow: %d, Mark: %v, OutputMark: %d, Ifid: %d, Auth: %v, Crypt: %v, Aead: %v, Encap: %v, ESN: %t", + sa.Dst, sa.Src, sa.Proto, sa.Mode, sa.Spi, sa.Reqid, sa.ReplayWindow, sa.Mark, sa.OutputMark, sa.Ifid, sa.Auth, sa.Crypt, sa.Aead, sa.Encap, sa.ESN) } func (sa XfrmState) Print(stats bool) string { if !stats { diff --git a/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go index 5dfdb33e4499..66c99423c471 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go @@ -158,6 +158,13 @@ func (h *Handle) 
xfrmStateAddOrUpdate(state *XfrmState, nlProto int) error { out := nl.NewRtAttr(nl.XFRMA_REPLAY_ESN_VAL, writeReplayEsn(state.ReplayWindow)) req.AddData(out) } + if state.OutputMark != 0 { + out := nl.NewRtAttr(nl.XFRMA_OUTPUT_MARK, nl.Uint32Attr(uint32(state.OutputMark))) + req.AddData(out) + } + + ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(state.Ifid))) + req.AddData(ifId) _, err := req.Execute(unix.NETLINK_XFRM, 0) return err @@ -184,12 +191,7 @@ func (h *Handle) xfrmStateAllocSpi(state *XfrmState) (*XfrmState, error) { return nil, err } - s, err := parseXfrmState(msgs[0], FAMILY_ALL) - if err != nil { - return nil, err - } - - return s, err + return parseXfrmState(msgs[0], FAMILY_ALL) } // XfrmStateDel will delete an xfrm state from the system. Note that @@ -275,6 +277,9 @@ func (h *Handle) xfrmStateGetOrDelete(state *XfrmState, nlProto int) (*XfrmState req.AddData(out) } + ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(state.Ifid))) + req.AddData(ifId) + resType := nl.XFRM_MSG_NEWSA if nlProto == nl.XFRM_MSG_DELSA { resType = 0 @@ -372,6 +377,10 @@ func parseXfrmState(m []byte, family int) (*XfrmState, error) { state.Mark = new(XfrmMark) state.Mark.Value = mark.Value state.Mark.Mask = mark.Mask + case nl.XFRMA_OUTPUT_MARK: + state.OutputMark = int(native.Uint32(attr.Value)) + case nl.XFRMA_IF_ID: + state.Ifid = int(native.Uint32(attr.Value)) } } @@ -394,11 +403,7 @@ func (h *Handle) XfrmStateFlush(proto Proto) error { req.AddData(&nl.XfrmUsersaFlush{Proto: uint8(proto)}) _, err := req.Execute(unix.NETLINK_XFRM, 0) - if err != nil { - return err - } - - return nil + return err } func limitsToLft(lmts XfrmStateLimits, lft *nl.XfrmLifetimeCfg) { diff --git a/vendor/modules.txt b/vendor/modules.txt index 6845b5752339..dfcd28bb0ef5 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -246,8 +246,6 @@ github.com/docker/go-connections/sockets github.com/docker/go-connections/tlsconfig # github.com/docker/go-units v0.4.0 => github.com/docker/go-units v0.4.0 github.com/docker/go-units -# github.com/docker/libnetwork v0.8.0-dev.2.0.20190925143933-c8a5fca4a652 => github.com/docker/libnetwork v0.8.0-dev.2.0.20190925143933-c8a5fca4a652 -github.com/docker/libnetwork/ipvs # github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 => github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 github.com/docker/spdystream github.com/docker/spdystream/spdy @@ -524,6 +522,8 @@ github.com/mistifyio/go-zfs github.com/mitchellh/go-wordwrap # github.com/mitchellh/mapstructure v1.1.2 => github.com/mitchellh/mapstructure v1.1.2 github.com/mitchellh/mapstructure +# github.com/moby/ipvs v1.0.0 => github.com/moby/ipvs v1.0.0 +github.com/moby/ipvs # github.com/moby/term v0.0.0-20200312100748-672ec06f55cd => github.com/moby/term v0.0.0-20200312100748-672ec06f55cd github.com/moby/term github.com/moby/term/windows @@ -690,7 +690,7 @@ github.com/thecodeteam/goscaleio github.com/thecodeteam/goscaleio/types/v1 # github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8 => github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8 github.com/tmc/grpc-websocket-proxy/wsproxy -# github.com/vishvananda/netlink v1.0.0 => github.com/vishvananda/netlink v1.0.0 +# github.com/vishvananda/netlink v1.1.0 => github.com/vishvananda/netlink v1.1.0 github.com/vishvananda/netlink github.com/vishvananda/netlink/nl # github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df => github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df From 
7da8b2b981d29eaa1751ecb571668edd137ed811 Mon Sep 17 00:00:00 2001 From: Ricardo Pchevuzinske Katz Date: Thu, 26 Mar 2020 12:00:51 -0300 Subject: [PATCH 36/92] Update ipvs library to the new repo and upgrade library version --- go.mod | 1 - 1 file changed, 1 deletion(-) diff --git a/go.mod b/go.mod index 965deb6fe994..7acbc2663434 100644 --- a/go.mod +++ b/go.mod @@ -230,7 +230,6 @@ replace ( github.com/docker/docker => github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0 github.com/docker/go-connections => github.com/docker/go-connections v0.3.0 github.com/docker/go-units => github.com/docker/go-units v0.4.0 - github.com/docker/libnetwork => github.com/docker/libnetwork v0.8.0-dev.2.0.20190925143933-c8a5fca4a652 github.com/docker/spdystream => github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 github.com/dustin/go-humanize => github.com/dustin/go-humanize v1.0.0 github.com/elazarl/goproxy => github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 // 947c36da3153 is the SHA for git tag v1.11 From d7e787adce74d3df9a3412d24a0f1c16b15239f2 Mon Sep 17 00:00:00 2001 From: tanjunchen Date: Thu, 26 Mar 2020 23:16:20 +0800 Subject: [PATCH 37/92] test/e2e/framework: remove direct dependency to k8s.io/kubernetes/pkg/controller --- test/e2e/framework/pod/BUILD | 1 + test/e2e/framework/pod/resource.go | 22 ++++++++++++++++++++++ test/e2e/framework/resource/BUILD | 1 - test/e2e/framework/resource/resources.go | 3 +-- 4 files changed, 24 insertions(+), 3 deletions(-) diff --git a/test/e2e/framework/pod/BUILD b/test/e2e/framework/pod/BUILD index 196f712ad9e2..25612fd44e97 100644 --- a/test/e2e/framework/pod/BUILD +++ b/test/e2e/framework/pod/BUILD @@ -28,6 +28,7 @@ go_library( "//test/utils/image:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/test/e2e/framework/pod/resource.go b/test/e2e/framework/pod/resource.go index da22ad63c4db..85a91d376dbd 100644 --- a/test/e2e/framework/pod/resource.go +++ b/test/e2e/framework/pod/resource.go @@ -32,6 +32,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog" "k8s.io/kubectl/pkg/util/podutils" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" e2elog "k8s.io/kubernetes/test/e2e/framework/log" @@ -583,3 +584,24 @@ func getNodeTTLAnnotationValue(c clientset.Interface) (time.Duration, error) { } return time.Duration(intValue) * time.Second, nil } + +// FilterActivePods returns pods that have not terminated. +func FilterActivePods(pods []*v1.Pod) []*v1.Pod { + var result []*v1.Pod + for _, p := range pods { + if IsPodActive(p) { + result = append(result, p) + } else { + klog.V(4).Infof("Ignoring inactive pod %v/%v in state %v, deletion time %v", + p.Namespace, p.Name, p.Status.Phase, p.DeletionTimestamp) + } + } + return result +} + +// IsPodActive return true if the pod meets certain conditions. 
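FilterActivePods and IsPodActive replace the test framework's dependency on pkg/controller (see the resource.go hunk below). A quick caller-side sketch of the same filtering — the pod fixtures are made up:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

func main() {
	pods := []*v1.Pod{
		{Status: v1.PodStatus{Phase: v1.PodRunning}},   // kept
		{Status: v1.PodStatus{Phase: v1.PodSucceeded}}, // filtered out
	}
	active := e2epod.FilterActivePods(pods)
	fmt.Printf("%d of %d pods are active\n", len(active), len(pods))
}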
+func IsPodActive(p *v1.Pod) bool { + return v1.PodSucceeded != p.Status.Phase && + v1.PodFailed != p.Status.Phase && + p.DeletionTimestamp == nil +} diff --git a/test/e2e/framework/resource/BUILD b/test/e2e/framework/resource/BUILD index 9dfa07d08a39..986506e7b1b7 100644 --- a/test/e2e/framework/resource/BUILD +++ b/test/e2e/framework/resource/BUILD @@ -9,7 +9,6 @@ go_library( importpath = "k8s.io/kubernetes/test/e2e/framework/resource", visibility = ["//visibility:public"], deps = [ - "//pkg/controller:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/batch/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", diff --git a/test/e2e/framework/resource/resources.go b/test/e2e/framework/resource/resources.go index a64383d94917..437f616aac8d 100644 --- a/test/e2e/framework/resource/resources.go +++ b/test/e2e/framework/resource/resources.go @@ -30,7 +30,6 @@ import ( "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" scaleclient "k8s.io/client-go/scale" - "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" testutils "k8s.io/kubernetes/test/utils" @@ -159,7 +158,7 @@ func waitForPodsInactive(ps *testutils.PodStore, interval, timeout time.Duration var activePods []*v1.Pod err := wait.PollImmediate(interval, timeout, func() (bool, error) { pods := ps.List() - activePods = controller.FilterActivePods(pods) + activePods = e2epod.FilterActivePods(pods) if len(activePods) != 0 { return false, nil } From d2b1903149a942dfaf53a881abdefbe27699acc9 Mon Sep 17 00:00:00 2001 From: Aldo Culquicondor Date: Wed, 25 Mar 2020 15:18:05 -0400 Subject: [PATCH 38/92] Calculate scores in parallel on spreading benchmarks This is closer to what happens in the core scheduler Signed-off-by: Aldo Culquicondor --- .../plugins/defaultpodtopologyspread/BUILD | 1 + .../default_pod_topology_spread_perf_test.go | 16 ++++++++-------- .../framework/plugins/podtopologyspread/BUILD | 1 + .../plugins/podtopologyspread/scoring_test.go | 16 ++++++++-------- 4 files changed, 18 insertions(+), 16 deletions(-) diff --git a/pkg/scheduler/framework/plugins/defaultpodtopologyspread/BUILD b/pkg/scheduler/framework/plugins/defaultpodtopologyspread/BUILD index 3547d3f9c475..a33db5454d4c 100644 --- a/pkg/scheduler/framework/plugins/defaultpodtopologyspread/BUILD +++ b/pkg/scheduler/framework/plugins/defaultpodtopologyspread/BUILD @@ -27,6 +27,7 @@ go_test( deps = [ "//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/internal/cache:go_default_library", + "//pkg/scheduler/internal/parallelize:go_default_library", "//pkg/scheduler/testing:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread_perf_test.go b/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread_perf_test.go index 5e03d3bce9df..212e70a1f268 100644 --- a/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread_perf_test.go +++ b/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread_perf_test.go @@ -25,6 +25,7 @@ import ( "k8s.io/client-go/kubernetes/fake" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" "k8s.io/kubernetes/pkg/scheduler/internal/cache" + "k8s.io/kubernetes/pkg/scheduler/internal/parallelize" st 
"k8s.io/kubernetes/pkg/scheduler/testing" ) @@ -76,15 +77,14 @@ func BenchmarkTestSelectorSpreadPriority(b *testing.B) { if !status.IsSuccess() { b.Fatalf("unexpected error: %v", status) } - var gotList framework.NodeScoreList - for _, node := range filteredNodes { - score, status := plugin.Score(ctx, state, pod, node.Name) - if !status.IsSuccess() { - b.Errorf("unexpected error: %v", status) - } - gotList = append(gotList, framework.NodeScore{Name: node.Name, Score: score}) + gotList := make(framework.NodeScoreList, len(filteredNodes)) + scoreNode := func(i int) { + n := filteredNodes[i] + score, _ := plugin.Score(ctx, state, pod, n.Name) + gotList[i] = framework.NodeScore{Name: n.Name, Score: score} } - status = plugin.NormalizeScore(context.Background(), state, pod, gotList) + parallelize.Until(ctx, len(filteredNodes), scoreNode) + status = plugin.NormalizeScore(ctx, state, pod, gotList) if !status.IsSuccess() { b.Fatal(status) } diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/BUILD b/pkg/scheduler/framework/plugins/podtopologyspread/BUILD index 78a644051bc8..d24ad02b7dc4 100644 --- a/pkg/scheduler/framework/plugins/podtopologyspread/BUILD +++ b/pkg/scheduler/framework/plugins/podtopologyspread/BUILD @@ -41,6 +41,7 @@ go_test( deps = [ "//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/internal/cache:go_default_library", + "//pkg/scheduler/internal/parallelize:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", "//pkg/scheduler/testing:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/scoring_test.go b/pkg/scheduler/framework/plugins/podtopologyspread/scoring_test.go index 60583b7f0211..c34503f02b56 100644 --- a/pkg/scheduler/framework/plugins/podtopologyspread/scoring_test.go +++ b/pkg/scheduler/framework/plugins/podtopologyspread/scoring_test.go @@ -29,6 +29,7 @@ import ( "k8s.io/client-go/kubernetes/fake" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" "k8s.io/kubernetes/pkg/scheduler/internal/cache" + "k8s.io/kubernetes/pkg/scheduler/internal/parallelize" st "k8s.io/kubernetes/pkg/scheduler/testing" "k8s.io/utils/pointer" ) @@ -746,19 +747,18 @@ func BenchmarkTestDefaultEvenPodsSpreadPriority(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - var gotList framework.NodeScoreList status := p.PreScore(ctx, state, pod, filteredNodes) if !status.IsSuccess() { b.Fatalf("unexpected error: %v", status) } - for _, n := range filteredNodes { - score, status := p.Score(context.Background(), state, pod, n.Name) - if !status.IsSuccess() { - b.Fatalf("unexpected error: %v", status) - } - gotList = append(gotList, framework.NodeScore{Name: n.Name, Score: score}) + gotList := make(framework.NodeScoreList, len(filteredNodes)) + scoreNode := func(i int) { + n := filteredNodes[i] + score, _ := p.Score(ctx, state, pod, n.Name) + gotList[i] = framework.NodeScore{Name: n.Name, Score: score} } - status = p.NormalizeScore(context.Background(), state, pod, gotList) + parallelize.Until(ctx, len(filteredNodes), scoreNode) + status = p.NormalizeScore(ctx, state, pod, gotList) if !status.IsSuccess() { b.Fatal(status) } From 9b2ff544ede9c064002637b5bd00597105347e94 Mon Sep 17 00:00:00 2001 From: Aldo Culquicondor Date: Thu, 26 Mar 2020 15:52:44 -0400 Subject: [PATCH 39/92] Fix pod affinity performance test configuration Signed-off-by: Aldo Culquicondor --- .../integration/scheduler_perf/config/performance-config.yaml | 4 ++-- 1 file changed, 2 
insertions(+), 2 deletions(-) diff --git a/test/integration/scheduler_perf/config/performance-config.yaml b/test/integration/scheduler_perf/config/performance-config.yaml index f9cb6a254aaa..3be7e3b77af2 100644 --- a/test/integration/scheduler_perf/config/performance-config.yaml +++ b/test/integration/scheduler_perf/config/performance-config.yaml @@ -110,9 +110,9 @@ desc: SchedulingPodAffinity nodes: nodeTemplatePath: config/node-default.yaml - labelNodeStrategy: + labelNodePrepareStrategy: labelKey: "failure-domain.beta.kubernetes.io/zone" - labelValue: "zone1" + labelValues: ["zone1"] initPods: - podTemplatePath: config/pod-with-pod-affinity.yaml podsToSchedule: From 6f99791021f975613ceb99f9e1581616c16f5908 Mon Sep 17 00:00:00 2001 From: "Lubomir I. Ivanov" Date: Thu, 26 Mar 2020 22:02:55 +0200 Subject: [PATCH 40/92] kubeadm: add missing RBAC for getting nodes on "upgrade apply" b117a928 added a new check during "join" for whether a Node with the same name exists in the cluster. When upgrading from 1.17 to 1.18, make sure the RBAC required by this check is added. Otherwise "kubeadm join" will complain that it lacks permissions to GET a Node. --- cmd/kubeadm/app/phases/upgrade/postupgrade.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cmd/kubeadm/app/phases/upgrade/postupgrade.go b/cmd/kubeadm/app/phases/upgrade/postupgrade.go index 98dc6520be33..59aa76ccd549 100644 --- a/cmd/kubeadm/app/phases/upgrade/postupgrade.go +++ b/cmd/kubeadm/app/phases/upgrade/postupgrade.go @@ -70,6 +70,11 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.InitCon errs = append(errs, errors.Wrap(err, "error uploading crisocket")) } + // Create RBAC rules that makes the bootstrap tokens able to get nodes + if err := nodebootstraptoken.AllowBoostrapTokensToGetNodes(client); err != nil { + errs = append(errs, err) + } + // Create/update RBAC rules that makes the bootstrap tokens able to post CSRs if err := nodebootstraptoken.AllowBootstrapTokensToPostCSRs(client); err != nil { errs = append(errs, err) From 86192d4b9a078ee40a2b3eddb7a2f204af1759df Mon Sep 17 00:00:00 2001 From: David Ashpole Date: Thu, 26 Mar 2020 13:30:36 -0700 Subject: [PATCH 41/92] fix cpu resource metric type by changing to counter --- .../metrics/collectors/resource_metrics.go | 8 ++++---- .../collectors/resource_metrics_test.go | 20 +++++++++---------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/pkg/kubelet/metrics/collectors/resource_metrics.go b/pkg/kubelet/metrics/collectors/resource_metrics.go index a36671279037..71cf9f7d749e 100644 --- a/pkg/kubelet/metrics/collectors/resource_metrics.go +++ b/pkg/kubelet/metrics/collectors/resource_metrics.go @@ -26,7 +26,7 @@ import ( ) var ( - nodeCPUUsageDesc = metrics.NewDesc("node_cpu_usage_seconds", + nodeCPUUsageDesc = metrics.NewDesc("node_cpu_usage_seconds_total", "Cumulative cpu time consumed by the node in core-seconds", nil, nil, @@ -40,7 +40,7 @@ var ( metrics.ALPHA, "") - containerCPUUsageDesc = metrics.NewDesc("container_cpu_usage_seconds", + containerCPUUsageDesc = metrics.NewDesc("container_cpu_usage_seconds_total", "Cumulative cpu time consumed by the container in core-seconds", []string{"container", "pod", "namespace"}, nil, @@ -120,7 +120,7 @@ func (rc *resourceMetricsCollector) collectNodeCPUMetrics(ch chan<- metrics.Metr } ch <- metrics.NewLazyMetricWithTimestamp(s.CPU.Time.Time, - metrics.NewLazyConstMetric(nodeCPUUsageDesc, metrics.GaugeValue, float64(*s.CPU.UsageCoreNanoSeconds)/float64(time.Second))) +
metrics.NewLazyConstMetric(nodeCPUUsageDesc, metrics.CounterValue, float64(*s.CPU.UsageCoreNanoSeconds)/float64(time.Second))) } func (rc *resourceMetricsCollector) collectNodeMemoryMetrics(ch chan<- metrics.Metric, s summary.NodeStats) { @@ -138,7 +138,7 @@ func (rc *resourceMetricsCollector) collectContainerCPUMetrics(ch chan<- metrics } ch <- metrics.NewLazyMetricWithTimestamp(s.CPU.Time.Time, - metrics.NewLazyConstMetric(containerCPUUsageDesc, metrics.GaugeValue, + metrics.NewLazyConstMetric(containerCPUUsageDesc, metrics.CounterValue, float64(*s.CPU.UsageCoreNanoSeconds)/float64(time.Second), s.Name, pod.PodRef.Name, pod.PodRef.Namespace)) } diff --git a/pkg/kubelet/metrics/collectors/resource_metrics_test.go b/pkg/kubelet/metrics/collectors/resource_metrics_test.go index b92aabbd6758..2470c1b21fbd 100644 --- a/pkg/kubelet/metrics/collectors/resource_metrics_test.go +++ b/pkg/kubelet/metrics/collectors/resource_metrics_test.go @@ -47,9 +47,9 @@ func TestCollectResourceMetrics(t *testing.T) { testTime := metav1.NewTime(time.Unix(2, 0)) // a static timestamp: 2000 interestedMetrics := []string{ "scrape_error", - "node_cpu_usage_seconds", + "node_cpu_usage_seconds_total", "node_memory_working_set_bytes", - "container_cpu_usage_seconds", + "container_cpu_usage_seconds_total", "container_memory_working_set_bytes", } @@ -85,9 +85,9 @@ func TestCollectResourceMetrics(t *testing.T) { }, summaryErr: nil, expectedMetrics: ` - # HELP node_cpu_usage_seconds [ALPHA] Cumulative cpu time consumed by the node in core-seconds - # TYPE node_cpu_usage_seconds gauge - node_cpu_usage_seconds 10 2000 + # HELP node_cpu_usage_seconds_total [ALPHA] Cumulative cpu time consumed by the node in core-seconds + # TYPE node_cpu_usage_seconds_total counter + node_cpu_usage_seconds_total 10 2000 # HELP node_memory_working_set_bytes [ALPHA] Current working set of the node in bytes # TYPE node_memory_working_set_bytes gauge node_memory_working_set_bytes 1000 2000 @@ -156,11 +156,11 @@ func TestCollectResourceMetrics(t *testing.T) { # HELP scrape_error [ALPHA] 1 if there was an error while getting container metrics, 0 otherwise # TYPE scrape_error gauge scrape_error 0 - # HELP container_cpu_usage_seconds [ALPHA] Cumulative cpu time consumed by the container in core-seconds - # TYPE container_cpu_usage_seconds gauge - container_cpu_usage_seconds{container="container_a",namespace="namespace_a",pod="pod_a"} 10 2000 - container_cpu_usage_seconds{container="container_a",namespace="namespace_b",pod="pod_b"} 10 2000 - container_cpu_usage_seconds{container="container_b",namespace="namespace_a",pod="pod_a"} 10 2000 + # HELP container_cpu_usage_seconds_total [ALPHA] Cumulative cpu time consumed by the container in core-seconds + # TYPE container_cpu_usage_seconds_total counter + container_cpu_usage_seconds_total{container="container_a",namespace="namespace_a",pod="pod_a"} 10 2000 + container_cpu_usage_seconds_total{container="container_a",namespace="namespace_b",pod="pod_b"} 10 2000 + container_cpu_usage_seconds_total{container="container_b",namespace="namespace_a",pod="pod_a"} 10 2000 # HELP container_memory_working_set_bytes [ALPHA] Current working set of the container in bytes # TYPE container_memory_working_set_bytes gauge container_memory_working_set_bytes{container="container_a",namespace="namespace_a",pod="pod_a"} 1000 2000 From 33ea82227e64fb785ff65175e718aaec61f5b305 Mon Sep 17 00:00:00 2001 From: Wei Huang Date: Thu, 26 Mar 2020 17:11:17 -0700 Subject: [PATCH 42/92] Use sched ComponentConfig over individual command line 
args --- hack/local-up-cluster.sh | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/hack/local-up-cluster.sh b/hack/local-up-cluster.sh index 52fba164df46..46673225d2b0 100755 --- a/hack/local-up-cluster.sh +++ b/hack/local-up-cluster.sh @@ -853,12 +853,19 @@ EOF } function start_kubescheduler { - SCHEDULER_LOG=${LOG_DIR}/kube-scheduler.log + + cat <<EOF > /tmp/kube-scheduler.yaml +apiVersion: kubescheduler.config.k8s.io/v1alpha2 +kind: KubeSchedulerConfiguration +clientConnection: + kubeconfig: ${CERT_DIR}/scheduler.kubeconfig +leaderElection: + leaderElect: false +EOF ${CONTROLPLANE_SUDO} "${GO_OUT}/kube-scheduler" \ --v="${LOG_LEVEL}" \ - --leader-elect=false \ - --kubeconfig "${CERT_DIR}"/scheduler.kubeconfig \ + --config=/tmp/kube-scheduler.yaml \ --feature-gates="${FEATURE_GATES}" \ --master="https://${API_HOST}:${API_SECURE_PORT}" >"${SCHEDULER_LOG}" 2>&1 & SCHEDULER_PID=$! From 49283364bf33491b0e79fa952af6f8d7489620cc Mon Sep 17 00:00:00 2001 From: Dave Chen Date: Fri, 13 Mar 2020 18:00:34 +0800 Subject: [PATCH 43/92] Decouple yaml based integration test from legacy test - Move utilities or constants out so that both of them can run independently. - Rename the legacy test so that it can eventually be deleted when the perf dash changes are done --- test/integration/scheduler_perf/BUILD | 4 +++- ...bench_test.go => scheduler_perf_legacy_test.go} | 13 ------------- test/integration/scheduler_perf/util.go | 15 +++++++++++++++ 3 files changed, 18 insertions(+), 14 deletions(-) rename test/integration/scheduler_perf/{scheduler_bench_test.go => scheduler_perf_legacy_test.go} (98%) diff --git a/test/integration/scheduler_perf/BUILD b/test/integration/scheduler_perf/BUILD index 12d846e2b8e0..8e6ef186bfe1 100644 --- a/test/integration/scheduler_perf/BUILD +++ b/test/integration/scheduler_perf/BUILD @@ -15,6 +15,7 @@ go_library( importpath = "k8s.io/kubernetes/test/integration/scheduler_perf", deps = [ "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library", @@ -23,6 +24,7 @@ go_library( "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library", "//staging/src/k8s.io/component-base/metrics/testutil:go_default_library", "//test/integration/util:go_default_library", + "//test/utils:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], ) @@ -32,7 +34,7 @@ go_test( size = "large", srcs = [ "main_test.go", - "scheduler_bench_test.go", + "scheduler_perf_legacy_test.go", "scheduler_perf_test.go", "scheduler_test.go", ], diff --git a/test/integration/scheduler_perf/scheduler_bench_test.go b/test/integration/scheduler_perf/scheduler_perf_legacy_test.go similarity index 98% rename from test/integration/scheduler_perf/scheduler_bench_test.go rename to test/integration/scheduler_perf/scheduler_perf_legacy_test.go index 6f0e8c212147..b1fb95f1b8f9 100644 --- a/test/integration/scheduler_perf/scheduler_bench_test.go +++ b/test/integration/scheduler_perf/scheduler_perf_legacy_test.go @@ -50,8 +50,6 @@ var ( {nodes: 600, existingPods: 10000, minPods: 1000}, {nodes: 5000, existingPods: 5000, minPods: 1000}, } - testNamespace = "sched-test" - setupNamespace = "sched-setup" ) // BenchmarkScheduling benchmarks the scheduling rate when the cluster has @@ -524,17
+522,6 @@ func makeBasePodWithSecret() *v1.Pod { return basePod } -// makeBasePod creates a Pod object to be used as a template. -func makeBasePod() *v1.Pod { - basePod := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "pod-", - }, - Spec: testutils.MakePodSpec(), - } - return basePod -} - func makeBasePersistentVolumeClaim() *v1.PersistentVolumeClaim { return &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ diff --git a/test/integration/scheduler_perf/util.go b/test/integration/scheduler_perf/util.go index 715d03d3b21f..39960b67f928 100644 --- a/test/integration/scheduler_perf/util.go +++ b/test/integration/scheduler_perf/util.go @@ -27,6 +27,7 @@ import ( "time" v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" coreinformers "k8s.io/client-go/informers/core/v1" @@ -36,10 +37,13 @@ import ( "k8s.io/component-base/metrics/testutil" "k8s.io/klog" "k8s.io/kubernetes/test/integration/util" + testutils "k8s.io/kubernetes/test/utils" ) const ( dateFormat = "2006-01-02T15:04:05Z" + testNamespace = "sched-test" + setupNamespace = "sched-setup" throughputSampleFrequency = time.Second ) @@ -106,6 +110,17 @@ type DataItems struct { DataItems []DataItem `json:"dataItems"` } +// makeBasePod creates a Pod object to be used as a template. +func makeBasePod() *v1.Pod { + basePod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "pod-", + }, + Spec: testutils.MakePodSpec(), + } + return basePod +} + func dataItems2JSONFile(dataItems DataItems, namePrefix string) error { b, err := json.Marshal(dataItems) if err != nil { From 35838161127449d1517bd0e27654ed8ab00df4d8 Mon Sep 17 00:00:00 2001 From: Peter Hornyack Date: Thu, 26 Mar 2020 19:30:39 -0700 Subject: [PATCH 44/92] Fix INFRA_CONTAINER variable references Tested: NUM_NODES=2 NUM_WINDOWS_NODES=2 KUBE_GCE_ENABLE_IP_ALIASES=true \ KUBERNETES_NODE_PLATFORM=windows \ LOGGING_STACKDRIVER_RESOURCE_TYPES=new \ KUBE_UP_AUTOMATIC_CLEANUP=true \ WINDOWS_NODE_OS_DISTRIBUTION=win2019 \ ./cluster/kube-up.sh --- cluster/gce/windows/k8s-node-setup.psm1 | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cluster/gce/windows/k8s-node-setup.psm1 b/cluster/gce/windows/k8s-node-setup.psm1 index 67521f09aba3..ae2788faaf9e 100644 --- a/cluster/gce/windows/k8s-node-setup.psm1 +++ b/cluster/gce/windows/k8s-node-setup.psm1 @@ -1115,7 +1115,7 @@ function Start-WorkerServices { $kubelet_args = $kubelet_args_str.Split(" ") Log-Output "kubelet_args from metadata: ${kubelet_args}" $default_kubelet_args = @(` - "--pod-infra-container-image=${INFRA_CONTAINER}" + "--pod-infra-container-image=${env:INFRA_CONTAINER}" ) $kubelet_args = ${default_kubelet_args} + ${kubelet_args} if (-not (Test-NodeUsesAuthPlugin ${kube_env})) { @@ -1253,14 +1253,14 @@ function Configure-Crictl { # node startup steps! # Pull-InfraContainer must be called AFTER Verify-WorkerServices. function Pull-InfraContainer { - $name, $label = $INFRA_CONTAINER -split ':',2 + $name, $label = ${env:INFRA_CONTAINER} -split ':',2 if (-not ("$(& crictl images)" -match "$name.*$label")) { - & crictl pull $INFRA_CONTAINER + & crictl pull ${env:INFRA_CONTAINER} if (!$?) 
{ - throw "Error running 'crictl pull $INFRA_CONTAINER'" + throw "Error running 'crictl pull ${env:INFRA_CONTAINER}'" } } - $inspect = "$(& crictl inspecti $INFRA_CONTAINER | Out-String)" + $inspect = "$(& crictl inspecti ${env:INFRA_CONTAINER} | Out-String)" Log-Output "Infra/pause container:`n$inspect" } @@ -1473,7 +1473,7 @@ function Configure_Containerd { [plugins.cri.cni] bin_dir = 'CNI_BIN_DIR' conf_dir = 'CNI_CONF_DIR' -"@.replace('INFRA_CONTAINER_IMAGE', $INFRA_CONTAINER).` +"@.replace('INFRA_CONTAINER_IMAGE', ${env:INFRA_CONTAINER}).` replace('CNI_BIN_DIR', ${env:CNI_DIR}).` replace('CNI_CONF_DIR', ${env:CNI_CONFIG_DIR}) } From 27e133afe2edd9c793ad5d3c5f162008aa4bd683 Mon Sep 17 00:00:00 2001 From: tanjunchen Date: Fri, 27 Mar 2020 11:42:22 +0800 Subject: [PATCH 45/92] e2e/framework : pods.go remove direct imports to k8s.io/kubernetes/pkg/ --- test/e2e/framework/BUILD | 2 -- test/e2e/framework/pods.go | 24 ++++++++++++++++++------ 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/test/e2e/framework/BUILD b/test/e2e/framework/BUILD index d46e7c3b962b..4ee13ca89395 100644 --- a/test/e2e/framework/BUILD +++ b/test/e2e/framework/BUILD @@ -28,8 +28,6 @@ go_library( "//pkg/controller:go_default_library", "//pkg/kubelet/apis/config:go_default_library", "//pkg/kubelet/apis/stats/v1alpha1:go_default_library", - "//pkg/kubelet/events:go_default_library", - "//pkg/kubelet/sysctl:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/policy/v1beta1:go_default_library", diff --git a/test/e2e/framework/pods.go b/test/e2e/framework/pods.go index 32a84c41678c..66adeddc515b 100644 --- a/test/e2e/framework/pods.go +++ b/test/e2e/framework/pods.go @@ -32,8 +32,6 @@ import ( "k8s.io/client-go/kubernetes/scheme" v1core "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/kubectl/pkg/util/podutils" - "k8s.io/kubernetes/pkg/kubelet/events" - "k8s.io/kubernetes/pkg/kubelet/sysctl" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -42,8 +40,22 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" ) -// DefaultPodDeletionTimeout is the default timeout for deleting pod -const DefaultPodDeletionTimeout = 3 * time.Minute +const ( + // DefaultPodDeletionTimeout is the default timeout for deleting pod + DefaultPodDeletionTimeout = 3 * time.Minute + + // the status of container event, copied from k8s.io/kubernetes/pkg/kubelet/events + killingContainer = "Killing" + + // the status of container event, copied from k8s.io/kubernetes/pkg/kubelet/events + failedToCreateContainer = "Failed" + + // the status of container event, copied from k8s.io/kubernetes/pkg/kubelet/events + startedContainer = "Started" + + // it is copied from k8s.io/kubernetes/pkg/kubelet/sysctl + forbiddenReason = "SysctlForbidden" +) // ImageWhiteList is the images used in the current test suite. It should be initialized in test suite and // the images in the white list should be pre-pulled in the test suite. 
Currently, this is only used by @@ -227,10 +239,10 @@ func (c *PodClient) WaitForErrorEventOrSuccess(pod *v1.Pod) (*v1.Event, error) { } for _, e := range evnts.Items { switch e.Reason { - case events.KillingContainer, events.FailedToCreateContainer, sysctl.ForbiddenReason: + case killingContainer, failedToCreateContainer, forbiddenReason: ev = &e return true, nil - case events.StartedContainer: + case startedContainer: return true, nil default: // ignore all other errors From 74f6aa654b4b2e7246ada25b30724531b906b030 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Thu, 26 Mar 2020 20:28:40 -0700 Subject: [PATCH 46/92] fix aws loadbalancer nodePort cannot change issue --- .../aws/aws_loadbalancer.go | 7 +- .../aws/aws_loadbalancer_test.go | 124 ++++++++++++++++++ 2 files changed, 128 insertions(+), 3 deletions(-) diff --git a/staging/src/k8s.io/legacy-cloud-providers/aws/aws_loadbalancer.go b/staging/src/k8s.io/legacy-cloud-providers/aws/aws_loadbalancer.go index 9021b64e138a..dd46751876df 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/aws/aws_loadbalancer.go +++ b/staging/src/k8s.io/legacy-cloud-providers/aws/aws_loadbalancer.go @@ -452,13 +452,14 @@ var invalidELBV2NameRegex = regexp.MustCompile("[^[:alnum:]]") // buildTargetGroupName will build unique name for targetGroup of service & port. // the name is in format k8s-{namespace:8}-{name:8}-{uuid:10} (chosen to benefit most common use cases). -// Note: targetProtocol & targetType are included since they cannot be modified on existing targetGroup. -func (c *Cloud) buildTargetGroupName(serviceName types.NamespacedName, servicePort int64, targetProtocol string, targetType string) string { +// Note: nodePort & targetProtocol & targetType are included since they cannot be modified on existing targetGroup. +func (c *Cloud) buildTargetGroupName(serviceName types.NamespacedName, servicePort int64, nodePort int64, targetProtocol string, targetType string) string { hasher := sha1.New() _, _ = hasher.Write([]byte(c.tagging.clusterID())) _, _ = hasher.Write([]byte(serviceName.Namespace)) _, _ = hasher.Write([]byte(serviceName.Name)) _, _ = hasher.Write([]byte(strconv.FormatInt(servicePort, 10))) + _, _ = hasher.Write([]byte(strconv.FormatInt(nodePort, 10))) _, _ = hasher.Write([]byte(targetProtocol)) _, _ = hasher.Write([]byte(targetType)) tgUUID := hex.EncodeToString(hasher.Sum(nil)) @@ -527,7 +528,7 @@ func (c *Cloud) ensureTargetGroup(targetGroup *elbv2.TargetGroup, serviceName ty dirty := false if targetGroup == nil { targetType := "instance" - name := c.buildTargetGroupName(serviceName, mapping.FrontendPort, mapping.TrafficProtocol, targetType) + name := c.buildTargetGroupName(serviceName, mapping.FrontendPort, mapping.TrafficPort, mapping.TrafficProtocol, targetType) klog.Infof("Creating load balancer target group for %v with name: %s", serviceName, name) input := &elbv2.CreateTargetGroupInput{ VpcId: aws.String(vpcID), diff --git a/staging/src/k8s.io/legacy-cloud-providers/aws/aws_loadbalancer_test.go b/staging/src/k8s.io/legacy-cloud-providers/aws/aws_loadbalancer_test.go index a437e3d3ad04..16574b058c08 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/aws/aws_loadbalancer_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/aws/aws_loadbalancer_test.go @@ -19,6 +19,7 @@ limitations under the License. 
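Before the test table below, it may help to see the naming scheme in isolation. This is a rough standalone sketch of how a deterministic k8s-{namespace:8}-{name:8}-{uuid:10} target-group name can be derived; the real buildTargetGroupName also sanitizes names with invalidELBV2NameRegex, which is elided here, so the exact output differs from the expectations in the test:

    package main

    import (
        "crypto/sha1"
        "encoding/hex"
        "fmt"
        "strconv"
    )

    // targetGroupName folds every property that cannot be modified on an
    // existing target group (now including nodePort) into the hash, so changing
    // any of them produces a new name and therefore a new target group.
    func targetGroupName(clusterID, namespace, name string, servicePort, nodePort int64, protocol, targetType string) string {
        hasher := sha1.New()
        hasher.Write([]byte(clusterID))
        hasher.Write([]byte(namespace))
        hasher.Write([]byte(name))
        hasher.Write([]byte(strconv.FormatInt(servicePort, 10)))
        hasher.Write([]byte(strconv.FormatInt(nodePort, 10)))
        hasher.Write([]byte(protocol))
        hasher.Write([]byte(targetType))
        uuid := hex.EncodeToString(hasher.Sum(nil))
        return fmt.Sprintf("k8s-%.8s-%.8s-%.10s", namespace, name, uuid)
    }

    func main() {
        // Only the nodePort differs between these two calls, yet the names differ,
        // which is exactly the behavior the fix relies on.
        fmt.Println(targetGroupName("cluster-a", "default", "service-a", 80, 8080, "TCP", "instance"))
        fmt.Println(targetGroupName("cluster-a", "default", "service-a", 80, 9090, "TCP", "instance"))
    }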
package aws import ( + "k8s.io/apimachinery/pkg/types" "testing" "github.com/aws/aws-sdk-go/aws" @@ -296,3 +297,126 @@ func TestElbListenersAreEqual(t *testing.T) { }) } } + +func TestBuildTargetGroupName(t *testing.T) { + type args struct { + serviceName types.NamespacedName + servicePort int64 + nodePort int64 + targetProtocol string + targetType string + } + tests := []struct { + name string + clusterID string + args args + want string + }{ + { + name: "base case", + clusterID: "cluster-a", + args: args{ + serviceName: types.NamespacedName{Namespace: "default", Name: "service-a"}, + servicePort: 80, + nodePort: 8080, + targetProtocol: "TCP", + targetType: "instance", + }, + want: "k8s-default-servicea-0aeb5b75af", + }, + { + name: "base case & clusterID changed", + clusterID: "cluster-b", + args: args{ + serviceName: types.NamespacedName{Namespace: "default", Name: "service-a"}, + servicePort: 80, + nodePort: 8080, + targetProtocol: "TCP", + targetType: "instance", + }, + want: "k8s-default-servicea-5d3a0a69a8", + }, + { + name: "base case & serviceNamespace changed", + clusterID: "cluster-a", + args: args{ + serviceName: types.NamespacedName{Namespace: "another", Name: "service-a"}, + servicePort: 80, + nodePort: 8080, + targetProtocol: "TCP", + targetType: "instance", + }, + want: "k8s-another-servicea-f3a3263315", + }, + { + name: "base case & serviceName changed", + clusterID: "cluster-a", + args: args{ + serviceName: types.NamespacedName{Namespace: "default", Name: "service-b"}, + servicePort: 80, + nodePort: 8080, + targetProtocol: "TCP", + targetType: "instance", + }, + want: "k8s-default-serviceb-9a3c03b25e", + }, + { + name: "base case & servicePort changed", + clusterID: "cluster-a", + args: args{ + serviceName: types.NamespacedName{Namespace: "default", Name: "service-a"}, + servicePort: 9090, + nodePort: 8080, + targetProtocol: "TCP", + targetType: "instance", + }, + want: "k8s-default-servicea-6e07474ff4", + }, + { + name: "base case & nodePort changed", + clusterID: "cluster-a", + args: args{ + serviceName: types.NamespacedName{Namespace: "default", Name: "service-a"}, + servicePort: 80, + nodePort: 9090, + targetProtocol: "TCP", + targetType: "instance", + }, + want: "k8s-default-servicea-6cb2d0201c", + }, + { + name: "base case & targetProtocol changed", + clusterID: "cluster-a", + args: args{ + serviceName: types.NamespacedName{Namespace: "default", Name: "service-a"}, + servicePort: 80, + nodePort: 8080, + targetProtocol: "UDP", + targetType: "instance", + }, + want: "k8s-default-servicea-70495e628e", + }, + { + name: "base case & targetType changed", + clusterID: "cluster-a", + args: args{ + serviceName: types.NamespacedName{Namespace: "default", Name: "service-a"}, + servicePort: 80, + nodePort: 8080, + targetProtocol: "TCP", + targetType: "ip", + }, + want: "k8s-default-servicea-fff6dd8028", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &Cloud{ + tagging: awsTagging{ClusterID: tt.clusterID}, + } + if got := c.buildTargetGroupName(tt.args.serviceName, tt.args.servicePort, tt.args.nodePort, tt.args.targetProtocol, tt.args.targetType); got != tt.want { + assert.Equal(t, tt.want, got) + } + }) + } +} From b75990cc7bccc5693df34e29745eea0c98d095d1 Mon Sep 17 00:00:00 2001 From: "Sean R. 
Sullivan" Date: Thu, 26 Mar 2020 13:24:33 -0700 Subject: [PATCH 47/92] Fixes problem where kubectl apply stops after first error --- .../src/k8s.io/kubectl/pkg/cmd/apply/BUILD | 1 + .../src/k8s.io/kubectl/pkg/cmd/apply/apply.go | 276 ++++++++++-------- 2 files changed, 148 insertions(+), 129 deletions(-) diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/apply/BUILD b/staging/src/k8s.io/kubectl/pkg/cmd/apply/BUILD index 6f9d4dfe7d83..7b13a956dfc3 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/apply/BUILD +++ b/staging/src/k8s.io/kubectl/pkg/cmd/apply/BUILD @@ -22,6 +22,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/jsonmergepatch:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/mergepatch:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go b/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go index 1f2802600fbf..1558243910ef 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/printers" @@ -371,53 +372,78 @@ func (o *ApplyOptions) Run() error { // Generates the objects using the resource builder if they have not // already been stored by calling "SetObjects()" in the pre-processor. + errs := []error{} infos, err := o.GetObjects() if err != nil { - return err + errs = append(errs, err) } - if len(infos) == 0 { + if len(infos) == 0 && len(errs) == 0 { return fmt.Errorf("no objects passed to apply") } + // Iterate through all objects, applying each one. for _, info := range infos { + if err := o.applyOneObject(info); err != nil { + errs = append(errs, err) + } + } + // If any errors occurred during apply, then return error (or + // aggregate of errors). + if len(errs) == 1 { + return errs[0] + } + if len(errs) > 1 { + return utilerrors.NewAggregate(errs) + } - o.MarkNamespaceVisited(info) + if o.PostProcessorFn != nil { + klog.V(4).Infof("Running apply post-processor function") + if err := o.PostProcessorFn(); err != nil { + return err + } + } + + return nil +} + +func (o *ApplyOptions) applyOneObject(info *resource.Info) error { + o.MarkNamespaceVisited(info) + + if err := o.Recorder.Record(info.Object); err != nil { + klog.V(4).Infof("error recording current command: %v", err) + } - if err := o.Recorder.Record(info.Object); err != nil { - klog.V(4).Infof("error recording current command: %v", err) + if o.ServerSideApply { + // Send the full object to be applied on the server side. + data, err := runtime.Encode(unstructured.UnstructuredJSONScheme, info.Object) + if err != nil { + return cmdutil.AddSourceToErr("serverside-apply", info.Source, err) } - if o.ServerSideApply { - // Send the full object to be applied on the server side. 
- data, err := runtime.Encode(unstructured.UnstructuredJSONScheme, info.Object) - if err != nil { - return cmdutil.AddSourceToErr("serverside-apply", info.Source, err) - } + options := metav1.PatchOptions{ + Force: &o.ForceConflicts, + FieldManager: o.FieldManager, + } - options := metav1.PatchOptions{ - Force: &o.ForceConflicts, - FieldManager: o.FieldManager, + helper := resource.NewHelper(info.Client, info.Mapping) + if o.DryRunStrategy == cmdutil.DryRunServer { + if err := o.DryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil { + return err } - - helper := resource.NewHelper(info.Client, info.Mapping) - if o.DryRunStrategy == cmdutil.DryRunServer { - if err := o.DryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil { - return err - } - helper.DryRun(true) + helper.DryRun(true) + } + obj, err := helper.Patch( + info.Namespace, + info.Name, + types.ApplyPatchType, + data, + &options, + ) + if err != nil { + if isIncompatibleServerError(err) { + err = fmt.Errorf("Server-side apply not available on the server: (%v)", err) } - obj, err := helper.Patch( - info.Namespace, - info.Name, - types.ApplyPatchType, - data, - &options, - ) - if err != nil { - if isIncompatibleServerError(err) { - err = fmt.Errorf("Server-side apply not available on the server: (%v)", err) - } - if errors.IsConflict(err) { - err = fmt.Errorf(`%v + if errors.IsConflict(err) { + err = fmt.Errorf(`%v Please review the fields above--they currently have other managers. Here are the ways you can resolve this warning: * If you intend to manage all of these fields, please re-run the apply @@ -429,136 +455,128 @@ are the ways you can resolve this warning: value; in this case, you'll become the manager if the other manager(s) stop managing the field (remove it from their configuration). See http://k8s.io/docs/reference/using-api/api-concepts/#conflicts`, err) - } - return err - } - - info.Refresh(obj, true) - - if err := o.MarkObjectVisited(info); err != nil { - return err } + return err + } - if o.shouldPrintObject() { - continue - } + info.Refresh(obj, true) - printer, err := o.ToPrinter("serverside-applied") - if err != nil { - return err - } + if err := o.MarkObjectVisited(info); err != nil { + return err + } - if err = printer.PrintObj(info.Object, o.Out); err != nil { - return err - } - continue + if o.shouldPrintObject() { + return nil } - // Get the modified configuration of the object. Embed the result - // as an annotation in the modified configuration, so that it will appear - // in the patch sent to the server. 
- modified, err := util.GetModifiedConfiguration(info.Object, true, unstructured.UnstructuredJSONScheme) + printer, err := o.ToPrinter("serverside-applied") if err != nil { - return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving modified configuration from:\n%s\nfor:", info.String()), info.Source, err) + return err } - if err := info.Get(); err != nil { - if !errors.IsNotFound(err) { - return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving current configuration of:\n%s\nfrom server for:", info.String()), info.Source, err) - } - - // Create the resource if it doesn't exist - // First, update the annotation used by kubectl apply - if err := util.CreateApplyAnnotation(info.Object, unstructured.UnstructuredJSONScheme); err != nil { - return cmdutil.AddSourceToErr("creating", info.Source, err) - } - - if o.DryRunStrategy != cmdutil.DryRunClient { - // Then create the resource and skip the three-way merge - helper := resource.NewHelper(info.Client, info.Mapping) - if o.DryRunStrategy == cmdutil.DryRunServer { - if err := o.DryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil { - return cmdutil.AddSourceToErr("creating", info.Source, err) - } - helper.DryRun(true) - } - obj, err := helper.Create(info.Namespace, true, info.Object) - if err != nil { - return cmdutil.AddSourceToErr("creating", info.Source, err) - } - info.Refresh(obj, true) - } - - if err := o.MarkObjectVisited(info); err != nil { - return err - } + if err = printer.PrintObj(info.Object, o.Out); err != nil { + return err + } + return nil + } - if o.shouldPrintObject() { - continue - } + // Get the modified configuration of the object. Embed the result + // as an annotation in the modified configuration, so that it will appear + // in the patch sent to the server. + modified, err := util.GetModifiedConfiguration(info.Object, true, unstructured.UnstructuredJSONScheme) + if err != nil { + return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving modified configuration from:\n%s\nfor:", info.String()), info.Source, err) + } - printer, err := o.ToPrinter("created") - if err != nil { - return err - } - if err = printer.PrintObj(info.Object, o.Out); err != nil { - return err - } - continue + if err := info.Get(); err != nil { + if !errors.IsNotFound(err) { + return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving current configuration of:\n%s\nfrom server for:", info.String()), info.Source, err) } - if err := o.MarkObjectVisited(info); err != nil { - return err + // Create the resource if it doesn't exist + // First, update the annotation used by kubectl apply + if err := util.CreateApplyAnnotation(info.Object, unstructured.UnstructuredJSONScheme); err != nil { + return cmdutil.AddSourceToErr("creating", info.Source, err) } if o.DryRunStrategy != cmdutil.DryRunClient { - metadata, _ := meta.Accessor(info.Object) - annotationMap := metadata.GetAnnotations() - if _, ok := annotationMap[corev1.LastAppliedConfigAnnotation]; !ok { - fmt.Fprintf(o.ErrOut, warningNoLastAppliedConfigAnnotation, o.cmdBaseName) - } - - patcher, err := newPatcher(o, info) - if err != nil { - return err + // Then create the resource and skip the three-way merge + helper := resource.NewHelper(info.Client, info.Mapping) + if o.DryRunStrategy == cmdutil.DryRunServer { + if err := o.DryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil { + return cmdutil.AddSourceToErr("creating", info.Source, err) + } + helper.DryRun(true) } - patchBytes, patchedObject, err := patcher.Patch(info.Object, modified, info.Source, info.Namespace, info.Name, o.ErrOut) + 
obj, err := helper.Create(info.Namespace, true, info.Object) if err != nil { - return cmdutil.AddSourceToErr(fmt.Sprintf("applying patch:\n%s\nto:\n%v\nfor:", patchBytes, info), info.Source, err) + return cmdutil.AddSourceToErr("creating", info.Source, err) } + info.Refresh(obj, true) + } - info.Refresh(patchedObject, true) - - if string(patchBytes) == "{}" && !o.shouldPrintObject() { - printer, err := o.ToPrinter("unchanged") - if err != nil { - return err - } - if err = printer.PrintObj(info.Object, o.Out); err != nil { - return err - } - continue - } + if err := o.MarkObjectVisited(info); err != nil { + return err } if o.shouldPrintObject() { - continue + return nil } - printer, err := o.ToPrinter("configured") + printer, err := o.ToPrinter("created") if err != nil { return err } if err = printer.PrintObj(info.Object, o.Out); err != nil { return err } + return nil } - if o.PostProcessorFn != nil { - klog.V(4).Infof("Running apply post-processor function") - if err := o.PostProcessorFn(); err != nil { + if err := o.MarkObjectVisited(info); err != nil { + return err + } + + if o.DryRunStrategy != cmdutil.DryRunClient { + metadata, _ := meta.Accessor(info.Object) + annotationMap := metadata.GetAnnotations() + if _, ok := annotationMap[corev1.LastAppliedConfigAnnotation]; !ok { + fmt.Fprintf(o.ErrOut, warningNoLastAppliedConfigAnnotation, o.cmdBaseName) + } + + patcher, err := newPatcher(o, info) + if err != nil { return err } + patchBytes, patchedObject, err := patcher.Patch(info.Object, modified, info.Source, info.Namespace, info.Name, o.ErrOut) + if err != nil { + return cmdutil.AddSourceToErr(fmt.Sprintf("applying patch:\n%s\nto:\n%v\nfor:", patchBytes, info), info.Source, err) + } + + info.Refresh(patchedObject, true) + + if string(patchBytes) == "{}" && !o.shouldPrintObject() { + printer, err := o.ToPrinter("unchanged") + if err != nil { + return err + } + if err = printer.PrintObj(info.Object, o.Out); err != nil { + return err + } + return nil + } + } + + if o.shouldPrintObject() { + return nil + } + + printer, err := o.ToPrinter("configured") + if err != nil { + return err + } + if err = printer.PrintObj(info.Object, o.Out); err != nil { + return err } return nil From 1083c0fe053fa9e6d52a2920ab45b0c370581704 Mon Sep 17 00:00:00 2001 From: "Sean R. Sullivan" Date: Thu, 26 Mar 2020 22:33:04 -0700 Subject: [PATCH 48/92] Adds integration test for apply failures when applying multiple resources --- hack/testdata/multi-resource.yaml | 19 +++++++++++++++++++ test/cmd/apply.sh | 13 +++++++++++++ 2 files changed, 32 insertions(+) create mode 100644 hack/testdata/multi-resource.yaml diff --git a/hack/testdata/multi-resource.yaml b/hack/testdata/multi-resource.yaml new file mode 100644 index 000000000000..7acb111c78de --- /dev/null +++ b/hack/testdata/multi-resource.yaml @@ -0,0 +1,19 @@ +# Tests that initial failures do not block subsequent applies. # Pod must be before namespace, so it initially fails. Second # apply of pod should succeed, since namespace finally exists.
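The test data below exercises exactly the control flow introduced in the previous commit: apply no longer returns on the first failed object, but collects per-object errors and combines them at the end. A condensed sketch of that pattern; applyAll and its string items are placeholders for the real resource.Info handling:

    package main

    import (
        "fmt"

        utilerrors "k8s.io/apimachinery/pkg/util/errors"
    )

    // applyAll applies every item, remembering failures instead of stopping at
    // the first one, then reports a single error or an aggregate of all of them.
    func applyAll(items []string, applyOne func(string) error) error {
        var errs []error
        for _, it := range items {
            if err := applyOne(it); err != nil {
                errs = append(errs, err)
            }
        }
        if len(errs) == 1 {
            return errs[0]
        }
        if len(errs) > 1 {
            return utilerrors.NewAggregate(errs)
        }
        return nil
    }

    func main() {
        err := applyAll([]string{"pod", "namespace"}, func(name string) error {
            if name == "pod" {
                return fmt.Errorf("namespace not found for %s", name)
            }
            return nil
        })
        // The pod failure is reported, but the namespace was still applied,
        // which is why a second pass of the test below succeeds.
        fmt.Println(err)
    }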
+apiVersion: v1 +kind: Pod +metadata: + name: test-pod + namespace: multi-resource-ns + labels: + name: test-pod-label +spec: + containers: + - name: kubernetes-pause + image: k8s.gcr.io/pause:2.0 +--- +apiVersion: v1 +kind: Namespace +metadata: + name: multi-resource-ns diff --git a/test/cmd/apply.sh b/test/cmd/apply.sh index 0181b8e3c7e7..f9d5f8d80405 100755 --- a/test/cmd/apply.sh +++ b/test/cmd/apply.sh @@ -274,6 +274,19 @@ __EOF__ # cleanup kubectl delete --kustomize hack/testdata/kustomize + ## kubectl apply multiple resources with initial failure. + # Pre-Condition: no POD exists + kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" '' + # First pass, namespace is created, but pod is not (since namespace does not exist yet). + kubectl apply -f hack/testdata/multi-resource.yaml "${kube_flags[@]:?}" + output_message=$(! kubectl get pods test-pod 2>&1 "${kube_flags[@]:?}") + kube::test::if_has_string "${output_message}" 'pods "test-pod" not found' + # Second pass, pod is created (now that namespace exists). + kubectl apply -f hack/testdata/multi-resource.yaml "${kube_flags[@]:?}" + kube::test::get_object_assert 'pod test-pod' "{{${id_field}}}" 'test-pod' + # cleanup + kubectl delete -f hack/testdata/multi-resource.yaml + set +o nounset set +o errexit } From a97a7c49b4b9fed39450038d2e78225556a9a563 Mon Sep 17 00:00:00 2001 From: t-qini Date: Mon, 23 Mar 2020 21:28:38 +0800 Subject: [PATCH 49/92] Add unit tests for azure VMSS client async operations. --- .../azure/clients/vmssclient/BUILD | 2 + .../vmssclient/azure_vmssclient_test.go | 38 +++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/BUILD b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/BUILD index 301d2e0de682..e6ae9ef95531 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/BUILD +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/BUILD @@ -32,8 +32,10 @@ go_test( "//staging/src/k8s.io/legacy-cloud-providers/azure/clients:go_default_library", "//staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient:go_default_library", "//staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/mockarmclient:go_default_library", + "//staging/src/k8s.io/legacy-cloud-providers/azure/retry:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest:go_default_library", + "//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library", "//vendor/github.com/golang/mock/gomock:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/azure_vmssclient_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/azure_vmssclient_test.go index fe3b6e8d39cb..ea6a20f88547 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/azure_vmssclient_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/azure_vmssclient_test.go @@ -22,12 +22,14 @@ import ( "bytes" "context" "encoding/json" + "fmt" "io/ioutil" "net/http" "testing" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute" "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" 
"github.com/Azure/go-autorest/autorest/to" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" @@ -35,6 +37,7 @@ import ( azclients "k8s.io/legacy-cloud-providers/azure/clients" "k8s.io/legacy-cloud-providers/azure/clients/armclient" "k8s.io/legacy-cloud-providers/azure/clients/armclient/mockarmclient" + "k8s.io/legacy-cloud-providers/azure/retry" ) func TestGetNotFound(t *testing.T) { @@ -119,6 +122,41 @@ func TestCreateOrUpdate(t *testing.T) { assert.Nil(t, rerr) } +func TestCreateOrUpdateAsync(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + vmss := getTestVMSS("vmss1") + armClient := mockarmclient.NewMockInterface(ctrl) + future := &azure.Future{} + + armClient.EXPECT().PutResourceAsync(gomock.Any(), to.String(vmss.ID), vmss).Return(future, nil).Times(1) + vmssClient := getTestVMSSClient(armClient) + _, rerr := vmssClient.CreateOrUpdateAsync(context.TODO(), "rg", "vmss1", vmss) + assert.Nil(t, rerr) + + retryErr := &retry.Error{RawError: fmt.Errorf("error")} + armClient.EXPECT().PutResourceAsync(gomock.Any(), to.String(vmss.ID), vmss).Return(future, retryErr).Times(1) + _, rerr = vmssClient.CreateOrUpdateAsync(context.TODO(), "rg", "vmss1", vmss) + assert.Equal(t, retryErr, rerr) +} + +func TestWaitForAsyncOperationResult(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + armClient := mockarmclient.NewMockInterface(ctrl) + response := &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + } + + armClient.EXPECT().WaitForAsyncOperationResult(gomock.Any(), &azure.Future{}, "VMSSWaitForAsyncOperationResult").Return(response, nil) + vmssClient := getTestVMSSClient(armClient) + _, err := vmssClient.WaitForAsyncOperationResult(context.TODO(), &azure.Future{}) + assert.Nil(t, err) +} + func TestDeleteInstances(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() From b748c702d995dc4c26dfc1a12fb4beb5d8de91ae Mon Sep 17 00:00:00 2001 From: SataQiu <1527062125@qq.com> Date: Fri, 27 Mar 2020 17:19:00 +0800 Subject: [PATCH 50/92] e2e/framework: remove direct import to pkg/kubelet/util/format --- test/e2e/framework/pod/BUILD | 1 - test/e2e/framework/pod/wait.go | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/test/e2e/framework/pod/BUILD b/test/e2e/framework/pod/BUILD index 196f712ad9e2..c5628a992133 100644 --- a/test/e2e/framework/pod/BUILD +++ b/test/e2e/framework/pod/BUILD @@ -13,7 +13,6 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/kubelet/types:go_default_library", - "//pkg/kubelet/util/format:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/test/e2e/framework/pod/wait.go b/test/e2e/framework/pod/wait.go index c1577c7ed2fa..8f1e8be6f8ca 100644 --- a/test/e2e/framework/pod/wait.go +++ b/test/e2e/framework/pod/wait.go @@ -34,7 +34,6 @@ import ( "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubectl/pkg/util/podutils" - "k8s.io/kubernetes/pkg/kubelet/util/format" e2elog "k8s.io/kubernetes/test/e2e/framework/log" testutils "k8s.io/kubernetes/test/utils" ) @@ -302,7 +301,7 @@ func WaitForMatchPodsCondition(c clientset.Interface, opts metav1.ListOptions, d return fmt.Errorf("Unexpected error: %v", err) } if !done { - conditionNotMatch = append(conditionNotMatch, format.Pod(&pod)) + conditionNotMatch = 
append(conditionNotMatch, fmt.Sprintf("%s_%s(%s)", pod.Name, pod.Namespace, pod.UID)) } } if len(conditionNotMatch) <= 0 { From 4f0816ebf2eb53f81746b6c7fd5cda579203984f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Janek=20=C5=81ukaszewicz?= Date: Fri, 27 Mar 2020 12:23:17 +0100 Subject: [PATCH 51/92] Revert "Revert "log-dump.sh: allow to dump extra log files"" This reverts commit c20e6043777c8323ff6c73b0d2b33115e8dc9a3b. --- cluster/log-dump/log-dump.sh | 3 +++ cluster/log-dump/logexporter-daemonset.yaml | 1 + 2 files changed, 4 insertions(+) diff --git a/cluster/log-dump/log-dump.sh b/cluster/log-dump/log-dump.sh index 6a4e9070c689..a8c9a4d47d84 100755 --- a/cluster/log-dump/log-dump.sh +++ b/cluster/log-dump/log-dump.sh @@ -51,6 +51,7 @@ readonly kern_logfile="kern.log" readonly initd_logfiles="docker/log" readonly supervisord_logfiles="kubelet.log supervisor/supervisord.log supervisor/kubelet-stdout.log supervisor/kubelet-stderr.log supervisor/docker-stdout.log supervisor/docker-stderr.log" readonly systemd_services="kubelet kubelet-monitor kube-container-runtime-monitor ${LOG_DUMP_SYSTEMD_SERVICES:-docker}" +readonly extra_log_files="${LOG_DUMP_EXTRA_FILES:-}" readonly dump_systemd_journal="${LOG_DUMP_SYSTEMD_JOURNAL:-false}" # Log files found in WINDOWS_LOGS_DIR on Windows nodes: readonly windows_node_logfiles="kubelet.log kube-proxy.log docker.log" @@ -140,6 +141,7 @@ function save-logs() { local opt_systemd_services="${4:-""}" local on_master="${5:-"false"}" + files="${files} ${extra_log_files}" if [[ -n "${use_custom_instance_list}" ]]; then if [[ -n "${LOG_DUMP_SAVE_LOGS:-}" ]]; then files="${files} ${LOG_DUMP_SAVE_LOGS:-}" @@ -496,6 +498,7 @@ function dump_nodes_with_logexporter() { sed -i'' -e "s@{{.GCSPath}}@${gcs_artifacts_dir}@g" "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml" sed -i'' -e "s@{{.EnableHollowNodeLogs}}@${enable_hollow_node_logs}@g" "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml" sed -i'' -e "s@{{.DumpSystemdJournal}}@${dump_systemd_journal}@g" "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml" + sed -i'' -e "s@{{.ExtraLogFiles}}@${extra_log_files}@g" "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml" # Create the logexporter namespace, service-account secret and the logexporter daemonset within that namespace. 
KUBECTL="${KUBE_ROOT}/cluster/kubectl.sh" diff --git a/cluster/log-dump/logexporter-daemonset.yaml b/cluster/log-dump/logexporter-daemonset.yaml index bad9ef30206d..9c911e148d94 100644 --- a/cluster/log-dump/logexporter-daemonset.yaml +++ b/cluster/log-dump/logexporter-daemonset.yaml @@ -50,6 +50,7 @@ spec: - --gcloud-auth-file-path=/etc/service-account/service-account.json - --enable-hollow-node-logs={{.EnableHollowNodeLogs}} - --dump-systemd-journal={{.DumpSystemdJournal}} + - --extra-log-files={{.ExtraLogFiles}} - --sleep-duration=24h volumeMounts: - mountPath: /etc/service-account From 94db87234bfab34df04d63c5ca16d80ef9407031 Mon Sep 17 00:00:00 2001 From: tanjunchen Date: Fri, 27 Mar 2020 22:56:21 +0800 Subject: [PATCH 52/92] add tanjunchen as /test reviewer --- test/OWNERS | 1 + 1 file changed, 1 insertion(+) diff --git a/test/OWNERS b/test/OWNERS index 0718d3ab16ae..e744fdc44ac3 100644 --- a/test/OWNERS +++ b/test/OWNERS @@ -4,6 +4,7 @@ reviewers: - neolit123 - johnSchnake - SataQiu + - tanjunchen approvers: - alejandrox1 - andrewsykim From 875e39e37ad889b0fce97b515afa74b7e5d420e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Janek=20=C5=81ukaszewicz?= Date: Fri, 27 Mar 2020 16:17:15 +0100 Subject: [PATCH 53/92] Bump logexporter to v20200327-9ba073aa98 --- cluster/log-dump/logexporter-daemonset.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/log-dump/logexporter-daemonset.yaml b/cluster/log-dump/logexporter-daemonset.yaml index bad9ef30206d..8e7f45200f9c 100644 --- a/cluster/log-dump/logexporter-daemonset.yaml +++ b/cluster/log-dump/logexporter-daemonset.yaml @@ -36,7 +36,7 @@ spec: spec: containers: - name: logexporter-test - image: gcr.io/k8s-testimages/logexporter:v20200227-da16e1b17 + image: gcr.io/k8s-testimages/logexporter:v20200327-9ba073aa98 env: - name: NODE_NAME valueFrom: From 9fd48b4039491605012ec76a98d0cb2ea49804e9 Mon Sep 17 00:00:00 2001 From: Kevin Taylor Date: Fri, 27 Mar 2020 16:28:33 +0000 Subject: [PATCH 54/92] Remove VolumeSubpathEnvExpansion Feature Gate --- pkg/api/pod/util.go | 4 ++-- pkg/api/pod/util_test.go | 1 - pkg/apis/core/validation/validation_test.go | 5 +---- pkg/features/kube_features.go | 10 ---------- pkg/kubelet/kubelet_pods.go | 4 ---- 5 files changed, 3 insertions(+), 21 deletions(-) diff --git a/pkg/api/pod/util.go b/pkg/api/pod/util.go index 366ca87df290..b29055d104cd 100644 --- a/pkg/api/pod/util.go +++ b/pkg/api/pod/util.go @@ -394,8 +394,8 @@ func dropDisabledFields( podSpec.EphemeralContainers = nil } - if (!utilfeature.DefaultFeatureGate.Enabled(features.VolumeSubpath) || !utilfeature.DefaultFeatureGate.Enabled(features.VolumeSubpathEnvExpansion)) && !subpathExprInUse(oldPodSpec) { - // drop subpath env expansion from the pod if either of the subpath features is disabled and the old spec did not specify subpath env expansion + if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeSubpath) && !subpathExprInUse(oldPodSpec) { + // drop subpath env expansion from the pod if subpath feature is disabled and the old spec did not specify subpath env expansion VisitContainers(podSpec, AllContainers, func(c *api.Container, containerType ContainerType) bool { for i := range c.VolumeMounts { c.VolumeMounts[i].SubPathExpr = "" diff --git a/pkg/api/pod/util_test.go b/pkg/api/pod/util_test.go index 9b4deb29211d..233356052622 100644 --- a/pkg/api/pod/util_test.go +++ b/pkg/api/pod/util_test.go @@ -1479,7 +1479,6 @@ func TestDropSubPathExpr(t *testing.T) { } t.Run(fmt.Sprintf("feature enabled=%v, old pod %v, new pod %v", 
enabled, oldPodInfo.description, newPodInfo.description), func(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeSubpathEnvExpansion, enabled)() var oldPodSpec *api.PodSpec if oldPod != nil { diff --git a/pkg/apis/core/validation/validation_test.go b/pkg/apis/core/validation/validation_test.go index 8e23a7e93f5e..a4e1b8928e2c 100644 --- a/pkg/apis/core/validation/validation_test.go +++ b/pkg/apis/core/validation/validation_test.go @@ -5053,8 +5053,7 @@ func TestValidateDisabledSubpath(t *testing.T) { } func TestValidateSubpathMutuallyExclusive(t *testing.T) { - // Enable feature VolumeSubpathEnvExpansion and VolumeSubpath - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeSubpathEnvExpansion, true)() + // Enable feature VolumeSubpath defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeSubpath, true)() volumes := []core.Volume{ @@ -5137,8 +5136,6 @@ func TestValidateSubpathMutuallyExclusive(t *testing.T) { } func TestValidateDisabledSubpathExpr(t *testing.T) { - // Enable feature VolumeSubpathEnvExpansion - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeSubpathEnvExpansion, true)() volumes := []core.Volume{ {Name: "abc", VolumeSource: core.VolumeSource{PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ClaimName: "testclaim1"}}}, diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index da3286011efd..0e54cefb0973 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -288,15 +288,6 @@ const ( // while making decisions. BalanceAttachedNodeVolumes featuregate.Feature = "BalanceAttachedNodeVolumes" - // owner: @kevtaylor - // alpha: v1.14 - // beta: v1.15 - // ga: v1.17 - // - // Allow subpath environment variable substitution - // Only applicable if the VolumeSubpath feature is also enabled - VolumeSubpathEnvExpansion featuregate.Feature = "VolumeSubpathEnvExpansion" - // owner: @vladimirvivien // alpha: v1.11 // beta: v1.14 @@ -646,7 +637,6 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS VolumeSubpath: {Default: true, PreRelease: featuregate.GA}, ConfigurableFSGroupPolicy: {Default: false, PreRelease: featuregate.Alpha}, BalanceAttachedNodeVolumes: {Default: false, PreRelease: featuregate.Alpha}, - VolumeSubpathEnvExpansion: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.19, CSIBlockVolume: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.20 CSIInlineVolume: {Default: true, PreRelease: featuregate.Beta}, RuntimeClass: {Default: true, PreRelease: featuregate.Beta}, diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go index 9d45549bf64f..345566d10260 100644 --- a/pkg/kubelet/kubelet_pods.go +++ b/pkg/kubelet/kubelet_pods.go @@ -167,10 +167,6 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h return nil, cleanupAction, fmt.Errorf("volume subpaths are disabled") } - if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeSubpathEnvExpansion) { - return nil, cleanupAction, fmt.Errorf("volume subpath expansion is disabled") - } - subPath, err = kubecontainer.ExpandContainerVolumeMounts(mount, expandEnvs) if err != nil { From 8205f815fb7c76d766a2ffb31bd77c5a563a47d8 Mon Sep 17 00:00:00 2001 From: David Zhu Date: Fri, 27 Mar 2020 18:27:57 +0100 Subject: [PATCH 55/92] 
Wait for APIServer 'ok' forever during CSINode initialization during Kubelet init --- pkg/volume/csi/csi_plugin.go | 48 +++++++++++++++++++++++++++--------- 1 file changed, 37 insertions(+), 11 deletions(-) diff --git a/pkg/volume/csi/csi_plugin.go b/pkg/volume/csi/csi_plugin.go index 7acd84bee68b..771827432753 100644 --- a/pkg/volume/csi/csi_plugin.go +++ b/pkg/volume/csi/csi_plugin.go @@ -227,10 +227,10 @@ func (p *csiPlugin) Init(host volume.VolumeHost) error { if utilfeature.DefaultFeatureGate.Enabled(features.CSINodeInfo) && utilfeature.DefaultFeatureGate.Enabled(features.CSIMigration) { - // This function prevents Kubelet from posting Ready status until CSINodeInfo + // This function prevents Kubelet from posting Ready status until CSINode // is both installed and initialized if err := initializeCSINode(host); err != nil { - return errors.New(log("failed to initialize CSINodeInfo: %v", err)) + return errors.New(log("failed to initialize CSINode: %v", err)) } } @@ -240,21 +240,27 @@ func (p *csiPlugin) Init(host volume.VolumeHost) error { func initializeCSINode(host volume.VolumeHost) error { kvh, ok := host.(volume.KubeletVolumeHost) if !ok { - klog.V(4).Info("Cast from VolumeHost to KubeletVolumeHost failed. Skipping CSINodeInfo initialization, not running on kubelet") + klog.V(4).Info("Cast from VolumeHost to KubeletVolumeHost failed. Skipping CSINode initialization, not running on kubelet") return nil } kubeClient := host.GetKubeClient() if kubeClient == nil { - // Kubelet running in standalone mode. Skip CSINodeInfo initialization - klog.Warning("Skipping CSINodeInfo initialization, kubelet running in standalone mode") + // Kubelet running in standalone mode. Skip CSINode initialization + klog.Warning("Skipping CSINode initialization, kubelet running in standalone mode") return nil } - kvh.SetKubeletError(errors.New("CSINodeInfo is not yet initialized")) + kvh.SetKubeletError(errors.New("CSINode is not yet initialized")) go func() { defer utilruntime.HandleCrash() + // First wait indefinitely to talk to Kube APIServer + err := waitForAPIServerForever(kubeClient) + if err != nil { + klog.Fatalf("Failed to initialize CSINode while waiting for API server to report ok: %v", err) + } + // Backoff parameters tuned to retry over 140 seconds. Will fail and restart the Kubelet // after max retry steps. initBackoff := wait.Backoff{ @@ -263,12 +269,12 @@ func initializeCSINode(host volume.VolumeHost) error { Factor: 6.0, Jitter: 0.1, } - err := wait.ExponentialBackoff(initBackoff, func() (bool, error) { - klog.V(4).Infof("Initializing migrated drivers on CSINodeInfo") + err = wait.ExponentialBackoff(initBackoff, func() (bool, error) { + klog.V(4).Infof("Initializing migrated drivers on CSINode") err := nim.InitializeCSINodeWithAnnotation() if err != nil { - kvh.SetKubeletError(fmt.Errorf("Failed to initialize CSINodeInfo: %v", err)) - klog.Errorf("Failed to initialize CSINodeInfo: %v", err) + kvh.SetKubeletError(fmt.Errorf("Failed to initialize CSINode: %v", err)) + klog.Errorf("Failed to initialize CSINode: %v", err) return false, nil } @@ -282,7 +288,7 @@ func initializeCSINode(host volume.VolumeHost) error { // using CSI for all Migrated volume plugins. Then all the CSINode initialization // code can be dropped from Kubelet. 
// Kill the Kubelet process and allow it to restart to retry initialization - klog.Fatalf("Failed to initialize CSINodeInfo after retrying") + klog.Fatalf("Failed to initialize CSINode after retrying: %v", err) } }() return nil @@ -914,3 +920,23 @@ func highestSupportedVersion(versions []string) (*utilversion.Version, error) { } return highestSupportedVersion, nil } + +// waitForAPIServerForever waits forever to get the APIServer Version as a proxy +// for a healthy APIServer. +func waitForAPIServerForever(client clientset.Interface) error { + var lastErr error + err := wait.PollInfinite(time.Second, func() (bool, error) { + _, lastErr = client.Discovery().ServerVersion() + if lastErr != nil { + lastErr = fmt.Errorf("failed to get apiserver version: %v", lastErr) + return false, nil + } + + return true, nil + }) + if err != nil { + return fmt.Errorf("%v: %v", err, lastErr) + } + + return nil +} From 8bdbd4d683df36ccdefd4d491e8042f4099fc74b Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Fri, 27 Mar 2020 18:29:34 +0100 Subject: [PATCH 56/92] Fix CSINodeInfo startup To speed up unit tests and add more observability when things go wrong. --- pkg/volume/csi/csi_plugin.go | 31 +++++++++++-------- .../csi/nodeinfomanager/nodeinfomanager.go | 8 ++--- pkg/volume/testing/testing.go | 2 +- 3 files changed, 23 insertions(+), 18 deletions(-) diff --git a/pkg/volume/csi/csi_plugin.go b/pkg/volume/csi/csi_plugin.go index 771827432753..80d244829392 100644 --- a/pkg/volume/csi/csi_plugin.go +++ b/pkg/volume/csi/csi_plugin.go @@ -17,6 +17,7 @@ limitations under the License. package csi import ( + "context" "errors" "fmt" "os" @@ -24,8 +25,6 @@ import ( "strings" "time" - "context" - "k8s.io/klog" api "k8s.io/api/core/v1" @@ -256,7 +255,8 @@ func initializeCSINode(host volume.VolumeHost) error { defer utilruntime.HandleCrash() // First wait indefinitely to talk to Kube APIServer - err := waitForAPIServerForever(kubeClient) + nodeName := host.GetNodeName() + err := waitForAPIServerForever(kubeClient, nodeName) if err != nil { klog.Fatalf("Failed to initialize CSINode while waiting for API server to report ok: %v", err) } @@ -921,20 +921,25 @@ func highestSupportedVersion(versions []string) (*utilversion.Version, error) { return highestSupportedVersion, nil } -// waitForAPIServerForever waits forever to get the APIServer Version as a proxy -// for a healthy APIServer. -func waitForAPIServerForever(client clientset.Interface) error { +// waitForAPIServerForever waits forever to get a CSINode instance as a proxy +// for a healthy APIServer +func waitForAPIServerForever(client clientset.Interface, nodeName types.NodeName) error { var lastErr error - err := wait.PollInfinite(time.Second, func() (bool, error) { - _, lastErr = client.Discovery().ServerVersion() - if lastErr != nil { - lastErr = fmt.Errorf("failed to get apiserver version: %v", lastErr) - return false, nil + err := wait.PollImmediateInfinite(time.Second, func() (bool, error) { + // Get a CSINode from API server to make sure 1) kubelet can reach API server + // and 2) it has enough permissions. Kubelet may have restricted permissions + // when it's bootstrapping TLS. 
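Between this commit and the previous one, the probe changes from the discovery endpoint to a CSINode GET, but the wait idiom is the same: poll once per second with no upper bound and remember the last error. A condensed sketch using the discovery-based probe from the first version; client construction is omitted and the function name is illustrative:

    package csiinit

    import (
        "fmt"
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
        clientset "k8s.io/client-go/kubernetes"
    )

    // waitForAPIServer blocks until the API server answers, surfacing the last
    // probe error on the failure path (unreachable with an infinite poll, but
    // kept for symmetry with the patched code).
    func waitForAPIServer(client clientset.Interface) error {
        var lastErr error
        err := wait.PollImmediateInfinite(time.Second, func() (bool, error) {
            if _, lastErr = client.Discovery().ServerVersion(); lastErr != nil {
                // Keep waiting; the kubelet must not give up on a slow control plane.
                return false, nil
            }
            return true, nil
        })
        if err != nil {
            return fmt.Errorf("%v: %v", err, lastErr)
        }
        return nil
    }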
+ // https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/ + _, lastErr = client.StorageV1().CSINodes().Get(context.TODO(), string(nodeName), meta.GetOptions{}) + if lastErr == nil || apierrors.IsNotFound(lastErr) { + // API server contacted + return true, nil } - - return true, nil + klog.V(2).Infof("Failed to contact API server when waiting for CSINode publishing: %s", lastErr) + return false, nil }) if err != nil { + // In theory this is unreachable, but just in case: return fmt.Errorf("%v: %v", err, lastErr) } diff --git a/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go b/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go index 98a16be6e8a1..76b4cc0da1ff 100644 --- a/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go +++ b/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go @@ -397,16 +397,16 @@ func (nim *nodeInfoManager) InitializeCSINodeWithAnnotation() error { return goerrors.New("error getting CSI client") } - var updateErrs []error + var lastErr error err := wait.ExponentialBackoff(updateBackoff, func() (bool, error) { - if err := nim.tryInitializeCSINodeWithAnnotation(csiKubeClient); err != nil { - updateErrs = append(updateErrs, err) + if lastErr = nim.tryInitializeCSINodeWithAnnotation(csiKubeClient); lastErr != nil { + klog.V(2).Infof("Failed to publish CSINode: %v", lastErr) return false, nil } return true, nil }) if err != nil { - return fmt.Errorf("error updating CSINode annotation: %v; caused by: %v", err, utilerrors.NewAggregate(updateErrs)) + return fmt.Errorf("error updating CSINode annotation: %v; caused by: %v", err, lastErr) } return nil diff --git a/pkg/volume/testing/testing.go b/pkg/volume/testing/testing.go index 78ccafce094a..18ce9cfd6eb1 100644 --- a/pkg/volume/testing/testing.go +++ b/pkg/volume/testing/testing.go @@ -1870,7 +1870,7 @@ func (f *fakeVolumeHost) WaitForCacheSync() error { } func (f *fakeVolumeHost) WaitForKubeletErrNil() error { - return wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) { + return wait.PollImmediate(10*time.Millisecond, 10*time.Second, func() (bool, error) { f.mux.Lock() defer f.mux.Unlock() return f.kubeletErr == nil, nil From 5125310023f78105b35151aa7dbaf0b35a90007a Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Fri, 27 Mar 2020 12:12:27 -0400 Subject: [PATCH 57/92] Consider future deletionTimestamps when validating bound tokens --- pkg/serviceaccount/BUILD | 2 + pkg/serviceaccount/claims.go | 11 +- pkg/serviceaccount/claims_test.go | 166 ++++++++++++++++++++++++++++++ 3 files changed, 175 insertions(+), 4 deletions(-) diff --git a/pkg/serviceaccount/BUILD b/pkg/serviceaccount/BUILD index d28601f8f660..bfb6f959084f 100644 --- a/pkg/serviceaccount/BUILD +++ b/pkg/serviceaccount/BUILD @@ -57,7 +57,9 @@ go_test( "//pkg/controller/serviceaccount:go_default_library", "//pkg/routes:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", diff --git a/pkg/serviceaccount/claims.go b/pkg/serviceaccount/claims.go index 3d48b6f2dce4..543efc20e20c 100644 --- a/pkg/serviceaccount/claims.go +++ 
b/pkg/serviceaccount/claims.go @@ -98,8 +98,9 @@ func (v *validator) Validate(_ string, public *jwt.Claims, privateObj interface{ klog.Errorf("jwt validator expected private claim of type *privateClaims but got: %T", privateObj) return nil, errors.New("Token could not be validated.") } + nowTime := now() err := public.Validate(jwt.Expected{ - Time: now(), + Time: nowTime, }) switch { case err == nil: @@ -110,6 +111,8 @@ func (v *validator) Validate(_ string, public *jwt.Claims, privateObj interface{ return nil, errors.New("Token could not be validated.") } + // consider things deleted prior to now()-leeway to be invalid + invalidIfDeletedBefore := nowTime.Add(-jwt.DefaultLeeway) namespace := private.Kubernetes.Namespace saref := private.Kubernetes.Svcacct podref := private.Kubernetes.Pod @@ -120,7 +123,7 @@ func (v *validator) Validate(_ string, public *jwt.Claims, privateObj interface{ klog.V(4).Infof("Could not retrieve service account %s/%s: %v", namespace, saref.Name, err) return nil, err } - if serviceAccount.DeletionTimestamp != nil { + if serviceAccount.DeletionTimestamp != nil && serviceAccount.DeletionTimestamp.Time.Before(invalidIfDeletedBefore) { klog.V(4).Infof("Service account has been deleted %s/%s", namespace, saref.Name) return nil, fmt.Errorf("ServiceAccount %s/%s has been deleted", namespace, saref.Name) } @@ -136,7 +139,7 @@ func (v *validator) Validate(_ string, public *jwt.Claims, privateObj interface{ klog.V(4).Infof("Could not retrieve bound secret %s/%s for service account %s/%s: %v", namespace, secref.Name, namespace, saref.Name, err) return nil, errors.New("Token has been invalidated") } - if secret.DeletionTimestamp != nil { + if secret.DeletionTimestamp != nil && secret.DeletionTimestamp.Time.Before(invalidIfDeletedBefore) { klog.V(4).Infof("Bound secret is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, secref.Name, namespace, saref.Name) return nil, errors.New("Token has been invalidated") } @@ -154,7 +157,7 @@ func (v *validator) Validate(_ string, public *jwt.Claims, privateObj interface{ klog.V(4).Infof("Could not retrieve bound pod %s/%s for service account %s/%s: %v", namespace, podref.Name, namespace, saref.Name, err) return nil, errors.New("Token has been invalidated") } - if pod.DeletionTimestamp != nil { + if pod.DeletionTimestamp != nil && pod.DeletionTimestamp.Time.Before(invalidIfDeletedBefore) { klog.V(4).Infof("Bound pod is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, podref.Name, namespace, saref.Name) return nil, errors.New("Token has been invalidated") } diff --git a/pkg/serviceaccount/claims_test.go b/pkg/serviceaccount/claims_test.go index 91ed25ed0d36..8c5ac79a915d 100644 --- a/pkg/serviceaccount/claims_test.go +++ b/pkg/serviceaccount/claims_test.go @@ -24,7 +24,10 @@ import ( "gopkg.in/square/go-jose.v2/jwt" + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/kubernetes/pkg/apis/core" ) @@ -182,3 +185,166 @@ func TestClaims(t *testing.T) { }) } } + +type deletionTestCase struct { + name string + time *metav1.Time + expectErr bool +} + +type claimTestCase struct { + name string + getter ServiceAccountTokenGetter + private *privateClaims + expectErr bool +} + +func TestValidatePrivateClaims(t *testing.T) { + var ( + nowUnix = int64(1514764800) + + serviceAccount = &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "saname", Namespace: "ns", UID: "sauid"}} + 
secret = &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "secretname", Namespace: "ns", UID: "secretuid"}} + pod = &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "podname", Namespace: "ns", UID: "poduid"}} + ) + + deletionTestCases := []deletionTestCase{ + { + name: "valid", + time: nil, + }, + { + name: "deleted now", + time: &metav1.Time{Time: time.Unix(nowUnix, 0)}, + }, + { + name: "deleted near past", + time: &metav1.Time{Time: time.Unix(nowUnix-1, 0)}, + }, + { + name: "deleted near future", + time: &metav1.Time{Time: time.Unix(nowUnix+1, 0)}, + }, + { + name: "deleted now-leeway", + time: &metav1.Time{Time: time.Unix(nowUnix-60, 0)}, + }, + { + name: "deleted now-leeway-1", + time: &metav1.Time{Time: time.Unix(nowUnix-61, 0)}, + expectErr: true, + }, + } + + testcases := []claimTestCase{ + { + name: "missing serviceaccount", + getter: fakeGetter{nil, nil, nil}, + private: &privateClaims{Kubernetes: kubernetes{Svcacct: ref{Name: "saname", UID: "sauid"}, Namespace: "ns"}}, + expectErr: true, + }, + { + name: "missing secret", + getter: fakeGetter{serviceAccount, nil, nil}, + private: &privateClaims{Kubernetes: kubernetes{Svcacct: ref{Name: "saname", UID: "sauid"}, Secret: &ref{Name: "secretname", UID: "secretuid"}, Namespace: "ns"}}, + expectErr: true, + }, + { + name: "missing pod", + getter: fakeGetter{serviceAccount, nil, nil}, + private: &privateClaims{Kubernetes: kubernetes{Svcacct: ref{Name: "saname", UID: "sauid"}, Pod: &ref{Name: "podname", UID: "poduid"}, Namespace: "ns"}}, + expectErr: true, + }, + { + name: "different uid serviceaccount", + getter: fakeGetter{serviceAccount, nil, nil}, + private: &privateClaims{Kubernetes: kubernetes{Svcacct: ref{Name: "saname", UID: "sauidold"}, Namespace: "ns"}}, + expectErr: true, + }, + { + name: "different uid secret", + getter: fakeGetter{serviceAccount, secret, nil}, + private: &privateClaims{Kubernetes: kubernetes{Svcacct: ref{Name: "saname", UID: "sauid"}, Secret: &ref{Name: "secretname", UID: "secretuidold"}, Namespace: "ns"}}, + expectErr: true, + }, + { + name: "different uid pod", + getter: fakeGetter{serviceAccount, nil, pod}, + private: &privateClaims{Kubernetes: kubernetes{Svcacct: ref{Name: "saname", UID: "sauid"}, Pod: &ref{Name: "podname", UID: "poduidold"}, Namespace: "ns"}}, + expectErr: true, + }, + } + + for _, deletionTestCase := range deletionTestCases { + var ( + deletedServiceAccount = serviceAccount.DeepCopy() + deletedPod = pod.DeepCopy() + deletedSecret = secret.DeepCopy() + ) + deletedServiceAccount.DeletionTimestamp = deletionTestCase.time + deletedPod.DeletionTimestamp = deletionTestCase.time + deletedSecret.DeletionTimestamp = deletionTestCase.time + + testcases = append(testcases, + claimTestCase{ + name: deletionTestCase.name + " serviceaccount", + getter: fakeGetter{deletedServiceAccount, nil, nil}, + private: &privateClaims{Kubernetes: kubernetes{Svcacct: ref{Name: "saname", UID: "sauid"}, Namespace: "ns"}}, + expectErr: deletionTestCase.expectErr, + }, + claimTestCase{ + name: deletionTestCase.name + " secret", + getter: fakeGetter{serviceAccount, deletedSecret, nil}, + private: &privateClaims{Kubernetes: kubernetes{Svcacct: ref{Name: "saname", UID: "sauid"}, Secret: &ref{Name: "secretname", UID: "secretuid"}, Namespace: "ns"}}, + expectErr: deletionTestCase.expectErr, + }, + claimTestCase{ + name: deletionTestCase.name + " pod", + getter: fakeGetter{serviceAccount, nil, deletedPod}, + private: &privateClaims{Kubernetes: kubernetes{Svcacct: ref{Name: "saname", UID: "sauid"}, Pod: &ref{Name: "podname", UID: 
"poduid"}, Namespace: "ns"}}, + expectErr: deletionTestCase.expectErr, + }, + ) + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + v := &validator{tc.getter} + _, err := v.Validate("", &jwt.Claims{Expiry: jwt.NumericDate(nowUnix)}, tc.private) + if err != nil && !tc.expectErr { + t.Fatal(err) + } + if err == nil && tc.expectErr { + t.Fatal("expected error, got none") + } + if err != nil { + return + } + }) + } +} + +type fakeGetter struct { + serviceAccount *v1.ServiceAccount + secret *v1.Secret + pod *v1.Pod +} + +func (f fakeGetter) GetServiceAccount(namespace, name string) (*v1.ServiceAccount, error) { + if f.serviceAccount == nil { + return nil, apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: "serviceaccounts"}, name) + } + return f.serviceAccount, nil +} +func (f fakeGetter) GetPod(namespace, name string) (*v1.Pod, error) { + if f.pod == nil { + return nil, apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: "pods"}, name) + } + return f.pod, nil +} +func (f fakeGetter) GetSecret(namespace, name string) (*v1.Secret, error) { + if f.secret == nil { + return nil, apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: "secrets"}, name) + } + return f.secret, nil +} From fe74d08e6078e8effb7e6b649e06ea7e676c2da9 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Fri, 27 Mar 2020 13:47:28 -0500 Subject: [PATCH 58/92] e2e/network: get previous pod logs on NetworkPolicy test failure Sometimes the pod has already been cleaned up by the time the test tried to grab the logs. Mar 27 16:19:38.066: INFO: Waiting for client-a-jt4tf to complete. Mar 27 16:19:38.066: INFO: Waiting up to 5m0s for pod "client-a-jt4tf" in namespace "e2e-network-policy-c-9007" to be "success or failure" Mar 27 16:19:38.072: INFO: Pod "client-a-jt4tf": Phase="Pending", Reason="", readiness=false. Elapsed: 6.270302ms Mar 27 16:19:40.078: INFO: Pod "client-a-jt4tf": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01233019s Mar 27 16:19:42.086: INFO: Pod "client-a-jt4tf": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.020186873s STEP: Saw pod success Mar 27 16:19:42.086: INFO: Pod "client-a-jt4tf" satisfied condition "success or failure" Mar 27 16:19:42.093: FAIL: Error getting container logs: the server could not find the requested resource (get pods client-a-jt4tf) Full Stack Trace github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/network.checkNoConnectivity(0xc00104adc0, 0xc0016b82c0, 0xc001666400, 0xc000c32000) /go/src/github.com/openshift/origin/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/network/network_policy.go:1457 +0x2a0 github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/network.testCannotConnect(0xc00104adc0, 0xc0016b82c0, 0x55587e9, 0x8, 0xc000c32000, 0x50) /go/src/github.com/openshift/origin/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/network/network_policy.go:1406 +0x1fc github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/network.glob..func13.2.7() /go/src/github.com/openshift/origin/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/network/network_policy.go:285 +0x883 github.com/openshift/origin/pkg/test/ginkgo.(*TestOptions).Run(0xc001e47830, 0xc001e50b70, 0x1, 0x1, 0x0, 0x0) /go/src/github.com/openshift/origin/_output/local/go/src/github.com/openshift/origin/pkg/test/ginkgo/cmd_runtest.go:59 +0x41f main.newRunTestCommand.func1(0xc00121b900, 0xc001e50b70, 0x1, 0x1, 0x0, 0x0) /go/src/github.com/openshift/origin/_output/local/go/src/github.com/openshift/origin/cmd/openshift-tests/openshift-tests.go:238 +0x15d github.com/openshift/origin/vendor/github.com/spf13/cobra.(*Command).execute(0xc00121b900, 0xc001e50b30, 0x1, 0x1, 0xc00121b900, 0xc001e50b30) /go/src/github.com/openshift/origin/_output/local/go/src/github.com/openshift/origin/vendor/github.com/spf13/cobra/command.go:826 +0x460 github.com/openshift/origin/vendor/github.com/spf13/cobra.(*Command).ExecuteC(0xc00121b180, 0x0, 0x60d2d00, 0x9887ec8) /go/src/github.com/openshift/origin/_output/local/go/src/github.com/openshift/origin/vendor/github.com/spf13/cobra/command.go:914 +0x2fb github.com/openshift/origin/vendor/github.com/spf13/cobra.(*Command).Execute(...) /go/src/github.com/openshift/origin/_output/local/go/src/github.com/openshift/origin/vendor/github.com/spf13/cobra/command.go:864 main.main.func1(0xc00121b180, 0x0, 0x0) /go/src/github.com/openshift/origin/_output/local/go/src/github.com/openshift/origin/cmd/openshift-tests/openshift-tests.go:59 +0x9c main.main() /go/src/github.com/openshift/origin/_output/local/go/src/github.com/openshift/origin/cmd/openshift-tests/openshift-tests.go:60 +0x341 STEP: Cleaning up the pod client-a-jt4tf STEP: Cleaning up the policy. 
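[Editor's note: the fix below reduces to a single fallback: when fetching logs fails with NotFound because the pod object is already gone, request the logs of the previous container instance instead, which the kubelet can still serve. A minimal sketch of that pattern, assuming the e2epod helpers used in the diff (GetPodLogs/GetPreviousPodLogs) and a hypothetical wrapper name:

package network

import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// getLogsWithFallback is a hypothetical helper, not part of this patch.
// It fetches the current container logs and, if the pod object has
// already been deleted (NotFound), falls back to the logs of the
// previous (terminated) container instance.
func getLogsWithFallback(c clientset.Interface, ns, podName, containerName string) (string, error) {
	logs, err := e2epod.GetPodLogs(c, ns, podName, containerName)
	if err != nil && apierrors.IsNotFound(err) {
		// The pod may have been cleaned up between test completion
		// and log collection.
		logs, err = e2epod.GetPreviousPodLogs(c, ns, podName, containerName)
	}
	return logs, err
}
]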
--- test/e2e/network/network_policy.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/test/e2e/network/network_policy.go b/test/e2e/network/network_policy.go index f0d5ac74362f..1c43921e95bf 100644 --- a/test/e2e/network/network_policy.go +++ b/test/e2e/network/network_policy.go @@ -21,6 +21,7 @@ import ( "encoding/json" v1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" @@ -1566,9 +1567,14 @@ func checkNoConnectivityByExitCode(f *framework.Framework, ns *v1.Namespace, pod func collectPodsAndNetworkPolicies(f *framework.Framework, podClient *v1.Pod) ([]string, *networkingv1.NetworkPolicyList, string) { // Collect pod logs when we see a failure. logs, logErr := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podClient.Name, "client") + if logErr != nil && apierrors.IsNotFound(logErr) { + // Pod may have already been removed; try to get previous pod logs + logs, logErr = e2epod.GetPreviousPodLogs(f.ClientSet, f.Namespace.Name, podClient.Name, fmt.Sprintf("%s-container", podClient.Name)) + } if logErr != nil { framework.Failf("Error getting container logs: %s", logErr) } + // Collect current NetworkPolicies applied in the test namespace. policies, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) if err != nil { From 55df85ba3750cf6b90db6e18d338295f49f0f76f Mon Sep 17 00:00:00 2001 From: Peter Hornyack Date: Fri, 27 Mar 2020 15:44:08 -0700 Subject: [PATCH 59/92] Update GCE Windows node images with March's Windows updates --- cluster/gce/config-common.sh | 2 +- cluster/gce/util.sh | 6 +++--- cluster/gce/windows/k8s-node-setup.psm1 | 15 ++++++++++++++- cluster/gce/windows/smoke-test.sh | 2 +- 4 files changed, 19 insertions(+), 6 deletions(-) diff --git a/cluster/gce/config-common.sh b/cluster/gce/config-common.sh index e7dda920cd54..9622f268c61c 100644 --- a/cluster/gce/config-common.sh +++ b/cluster/gce/config-common.sh @@ -159,4 +159,4 @@ export WINDOWS_BOOTSTRAP_KUBECONFIG_FILE="${WINDOWS_K8S_DIR}\kubelet.bootstrap-k # Path for kube-proxy kubeconfig file on Windows nodes. export WINDOWS_KUBEPROXY_KUBECONFIG_FILE="${WINDOWS_K8S_DIR}\kubeproxy.kubeconfig" # Pause container image for Windows container. 
-export WINDOWS_INFRA_CONTAINER="gcr.io/gke-release/pause-win:1.1.0" +export WINDOWS_INFRA_CONTAINER="gcr.io/gke-release/pause-win:1.2.0" diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index b3a941d8c82e..3356fbb07f8c 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -88,11 +88,11 @@ function set-linux-node-image() { function set-windows-node-image() { WINDOWS_NODE_IMAGE_PROJECT="windows-cloud" if [[ "${WINDOWS_NODE_OS_DISTRIBUTION}" == "win2019" ]]; then - WINDOWS_NODE_IMAGE="windows-server-2019-dc-core-for-containers-v20200114" + WINDOWS_NODE_IMAGE="windows-server-2019-dc-core-for-containers-v20200310" elif [[ "${WINDOWS_NODE_OS_DISTRIBUTION}" == "win1909" ]]; then - WINDOWS_NODE_IMAGE="windows-server-1909-dc-core-for-containers-v20200114" + WINDOWS_NODE_IMAGE="windows-server-1909-dc-core-for-containers-v20200310" elif [[ "${WINDOWS_NODE_OS_DISTRIBUTION}" == "win1809" ]]; then - WINDOWS_NODE_IMAGE="windows-server-1809-dc-core-for-containers-v20200114" + WINDOWS_NODE_IMAGE="windows-server-1809-dc-core-for-containers-v20200310" else echo "Unknown WINDOWS_NODE_OS_DISTRIBUTION ${WINDOWS_NODE_OS_DISTRIBUTION}" >&2 exit 1 diff --git a/cluster/gce/windows/k8s-node-setup.psm1 b/cluster/gce/windows/k8s-node-setup.psm1 index ae2788faaf9e..b6efbc2f80f9 100644 --- a/cluster/gce/windows/k8s-node-setup.psm1 +++ b/cluster/gce/windows/k8s-node-setup.psm1 @@ -135,11 +135,24 @@ function Add_GceMetadataServerRoute { } } +# Returns a PowerShell object representing the Windows version. +function Get_WindowsVersion { + # Unlike checking `[System.Environment]::OSVersion.Version`, this long-winded + # approach gets the OS revision/patch number correctly + # (https://superuser.com/a/1160428/652018). + $win_ver = New-Object -TypeName PSObject + $win_ver | Add-Member -MemberType NoteProperty -Name Major -Value $(Get-ItemProperty -Path 'Registry::HKEY_LOCAL_MACHINE\Software\Microsoft\Windows NT\CurrentVersion' CurrentMajorVersionNumber).CurrentMajorVersionNumber + $win_ver | Add-Member -MemberType NoteProperty -Name Minor -Value $(Get-ItemProperty -Path 'Registry::HKEY_LOCAL_MACHINE\Software\Microsoft\Windows NT\CurrentVersion' CurrentMinorVersionNumber).CurrentMinorVersionNumber + $win_ver | Add-Member -MemberType NoteProperty -Name Build -Value $(Get-ItemProperty -Path 'Registry::HKEY_LOCAL_MACHINE\Software\Microsoft\Windows NT\CurrentVersion' CurrentBuild).CurrentBuild + $win_ver | Add-Member -MemberType NoteProperty -Name Revision -Value $(Get-ItemProperty -Path 'Registry::HKEY_LOCAL_MACHINE\Software\Microsoft\Windows NT\CurrentVersion' UBR).UBR + return $win_ver +} + # Writes debugging information, such as Windows version and patch info, to the # console. 
function Dump-DebugInfoToConsole { Try { - $version = "$([System.Environment]::OSVersion.Version | Out-String)" + $version = Get_WindowsVersion | Out-String $hotfixes = "$(Get-Hotfix | Out-String)" $image = "$(Get-InstanceMetadata 'image' | Out-String)" Log-Output "Windows version:`n$version" diff --git a/cluster/gce/windows/smoke-test.sh b/cluster/gce/windows/smoke-test.sh index 9e9bb38492e9..ddbed62d68a1 100755 --- a/cluster/gce/windows/smoke-test.sh +++ b/cluster/gce/windows/smoke-test.sh @@ -358,7 +358,7 @@ spec: spec: containers: - name: pause-win - image: gcr.io/gke-release/pause-win:1.1.0 + image: gcr.io/gke-release/pause-win:1.2.0 nodeSelector: kubernetes.io/os: windows tolerations: From 68dcc171558418c251380e9a4c3bd459edeb1a31 Mon Sep 17 00:00:00 2001 From: kvaps Date: Fri, 27 Mar 2020 23:55:12 +0100 Subject: [PATCH 60/92] Kubeadm: fix Ready condition check --- cmd/kubeadm/app/cmd/phases/join/kubelet.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/kubeadm/app/cmd/phases/join/kubelet.go b/cmd/kubeadm/app/cmd/phases/join/kubelet.go index cdb535a0a011..fae536c183c1 100644 --- a/cmd/kubeadm/app/cmd/phases/join/kubelet.go +++ b/cmd/kubeadm/app/cmd/phases/join/kubelet.go @@ -148,7 +148,7 @@ func runKubeletStartJoinPhase(c workflow.RunData) (returnErr error) { return errors.Wrapf(err, "cannot get Node %q", nodeName) } for _, cond := range node.Status.Conditions { - if cond.Type == v1.NodeReady { + if cond.Type == v1.NodeReady && cond.Status == v1.ConditionTrue { return errors.Errorf("a Node with name %q and status %q already exists in the cluster. "+ "You must delete the existing Node or change the name of this new joining Node", nodeName, v1.NodeReady) } From 43c45edf88eb5810a5ae131329124b21d334c22c Mon Sep 17 00:00:00 2001 From: Anish Ramasekar Date: Fri, 27 Mar 2020 16:33:58 -0700 Subject: [PATCH 61/92] fix concurrency issue in LB creation --- .../src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go index 7bb7000767d8..40962d97d69b 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go @@ -1101,6 +1101,9 @@ func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac // Update VMs with best effort that have already been added to nodeUpdates.
for meta, update := range nodeUpdates { + // create new instances of meta and update for capture by the anonymous function + meta := meta + update := update hostUpdates = append(hostUpdates, func() error { ctx, cancel := getContextWithCancel() defer cancel() From 590f0f345bafbb18e3078e24ad4d839fb9aba78e Mon Sep 17 00:00:00 2001 From: SataQiu <1527062125@qq.com> Date: Sat, 28 Mar 2020 15:50:24 +0800 Subject: [PATCH 62/92] fix the bug that update-vendor.sh reports an error when the grep match fails --- hack/update-vendor.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hack/update-vendor.sh b/hack/update-vendor.sh index 4aac59bb9001..fa283347f37b 100755 --- a/hack/update-vendor.sh +++ b/hack/update-vendor.sh @@ -298,7 +298,7 @@ go mod tidy >>"${LOG_FILE}" 2>&1 # disallow transitive dependencies on k8s.io/kubernetes loopback_deps=() -kube::util::read-array loopback_deps < <(go mod graph | grep ' k8s.io/kubernetes') +kube::util::read-array loopback_deps < <(go mod graph | grep ' k8s.io/kubernetes' || true) if [[ -n ${loopback_deps[*]:+"${loopback_deps[*]}"} ]]; then kube::log::error "Disallowed transitive k8s.io/kubernetes dependencies exist via the following imports:" kube::log::error "${loopback_deps[@]}" From 7e15e31e11e48a6db855e30ca9b07dbce3047577 Mon Sep 17 00:00:00 2001 From: Quan Tian Date: Fri, 27 Mar 2020 18:39:20 +0800 Subject: [PATCH 63/92] Improve fake clientset performance MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The fake clientset used a slice to store each kind of object; initializing the clientset with massive numbers of objects was quite slow because it checked for the existence of an object by traversing all existing objects before adding it, which leads to O(n^2) time complexity. The Create, Update, Get, and Delete methods also had to traverse all objects, which skews the time measurements of code that calls them. This patch switches to a map for storing each kind of object, reducing the time complexity of initializing the clientset to O(n) and of Create, Update, Get, and Delete to O(1). For example: before this patch, it took ~29s to init a clientset with 30000 Pods, and 2~4ms to create and get a Pod. After this patch, it took ~50ms to init a clientset with 30000 Pods, and tens of µs to create and get a Pod.
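[Editor's note: the data-structure change is easiest to see in isolation. Below is a minimal, self-contained sketch of the before/after storage shapes, using toy types standing in for the real client-go tracker; the map key mirrors apimachinery's types.NamespacedName. Since Go map iteration order is randomized, the patch also sorts List results by namespace/name, which is why the expected fixtures in the test diffs below are reordered alphabetically.

package main

import "fmt"

// key mirrors types.NamespacedName: namespace plus name uniquely
// identify an object of a given resource.
type key struct{ namespace, name string }

type object struct {
	key     key
	payload string
}

// sliceStore is the old shape: get scans every element (O(n)), so
// populating n objects with a duplicate check costs O(n^2) overall.
type sliceStore []object

func (s sliceStore) get(k key) (object, bool) {
	for _, o := range s {
		if o.key == k {
			return o, true
		}
	}
	return object{}, false
}

// mapStore is the new shape: get is a single hash lookup (O(1)), and
// populating n objects is O(n).
type mapStore map[key]object

func (m mapStore) get(k key) (object, bool) {
	o, ok := m[k]
	return o, ok
}

func main() {
	k := key{namespace: "ns-foo", name: "name-foo"}
	s := sliceStore{{key: k, payload: "pod"}}
	m := mapStore{k: {key: k, payload: "pod"}}
	fmt.Println(s.get(k))
	fmt.Println(m.get(k))
}
]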
--- .../client-go/dynamic/fake/simple_test.go | 2 +- .../client-go/metadata/fake/simple_test.go | 2 +- .../src/k8s.io/client-go/testing/fixture.go | 97 ++++++++----------- 3 files changed, 44 insertions(+), 57 deletions(-) diff --git a/staging/src/k8s.io/client-go/dynamic/fake/simple_test.go b/staging/src/k8s.io/client-go/dynamic/fake/simple_test.go index adefdf7c8bc2..33037c3c5978 100644 --- a/staging/src/k8s.io/client-go/dynamic/fake/simple_test.go +++ b/staging/src/k8s.io/client-go/dynamic/fake/simple_test.go @@ -75,9 +75,9 @@ func TestList(t *testing.T) { } expected := []unstructured.Unstructured{ - *newUnstructured("group/version", "TheKind", "ns-foo", "name-foo"), *newUnstructured("group/version", "TheKind", "ns-foo", "name-bar"), *newUnstructured("group/version", "TheKind", "ns-foo", "name-baz"), + *newUnstructured("group/version", "TheKind", "ns-foo", "name-foo"), } if !equality.Semantic.DeepEqual(listFirst.Items, expected) { t.Fatal(diff.ObjectGoPrintDiff(expected, listFirst.Items)) diff --git a/staging/src/k8s.io/client-go/metadata/fake/simple_test.go b/staging/src/k8s.io/client-go/metadata/fake/simple_test.go index 641fd55b79f0..e6fdde3ebc45 100644 --- a/staging/src/k8s.io/client-go/metadata/fake/simple_test.go +++ b/staging/src/k8s.io/client-go/metadata/fake/simple_test.go @@ -79,9 +79,9 @@ func TestList(t *testing.T) { } expected := []metav1.PartialObjectMetadata{ - *newPartialObjectMetadata("group/version", "TheKind", "ns-foo", "name-foo"), *newPartialObjectMetadata("group/version", "TheKind", "ns-foo", "name-bar"), *newPartialObjectMetadata("group/version", "TheKind", "ns-foo", "name-baz"), + *newPartialObjectMetadata("group/version", "TheKind", "ns-foo", "name-foo"), } if !equality.Semantic.DeepEqual(listFirst.Items, expected) { t.Fatal(diff.ObjectGoPrintDiff(expected, listFirst.Items)) diff --git a/staging/src/k8s.io/client-go/testing/fixture.go b/staging/src/k8s.io/client-go/testing/fixture.go index 54f600ad3f71..d3b937247b24 100644 --- a/staging/src/k8s.io/client-go/testing/fixture.go +++ b/staging/src/k8s.io/client-go/testing/fixture.go @@ -19,6 +19,7 @@ package testing import ( "fmt" "reflect" + "sort" "sync" jsonpatch "github.com/evanphx/json-patch" @@ -197,7 +198,7 @@ type tracker struct { scheme ObjectScheme decoder runtime.Decoder lock sync.RWMutex - objects map[schema.GroupVersionResource][]runtime.Object + objects map[schema.GroupVersionResource]map[types.NamespacedName]runtime.Object // The value type of watchers is a map of which the key is either a namespace or // all/non namespace aka "" and its value is list of fake watchers. 
// Manipulations on resources will broadcast the notification events into the @@ -214,7 +215,7 @@ func NewObjectTracker(scheme ObjectScheme, decoder runtime.Decoder) ObjectTracke return &tracker{ scheme: scheme, decoder: decoder, - objects: make(map[schema.GroupVersionResource][]runtime.Object), + objects: make(map[schema.GroupVersionResource]map[types.NamespacedName]runtime.Object), watchers: make(map[schema.GroupVersionResource]map[string][]*watch.RaceFreeFakeWatcher), } } @@ -282,31 +283,15 @@ func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string) (runtime return nil, errNotFound } - var matchingObjs []runtime.Object - for _, obj := range objs { - acc, err := meta.Accessor(obj) - if err != nil { - return nil, err - } - if acc.GetNamespace() != ns { - continue - } - if acc.GetName() != name { - continue - } - matchingObjs = append(matchingObjs, obj) - } - if len(matchingObjs) == 0 { + matchingObj, ok := objs[types.NamespacedName{Namespace: ns, Name: name}] + if !ok { return nil, errNotFound } - if len(matchingObjs) > 1 { - return nil, fmt.Errorf("more than one object matched gvr %s, ns: %q name: %q", gvr, ns, name) - } // Only one object should match in the tracker if it works // correctly, as Add/Update methods enforce kind/namespace/name // uniqueness. - obj := matchingObjs[0].DeepCopyObject() + obj := matchingObj.DeepCopyObject() if status, ok := obj.(*metav1.Status); ok { if status.Status != metav1.StatusSuccess { return nil, &errors.StatusError{ErrStatus: *status} @@ -405,21 +390,21 @@ func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns st return errors.NewBadRequest(msg) } - for i, existingObj := range t.objects[gvr] { - oldMeta, err := meta.Accessor(existingObj) - if err != nil { - return err - } - if oldMeta.GetNamespace() == newMeta.GetNamespace() && oldMeta.GetName() == newMeta.GetName() { - if replaceExisting { - for _, w := range t.getWatches(gvr, ns) { - w.Modify(obj) - } - t.objects[gvr][i] = obj - return nil + _, ok := t.objects[gvr] + if !ok { + t.objects[gvr] = make(map[types.NamespacedName]runtime.Object) + } + + namespacedName := types.NamespacedName{Namespace: newMeta.GetNamespace(), Name: newMeta.GetName()} + if _, ok = t.objects[gvr][namespacedName]; ok { + if replaceExisting { + for _, w := range t.getWatches(gvr, ns) { + w.Modify(obj) } - return errors.NewAlreadyExists(gr, newMeta.GetName()) + t.objects[gvr][namespacedName] = obj + return nil } + return errors.NewAlreadyExists(gr, newMeta.GetName()) } if replaceExisting { @@ -427,7 +412,7 @@ func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns st return errors.NewNotFound(gr, newMeta.GetName()) } - t.objects[gvr] = append(t.objects[gvr], obj) + t.objects[gvr][namespacedName] = obj for _, w := range t.getWatches(gvr, ns) { w.Add(obj) @@ -457,35 +442,28 @@ func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string) error t.lock.Lock() defer t.lock.Unlock() - found := false - - for i, existingObj := range t.objects[gvr] { - objMeta, err := meta.Accessor(existingObj) - if err != nil { - return err - } - if objMeta.GetNamespace() == ns && objMeta.GetName() == name { - obj := t.objects[gvr][i] - t.objects[gvr] = append(t.objects[gvr][:i], t.objects[gvr][i+1:]...) 
- for _, w := range t.getWatches(gvr, ns) { - w.Delete(obj) - } - found = true - break - } + objs, ok := t.objects[gvr] + if !ok { + return errors.NewNotFound(gvr.GroupResource(), name) } - if found { - return nil + namespacedName := types.NamespacedName{Namespace: ns, Name: name} + obj, ok := objs[namespacedName] + if !ok { + return errors.NewNotFound(gvr.GroupResource(), name) } - return errors.NewNotFound(gvr.GroupResource(), name) + delete(objs, namespacedName) + for _, w := range t.getWatches(gvr, ns) { + w.Delete(obj) + } + return nil } // filterByNamespace returns all objects in the collection that // match provided namespace. Empty namespace matches // non-namespaced objects. -func filterByNamespace(objs []runtime.Object, ns string) ([]runtime.Object, error) { +func filterByNamespace(objs map[types.NamespacedName]runtime.Object, ns string) ([]runtime.Object, error) { var res []runtime.Object for _, obj := range objs { @@ -499,6 +477,15 @@ func filterByNamespace(objs []runtime.Object, ns string) ([]runtime.Object, erro res = append(res, obj) } + // Sort res to get deterministic order. + sort.Slice(res, func(i, j int) bool { + acc1, _ := meta.Accessor(res[i]) + acc2, _ := meta.Accessor(res[j]) + if acc1.GetNamespace() != acc2.GetNamespace() { + return acc1.GetNamespace() < acc2.GetNamespace() + } + return acc1.GetName() < acc2.GetName() + }) return res, nil } From d82e9ebac0c9cf947bb01b6b012b85ba99ac0b11 Mon Sep 17 00:00:00 2001 From: tahsinrahman Date: Sun, 29 Mar 2020 06:36:36 +0800 Subject: [PATCH 64/92] Ensure NamedCertKeyArray implements flag.Value --- staging/src/k8s.io/component-base/cli/flag/namedcertkey_flag.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/staging/src/k8s.io/component-base/cli/flag/namedcertkey_flag.go b/staging/src/k8s.io/component-base/cli/flag/namedcertkey_flag.go index bc6867748ba4..af0f437ae39c 100644 --- a/staging/src/k8s.io/component-base/cli/flag/namedcertkey_flag.go +++ b/staging/src/k8s.io/component-base/cli/flag/namedcertkey_flag.go @@ -75,7 +75,7 @@ type NamedCertKeyArray struct { changed bool } -var _ flag.Value = &NamedCertKey{} +var _ flag.Value = &NamedCertKeyArray{} // NewNamedKeyCertArray creates a new NamedCertKeyArray with the internal value // pointing to p. From 62269f5e6ef4c98da09f0700dca9bdab64433a82 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Sat, 28 Mar 2020 20:25:36 -0400 Subject: [PATCH 65/92] fix mismatch between pod resources verify/update scripts Signed-off-by: Davanum Srinivas --- hack/verify-generated-pod-resources.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/hack/verify-generated-pod-resources.sh b/hack/verify-generated-pod-resources.sh index 423e1819b763..34f4dc98d1d6 100755 --- a/hack/verify-generated-pod-resources.sh +++ b/hack/verify-generated-pod-resources.sh @@ -24,26 +24,26 @@ set -o nounset set -o pipefail KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. 
-KUBE_REMOTE_RUNTIME_ROOT="${KUBE_ROOT}/staging/src/k8s.io/cri-api/pkg/apis/runtime/v1alpha2" +POD_RESOURCES_ALPHA="${KUBE_ROOT}/pkg/kubelet/apis/podresources/v1alpha1/" source "${KUBE_ROOT}/hack/lib/init.sh" kube::golang::setup_env function cleanup { - rm -rf "${KUBE_REMOTE_RUNTIME_ROOT}/_tmp/" + rm -rf "${POD_RESOURCES_ALPHA}/_tmp/" } trap cleanup EXIT -mkdir -p "${KUBE_REMOTE_RUNTIME_ROOT}/_tmp" -cp "${KUBE_REMOTE_RUNTIME_ROOT}/api.pb.go" "${KUBE_REMOTE_RUNTIME_ROOT}/_tmp/" +mkdir -p "${POD_RESOURCES_ALPHA}/_tmp" +cp "${POD_RESOURCES_ALPHA}/api.pb.go" "${POD_RESOURCES_ALPHA}/_tmp/" ret=0 KUBE_VERBOSE=3 "${KUBE_ROOT}/hack/update-generated-pod-resources.sh" -diff -I "gzipped FileDescriptorProto" -I "0x" -Naupr "${KUBE_REMOTE_RUNTIME_ROOT}/_tmp/api.pb.go" "${KUBE_REMOTE_RUNTIME_ROOT}/api.pb.go" || ret=$? +diff -I "gzipped FileDescriptorProto" -I "0x" -Naupr "${POD_RESOURCES_ALPHA}/_tmp/api.pb.go" "${POD_RESOURCES_ALPHA}/api.pb.go" || ret=$? if [[ $ret -eq 0 ]]; then echo "Generated pod resources api is up to date." - cp "${KUBE_REMOTE_RUNTIME_ROOT}/_tmp/api.pb.go" "${KUBE_REMOTE_RUNTIME_ROOT}/" + cp "${POD_RESOURCES_ALPHA}/_tmp/api.pb.go" "${POD_RESOURCES_ALPHA}/" else echo "Generated pod resources api is out of date. Please run hack/update-generated-pod-resources.sh" exit 1 From 51e0bd2480f23de9697b5e388335b547282b3431 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Thu, 26 Mar 2020 09:35:46 +0000 Subject: [PATCH 66/92] feat: add azure shared disk support fix comment --- pkg/volume/azure_dd/azure_provision.go | 39 ++++++++++++++++---------- 1 file changed, 24 insertions(+), 15 deletions(-) diff --git a/pkg/volume/azure_dd/azure_provision.go b/pkg/volume/azure_dd/azure_provision.go index af28d1a74b79..3ce4074b0d31 100644 --- a/pkg/volume/azure_dd/azure_provision.go +++ b/pkg/volume/azure_dd/azure_provision.go @@ -93,26 +93,11 @@ func parseZoned(zonedString string, kind v1.AzureDataDiskKind) (bool, error) { } func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) { - if !util.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) { - return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes()) - } - supportedModes := p.plugin.GetAccessModes() - // perform static validation first if p.options.PVC.Spec.Selector != nil { return nil, fmt.Errorf("azureDisk - claim.Spec.Selector is not supported for dynamic provisioning on Azure disk") } - if len(p.options.PVC.Spec.AccessModes) > 1 { - return nil, fmt.Errorf("AzureDisk - multiple access modes are not supported on AzureDisk plugin") - } - - if len(p.options.PVC.Spec.AccessModes) == 1 { - if p.options.PVC.Spec.AccessModes[0] != supportedModes[0] { - return nil, fmt.Errorf("AzureDisk - mode %s is not supported by AzureDisk plugin (supported mode is %s)", p.options.PVC.Spec.AccessModes[0], supportedModes) - } - } - var ( location, account string storageAccountType, fsType string @@ -194,6 +179,30 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie } } + supportedModes := p.plugin.GetAccessModes() + if maxShares < 2 { + // only do AccessModes validation when maxShares < 2 + if !util.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) { + return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported with maxShares(%d) < 2", p.options.PVC.Spec.AccessModes, 
p.plugin.GetAccessModes(), maxShares) + } + + if len(p.options.PVC.Spec.AccessModes) > 1 { + return nil, fmt.Errorf("AzureDisk - multiple access modes are not supported on AzureDisk plugin with maxShares(%d) < 2", maxShares) + } + + if len(p.options.PVC.Spec.AccessModes) == 1 { + if p.options.PVC.Spec.AccessModes[0] != supportedModes[0] { + return nil, fmt.Errorf("AzureDisk - mode %s is not supported by AzureDisk plugin (supported mode is %s) with maxShares(%d) < 2", p.options.PVC.Spec.AccessModes[0], supportedModes, maxShares) + } + } + } else { + supportedModes = []v1.PersistentVolumeAccessMode{ + v1.ReadWriteOnce, + v1.ReadOnlyMany, + v1.ReadWriteMany, + } + } + // normalize values skuName, err := normalizeStorageAccountType(storageAccountType) if err != nil { From 735ab684ce5e6fdae880f122ca8bed3a95bad856 Mon Sep 17 00:00:00 2001 From: Yang Lu Date: Sun, 29 Mar 2020 08:43:23 -0700 Subject: [PATCH 67/92] Fix docker image log dump --- cluster/log-dump/log-dump.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/log-dump/log-dump.sh b/cluster/log-dump/log-dump.sh index a8c9a4d47d84..04ad09097d13 100755 --- a/cluster/log-dump/log-dump.sh +++ b/cluster/log-dump/log-dump.sh @@ -54,7 +54,7 @@ readonly systemd_services="kubelet kubelet-monitor kube-container-runtime-monito readonly extra_log_files="${LOG_DUMP_EXTRA_FILES:-}" readonly dump_systemd_journal="${LOG_DUMP_SYSTEMD_JOURNAL:-false}" # Log files found in WINDOWS_LOGS_DIR on Windows nodes: -readonly windows_node_logfiles="kubelet.log kube-proxy.log docker.log" +readonly windows_node_logfiles="kubelet.log kube-proxy.log docker.log docker_images.log" # Log files found in other directories on Windows nodes: readonly windows_node_otherfiles="C:\\Windows\\MEMORY.dmp" From 765e926d3513b6180d3a5581b7a4b30540c37178 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Sun, 29 Mar 2020 15:15:02 -0400 Subject: [PATCH 68/92] Avoid using internal packages for streaming/ package Signed-off-by: Davanum Srinivas --- pkg/kubelet/server/portforward/BUILD | 4 ++-- pkg/kubelet/server/portforward/httpstream.go | 2 +- pkg/kubelet/server/portforward/httpstream_test.go | 2 +- pkg/kubelet/server/portforward/websocket.go | 2 +- pkg/kubelet/server/remotecommand/BUILD | 2 +- pkg/kubelet/server/remotecommand/httpstream.go | 2 +- pkg/kubelet/server/streaming/BUILD | 2 +- pkg/kubelet/server/streaming/server_test.go | 2 +- 8 files changed, 9 insertions(+), 9 deletions(-) diff --git a/pkg/kubelet/server/portforward/BUILD b/pkg/kubelet/server/portforward/BUILD index 46c57de81bf6..b263cd12aba5 100644 --- a/pkg/kubelet/server/portforward/BUILD +++ b/pkg/kubelet/server/portforward/BUILD @@ -16,7 +16,7 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/kubelet/server/portforward", deps = [ - "//pkg/apis/core:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/httpstream:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy:go_default_library", @@ -35,7 +35,7 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/apis/core:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/httpstream:go_default_library", ], ) diff --git a/pkg/kubelet/server/portforward/httpstream.go b/pkg/kubelet/server/portforward/httpstream.go index 4b5e66d6607b..aa0fdbb8cb2d 100644 --- a/pkg/kubelet/server/portforward/httpstream.go +++ 
b/pkg/kubelet/server/portforward/httpstream.go @@ -24,11 +24,11 @@ import ( "sync" "time" + api "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/httpstream" "k8s.io/apimachinery/pkg/util/httpstream/spdy" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/klog" ) diff --git a/pkg/kubelet/server/portforward/httpstream_test.go b/pkg/kubelet/server/portforward/httpstream_test.go index 26e6905bbbc0..15d96966d832 100644 --- a/pkg/kubelet/server/portforward/httpstream_test.go +++ b/pkg/kubelet/server/portforward/httpstream_test.go @@ -21,8 +21,8 @@ import ( "testing" "time" + api "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/httpstream" - api "k8s.io/kubernetes/pkg/apis/core" ) func TestHTTPStreamReceived(t *testing.T) { diff --git a/pkg/kubelet/server/portforward/websocket.go b/pkg/kubelet/server/portforward/websocket.go index 69502a6997d7..43b5d75af4c3 100644 --- a/pkg/kubelet/server/portforward/websocket.go +++ b/pkg/kubelet/server/portforward/websocket.go @@ -28,11 +28,11 @@ import ( "k8s.io/klog" + api "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/server/httplog" "k8s.io/apiserver/pkg/util/wsstream" - api "k8s.io/kubernetes/pkg/apis/core" ) const ( diff --git a/pkg/kubelet/server/remotecommand/BUILD b/pkg/kubelet/server/remotecommand/BUILD index fef6fec9c2cc..57ec91011585 100644 --- a/pkg/kubelet/server/remotecommand/BUILD +++ b/pkg/kubelet/server/remotecommand/BUILD @@ -16,7 +16,7 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/kubelet/server/remotecommand", deps = [ - "//pkg/apis/core:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/pkg/kubelet/server/remotecommand/httpstream.go b/pkg/kubelet/server/remotecommand/httpstream.go index 833762d83f50..4a60520c72a0 100644 --- a/pkg/kubelet/server/remotecommand/httpstream.go +++ b/pkg/kubelet/server/remotecommand/httpstream.go @@ -24,6 +24,7 @@ import ( "net/http" "time" + api "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/httpstream" @@ -32,7 +33,6 @@ import ( "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/util/wsstream" "k8s.io/client-go/tools/remotecommand" - api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/klog" ) diff --git a/pkg/kubelet/server/streaming/BUILD b/pkg/kubelet/server/streaming/BUILD index d133e06be93b..5b45492b11f9 100644 --- a/pkg/kubelet/server/streaming/BUILD +++ b/pkg/kubelet/server/streaming/BUILD @@ -36,8 +36,8 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/apis/core:go_default_library", "//pkg/kubelet/server/portforward:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/tools/remotecommand:go_default_library", diff --git a/pkg/kubelet/server/streaming/server_test.go b/pkg/kubelet/server/streaming/server_test.go index 089b4950ded0..80b3f2a90fe7 100644 --- a/pkg/kubelet/server/streaming/server_test.go +++ b/pkg/kubelet/server/streaming/server_test.go @@ -30,11 +30,11 @@ import ( 
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + api "k8s.io/api/core/v1" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/remotecommand" "k8s.io/client-go/transport/spdy" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" - api "k8s.io/kubernetes/pkg/apis/core" kubeletportforward "k8s.io/kubernetes/pkg/kubelet/server/portforward" ) From a8e6f2c1bd08340fa4ea051978bc54ac35de66b8 Mon Sep 17 00:00:00 2001 From: zzxwill Date: Mon, 30 Mar 2020 13:32:26 +0800 Subject: [PATCH 69/92] fix typo for word `replace` --- build/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build/README.md b/build/README.md index 196c0478d26e..4dfbe330732e 100644 --- a/build/README.md +++ b/build/README.md @@ -34,7 +34,7 @@ The following scripts are found in the [`build/`](.) directory. Note that all sc ## Basic Flow -The scripts directly under [`build/`](.) are used to build and test. They will ensure that the `kube-build` Docker image is built (based on [`build/build-image/Dockerfile`](build-image/Dockerfile) and after base image's `KUBE_BUILD_IMAGE_CROSS_TAG` from Dockerfile is repalced with one of those actual tags of the base image, like `v1.13.9-2`) and then execute the appropriate command in that container. These scripts will both ensure that the right data is cached from run to run for incremental builds and will copy the results back out of the container. +The scripts directly under [`build/`](.) are used to build and test. They will ensure that the `kube-build` Docker image is built (based on [`build/build-image/Dockerfile`](build-image/Dockerfile) and after base image's `KUBE_BUILD_IMAGE_CROSS_TAG` from Dockerfile is replaced with one of those actual tags of the base image, like `v1.13.9-2`) and then execute the appropriate command in that container. These scripts will both ensure that the right data is cached from run to run for incremental builds and will copy the results back out of the container. The `kube-build` container image is built by first creating a "context" directory in `_output/images/build-image`. It is done there instead of at the root of the Kubernetes repo to minimize the amount of data we need to package up when building the image. 
From 72558add6a22ae06516be350cfc97578f566db53 Mon Sep 17 00:00:00 2001 From: tanjunchen Date: Mon, 30 Mar 2020 13:36:58 +0800 Subject: [PATCH 70/92] test/e2e/framework/kubelet/: remove direct dependency k8s.io/kubernetes/pkg/kubelet/dockershim/metrics --- test/e2e/framework/kubelet/BUILD | 1 - test/e2e/framework/kubelet/stats.go | 19 +++++++++++++++---- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/test/e2e/framework/kubelet/BUILD b/test/e2e/framework/kubelet/BUILD index 720d79b66514..5cc3ba24a467 100644 --- a/test/e2e/framework/kubelet/BUILD +++ b/test/e2e/framework/kubelet/BUILD @@ -12,7 +12,6 @@ go_library( deps = [ "//pkg/kubelet/apis/config:go_default_library", "//pkg/kubelet/apis/stats/v1alpha1:go_default_library", - "//pkg/kubelet/dockershim/metrics:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", diff --git a/test/e2e/framework/kubelet/stats.go b/test/e2e/framework/kubelet/stats.go index 3d9cdc28d597..552d9a14ad0f 100644 --- a/test/e2e/framework/kubelet/stats.go +++ b/test/e2e/framework/kubelet/stats.go @@ -34,7 +34,6 @@ import ( clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" kubeletstatsv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" - dockermetrics "k8s.io/kubernetes/pkg/kubelet/dockershim/metrics" "k8s.io/kubernetes/test/e2e/framework" e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" ) @@ -42,6 +41,18 @@ import ( const ( // timeout for proxy requests. proxyTimeout = 2 * time.Minute + + // dockerOperationsKey is the key for docker operation metrics. + // copied from k8s.io/kubernetes/pkg/kubelet/dockershim/metrics + dockerOperationsKey = "docker_operations_total" + + // dockerOperationsErrorsKey is the key for the operation error metrics. + // copied from k8s.io/kubernetes/pkg/kubelet/dockershim/metrics + dockerOperationsErrorsKey = "docker_operations_errors_total" + + // dockerOperationsTimeoutKey is the key for the operation timeout metrics. + // copied from k8s.io/kubernetes/pkg/kubelet/dockershim/metrics + dockerOperationsTimeoutKey = "docker_operations_timeout_total" ) // ContainerResourceUsage is a structure for gathering container resource usage. @@ -190,9 +201,9 @@ func getNodeRuntimeOperationErrorRate(c clientset.Interface, node string) (NodeR } // If no corresponding metrics are found, the returned samples will be empty. Then the following // loop will be skipped automatically. 
- allOps := ms[dockermetrics.DockerOperationsKey] - errOps := ms[dockermetrics.DockerOperationsErrorsKey] - timeoutOps := ms[dockermetrics.DockerOperationsTimeoutKey] + allOps := ms[dockerOperationsKey] + errOps := ms[dockerOperationsErrorsKey] + timeoutOps := ms[dockerOperationsTimeoutKey] for _, sample := range allOps { operation := string(sample.Metric["operation_type"]) result[operation] = &RuntimeOperationErrorRate{TotalNumber: float64(sample.Value)} From 56cb04db4cf72cefc461191c131ddc1273b0f529 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Mon, 30 Mar 2020 08:12:41 +0000 Subject: [PATCH 71/92] chore: expose azure.KubeClient --- staging/src/k8s.io/legacy-cloud-providers/azure/azure.go | 6 +++--- .../src/k8s.io/legacy-cloud-providers/azure/azure_config.go | 2 +- .../legacy-cloud-providers/azure/azure_config_test.go | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure.go index 4c0de1e29c43..f28148772279 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure.go @@ -262,7 +262,7 @@ type Cloud struct { // routeCIDRs holds cache for route CIDRs. routeCIDRs map[string]string - kubeClient clientset.Interface + KubeClient clientset.Interface eventBroadcaster record.EventBroadcaster eventRecorder record.EventRecorder routeUpdater *delayedRouteUpdater @@ -639,9 +639,9 @@ func parseConfig(configReader io.Reader) (*Config, error) { // Initialize passes a Kubernetes clientBuilder interface to the cloud provider func (az *Cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { - az.kubeClient = clientBuilder.ClientOrDie("azure-cloud-provider") + az.KubeClient = clientBuilder.ClientOrDie("azure-cloud-provider") az.eventBroadcaster = record.NewBroadcaster() - az.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: az.kubeClient.CoreV1().Events("")}) + az.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: az.KubeClient.CoreV1().Events("")}) az.eventRecorder = az.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "azure-cloud-provider"}) az.InitializeCloudFromSecret() } diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_config.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_config.go index cf953b331a20..2bf76186117a 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_config.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_config.go @@ -69,7 +69,7 @@ func (az *Cloud) getConfigFromSecret() (*Config, error) { return nil, nil } - secret, err := az.kubeClient.CoreV1().Secrets(cloudConfigNamespace).Get(context.TODO(), cloudConfigSecretName, metav1.GetOptions{}) + secret, err := az.KubeClient.CoreV1().Secrets(cloudConfigNamespace).Get(context.TODO(), cloudConfigSecretName, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("failed to get secret %s: %v", cloudConfigSecretName, err) } diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_config_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_config_test.go index 2c3a331f989a..a9f964922990 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_config_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_config_test.go @@ -134,7 +134,7 @@ func TestGetConfigFromSecret(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { 
az := &Cloud{ - kubeClient: fakeclient.NewSimpleClientset(), + KubeClient: fakeclient.NewSimpleClientset(), } if test.existingConfig != nil { az.Config = *test.existingConfig @@ -154,7 +154,7 @@ func TestGetConfigFromSecret(t *testing.T) { "cloud-config": secretData, } } - _, err := az.kubeClient.CoreV1().Secrets(cloudConfigNamespace).Create(context.TODO(), secret, metav1.CreateOptions{}) + _, err := az.KubeClient.CoreV1().Secrets(cloudConfigNamespace).Create(context.TODO(), secret, metav1.CreateOptions{}) assert.NoError(t, err, test.name) } From 55c455e61f6f1e52337e04eeb4ebd41668ffef17 Mon Sep 17 00:00:00 2001 From: zhouya0 Date: Mon, 30 Mar 2020 17:10:40 +0800 Subject: [PATCH 72/92] Fix kubectl describe CSINode nil pointer error --- staging/src/k8s.io/kubectl/pkg/describe/describe.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/staging/src/k8s.io/kubectl/pkg/describe/describe.go b/staging/src/k8s.io/kubectl/pkg/describe/describe.go index f72b1bb657eb..e2ef04d4c4a4 100644 --- a/staging/src/k8s.io/kubectl/pkg/describe/describe.go +++ b/staging/src/k8s.io/kubectl/pkg/describe/describe.go @@ -4031,10 +4031,14 @@ func describeCSINode(csi *storagev1.CSINode, events *corev1.EventList) (output s w.Write(LEVEL_1, "Drivers:\n") for _, driver := range csi.Spec.Drivers { w.Write(LEVEL_2, "%s:\n", driver.Name) - w.Write(LEVEL_3, "Allocatables:\n") - w.Write(LEVEL_4, "Count:\t%d\n", *driver.Allocatable.Count) w.Write(LEVEL_3, "Node ID:\t%s\n", driver.NodeID) - w.Write(LEVEL_3, "Topology Keys:\t%s\n", driver.TopologyKeys) + if driver.Allocatable.Count != nil { + w.Write(LEVEL_3, "Allocatables:\n") + w.Write(LEVEL_4, "Count:\t%d\n", *driver.Allocatable.Count) + } + if driver.TopologyKeys != nil { + w.Write(LEVEL_3, "Topology Keys:\t%s\n", driver.TopologyKeys) + } } } if events != nil { From 31b837b420cbbe5818a96d91958a8d32110e02a2 Mon Sep 17 00:00:00 2001 From: Humble Chirammal Date: Tue, 24 Mar 2020 16:46:57 +0530 Subject: [PATCH 73/92] Clean some code paths and correct static errors Signed-off-by: Humble Chirammal --- pkg/volume/azure_dd/attacher.go | 10 +++--- pkg/volume/azure_dd/azure_common.go | 10 +++--- pkg/volume/azure_dd/azure_common_linux.go | 18 +++++------ pkg/volume/azure_dd/azure_dd.go | 2 +- pkg/volume/csi/fake/fake_client.go | 3 +- pkg/volume/git_repo/git_repo_test.go | 38 +++++++++++------------ pkg/volume/iscsi/iscsi.go | 2 +- pkg/volume/util/fs/fs.go | 2 +- 8 files changed, 43 insertions(+), 42 deletions(-) diff --git a/pkg/volume/azure_dd/attacher.go b/pkg/volume/azure_dd/attacher.go index 9f13ddae3f43..5cbfdabbcd52 100644 --- a/pkg/volume/azure_dd/attacher.go +++ b/pkg/volume/azure_dd/attacher.go @@ -202,8 +202,8 @@ func (a *azureDiskAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error return makeGlobalPDPath(a.plugin.host, volumeSource.DataDiskURI, isManagedDisk) } -func (attacher *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error { - mounter := attacher.plugin.host.GetMounter(azureDataDiskPluginName) +func (a *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error { + mounter := a.plugin.host.GetMounter(azureDataDiskPluginName) notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath) if err != nil { @@ -242,7 +242,7 @@ func (attacher *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath str options := []string{} if notMnt { - diskMounter := util.NewSafeFormatAndMountFromHost(azureDataDiskPluginName, attacher.plugin.host) + diskMounter 
:= util.NewSafeFormatAndMountFromHost(azureDataDiskPluginName, a.plugin.host) mountOptions := util.MountOptionFromSpec(spec, options...) if runtime.GOOS == "windows" { // only parse devicePath on Windows node @@ -284,8 +284,8 @@ func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) erro } // UnmountDevice unmounts the volume on the node -func (detacher *azureDiskDetacher) UnmountDevice(deviceMountPath string) error { - err := mount.CleanupMountPoint(deviceMountPath, detacher.plugin.host.GetMounter(detacher.plugin.GetPluginName()), false) +func (d *azureDiskDetacher) UnmountDevice(deviceMountPath string) error { + err := mount.CleanupMountPoint(deviceMountPath, d.plugin.host.GetMounter(d.plugin.GetPluginName()), false) if err == nil { klog.V(2).Infof("azureDisk - Device %s was unmounted", deviceMountPath) } else { diff --git a/pkg/volume/azure_dd/azure_common.go b/pkg/volume/azure_dd/azure_common.go index ac3fb148dc4e..10068a81deb8 100644 --- a/pkg/volume/azure_dd/azure_common.go +++ b/pkg/volume/azure_dd/azure_common.go @@ -73,16 +73,16 @@ func getPath(uid types.UID, volName string, host volume.VolumeHost) string { } // creates a unique path for disks (even if they share the same *.vhd name) -func makeGlobalPDPath(host volume.VolumeHost, diskUri string, isManaged bool) (string, error) { - diskUri = libstrings.ToLower(diskUri) // always lower uri because users may enter it in caps. +func makeGlobalPDPath(host volume.VolumeHost, diskURI string, isManaged bool) (string, error) { + diskURI = libstrings.ToLower(diskURI) // always lower uri because users may enter it in caps. uniqueDiskNameTemplate := "%s%s" - hashedDiskUri := azure.MakeCRC32(diskUri) + hashedDiskURI := azure.MakeCRC32(diskURI) prefix := "b" if isManaged { prefix = "m" } - // "{m for managed b for blob}{hashed diskUri or DiskId depending on disk kind }" - diskName := fmt.Sprintf(uniqueDiskNameTemplate, prefix, hashedDiskUri) + // "{m for managed b for blob}{hashed diskURI or DiskId depending on disk kind }" + diskName := fmt.Sprintf(uniqueDiskNameTemplate, prefix, hashedDiskURI) pdPath := filepath.Join(host.GetPluginDir(azureDataDiskPluginName), util.MountsInGlobalPDPath, diskName) return pdPath, nil diff --git a/pkg/volume/azure_dd/azure_common_linux.go b/pkg/volume/azure_dd/azure_common_linux.go index 9da9adfef032..41c35b115724 100644 --- a/pkg/volume/azure_dd/azure_common_linux.go +++ b/pkg/volume/azure_dd/azure_common_linux.go @@ -73,17 +73,17 @@ func getDiskLinkByDevName(io ioHandler, devLinkPath, devName string) (string, er } func scsiHostRescan(io ioHandler, exec utilexec.Interface) { - scsi_path := "/sys/class/scsi_host/" - if dirs, err := io.ReadDir(scsi_path); err == nil { + scsiPath := "/sys/class/scsi_host/" + if dirs, err := io.ReadDir(scsiPath); err == nil { for _, f := range dirs { - name := scsi_path + f.Name() + "/scan" + name := scsiPath + f.Name() + "/scan" data := []byte("- - -") if err = io.WriteFile(name, data, 0666); err != nil { klog.Warningf("failed to rescan scsi host %s", name) } } } else { - klog.Warningf("failed to read %s, err %v", scsi_path, err) + klog.Warningf("failed to read %s, err %v", scsiPath, err) } } @@ -95,8 +95,8 @@ func findDiskByLun(lun int, io ioHandler, exec utilexec.Interface) (string, erro // finds a device mounted to "current" node func findDiskByLunWithConstraint(lun int, io ioHandler, azureDisks []string) (string, error) { var err error - sys_path := "/sys/bus/scsi/devices" - if dirs, err := io.ReadDir(sys_path); err == nil { + sysPath := 
"/sys/bus/scsi/devices" + if dirs, err := io.ReadDir(sysPath); err == nil { for _, f := range dirs { name := f.Name() // look for path like /sys/bus/scsi/devices/3:0:0:1 @@ -128,7 +128,7 @@ func findDiskByLunWithConstraint(lun int, io ioHandler, azureDisks []string) (st if lun == l { // find the matching LUN // read vendor and model to ensure it is a VHD disk - vendorPath := filepath.Join(sys_path, name, "vendor") + vendorPath := filepath.Join(sysPath, name, "vendor") vendorBytes, err := io.ReadFile(vendorPath) if err != nil { klog.Errorf("failed to read device vendor, err: %v", err) @@ -140,7 +140,7 @@ func findDiskByLunWithConstraint(lun int, io ioHandler, azureDisks []string) (st continue } - modelPath := filepath.Join(sys_path, name, "model") + modelPath := filepath.Join(sysPath, name, "model") modelBytes, err := io.ReadFile(modelPath) if err != nil { klog.Errorf("failed to read device model, err: %v", err) @@ -153,7 +153,7 @@ func findDiskByLunWithConstraint(lun int, io ioHandler, azureDisks []string) (st } // find a disk, validate name - dir := filepath.Join(sys_path, name, "block") + dir := filepath.Join(sysPath, name, "block") if dev, err := io.ReadDir(dir); err == nil { found := false devName := dev[0].Name() diff --git a/pkg/volume/azure_dd/azure_dd.go b/pkg/volume/azure_dd/azure_dd.go index 448b432ca178..155941974c06 100644 --- a/pkg/volume/azure_dd/azure_dd.go +++ b/pkg/volume/azure_dd/azure_dd.go @@ -36,7 +36,7 @@ import ( "k8s.io/legacy-cloud-providers/azure" ) -// interface exposed by the cloud provider implementing Disk functionality +// DiskController interface exposed by the cloud provider implementing Disk functionality type DiskController interface { CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int) (string, error) DeleteBlobDisk(diskUri string) error diff --git a/pkg/volume/csi/fake/fake_client.go b/pkg/volume/csi/fake/fake_client.go index 0c9c446fba1a..6e37ec686db7 100644 --- a/pkg/volume/csi/fake/fake_client.go +++ b/pkg/volume/csi/fake/fake_client.go @@ -31,8 +31,9 @@ import ( ) const ( - // NodePublishTimeout_VolumeID is volume id that will result in NodePublish operation to timeout + // NodePublishTimeOut_VolumeID is volume id that will result in NodePublish operation to timeout NodePublishTimeOut_VolumeID = "node-publish-timeout" + // NodeStageTimeOut_VolumeID is a volume id that will result in NodeStage operation to timeout NodeStageTimeOut_VolumeID = "node-stage-timeout" ) diff --git a/pkg/volume/git_repo/git_repo_test.go b/pkg/volume/git_repo/git_repo_test.go index 1925325ee0f2..a6b9fed9a182 100644 --- a/pkg/volume/git_repo/git_repo_test.go +++ b/pkg/volume/git_repo/git_repo_test.go @@ -70,7 +70,7 @@ type expectedCommand struct { } func TestPlugin(t *testing.T) { - gitUrl := "https://github.com/kubernetes/kubernetes.git" + gitURL := "https://github.com/kubernetes/kubernetes.git" revision := "2a30ce65c5ab586b98916d83385c5983edd353a1" scenarios := []struct { @@ -85,7 +85,7 @@ func TestPlugin(t *testing.T) { Name: "vol1", VolumeSource: v1.VolumeSource{ GitRepo: &v1.GitRepoVolumeSource{ - Repository: gitUrl, + Repository: gitURL, Revision: revision, Directory: "target_dir", }, @@ -93,7 +93,7 @@ func TestPlugin(t *testing.T) { }, expecteds: []expectedCommand{ { - cmd: []string{"git", "clone", "--", gitUrl, "target_dir"}, + cmd: []string{"git", "clone", "--", gitURL, "target_dir"}, dir: "", }, { @@ -113,14 +113,14 @@ func TestPlugin(t *testing.T) { Name: "vol1", VolumeSource: v1.VolumeSource{ GitRepo: &v1.GitRepoVolumeSource{ - 
Repository: gitUrl, + Repository: gitURL, Directory: "target_dir", }, }, }, expecteds: []expectedCommand{ { - cmd: []string{"git", "clone", "--", gitUrl, "target_dir"}, + cmd: []string{"git", "clone", "--", gitURL, "target_dir"}, dir: "", }, }, @@ -132,13 +132,13 @@ func TestPlugin(t *testing.T) { Name: "vol1", VolumeSource: v1.VolumeSource{ GitRepo: &v1.GitRepoVolumeSource{ - Repository: gitUrl, + Repository: gitURL, }, }, }, expecteds: []expectedCommand{ { - cmd: []string{"git", "clone", "--", gitUrl}, + cmd: []string{"git", "clone", "--", gitURL}, dir: "", }, }, @@ -150,7 +150,7 @@ func TestPlugin(t *testing.T) { Name: "vol1", VolumeSource: v1.VolumeSource{ GitRepo: &v1.GitRepoVolumeSource{ - Repository: gitUrl, + Repository: gitURL, Revision: revision, Directory: "", }, @@ -158,7 +158,7 @@ func TestPlugin(t *testing.T) { }, expecteds: []expectedCommand{ { - cmd: []string{"git", "clone", "--", gitUrl}, + cmd: []string{"git", "clone", "--", gitURL}, dir: "", }, { @@ -178,7 +178,7 @@ func TestPlugin(t *testing.T) { Name: "vol1", VolumeSource: v1.VolumeSource{ GitRepo: &v1.GitRepoVolumeSource{ - Repository: gitUrl, + Repository: gitURL, Revision: revision, Directory: ".", }, @@ -186,7 +186,7 @@ func TestPlugin(t *testing.T) { }, expecteds: []expectedCommand{ { - cmd: []string{"git", "clone", "--", gitUrl, "."}, + cmd: []string{"git", "clone", "--", gitURL, "."}, dir: "", }, { @@ -206,7 +206,7 @@ func TestPlugin(t *testing.T) { Name: "vol1", VolumeSource: v1.VolumeSource{ GitRepo: &v1.GitRepoVolumeSource{ - Repository: gitUrl, + Repository: gitURL, Revision: revision, Directory: "./.", }, @@ -214,7 +214,7 @@ func TestPlugin(t *testing.T) { }, expecteds: []expectedCommand{ { - cmd: []string{"git", "clone", "--", gitUrl, "./."}, + cmd: []string{"git", "clone", "--", gitURL, "./."}, dir: "", }, { @@ -246,7 +246,7 @@ func TestPlugin(t *testing.T) { Name: "vol1", VolumeSource: v1.VolumeSource{ GitRepo: &v1.GitRepoVolumeSource{ - Repository: gitUrl, + Repository: gitURL, Revision: "--bar", }, }, @@ -259,7 +259,7 @@ func TestPlugin(t *testing.T) { Name: "vol1", VolumeSource: v1.VolumeSource{ GitRepo: &v1.GitRepoVolumeSource{ - Repository: gitUrl, + Repository: gitURL, Directory: "-b", }, }, @@ -330,11 +330,11 @@ func doTestPlugin(scenario struct { allErrs = append(allErrs, fmt.Errorf("SetUp() failed, volume path not created: %s", path)) return allErrs - } else { - allErrs = append(allErrs, - fmt.Errorf("SetUp() failed: %v", err)) - return allErrs } + allErrs = append(allErrs, + fmt.Errorf("SetUp() failed: %v", err)) + return allErrs + } // gitRepo volume should create its own empty wrapper path diff --git a/pkg/volume/iscsi/iscsi.go b/pkg/volume/iscsi/iscsi.go index 36bce8d189a9..aea792c11aa9 100644 --- a/pkg/volume/iscsi/iscsi.go +++ b/pkg/volume/iscsi/iscsi.go @@ -38,7 +38,7 @@ import ( "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" ) -// This is the primary entrypoint for volume plugins. +// ProbeVolumePlugins is the primary entrypoint for volume plugins. 
func ProbeVolumePlugins() []volume.VolumePlugin { return []volume.VolumePlugin{&iscsiPlugin{}} } diff --git a/pkg/volume/util/fs/fs.go b/pkg/volume/util/fs/fs.go index 2796e93d19ed..0050c5fe48c7 100644 --- a/pkg/volume/util/fs/fs.go +++ b/pkg/volume/util/fs/fs.go @@ -30,7 +30,7 @@ import ( "k8s.io/kubernetes/pkg/volume/util/fsquota" ) -// FSInfo linux returns (available bytes, byte capacity, byte usage, total inodes, inodes free, inode usage, error) +// FsInfo linux returns (available bytes, byte capacity, byte usage, total inodes, inodes free, inode usage, error) // for the filesystem that path resides upon. func FsInfo(path string) (int64, int64, int64, int64, int64, int64, error) { statfs := &unix.Statfs_t{} From a1bceb89159fe05e0133df3dcda4a72e2a8c023d Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Sun, 29 Mar 2020 15:52:21 -0400 Subject: [PATCH 74/92] add import restrictions Signed-off-by: Davanum Srinivas --- pkg/kubelet/server/portforward/.import-restrictions | 9 +++++++++ pkg/kubelet/server/remotecommand/.import-restrictions | 9 +++++++++ pkg/kubelet/server/streaming/.import-restrictions | 11 +++++++++++ 3 files changed, 29 insertions(+) create mode 100644 pkg/kubelet/server/portforward/.import-restrictions create mode 100644 pkg/kubelet/server/remotecommand/.import-restrictions create mode 100644 pkg/kubelet/server/streaming/.import-restrictions diff --git a/pkg/kubelet/server/portforward/.import-restrictions b/pkg/kubelet/server/portforward/.import-restrictions new file mode 100644 index 000000000000..0ead1c495302 --- /dev/null +++ b/pkg/kubelet/server/portforward/.import-restrictions @@ -0,0 +1,9 @@ +{ + "Rules": [ + { + "SelectorRegexp": "k8s[.]io/kubernetes", + "AllowedPrefixes": [ + ] + } + ] +} diff --git a/pkg/kubelet/server/remotecommand/.import-restrictions b/pkg/kubelet/server/remotecommand/.import-restrictions new file mode 100644 index 000000000000..0ead1c495302 --- /dev/null +++ b/pkg/kubelet/server/remotecommand/.import-restrictions @@ -0,0 +1,9 @@ +{ + "Rules": [ + { + "SelectorRegexp": "k8s[.]io/kubernetes", + "AllowedPrefixes": [ + ] + } + ] +} diff --git a/pkg/kubelet/server/streaming/.import-restrictions b/pkg/kubelet/server/streaming/.import-restrictions new file mode 100644 index 000000000000..6a0053a96208 --- /dev/null +++ b/pkg/kubelet/server/streaming/.import-restrictions @@ -0,0 +1,11 @@ +{ + "Rules": [ + { + "SelectorRegexp": "k8s[.]io/kubernetes", + "AllowedPrefixes": [ + "k8s.io/kubernetes/pkg/kubelet/server/portforward", + "k8s.io/kubernetes/pkg/kubelet/server/remotecommand" + ] + } + ] +} From 8747ba9370e950fc626c19e87ab4f7c6128bc563 Mon Sep 17 00:00:00 2001 From: Tim Hockin Date: Thu, 26 Mar 2020 23:11:10 -0700 Subject: [PATCH 75/92] Clean up kube-proxy healthz startup Make the healthz package simpler, move retries back to caller. --- cmd/kube-proxy/app/server.go | 23 +++++++++++-- pkg/proxy/healthcheck/proxier_health.go | 45 +++++++++++-------------- 2 files changed, 39 insertions(+), 29 deletions(-) diff --git a/cmd/kube-proxy/app/server.go b/cmd/kube-proxy/app/server.go index 841fba1d3b90..f811be43aefc 100644 --- a/cmd/kube-proxy/app/server.go +++ b/cmd/kube-proxy/app/server.go @@ -576,6 +576,23 @@ func createClients(config componentbaseconfig.ClientConnectionConfiguration, mas return client, eventClient.CoreV1(), nil } +func serveHealthz(hz healthcheck.ProxierHealthUpdater) { + if hz == nil { + return + } + fn := func() { + err := hz.Run() + if err != nil { + // For historical reasons we do not abort on errors here. 
We may + // change that in the future. + klog.Errorf("healthz server failed: %v", err) + } else { + klog.Errorf("healthz server returned without error") + } + } + go wait.Until(fn, 5*time.Second, wait.NeverStop) +} + // Run runs the specified ProxyServer. This should never exit (unless CleanupAndExit is set). // TODO: At the moment, Run() cannot return a nil error, otherwise it's caller will never exit. Update callers of Run to handle nil errors. func (s *ProxyServer) Run() error { @@ -595,10 +612,10 @@ func (s *ProxyServer) Run() error { s.Broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: s.EventClient.Events("")}) } + // TODO(thockin): make it possible for healthz and metrics to be on the same port. + // Start up a healthz server if requested - if s.HealthzServer != nil { - s.HealthzServer.Run() - } + serveHealthz(s.HealthzServer) // Start up a metrics server if requested if len(s.MetricsBindAddress) > 0 { diff --git a/pkg/proxy/healthcheck/proxier_health.go b/pkg/proxy/healthcheck/proxier_health.go index afeb4fff6571..c915b163b0ff 100644 --- a/pkg/proxy/healthcheck/proxier_health.go +++ b/pkg/proxy/healthcheck/proxier_health.go @@ -22,17 +22,13 @@ import ( "sync/atomic" "time" - "k8s.io/klog" - "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/clock" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/record" + "k8s.io/klog" api "k8s.io/kubernetes/pkg/apis/core" ) -var proxierHealthzRetryInterval = 60 * time.Second - // ProxierHealthUpdater allows callers to update healthz timestamp only. type ProxierHealthUpdater interface { // QueuedUpdate should be called when the proxier receives a Service or Endpoints @@ -43,8 +39,8 @@ type ProxierHealthUpdater interface { // rules to reflect the current state. Updated() - // Run starts the healthz http server and returns. - Run() + // Run starts the healthz HTTP server and blocks until it exits. + Run() error } var _ ProxierHealthUpdater = &proxierHealthServer{} @@ -92,31 +88,28 @@ func (hs *proxierHealthServer) QueuedUpdate() { hs.lastQueued.Store(hs.clock.Now()) } -// Run starts the healthz http server and returns. -func (hs *proxierHealthServer) Run() { +// Run starts the healthz HTTP server and blocks until it exits. 
+func (hs *proxierHealthServer) Run() error { serveMux := http.NewServeMux() serveMux.Handle("/healthz", healthzHandler{hs: hs}) server := hs.httpFactory.New(hs.addr, serveMux) - go wait.Until(func() { - klog.V(3).Infof("Starting goroutine for proxier healthz on %s", hs.addr) - - listener, err := hs.listener.Listen(hs.addr) - if err != nil { - msg := fmt.Sprintf("Failed to start proxier healthz on %s: %v", hs.addr, err) - if hs.recorder != nil { - hs.recorder.Eventf(hs.nodeRef, api.EventTypeWarning, "FailedToStartProxierHealthcheck", msg) - } - klog.Error(msg) - return + listener, err := hs.listener.Listen(hs.addr) + if err != nil { + msg := fmt.Sprintf("failed to start proxier healthz on %s: %v", hs.addr, err) + // TODO(thockin): move eventing back to caller + if hs.recorder != nil { + hs.recorder.Eventf(hs.nodeRef, api.EventTypeWarning, "FailedToStartProxierHealthcheck", msg) } + return fmt.Errorf("%v", msg) + } - if err := server.Serve(listener); err != nil { - klog.Errorf("Proxier healthz closed with error: %v", err) - return - } - klog.Error("Unexpected proxier healthz closed.") - }, proxierHealthzRetryInterval, wait.NeverStop) + klog.V(3).Infof("starting healthz on %s", hs.addr) + + if err := server.Serve(listener); err != nil { + return fmt.Errorf("proxier healthz closed with error: %v", err) + } + return nil } type healthzHandler struct { From 15632b10cbd01777198b97a60fb49f8a25f615df Mon Sep 17 00:00:00 2001 From: Tim Hockin Date: Fri, 27 Mar 2020 16:50:44 -0700 Subject: [PATCH 76/92] Clean up kube-proxy metrics startup --- cmd/kube-proxy/app/server.go | 56 ++++++++++++++++++++++-------------- pkg/proxy/healthcheck/BUILD | 1 - 2 files changed, 35 insertions(+), 22 deletions(-) diff --git a/cmd/kube-proxy/app/server.go b/cmd/kube-proxy/app/server.go index f811be43aefc..97524e2fc4e0 100644 --- a/cmd/kube-proxy/app/server.go +++ b/cmd/kube-proxy/app/server.go @@ -580,6 +580,7 @@ func serveHealthz(hz healthcheck.ProxierHealthUpdater) { if hz == nil { return } + fn := func() { err := hz.Run() if err != nil { @@ -593,6 +594,39 @@ func serveHealthz(hz healthcheck.ProxierHealthUpdater) { go wait.Until(fn, 5*time.Second, wait.NeverStop) } +func serveMetrics(bindAddress string, proxyMode string, enableProfiling bool) { + if len(bindAddress) == 0 { + return + } + + proxyMux := mux.NewPathRecorderMux("kube-proxy") + healthz.InstallHandler(proxyMux) + proxyMux.HandleFunc("/proxyMode", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + w.Header().Set("X-Content-Type-Options", "nosniff") + fmt.Fprintf(w, "%s", proxyMode) + }) + + //lint:ignore SA1019 See the Metrics Stability Migration KEP + proxyMux.Handle("/metrics", legacyregistry.Handler()) + + if enableProfiling { + routes.Profiling{}.Install(proxyMux) + } + + configz.InstallHandler(proxyMux) + + fn := func() { + err := http.ListenAndServe(bindAddress, proxyMux) + if err != nil { + // For historical reasons we do not abort on errors here. We may + // change that in the future. + utilruntime.HandleError(fmt.Errorf("starting metrics server failed: %v", err)) + } + } + go wait.Until(fn, 5*time.Second, wait.NeverStop) +} + // Run runs the specified ProxyServer. This should never exit (unless CleanupAndExit is set). // TODO: At the moment, Run() cannot return a nil error, otherwise it's caller will never exit. Update callers of Run to handle nil errors. 
func (s *ProxyServer) Run() error { @@ -618,27 +652,7 @@ func (s *ProxyServer) Run() error { serveHealthz(s.HealthzServer) // Start up a metrics server if requested - if len(s.MetricsBindAddress) > 0 { - proxyMux := mux.NewPathRecorderMux("kube-proxy") - healthz.InstallHandler(proxyMux) - proxyMux.HandleFunc("/proxyMode", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - w.Header().Set("X-Content-Type-Options", "nosniff") - fmt.Fprintf(w, "%s", s.ProxyMode) - }) - //lint:ignore SA1019 See the Metrics Stability Migration KEP - proxyMux.Handle("/metrics", legacyregistry.Handler()) - if s.EnableProfiling { - routes.Profiling{}.Install(proxyMux) - } - configz.InstallHandler(proxyMux) - go wait.Until(func() { - err := http.ListenAndServe(s.MetricsBindAddress, proxyMux) - if err != nil { - utilruntime.HandleError(fmt.Errorf("starting metrics server failed: %v", err)) - } - }, 5*time.Second, wait.NeverStop) - } + serveMetrics(s.MetricsBindAddress, s.ProxyMode, s.EnableProfiling) // Tune conntrack, if requested // Conntracker is always nil for windows diff --git a/pkg/proxy/healthcheck/BUILD b/pkg/proxy/healthcheck/BUILD index d99782630f73..3c45cef097a7 100644 --- a/pkg/proxy/healthcheck/BUILD +++ b/pkg/proxy/healthcheck/BUILD @@ -20,7 +20,6 @@ go_library( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//vendor/github.com/lithammer/dedent:go_default_library", "//vendor/k8s.io/klog:go_default_library", From 72f58ae4ff6b0183292767e86f6627c8f76bb29d Mon Sep 17 00:00:00 2001 From: Peter Hornyack Date: Mon, 30 Mar 2020 11:30:52 -0700 Subject: [PATCH 77/92] Bump GCE Windows pause image version pause-win:1.2.1 is based on the March Windows container base images for both LTSC 2019 and SAC 1909. --- cluster/gce/config-common.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/gce/config-common.sh b/cluster/gce/config-common.sh index 9622f268c61c..7a7365a47248 100644 --- a/cluster/gce/config-common.sh +++ b/cluster/gce/config-common.sh @@ -159,4 +159,4 @@ export WINDOWS_BOOTSTRAP_KUBECONFIG_FILE="${WINDOWS_K8S_DIR}\kubelet.bootstrap-k # Path for kube-proxy kubeconfig file on Windows nodes. export WINDOWS_KUBEPROXY_KUBECONFIG_FILE="${WINDOWS_K8S_DIR}\kubeproxy.kubeconfig" # Pause container image for Windows container. -export WINDOWS_INFRA_CONTAINER="gcr.io/gke-release/pause-win:1.2.0" +export WINDOWS_INFRA_CONTAINER="gcr.io/gke-release/pause-win:1.2.1" From 52653195f65de0fdd9cb14619841ff636ad85887 Mon Sep 17 00:00:00 2001 From: Aaron Crickenberger Date: Fri, 27 Mar 2020 23:13:50 -0700 Subject: [PATCH 78/92] Allow list-resources.sh to continue if a resource fails to list The list-resources.sh script is used solely by our CI, specifically kubernetes/test-infra/kubetest with the --check-leaked-resources flag. Currently if a single resource fails to list, we fail the entire job. I think this is too brittle. A review of previous issues on kubernetes/kubernetes that relate to failure of this script shows that the issues usually resolve themselves, or would be caught by the diff of before/after. Let's instead allow the script to continue listing all resources, and let kubetest's resource diff fail the job. 
--- cluster/gce/list-resources.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cluster/gce/list-resources.sh b/cluster/gce/list-resources.sh index aca87c576508..f309a779cf42 100755 --- a/cluster/gce/list-resources.sh +++ b/cluster/gce/list-resources.sh @@ -75,6 +75,9 @@ echo "Provider: ${KUBERNETES_PROVIDER:-}" # List resources related to instances, filtering by the instance prefix if # provided. + +set +e # do not stop on error + gcloud-list compute instance-templates "name ~ '${INSTANCE_PREFIX}.*'" gcloud-list compute instance-groups "${ZONE:+"zone:(${ZONE}) AND "}name ~ '${INSTANCE_PREFIX}.*'" gcloud-list compute instances "${ZONE:+"zone:(${ZONE}) AND "}name ~ '${INSTANCE_PREFIX}.*'" @@ -95,3 +98,5 @@ gcloud-list compute forwarding-rules ${REGION:+"region=(${REGION})"} gcloud-list compute target-pools ${REGION:+"region=(${REGION})"} gcloud-list logging sinks + +set -e From 03c7bdcaccb201129b2d8c7d679bc8de29defb72 Mon Sep 17 00:00:00 2001 From: Kenichi Omichi Date: Wed, 18 Mar 2020 21:23:14 +0000 Subject: [PATCH 79/92] Enable import-boss check for integration test Integration tests imported e2e test code, and that dependency had two drawbacks: - Hard to move test/e2e/framework into staging (#74352) - Integration tests always had to run, even for PRs that only changed e2e test code This enables the import-boss check to block such dependencies. --- hack/verify-import-boss.sh | 8 +++++++- test/integration/.import-restrictions | 8 ++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) create mode 100644 test/integration/.import-restrictions diff --git a/hack/verify-import-boss.sh b/hack/verify-import-boss.sh index bafb9d71e4d7..4db13e0b15b7 100755 --- a/hack/verify-import-boss.sh +++ b/hack/verify-import-boss.sh @@ -30,7 +30,13 @@ kube::golang::setup_env make -C "${KUBE_ROOT}" WHAT=vendor/k8s.io/code-generator/cmd/import-boss -packages=("k8s.io/kubernetes/pkg/..." "k8s.io/kubernetes/cmd/..." "k8s.io/kubernetes/plugin/..." "k8s.io/kubernetes/test/e2e/framework/...") +packages=( + "k8s.io/kubernetes/pkg/..." + "k8s.io/kubernetes/cmd/..." + "k8s.io/kubernetes/plugin/..." + "k8s.io/kubernetes/test/e2e/framework/..." + "k8s.io/kubernetes/test/integration/..." +) for d in staging/src/k8s.io/*/; do if [ -d "$d" ]; then packages+=("./vendor/${d#"staging/src/"}...") diff --git a/test/integration/.import-restrictions b/test/integration/.import-restrictions new file mode 100644 index 000000000000..20d0d03384f5 --- /dev/null +++ b/test/integration/.import-restrictions @@ -0,0 +1,8 @@ +{ + "Rules": [ + { + "SelectorRegexp": "k8s[.]io/kubernetes/test/e2e", + "AllowedPrefixes": [] + } + ] +} From 4ca2d5a9dadcd51c7f7b5b68c014f39e0b65c6b3 Mon Sep 17 00:00:00 2001 From: "Sean R.
Sullivan" Date: Mon, 30 Mar 2020 15:00:42 -0700 Subject: [PATCH 80/92] Fixes kubectl apply tests to run; updates broken tests --- test/cmd/apply.sh | 61 +++++++++++++++------------------------ test/cmd/legacy-script.sh | 1 + 2 files changed, 25 insertions(+), 37 deletions(-) diff --git a/test/cmd/apply.sh b/test/cmd/apply.sh index f9d5f8d80405..325b49246405 100755 --- a/test/cmd/apply.sh +++ b/test/cmd/apply.sh @@ -147,40 +147,22 @@ __EOF__ kubectl "${kube_flags[@]:?}" delete customresourcedefinition resources.mygroup.example.com ## kubectl apply --prune - # Pre-Condition: no POD exists + # Pre-Condition: namespace nsb exists; no POD exists + kubectl create ns nsb kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" '' - - # apply a - kubectl apply --prune -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]:?}" - # check right pod exists - kube::test::get_object_assert 'pods a' "{{${id_field:?}}}" 'a' - # check wrong pod doesn't exist - output_message=$(! kubectl get pods b 2>&1 "${kube_flags[@]:?}") - kube::test::if_has_string "${output_message}" 'pods "b" not found' - - # apply b - kubectl apply --prune -l prune-group=true -f hack/testdata/prune/b.yaml "${kube_flags[@]:?}" - # check right pod exists - kube::test::get_object_assert 'pods b' "{{${id_field:?}}}" 'b' - # check wrong pod doesn't exist - output_message=$(! kubectl get pods a 2>&1 "${kube_flags[@]:?}") - kube::test::if_has_string "${output_message}" 'pods "a" not found' - - kubectl delete pods a - kubectl delete pods b - - # apply a + # apply a into namespace nsb kubectl apply --namespace nsb -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]:?}" + kube::test::get_object_assert 'pods a -n nsb' "{{${id_field:?}}}" 'a' # apply b with namespace kubectl apply --namespace nsb --prune -l prune-group=true -f hack/testdata/prune/b.yaml "${kube_flags[@]:?}" # check right pod exists - kube::test::get_object_assert 'pods b' "{{${id_field:?}}}" 'b' + kube::test::get_object_assert 'pods b -n nsb' "{{${id_field:?}}}" 'b' # check wrong pod doesn't exist - output_message=$(! kubectl get pods a 2>&1 "${kube_flags[@]:?}") + output_message=$(! kubectl get pods a -n nsb 2>&1 "${kube_flags[@]:?}") kube::test::if_has_string "${output_message}" 'pods "a" not found' # cleanup - kubectl delete pods b + kubectl delete pods b -n nsb # same thing without prune for a sanity check # Pre-Condition: no POD exists @@ -191,18 +173,18 @@ __EOF__ # check right pod exists kube::test::get_object_assert 'pods a' "{{${id_field:?}}}" 'a' # check wrong pod doesn't exist - output_message=$(! kubectl get pods b 2>&1 "${kube_flags[@]:?}") + output_message=$(! kubectl get pods b -n nsb 2>&1 "${kube_flags[@]:?}") kube::test::if_has_string "${output_message}" 'pods "b" not found' # apply b kubectl apply -l prune-group=true -f hack/testdata/prune/b.yaml "${kube_flags[@]:?}" # check both pods exist kube::test::get_object_assert 'pods a' "{{${id_field:?}}}" 'a' - kube::test::get_object_assert 'pods b' "{{${id_field:?}}}" 'b' - # check wrong pod doesn't exist + kube::test::get_object_assert 'pods b -n nsb' "{{${id_field:?}}}" 'b' # cleanup - kubectl delete pod/a pod/b + kubectl delete pod/a + kubectl delete pod/b -n nsb ## kubectl apply --prune requires a --all flag to select everything output_message=$(! 
kubectl apply --prune -f hack/testdata/prune 2>&1 "${kube_flags[@]:?}") @@ -211,8 +193,10 @@ __EOF__ # should apply everything kubectl apply --all --prune -f hack/testdata/prune kube::test::get_object_assert 'pods a' "{{${id_field:?}}}" 'a' - kube::test::get_object_assert 'pods b' "{{${id_field:?}}}" 'b' - kubectl delete pod/a pod/b + kube::test::get_object_assert 'pods b -n nsb' "{{${id_field:?}}}" 'b' + kubectl delete pod/a + kubectl delete pod/b -n nsb + kubectl delete ns nsb ## kubectl apply --prune should fallback to delete for non reapable types kubectl apply --all --prune -f hack/testdata/prune-reap/a.yml 2>&1 "${kube_flags[@]:?}" @@ -275,24 +259,27 @@ __EOF__ kubectl delete --kustomize hack/testdata/kustomize ## kubectl apply multiple resources with initial failure. - # Pre-Condition: no POD exists + # Pre-Condition: namespace does not exist and no POD exists + output_message=$(! kubectl get namespace multi-resource-ns 2>&1 "${kube_flags[@]:?}") + kube::test::if_has_string "${output_message}" 'namespaces "multi-resource-ns" not found' kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" '' # First pass, namespace is created, but pod is not (since namespace does not exist yet). - kubectl apply -f hack/testdata/multi-resource.yaml "${kube_flags[@]:?}" - output_message=$(! kubectl get pods test-pod 2>&1 "${kube_flags[@]:?}") + output_message=$(! kubectl apply -f hack/testdata/multi-resource.yaml 2>&1 "${kube_flags[@]:?}") + kube::test::if_has_string "${output_message}" 'namespaces "multi-resource-ns" not found' + output_message=$(! kubectl get pods test-pod -n multi-resource-ns 2>&1 "${kube_flags[@]:?}") kube::test::if_has_string "${output_message}" 'pods "test-pod" not found' # Second pass, pod is created (now that namespace exists). kubectl apply -f hack/testdata/multi-resource.yaml "${kube_flags[@]:?}" - kube::test::get_object_assert 'pod test-pod' "{{${id_field}}}" 'test-pod' + kube::test::get_object_assert 'pods test-pod -n multi-resource-ns' "{{${id_field}}}" 'test-pod' # cleanup - kubectl delete -f hack/testdata/multi-resource.yaml + kubectl delete -f hack/testdata/multi-resource.yaml "${kube_flags[@]:?}" set +o nounset set +o errexit } # Runs tests related to kubectl apply (server-side) -run_kubectl_apply_tests() { +run_kubectl_server_side_apply_tests() { set -o nounset set -o errexit diff --git a/test/cmd/legacy-script.sh b/test/cmd/legacy-script.sh index 9d0473e14af3..650575bcb4e7 100755 --- a/test/cmd/legacy-script.sh +++ b/test/cmd/legacy-script.sh @@ -506,6 +506,7 @@ runTests() { if kube::test::if_supports_resource "${pods}" ; then record_command run_kubectl_apply_tests + record_command run_kubectl_server_side_apply_tests record_command run_kubectl_run_tests record_command run_kubectl_create_filter_tests fi From e50afd00e9feeaa1ac9a6bfa18f25aba1cb0c4cc Mon Sep 17 00:00:00 2001 From: gavinfish Date: Mon, 30 Mar 2020 09:32:04 +0800 Subject: [PATCH 81/92] e2e/framework: remove direct imports to /pkg/kubelet/...
--- test/e2e/framework/metrics/BUILD | 2 -- test/e2e/framework/metrics/kubelet_metrics.go | 33 +++++++++++++------ 2 files changed, 23 insertions(+), 12 deletions(-) diff --git a/test/e2e/framework/metrics/BUILD b/test/e2e/framework/metrics/BUILD index 734054da4100..4c4f78159cae 100644 --- a/test/e2e/framework/metrics/BUILD +++ b/test/e2e/framework/metrics/BUILD @@ -22,8 +22,6 @@ go_library( ], importpath = "k8s.io/kubernetes/test/e2e/framework/metrics", deps = [ - "//pkg/kubelet/dockershim/metrics:go_default_library", - "//pkg/kubelet/metrics:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", diff --git a/test/e2e/framework/metrics/kubelet_metrics.go b/test/e2e/framework/metrics/kubelet_metrics.go index 1a60e4705765..2c1810204efc 100644 --- a/test/e2e/framework/metrics/kubelet_metrics.go +++ b/test/e2e/framework/metrics/kubelet_metrics.go @@ -29,13 +29,26 @@ import ( "k8s.io/apimachinery/pkg/util/sets" clientset "k8s.io/client-go/kubernetes" "k8s.io/component-base/metrics/testutil" - dockermetrics "k8s.io/kubernetes/pkg/kubelet/dockershim/metrics" - kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics" e2elog "k8s.io/kubernetes/test/e2e/framework/log" ) const ( proxyTimeout = 2 * time.Minute + // dockerOperationsLatencyKey is the key for the operation latency metrics. + // Taken from k8s.io/kubernetes/pkg/kubelet/dockershim/metrics + dockerOperationsLatencyKey = "docker_operations_duration_seconds" + // Taken from k8s.io/kubernetes/pkg/kubelet/metrics + kubeletSubsystem = "kubelet" + // Taken from k8s.io/kubernetes/pkg/kubelet/metrics + podWorkerDurationKey = "pod_worker_duration_seconds" + // Taken from k8s.io/kubernetes/pkg/kubelet/metrics + podStartDurationKey = "pod_start_duration_seconds" + // Taken from k8s.io/kubernetes/pkg/kubelet/metrics + cgroupManagerOperationsKey = "cgroup_manager_duration_seconds" + // Taken from k8s.io/kubernetes/pkg/kubelet/metrics + podWorkerStartDurationKey = "pod_worker_start_duration_seconds" + // Taken from k8s.io/kubernetes/pkg/kubelet/metrics + plegRelistDurationKey = "pleg_relist_duration_seconds" ) // KubeletMetrics is metrics for kubelet @@ -143,7 +156,7 @@ func GetKubeletMetrics(c clientset.Interface, nodeName string) (KubeletMetrics, kubeletMetrics := make(KubeletMetrics) for name, samples := range ms { - const prefix = kubeletmetrics.KubeletSubsystem + "_" + const prefix = kubeletSubsystem + "_" if !strings.HasPrefix(name, prefix) { // Not a kubelet metric. continue @@ -159,13 +172,13 @@ func GetKubeletMetrics(c clientset.Interface, nodeName string) (KubeletMetrics, // Note that the KubeletMetrics passed in should not contain subsystem prefix. 
func GetDefaultKubeletLatencyMetrics(ms KubeletMetrics) KubeletLatencyMetrics { latencyMetricNames := sets.NewString( - kubeletmetrics.PodWorkerDurationKey, - kubeletmetrics.PodWorkerStartDurationKey, - kubeletmetrics.PodStartDurationKey, - kubeletmetrics.CgroupManagerOperationsKey, - dockermetrics.DockerOperationsLatencyKey, - kubeletmetrics.PodWorkerStartDurationKey, - kubeletmetrics.PLEGRelistDurationKey, + podWorkerDurationKey, + podWorkerStartDurationKey, + podStartDurationKey, + cgroupManagerOperationsKey, + dockerOperationsLatencyKey, + podWorkerStartDurationKey, + plegRelistDurationKey, ) return GetKubeletLatencyMetrics(ms, latencyMetricNames) } From 991547edb6bc5d9302034fbb94b97204ea951348 Mon Sep 17 00:00:00 2001 From: zhouya0 Date: Tue, 31 Mar 2020 12:15:20 +0800 Subject: [PATCH 82/92] Add kubectl describe CSINode test coverage --- .../k8s.io/kubectl/pkg/describe/describe.go | 2 +- .../kubectl/pkg/describe/describe_test.go | 32 +++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/staging/src/k8s.io/kubectl/pkg/describe/describe.go b/staging/src/k8s.io/kubectl/pkg/describe/describe.go index e2ef04d4c4a4..a46deb7b7c1e 100644 --- a/staging/src/k8s.io/kubectl/pkg/describe/describe.go +++ b/staging/src/k8s.io/kubectl/pkg/describe/describe.go @@ -4032,7 +4032,7 @@ func describeCSINode(csi *storagev1.CSINode, events *corev1.EventList) (output s for _, driver := range csi.Spec.Drivers { w.Write(LEVEL_2, "%s:\n", driver.Name) w.Write(LEVEL_3, "Node ID:\t%s\n", driver.NodeID) - if driver.Allocatable.Count != nil { + if driver.Allocatable != nil && driver.Allocatable.Count != nil { w.Write(LEVEL_3, "Allocatables:\n") w.Write(LEVEL_4, "Count:\t%d\n", *driver.Allocatable.Count) } diff --git a/staging/src/k8s.io/kubectl/pkg/describe/describe_test.go b/staging/src/k8s.io/kubectl/pkg/describe/describe_test.go index 68e03dc43c13..caf407539fa5 100644 --- a/staging/src/k8s.io/kubectl/pkg/describe/describe_test.go +++ b/staging/src/k8s.io/kubectl/pkg/describe/describe_test.go @@ -1780,6 +1780,38 @@ func TestDescribeStorageClass(t *testing.T) { } } +func TestDescribeCSINode(t *testing.T) { + limit := utilpointer.Int32Ptr(int32(2)) + f := fake.NewSimpleClientset(&storagev1.CSINode{ + ObjectMeta: metav1.ObjectMeta{Name: "foo"}, + Spec: storagev1.CSINodeSpec{ + Drivers: []storagev1.CSINodeDriver{ + { + Name: "driver1", + NodeID: "node1", + }, + { + Name: "driver2", + NodeID: "node2", + Allocatable: &storagev1.VolumeNodeResources{Count: limit}, + }, + }, + }, + }) + s := CSINodeDescriber{f} + out, err := s.Describe("", "foo", DescriberSettings{ShowEvents: true}) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if !strings.Contains(out, "foo") || + !strings.Contains(out, "driver1") || + !strings.Contains(out, "node1") || + !strings.Contains(out, "driver2") || + !strings.Contains(out, "node2") { + t.Errorf("unexpected out: %s", out) + } +} + func TestDescribePodDisruptionBudget(t *testing.T) { minAvailable := intstr.FromInt(22) f := fake.NewSimpleClientset(&policyv1beta1.PodDisruptionBudget{ From c7bde41478ffa395f35a3edd8985e874500a055c Mon Sep 17 00:00:00 2001 From: Ted Yu Date: Tue, 31 Mar 2020 06:57:05 -0700 Subject: [PATCH 83/92] Unregister csiplugin even if socket path is gone Signed-off-by: Ted Yu --- .../cache/actual_state_of_world.go | 12 ++++++ .../operationexecutor/operation_executor.go | 10 +++-- .../operation_executor_test.go | 14 +++++-- .../operationexecutor/operation_generator.go | 41 ++++++++++++------- .../pluginmanager/reconciler/reconciler.go 
| 13 +++++- .../reconciler/reconciler_test.go | 1 + 6 files changed, 66 insertions(+), 25 deletions(-) diff --git a/pkg/kubelet/pluginmanager/cache/actual_state_of_world.go b/pkg/kubelet/pluginmanager/cache/actual_state_of_world.go index 904e8015a462..46c9ada1fc54 100644 --- a/pkg/kubelet/pluginmanager/cache/actual_state_of_world.go +++ b/pkg/kubelet/pluginmanager/cache/actual_state_of_world.go @@ -75,6 +75,18 @@ type actualStateOfWorld struct { var _ ActualStateOfWorld = &actualStateOfWorld{} +// NamedPluginHandler holds information for handler and the name of the plugin +type NamedPluginHandler struct { + Handler PluginHandler + Name string +} + +// SocketPluginHandlers contains the map from socket path to NamedPluginHandler +type SocketPluginHandlers struct { + Handlers map[string]NamedPluginHandler + sync.Mutex +} + // PluginInfo holds information of a plugin type PluginInfo struct { SocketPath string diff --git a/pkg/kubelet/pluginmanager/operationexecutor/operation_executor.go b/pkg/kubelet/pluginmanager/operationexecutor/operation_executor.go index 12ae38ee4043..603bb2e4abfb 100644 --- a/pkg/kubelet/pluginmanager/operationexecutor/operation_executor.go +++ b/pkg/kubelet/pluginmanager/operationexecutor/operation_executor.go @@ -45,11 +45,11 @@ import ( type OperationExecutor interface { // RegisterPlugin registers the given plugin using the a handler in the plugin handler map. // It then updates the actual state of the world to reflect that. - RegisterPlugin(socketPath string, timestamp time.Time, pluginHandlers map[string]cache.PluginHandler, actualStateOfWorld ActualStateOfWorldUpdater) error + RegisterPlugin(socketPath string, timestamp time.Time, pluginHandlers map[string]cache.PluginHandler, pathToHandlers *cache.SocketPluginHandlers, actualStateOfWorld ActualStateOfWorldUpdater) error // UnregisterPlugin deregisters the given plugin using a handler in the given plugin handler map. // It then updates the actual state of the world to reflect that. - UnregisterPlugin(socketPath string, pluginHandlers map[string]cache.PluginHandler, actualStateOfWorld ActualStateOfWorldUpdater) error + UnregisterPlugin(socketPath string, pluginHandlers map[string]cache.PluginHandler, pathToHandlers *cache.SocketPluginHandlers, actualStateOfWorld ActualStateOfWorldUpdater) error } // NewOperationExecutor returns a new instance of OperationExecutor. 
@@ -96,9 +96,10 @@ func (oe *operationExecutor) RegisterPlugin( socketPath string, timestamp time.Time, pluginHandlers map[string]cache.PluginHandler, + pathToHandlers *cache.SocketPluginHandlers, actualStateOfWorld ActualStateOfWorldUpdater) error { generatedOperation := - oe.operationGenerator.GenerateRegisterPluginFunc(socketPath, timestamp, pluginHandlers, actualStateOfWorld) + oe.operationGenerator.GenerateRegisterPluginFunc(socketPath, timestamp, pluginHandlers, pathToHandlers, actualStateOfWorld) return oe.pendingOperations.Run( socketPath, generatedOperation) @@ -107,9 +108,10 @@ func (oe *operationExecutor) RegisterPlugin( func (oe *operationExecutor) UnregisterPlugin( socketPath string, pluginHandlers map[string]cache.PluginHandler, + pathToHandlers *cache.SocketPluginHandlers, actualStateOfWorld ActualStateOfWorldUpdater) error { generatedOperation := - oe.operationGenerator.GenerateUnregisterPluginFunc(socketPath, pluginHandlers, actualStateOfWorld) + oe.operationGenerator.GenerateUnregisterPluginFunc(socketPath, pluginHandlers, pathToHandlers, actualStateOfWorld) return oe.pendingOperations.Run( socketPath, generatedOperation) diff --git a/pkg/kubelet/pluginmanager/operationexecutor/operation_executor_test.go b/pkg/kubelet/pluginmanager/operationexecutor/operation_executor_test.go index a4b2424dae97..0af0df4fcbd6 100644 --- a/pkg/kubelet/pluginmanager/operationexecutor/operation_executor_test.go +++ b/pkg/kubelet/pluginmanager/operationexecutor/operation_executor_test.go @@ -44,9 +44,10 @@ func init() { func TestOperationExecutor_RegisterPlugin_ConcurrentRegisterPlugin(t *testing.T) { ch, quit, oe := setup() + hdlr := cache.SocketPluginHandlers{} for i := 0; i < numPluginsToRegister; i++ { socketPath := fmt.Sprintf("%s/plugin-%d.sock", socketDir, i) - oe.RegisterPlugin(socketPath, time.Now(), nil /* plugin handlers */, nil /* actual state of the world updator */) + oe.RegisterPlugin(socketPath, time.Now(), nil /* plugin handlers */, &hdlr, nil /* actual state of the world updator */) } if !isOperationRunConcurrently(ch, quit, numPluginsToRegister) { t.Fatalf("Unable to start register operations in Concurrent for plugins") @@ -56,8 +57,9 @@ func TestOperationExecutor_RegisterPlugin_ConcurrentRegisterPlugin(t *testing.T) func TestOperationExecutor_RegisterPlugin_SerialRegisterPlugin(t *testing.T) { ch, quit, oe := setup() socketPath := fmt.Sprintf("%s/plugin-serial.sock", socketDir) + hdlr := cache.SocketPluginHandlers{} for i := 0; i < numPluginsToRegister; i++ { - oe.RegisterPlugin(socketPath, time.Now(), nil /* plugin handlers */, nil /* actual state of the world updator */) + oe.RegisterPlugin(socketPath, time.Now(), nil /* plugin handlers */, &hdlr, nil /* actual state of the world updator */) } if !isOperationRunSerially(ch, quit) { @@ -67,9 +69,10 @@ func TestOperationExecutor_RegisterPlugin_SerialRegisterPlugin(t *testing.T) { func TestOperationExecutor_UnregisterPlugin_ConcurrentUnregisterPlugin(t *testing.T) { ch, quit, oe := setup() + hdlr := cache.SocketPluginHandlers{} for i := 0; i < numPluginsToUnregister; i++ { socketPath := "socket-path" + strconv.Itoa(i) - oe.UnregisterPlugin(socketPath, nil /* plugin handlers */, nil /* actual state of the world updator */) + oe.UnregisterPlugin(socketPath, nil /* plugin handlers */, &hdlr, nil /* actual state of the world updator */) } if !isOperationRunConcurrently(ch, quit, numPluginsToUnregister) { @@ -80,8 +83,9 @@ func TestOperationExecutor_UnregisterPlugin_ConcurrentUnregisterPlugin(t *testin func 
TestOperationExecutor_UnregisterPlugin_SerialUnregisterPlugin(t *testing.T) { ch, quit, oe := setup() socketPath := fmt.Sprintf("%s/plugin-serial.sock", socketDir) + hdlr := cache.SocketPluginHandlers{} for i := 0; i < numPluginsToUnregister; i++ { - oe.UnregisterPlugin(socketPath, nil /* plugin handlers */, nil /* actual state of the world updator */) + oe.UnregisterPlugin(socketPath, nil /* plugin handlers */, &hdlr, nil /* actual state of the world updator */) } if !isOperationRunSerially(ch, quit) { @@ -105,6 +109,7 @@ func (fopg *fakeOperationGenerator) GenerateRegisterPluginFunc( socketPath string, timestamp time.Time, pluginHandlers map[string]cache.PluginHandler, + pathToHandlers *cache.SocketPluginHandlers, actualStateOfWorldUpdater ActualStateOfWorldUpdater) func() error { opFunc := func() error { @@ -117,6 +122,7 @@ func (fopg *fakeOperationGenerator) GenerateRegisterPluginFunc( func (fopg *fakeOperationGenerator) GenerateUnregisterPluginFunc( socketPath string, pluginHandlers map[string]cache.PluginHandler, + pathToHandlers *cache.SocketPluginHandlers, actualStateOfWorldUpdater ActualStateOfWorldUpdater) func() error { opFunc := func() error { startOperationAndBlock(fopg.ch, fopg.quit) diff --git a/pkg/kubelet/pluginmanager/operationexecutor/operation_generator.go b/pkg/kubelet/pluginmanager/operationexecutor/operation_generator.go index a16546601df0..407da39e7067 100644 --- a/pkg/kubelet/pluginmanager/operationexecutor/operation_generator.go +++ b/pkg/kubelet/pluginmanager/operationexecutor/operation_generator.go @@ -63,12 +63,14 @@ type OperationGenerator interface { socketPath string, timestamp time.Time, pluginHandlers map[string]cache.PluginHandler, + pathToHandlers *cache.SocketPluginHandlers, actualStateOfWorldUpdater ActualStateOfWorldUpdater) func() error // Generates the UnregisterPlugin function needed to perform the unregistration of a plugin GenerateUnregisterPluginFunc( socketPath string, pluginHandlers map[string]cache.PluginHandler, + pathToHandlers *cache.SocketPluginHandlers, actualStateOfWorldUpdater ActualStateOfWorldUpdater) func() error } @@ -76,6 +78,7 @@ func (og *operationGenerator) GenerateRegisterPluginFunc( socketPath string, timestamp time.Time, pluginHandlers map[string]cache.PluginHandler, + pathToHandlers *cache.SocketPluginHandlers, actualStateOfWorldUpdater ActualStateOfWorldUpdater) func() error { registerPluginFunc := func() error { @@ -122,6 +125,12 @@ func (og *operationGenerator) GenerateRegisterPluginFunc( if err := handler.RegisterPlugin(infoResp.Name, infoResp.Endpoint, infoResp.SupportedVersions); err != nil { return og.notifyPlugin(client, false, fmt.Sprintf("RegisterPlugin error -- plugin registration failed with err: %v", err)) } + pathToHandlers.Lock() + if pathToHandlers.Handlers == nil { + pathToHandlers.Handlers = make(map[string]cache.NamedPluginHandler) + } + pathToHandlers.Handlers[socketPath] = cache.NamedPluginHandler{Handler: handler, Name: infoResp.Name} + pathToHandlers.Unlock() // Notify is called after register to guarantee that even if notify throws an error Register will always be called after validate if err := og.notifyPlugin(client, true, ""); err != nil { @@ -135,33 +144,35 @@ func (og *operationGenerator) GenerateRegisterPluginFunc( func (og *operationGenerator) GenerateUnregisterPluginFunc( socketPath string, pluginHandlers map[string]cache.PluginHandler, + pathToHandlers *cache.SocketPluginHandlers, actualStateOfWorldUpdater ActualStateOfWorldUpdater) func() error { unregisterPluginFunc := func() error { - 
client, conn, err := dial(socketPath, dialTimeoutDuration) + _, conn, err := dial(socketPath, dialTimeoutDuration) if err != nil { - return fmt.Errorf("UnregisterPlugin error -- dial failed at socket %s, err: %v", socketPath, err) + klog.V(4).Infof("unable to dial: %v", err) + } else { + conn.Close() } - defer conn.Close() - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() + var handlerWithName cache.NamedPluginHandler + pathToHandlers.Lock() + handlerWithName, handlerFound := pathToHandlers.Handlers[socketPath] + pathToHandlers.Unlock() - infoResp, err := client.GetInfo(ctx, ®isterapi.InfoRequest{}) - if err != nil { - return fmt.Errorf("UnregisterPlugin error -- failed to get plugin info using RPC GetInfo at socket %s, err: %v", socketPath, err) + if !handlerFound { + return fmt.Errorf("UnregisterPlugin error -- failed to get plugin handler for %s", socketPath) } - - handler, ok := pluginHandlers[infoResp.Type] - if !ok { - return fmt.Errorf("UnregisterPlugin error -- no handler registered for plugin type: %s at socket %s", infoResp.Type, socketPath) - } - // We remove the plugin to the actual state of world cache before calling a plugin consumer's Unregister handle // so that if we receive a register event during Register Plugin, we can process it as a Register call. actualStateOfWorldUpdater.RemovePlugin(socketPath) - handler.DeRegisterPlugin(infoResp.Name) + handlerWithName.Handler.DeRegisterPlugin(handlerWithName.Name) + + pathToHandlers.Lock() + delete(pathToHandlers.Handlers, socketPath) + pathToHandlers.Unlock() + klog.V(4).Infof("DeRegisterPlugin called for %s on %v", handlerWithName.Name, handlerWithName.Handler) return nil } return unregisterPluginFunc diff --git a/pkg/kubelet/pluginmanager/reconciler/reconciler.go b/pkg/kubelet/pluginmanager/reconciler/reconciler.go index 7f6790d5c165..d23f4b0c3b63 100644 --- a/pkg/kubelet/pluginmanager/reconciler/reconciler.go +++ b/pkg/kubelet/pluginmanager/reconciler/reconciler.go @@ -67,6 +67,7 @@ func NewReconciler( desiredStateOfWorld: desiredStateOfWorld, actualStateOfWorld: actualStateOfWorld, handlers: make(map[string]cache.PluginHandler), + pathToHandlers: cache.SocketPluginHandlers{Handlers: make(map[string]cache.NamedPluginHandler)}, } } @@ -76,6 +77,7 @@ type reconciler struct { desiredStateOfWorld cache.DesiredStateOfWorld actualStateOfWorld cache.ActualStateOfWorld handlers map[string]cache.PluginHandler + pathToHandlers cache.SocketPluginHandlers sync.RWMutex } @@ -103,6 +105,13 @@ func (rc *reconciler) getHandlers() map[string]cache.PluginHandler { return rc.handlers } +func (rc *reconciler) getPathToHandlers() *cache.SocketPluginHandlers { + rc.RLock() + defer rc.RUnlock() + + return &rc.pathToHandlers +} + func (rc *reconciler) reconcile() { // Unregisterations are triggered before registrations @@ -127,7 +136,7 @@ func (rc *reconciler) reconcile() { if unregisterPlugin { klog.V(5).Infof(registeredPlugin.GenerateMsgDetailed("Starting operationExecutor.UnregisterPlugin", "")) - err := rc.operationExecutor.UnregisterPlugin(registeredPlugin.SocketPath, rc.getHandlers(), rc.actualStateOfWorld) + err := rc.operationExecutor.UnregisterPlugin(registeredPlugin.SocketPath, rc.getHandlers(), rc.getPathToHandlers(), rc.actualStateOfWorld) if err != nil && !goroutinemap.IsAlreadyExists(err) && !exponentialbackoff.IsExponentialBackoff(err) { @@ -145,7 +154,7 @@ func (rc *reconciler) reconcile() { for _, pluginToRegister := range rc.desiredStateOfWorld.GetPluginsToRegister() { if 
!rc.actualStateOfWorld.PluginExistsWithCorrectTimestamp(pluginToRegister) { klog.V(5).Infof(pluginToRegister.GenerateMsgDetailed("Starting operationExecutor.RegisterPlugin", "")) - err := rc.operationExecutor.RegisterPlugin(pluginToRegister.SocketPath, pluginToRegister.Timestamp, rc.getHandlers(), rc.actualStateOfWorld) + err := rc.operationExecutor.RegisterPlugin(pluginToRegister.SocketPath, pluginToRegister.Timestamp, rc.getHandlers(), rc.getPathToHandlers(), rc.actualStateOfWorld) if err != nil && !goroutinemap.IsAlreadyExists(err) && !exponentialbackoff.IsExponentialBackoff(err) { diff --git a/pkg/kubelet/pluginmanager/reconciler/reconciler_test.go b/pkg/kubelet/pluginmanager/reconciler/reconciler_test.go index 5749e56110c6..7ccc133189e9 100644 --- a/pkg/kubelet/pluginmanager/reconciler/reconciler_test.go +++ b/pkg/kubelet/pluginmanager/reconciler/reconciler_test.go @@ -252,6 +252,7 @@ func Test_Run_Positive_RegisterThenUnregister(t *testing.T) { } dsw.RemovePlugin(socketPath) + os.Remove(socketPath) waitForUnregistration(t, socketPath, asw) // Get asw plugins; it should no longer contain the added plugin From 922ec728de9248657f026eb6cfb8fdaeb11049ac Mon Sep 17 00:00:00 2001 From: jingyih Date: Mon, 16 Mar 2020 07:55:38 -0700 Subject: [PATCH 84/92] Add a metric exposing etcd database size --- .../app/options/options_test.go | 1 + .../apiserver/pkg/server/options/etcd.go | 3 + .../pkg/storage/etcd3/metrics/metrics.go | 14 ++++ .../pkg/storage/storagebackend/config.go | 14 ++-- .../storage/storagebackend/factory/etcd3.go | 78 +++++++++++++++---- 5 files changed, 91 insertions(+), 19 deletions(-) diff --git a/cmd/kube-apiserver/app/options/options_test.go b/cmd/kube-apiserver/app/options/options_test.go index 9afed6eb8751..45392fc64d56 100644 --- a/cmd/kube-apiserver/app/options/options_test.go +++ b/cmd/kube-apiserver/app/options/options_test.go @@ -159,6 +159,7 @@ func TestAddFlags(t *testing.T) { Prefix: "/registry", CompactionInterval: storagebackend.DefaultCompactInterval, CountMetricPollPeriod: time.Minute, + DBMetricPollInterval: storagebackend.DefaultDBMetricPollInterval, }, DefaultStorageMediaType: "application/vnd.kubernetes.protobuf", DeleteCollectionWorkers: 1, diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go b/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go index 4cf68fb8fd61..358776a790bd 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go @@ -176,6 +176,9 @@ func (s *EtcdOptions) AddFlags(fs *pflag.FlagSet) { fs.DurationVar(&s.StorageConfig.CountMetricPollPeriod, "etcd-count-metric-poll-period", s.StorageConfig.CountMetricPollPeriod, ""+ "Frequency of polling etcd for number of resources per type. 0 disables the metric collection.") + + fs.DurationVar(&s.StorageConfig.DBMetricPollInterval, "etcd-db-metric-poll-interval", s.StorageConfig.DBMetricPollInterval, + "The interval of requests to poll etcd and update metric. 
0 disables the metric collection") } func (s *EtcdOptions) ApplyTo(c *server.Config) error { diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go index 8dd6462b0711..a0de7e18cec6 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go @@ -49,6 +49,14 @@ var ( }, []string{"resource"}, ) + dbTotalSize = compbasemetrics.NewGaugeVec( + &compbasemetrics.GaugeOpts{ + Name: "etcd_db_total_size_in_bytes", + Help: "Total size of the etcd database file physically allocated in bytes.", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"endpoint"}, + ) ) var registerMetrics sync.Once @@ -59,6 +67,7 @@ func Register() { registerMetrics.Do(func() { legacyregistry.MustRegister(etcdRequestLatency) legacyregistry.MustRegister(objectCounts) + legacyregistry.MustRegister(dbTotalSize) }) } @@ -81,3 +90,8 @@ func Reset() { func sinceInSeconds(start time.Time) float64 { return time.Since(start).Seconds() } + +// UpdateEtcdDbSize sets the etcd_db_total_size_in_bytes metric. +func UpdateEtcdDbSize(ep string, size int64) { + dbTotalSize.WithLabelValues(ep).Set(float64(size)) +} diff --git a/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/config.go b/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/config.go index cbf50b2112a6..5dc0bbfb3496 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/config.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/config.go @@ -28,7 +28,8 @@ const ( StorageTypeUnset = "" StorageTypeETCD3 = "etcd3" - DefaultCompactInterval = 5 * time.Minute + DefaultCompactInterval = 5 * time.Minute + DefaultDBMetricPollInterval = 30 * time.Second ) // TransportConfig holds all connection related info, i.e. equal TransportConfig means equal servers we talk to. @@ -71,13 +72,16 @@ type Config struct { CompactionInterval time.Duration // CountMetricPollPeriod specifies how often should count metric be updated CountMetricPollPeriod time.Duration + // DBMetricPollInterval specifies how often should storage backend metric be updated. + DBMetricPollInterval time.Duration } func NewDefaultConfig(prefix string, codec runtime.Codec) *Config { return &Config{ - Paging: true, - Prefix: prefix, - Codec: codec, - CompactionInterval: DefaultCompactInterval, + Paging: true, + Prefix: prefix, + Codec: codec, + CompactionInterval: DefaultCompactInterval, + DBMetricPollInterval: DefaultDBMetricPollInterval, } } diff --git a/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go b/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go index 81a24825b9e9..97578c3d955a 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go @@ -36,20 +36,26 @@ import ( "k8s.io/apiserver/pkg/server/egressselector" "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/etcd3" + "k8s.io/apiserver/pkg/storage/etcd3/metrics" "k8s.io/apiserver/pkg/storage/storagebackend" "k8s.io/apiserver/pkg/storage/value" "k8s.io/component-base/metrics/legacyregistry" + "k8s.io/klog" ) -// The short keepalive timeout and interval have been chosen to aggressively -// detect a failed etcd server without introducing much overhead. 
-const keepaliveTime = 30 * time.Second -const keepaliveTimeout = 10 * time.Second +const ( + // The short keepalive timeout and interval have been chosen to aggressively + // detect a failed etcd server without introducing much overhead. + keepaliveTime = 30 * time.Second + keepaliveTimeout = 10 * time.Second -// dialTimeout is the timeout for failing to establish a connection. -// It is set to 20 seconds as times shorter than that will cause TLS connections to fail -// on heavily loaded arm64 CPUs (issue #64649) -const dialTimeout = 20 * time.Second + // dialTimeout is the timeout for failing to establish a connection. + // It is set to 20 seconds as times shorter than that will cause TLS connections to fail + // on heavily loaded arm64 CPUs (issue #64649) + dialTimeout = 20 * time.Second + + dbMetricsMonitorJitter = 0.5 +) func init() { // grpcprom auto-registers (via an init function) their client metrics, since we are opting out of @@ -57,6 +63,7 @@ func init() { // we need to explicitly register these metrics to our global registry here. // For reference: https://github.com/kubernetes/kubernetes/pull/81387 legacyregistry.RawMustRegister(grpcprom.DefaultClientMetrics) + dbMetricsMonitors = make(map[string]struct{}) } func newETCD3HealthCheck(c storagebackend.Config) (func() error, error) { @@ -153,16 +160,20 @@ type runningCompactor struct { } var ( - lock sync.Mutex - compactors = map[string]*runningCompactor{} + // compactorsMu guards access to compactors map + compactorsMu sync.Mutex + compactors = map[string]*runningCompactor{} + // dbMetricsMonitorsMu guards access to dbMetricsMonitors map + dbMetricsMonitorsMu sync.Mutex + dbMetricsMonitors map[string]struct{} ) // startCompactorOnce start one compactor per transport. If the interval get smaller on repeated calls, the // compactor is replaced. A destroy func is returned. If all destroy funcs with the same transport are called, // the compactor is stopped. func startCompactorOnce(c storagebackend.TransportConfig, interval time.Duration) (func(), error) { - lock.Lock() - defer lock.Unlock() + compactorsMu.Lock() + defer compactorsMu.Unlock() key := fmt.Sprintf("%v", c) // gives: {[server1 server2] keyFile certFile caFile} if compactor, foundBefore := compactors[key]; !foundBefore || compactor.interval > interval { @@ -193,8 +204,8 @@ func startCompactorOnce(c storagebackend.TransportConfig, interval time.Duration compactors[key].refs++ return func() { - lock.Lock() - defer lock.Unlock() + compactorsMu.Lock() + defer compactorsMu.Unlock() compactor := compactors[key] compactor.refs-- @@ -218,6 +229,11 @@ func newETCD3Storage(c storagebackend.Config) (storage.Interface, DestroyFunc, e return nil, nil, err } + stopDBSizeMonitor, err := startDBSizeMonitorPerEndpoint(client, c.DBMetricPollInterval) + if err != nil { + return nil, nil, err + } + var once sync.Once destroyFunc := func() { // we know that storage destroy funcs are called multiple times (due to reuse in subresources). 
@@ -225,6 +241,7 @@ func newETCD3Storage(c storagebackend.Config) (storage.Interface, DestroyFunc, e // TODO: fix duplicated storage destroy calls higher level once.Do(func() { stopCompactor() + stopDBSizeMonitor() client.Close() }) } @@ -234,3 +251,36 @@ func newETCD3Storage(c storagebackend.Config) (storage.Interface, DestroyFunc, e } return etcd3.New(client, c.Codec, c.Prefix, transformer, c.Paging), destroyFunc, nil } + +// startDBSizeMonitorPerEndpoint starts a loop to monitor etcd database size and update the +// corresponding metric etcd_db_total_size_in_bytes for each etcd server endpoint. +func startDBSizeMonitorPerEndpoint(client *clientv3.Client, interval time.Duration) (func(), error) { + if interval == 0 { + return func() {}, nil + } + dbMetricsMonitorsMu.Lock() + defer dbMetricsMonitorsMu.Unlock() + + ctx, cancel := context.WithCancel(context.Background()) + for _, ep := range client.Endpoints() { + if _, found := dbMetricsMonitors[ep]; found { + continue + } + dbMetricsMonitors[ep] = struct{}{} + endpoint := ep + klog.V(4).Infof("Start monitoring storage db size metric for endpoint %s with polling interval %v", endpoint, interval) + go wait.JitterUntilWithContext(ctx, func(context.Context) { + epStatus, err := client.Maintenance.Status(ctx, endpoint) + if err != nil { + klog.V(4).Infof("Failed to get storage db size for ep %s: %v", endpoint, err) + metrics.UpdateEtcdDbSize(endpoint, -1) + } else { + metrics.UpdateEtcdDbSize(endpoint, epStatus.DbSize) + } + }, interval, dbMetricsMonitorJitter, true) + } + + return func() { + cancel() + }, nil +} From e15c49f1bbb9476249d471de63baf1032ec1b788 Mon Sep 17 00:00:00 2001 From: jingyih Date: Mon, 16 Mar 2020 08:15:43 -0700 Subject: [PATCH 85/92] Generated --- .../k8s.io/apiserver/pkg/storage/storagebackend/factory/BUILD | 2 ++ 1 file changed, 2 insertions(+) diff --git a/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/BUILD b/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/BUILD index 594c1d396ee0..23cee707774c 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/BUILD @@ -39,6 +39,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/server/egressselector:go_default_library", "//staging/src/k8s.io/apiserver/pkg/storage:go_default_library", "//staging/src/k8s.io/apiserver/pkg/storage/etcd3:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/storage/etcd3/metrics:go_default_library", "//staging/src/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library", "//staging/src/k8s.io/apiserver/pkg/storage/value:go_default_library", "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library", @@ -46,6 +47,7 @@ go_library( "//vendor/go.etcd.io/etcd/clientv3:go_default_library", "//vendor/go.etcd.io/etcd/pkg/transport:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) From 275da7afc05ed582e9e1c1f58dcbf8ddca3ef672 Mon Sep 17 00:00:00 2001 From: Aldo Culquicondor Date: Mon, 30 Mar 2020 15:08:57 -0400 Subject: [PATCH 86/92] Benchmark for topology spreading filter Signed-off-by: Aldo Culquicondor --- .../podtopologyspread/filtering_test.go | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/filtering_test.go b/pkg/scheduler/framework/plugins/podtopologyspread/filtering_test.go index 574b8e953fae..ce4414cfc41b 100644 --- 
a/pkg/scheduler/framework/plugins/podtopologyspread/filtering_test.go +++ b/pkg/scheduler/framework/plugins/podtopologyspread/filtering_test.go @@ -30,6 +30,7 @@ import ( "k8s.io/client-go/kubernetes/fake" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" "k8s.io/kubernetes/pkg/scheduler/internal/cache" + "k8s.io/kubernetes/pkg/scheduler/internal/parallelize" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" st "k8s.io/kubernetes/pkg/scheduler/testing" ) @@ -1065,7 +1066,7 @@ func TestPreFilterStateRemovePod(t *testing.T) { } } -func BenchmarkTestCalPreFilterState(b *testing.B) { +func BenchmarkFilter(b *testing.B) { tests := []struct { name string pod *v1.Pod @@ -1103,17 +1104,30 @@ func BenchmarkTestCalPreFilterState(b *testing.B) { }, } for _, tt := range tests { + var state *framework.CycleState b.Run(tt.name, func(b *testing.B) { existingPods, allNodes, _ := st.MakeNodesAndPodsForEvenPodsSpread(tt.pod.Labels, tt.existingPodsNum, tt.allNodesNum, tt.filteredNodesNum) pl := PodTopologySpread{ sharedLister: cache.NewSnapshot(existingPods, allNodes), } + ctx := context.Background() b.ResetTimer() for i := 0; i < b.N; i++ { - s := pl.PreFilter(context.Background(), framework.NewCycleState(), tt.pod) + state = framework.NewCycleState() + s := pl.PreFilter(ctx, state, tt.pod) if !s.IsSuccess() { b.Fatal(s.AsError()) } + filterNode := func(i int) { + n, _ := pl.sharedLister.NodeInfos().Get(allNodes[i].Name) + pl.Filter(ctx, state, tt.pod, n) + } + parallelize.Until(ctx, len(allNodes), filterNode) + } + }) + b.Run(tt.name+"/Clone", func(b *testing.B) { + for i := 0; i < b.N; i++ { + state.Clone() } }) } From 65d9f8175844413be49d0dc978f96f13f9a013b3 Mon Sep 17 00:00:00 2001 From: Aldo Culquicondor Date: Mon, 30 Mar 2020 16:59:46 -0400 Subject: [PATCH 87/92] Replace lock with atomic updates in spreading filter Signed-off-by: Aldo Culquicondor --- .../plugins/podtopologyspread/filtering.go | 76 +++---- .../podtopologyspread/filtering_test.go | 209 +++++++++--------- 2 files changed, 141 insertions(+), 144 deletions(-) diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go b/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go index 16cb2ca120bd..761c5cce6a4d 100644 --- a/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go +++ b/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go @@ -20,12 +20,12 @@ import ( "context" "fmt" "math" - "sync" + "sync/atomic" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/klog" - pluginhelper "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" "k8s.io/kubernetes/pkg/scheduler/internal/parallelize" "k8s.io/kubernetes/pkg/scheduler/nodeinfo" @@ -48,7 +48,7 @@ type preFilterState struct { // it's not guaranteed to be the 2nd minimum match number. TpKeyToCriticalPaths map[string]*criticalPaths // TpPairToMatchNum is keyed with topologyPair, and valued with the number of matching pods. - TpPairToMatchNum map[topologyPair]int32 + TpPairToMatchNum map[topologyPair]*int32 } // Clone makes a copy of the given state. @@ -61,14 +61,15 @@ func (s *preFilterState) Clone() framework.StateData { // Constraints are shared because they don't change. 
Constraints: s.Constraints, TpKeyToCriticalPaths: make(map[string]*criticalPaths, len(s.TpKeyToCriticalPaths)), - TpPairToMatchNum: make(map[topologyPair]int32, len(s.TpPairToMatchNum)), + TpPairToMatchNum: make(map[topologyPair]*int32, len(s.TpPairToMatchNum)), } for tpKey, paths := range s.TpKeyToCriticalPaths { copy.TpKeyToCriticalPaths[tpKey] = &criticalPaths{paths[0], paths[1]} } for tpPair, matchNum := range s.TpPairToMatchNum { copyPair := topologyPair{key: tpPair.key, value: tpPair.value} - copy.TpPairToMatchNum[copyPair] = matchNum + copyCount := *matchNum + copy.TpPairToMatchNum[copyPair] = &copyCount } return &copy } @@ -137,9 +138,9 @@ func (s *preFilterState) updateWithPod(updatedPod, preemptorPod *v1.Pod, node *v k, v := constraint.TopologyKey, node.Labels[constraint.TopologyKey] pair := topologyPair{key: k, value: v} - s.TpPairToMatchNum[pair] = s.TpPairToMatchNum[pair] + delta + *s.TpPairToMatchNum[pair] += delta - s.TpKeyToCriticalPaths[k].update(v, s.TpPairToMatchNum[pair]) + s.TpKeyToCriticalPaths[k].update(v, *s.TpPairToMatchNum[pair]) } } @@ -219,52 +220,44 @@ func (pl *PodTopologySpread) calPreFilterState(pod *v1.Pod) (*preFilterState, er return &preFilterState{}, nil } - var lock sync.Mutex - - // TODO(Huang-Wei): It might be possible to use "make(map[topologyPair]*int32)". - // In that case, need to consider how to init each tpPairToCount[pair] in an atomic fashion. s := preFilterState{ Constraints: constraints, TpKeyToCriticalPaths: make(map[string]*criticalPaths, len(constraints)), - TpPairToMatchNum: make(map[topologyPair]int32), - } - addTopologyPairMatchNum := func(pair topologyPair, num int32) { - lock.Lock() - s.TpPairToMatchNum[pair] += num - lock.Unlock() + TpPairToMatchNum: make(map[topologyPair]*int32), } - - processNode := func(i int) { - nodeInfo := allNodes[i] - node := nodeInfo.Node() + for _, n := range allNodes { + node := n.Node() if node == nil { klog.Error("node not found") - return + continue } // In accordance to design, if NodeAffinity or NodeSelector is defined, // spreading is applied to nodes that pass those filters. - if !pluginhelper.PodMatchesNodeSelectorAndAffinityTerms(pod, node) { - return + if !helper.PodMatchesNodeSelectorAndAffinityTerms(pod, node) { + continue } - // Ensure current node's labels contains all topologyKeys in 'Constraints'. if !nodeLabelsMatchSpreadConstraints(node.Labels, constraints) { - return + continue } + for _, c := range constraints { + pair := topologyPair{key: c.TopologyKey, value: node.Labels[c.TopologyKey]} + s.TpPairToMatchNum[pair] = new(int32) + } + } + + processNode := func(i int) { + nodeInfo := allNodes[i] + node := nodeInfo.Node() + for _, constraint := range constraints { - matchTotal := int32(0) - // nodeInfo.Pods() can be empty; or all pods don't fit - for _, existingPod := range nodeInfo.Pods() { - // Bypass terminating Pod (see #87621). 
- if existingPod.DeletionTimestamp != nil || existingPod.Namespace != pod.Namespace { - continue - } - if constraint.Selector.Matches(labels.Set(existingPod.Labels)) { - matchTotal++ - } - } pair := topologyPair{key: constraint.TopologyKey, value: node.Labels[constraint.TopologyKey]} - addTopologyPairMatchNum(pair, matchTotal) + tpCount := s.TpPairToMatchNum[pair] + if tpCount == nil { + continue + } + count := countPodsMatchSelector(nodeInfo.Pods(), constraint.Selector, pod.Namespace) + atomic.AddInt32(tpCount, int32(count)) } } parallelize.Until(context.Background(), len(allNodes), processNode) @@ -275,7 +268,7 @@ func (pl *PodTopologySpread) calPreFilterState(pod *v1.Pod) (*preFilterState, er s.TpKeyToCriticalPaths[key] = newCriticalPaths() } for pair, num := range s.TpPairToMatchNum { - s.TpKeyToCriticalPaths[pair.key].update(pair.value, num) + s.TpKeyToCriticalPaths[pair.key].update(pair.value, *num) } return &s, nil @@ -322,7 +315,10 @@ func (pl *PodTopologySpread) Filter(ctx context.Context, cycleState *framework.C // judging criteria: // 'existing matching num' + 'if self-match (1 or 0)' - 'global min matching num' <= 'maxSkew' minMatchNum := paths[0].MatchNum - matchNum := s.TpPairToMatchNum[pair] + matchNum := int32(0) + if tpCount := s.TpPairToMatchNum[pair]; tpCount != nil { + matchNum = *tpCount + } skew := matchNum + selfMatchNum - minMatchNum if skew > c.MaxSkew { klog.V(5).Infof("node '%s' failed spreadConstraint[%s]: MatchNum(%d) + selfMatchNum(%d) - minMatchNum(%d) > maxSkew(%d)", node.Name, tpKey, matchNum, selfMatchNum, minMatchNum, c.MaxSkew) diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/filtering_test.go b/pkg/scheduler/framework/plugins/podtopologyspread/filtering_test.go index ce4414cfc41b..6a953ef45bbc 100644 --- a/pkg/scheduler/framework/plugins/podtopologyspread/filtering_test.go +++ b/pkg/scheduler/framework/plugins/podtopologyspread/filtering_test.go @@ -33,6 +33,7 @@ import ( "k8s.io/kubernetes/pkg/scheduler/internal/parallelize" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" st "k8s.io/kubernetes/pkg/scheduler/testing" + "k8s.io/utils/pointer" ) var cmpOpts = []cmp.Option{ @@ -87,9 +88,9 @@ func TestPreFilterState(t *testing.T) { TpKeyToCriticalPaths: map[string]*criticalPaths{ "zone": {{"zone1", 0}, {"zone2", 0}}, }, - TpPairToMatchNum: map[topologyPair]int32{ - {key: "zone", value: "zone1"}: 0, - {key: "zone", value: "zone2"}: 0, + TpPairToMatchNum: map[topologyPair]*int32{ + {key: "zone", value: "zone1"}: pointer.Int32Ptr(0), + {key: "zone", value: "zone2"}: pointer.Int32Ptr(0), }, }, }, @@ -122,9 +123,9 @@ func TestPreFilterState(t *testing.T) { TpKeyToCriticalPaths: map[string]*criticalPaths{ "zone": {{"zone2", 2}, {"zone1", 3}}, }, - TpPairToMatchNum: map[topologyPair]int32{ - {key: "zone", value: "zone1"}: 3, - {key: "zone", value: "zone2"}: 2, + TpPairToMatchNum: map[topologyPair]*int32{ + {key: "zone", value: "zone1"}: pointer.Int32Ptr(3), + {key: "zone", value: "zone2"}: pointer.Int32Ptr(2), }, }, }, @@ -159,10 +160,10 @@ func TestPreFilterState(t *testing.T) { TpKeyToCriticalPaths: map[string]*criticalPaths{ "zone": {{"zone3", 0}, {"zone2", 2}}, }, - TpPairToMatchNum: map[topologyPair]int32{ - {key: "zone", value: "zone1"}: 3, - {key: "zone", value: "zone2"}: 2, - {key: "zone", value: "zone3"}: 0, + TpPairToMatchNum: map[topologyPair]*int32{ + {key: "zone", value: "zone1"}: pointer.Int32Ptr(3), + {key: "zone", value: "zone2"}: pointer.Int32Ptr(2), + {key: "zone", value: "zone3"}: pointer.Int32Ptr(0), }, }, }, @@ 
-195,9 +196,9 @@ func TestPreFilterState(t *testing.T) { TpKeyToCriticalPaths: map[string]*criticalPaths{ "zone": {{"zone2", 1}, {"zone1", 2}}, }, - TpPairToMatchNum: map[topologyPair]int32{ - {key: "zone", value: "zone1"}: 2, - {key: "zone", value: "zone2"}: 1, + TpPairToMatchNum: map[topologyPair]*int32{ + {key: "zone", value: "zone1"}: pointer.Int32Ptr(2), + {key: "zone", value: "zone2"}: pointer.Int32Ptr(1), }, }, }, @@ -239,13 +240,13 @@ func TestPreFilterState(t *testing.T) { "zone": {{"zone1", 3}, {"zone2", 4}}, "node": {{"node-x", 0}, {"node-b", 1}}, }, - TpPairToMatchNum: map[topologyPair]int32{ - {key: "zone", value: "zone1"}: 3, - {key: "zone", value: "zone2"}: 4, - {key: "node", value: "node-a"}: 2, - {key: "node", value: "node-b"}: 1, - {key: "node", value: "node-x"}: 0, - {key: "node", value: "node-y"}: 4, + TpPairToMatchNum: map[topologyPair]*int32{ + {key: "zone", value: "zone1"}: pointer.Int32Ptr(3), + {key: "zone", value: "zone2"}: pointer.Int32Ptr(4), + {key: "node", value: "node-a"}: pointer.Int32Ptr(2), + {key: "node", value: "node-b"}: pointer.Int32Ptr(1), + {key: "node", value: "node-x"}: pointer.Int32Ptr(0), + {key: "node", value: "node-y"}: pointer.Int32Ptr(4), }, }, }, @@ -288,12 +289,12 @@ func TestPreFilterState(t *testing.T) { "zone": {{"zone1", 3}, {"zone2", 4}}, "node": {{"node-b", 1}, {"node-a", 2}}, }, - TpPairToMatchNum: map[topologyPair]int32{ - {key: "zone", value: "zone1"}: 3, - {key: "zone", value: "zone2"}: 4, - {key: "node", value: "node-a"}: 2, - {key: "node", value: "node-b"}: 1, - {key: "node", value: "node-y"}: 4, + TpPairToMatchNum: map[topologyPair]*int32{ + {key: "zone", value: "zone1"}: pointer.Int32Ptr(3), + {key: "zone", value: "zone2"}: pointer.Int32Ptr(4), + {key: "node", value: "node-a"}: pointer.Int32Ptr(2), + {key: "node", value: "node-b"}: pointer.Int32Ptr(1), + {key: "node", value: "node-y"}: pointer.Int32Ptr(4), }, }, }, @@ -329,12 +330,12 @@ func TestPreFilterState(t *testing.T) { "zone": {{"zone2", 0}, {"zone1", 1}}, "node": {{"node-a", 0}, {"node-y", 0}}, }, - TpPairToMatchNum: map[topologyPair]int32{ - {key: "zone", value: "zone1"}: 1, - {key: "zone", value: "zone2"}: 0, - {key: "node", value: "node-a"}: 0, - {key: "node", value: "node-b"}: 1, - {key: "node", value: "node-y"}: 0, + TpPairToMatchNum: map[topologyPair]*int32{ + {key: "zone", value: "zone1"}: pointer.Int32Ptr(1), + {key: "zone", value: "zone2"}: pointer.Int32Ptr(0), + {key: "node", value: "node-a"}: pointer.Int32Ptr(0), + {key: "node", value: "node-b"}: pointer.Int32Ptr(1), + {key: "node", value: "node-y"}: pointer.Int32Ptr(0), }, }, }, @@ -375,12 +376,12 @@ func TestPreFilterState(t *testing.T) { "zone": {{"zone1", 3}, {"zone2", 4}}, "node": {{"node-b", 0}, {"node-a", 1}}, }, - TpPairToMatchNum: map[topologyPair]int32{ - {key: "zone", value: "zone1"}: 3, - {key: "zone", value: "zone2"}: 4, - {key: "node", value: "node-a"}: 1, - {key: "node", value: "node-b"}: 0, - {key: "node", value: "node-y"}: 2, + TpPairToMatchNum: map[topologyPair]*int32{ + {key: "zone", value: "zone1"}: pointer.Int32Ptr(3), + {key: "zone", value: "zone2"}: pointer.Int32Ptr(4), + {key: "node", value: "node-a"}: pointer.Int32Ptr(1), + {key: "node", value: "node-b"}: pointer.Int32Ptr(0), + {key: "node", value: "node-y"}: pointer.Int32Ptr(2), }, }, }, @@ -423,12 +424,12 @@ func TestPreFilterState(t *testing.T) { "zone": {{"zone1", 3}, {"zone2", 4}}, "node": {{"node-b", 1}, {"node-a", 2}}, }, - TpPairToMatchNum: map[topologyPair]int32{ - {key: "zone", value: "zone1"}: 3, - {key: "zone", value: 
"zone2"}: 4, - {key: "node", value: "node-a"}: 2, - {key: "node", value: "node-b"}: 1, - {key: "node", value: "node-y"}: 4, + TpPairToMatchNum: map[topologyPair]*int32{ + {key: "zone", value: "zone1"}: pointer.Int32Ptr(3), + {key: "zone", value: "zone2"}: pointer.Int32Ptr(4), + {key: "node", value: "node-a"}: pointer.Int32Ptr(2), + {key: "node", value: "node-b"}: pointer.Int32Ptr(1), + {key: "node", value: "node-y"}: pointer.Int32Ptr(4), }, }, }, @@ -460,7 +461,7 @@ func TestPreFilterState(t *testing.T) { "node": newCriticalPaths(), "rack": newCriticalPaths(), }, - TpPairToMatchNum: make(map[topologyPair]int32), + TpPairToMatchNum: make(map[topologyPair]*int32), }, }, { @@ -496,7 +497,7 @@ func TestPreFilterState(t *testing.T) { TpKeyToCriticalPaths: map[string]*criticalPaths{ "zone": newCriticalPaths(), }, - TpPairToMatchNum: make(map[topologyPair]int32), + TpPairToMatchNum: make(map[topologyPair]*int32), }, }, { @@ -573,9 +574,9 @@ func TestPreFilterStateAddPod(t *testing.T) { TpKeyToCriticalPaths: map[string]*criticalPaths{ "node": {{"node-b", 0}, {"node-a", 1}}, }, - TpPairToMatchNum: map[topologyPair]int32{ - {key: "node", value: "node-a"}: 1, - {key: "node", value: "node-b"}: 0, + TpPairToMatchNum: map[topologyPair]*int32{ + {key: "node", value: "node-a"}: pointer.Int32Ptr(1), + {key: "node", value: "node-b"}: pointer.Int32Ptr(0), }, }, }, @@ -598,9 +599,9 @@ func TestPreFilterStateAddPod(t *testing.T) { TpKeyToCriticalPaths: map[string]*criticalPaths{ "node": {{"node-a", 1}, {"node-b", 1}}, }, - TpPairToMatchNum: map[topologyPair]int32{ - {key: "node", value: "node-a"}: 1, - {key: "node", value: "node-b"}: 1, + TpPairToMatchNum: map[topologyPair]*int32{ + {key: "node", value: "node-a"}: pointer.Int32Ptr(1), + {key: "node", value: "node-b"}: pointer.Int32Ptr(1), }, }, }, @@ -623,9 +624,9 @@ func TestPreFilterStateAddPod(t *testing.T) { TpKeyToCriticalPaths: map[string]*criticalPaths{ "node": {{"node-a", 0}, {"node-b", 1}}, }, - TpPairToMatchNum: map[topologyPair]int32{ - {key: "node", value: "node-a"}: 0, - {key: "node", value: "node-b"}: 1, + TpPairToMatchNum: map[topologyPair]*int32{ + {key: "node", value: "node-a"}: pointer.Int32Ptr(0), + {key: "node", value: "node-b"}: pointer.Int32Ptr(1), }, }, }, @@ -648,9 +649,9 @@ func TestPreFilterStateAddPod(t *testing.T) { TpKeyToCriticalPaths: map[string]*criticalPaths{ "node": {{"node-a", 0}, {"node-b", 2}}, }, - TpPairToMatchNum: map[topologyPair]int32{ - {key: "node", value: "node-a"}: 0, - {key: "node", value: "node-b"}: 2, + TpPairToMatchNum: map[topologyPair]*int32{ + {key: "node", value: "node-a"}: pointer.Int32Ptr(0), + {key: "node", value: "node-b"}: pointer.Int32Ptr(2), }, }, }, @@ -673,11 +674,11 @@ func TestPreFilterStateAddPod(t *testing.T) { "zone": {{"zone2", 0}, {"zone1", 1}}, "node": {{"node-x", 0}, {"node-a", 1}}, }, - TpPairToMatchNum: map[topologyPair]int32{ - {key: "zone", value: "zone1"}: 1, - {key: "zone", value: "zone2"}: 0, - {key: "node", value: "node-a"}: 1, - {key: "node", value: "node-x"}: 0, + TpPairToMatchNum: map[topologyPair]*int32{ + {key: "zone", value: "zone1"}: pointer.Int32Ptr(1), + {key: "zone", value: "zone2"}: pointer.Int32Ptr(0), + {key: "node", value: "node-a"}: pointer.Int32Ptr(1), + {key: "node", value: "node-x"}: pointer.Int32Ptr(0), }, }, }, @@ -702,11 +703,11 @@ func TestPreFilterStateAddPod(t *testing.T) { "zone": {{"zone1", 1}, {"zone2", 1}}, "node": {{"node-a", 1}, {"node-x", 1}}, }, - TpPairToMatchNum: map[topologyPair]int32{ - {key: "zone", value: "zone1"}: 1, - {key: "zone", value: 
"zone2"}: 1, - {key: "node", value: "node-a"}: 1, - {key: "node", value: "node-x"}: 1, + TpPairToMatchNum: map[topologyPair]*int32{ + {key: "zone", value: "zone1"}: pointer.Int32Ptr(1), + {key: "zone", value: "zone2"}: pointer.Int32Ptr(1), + {key: "node", value: "node-a"}: pointer.Int32Ptr(1), + {key: "node", value: "node-x"}: pointer.Int32Ptr(1), }, }, }, @@ -734,12 +735,12 @@ func TestPreFilterStateAddPod(t *testing.T) { "zone": {{"zone2", 1}, {"zone1", 3}}, "node": {{"node-a", 1}, {"node-x", 1}}, }, - TpPairToMatchNum: map[topologyPair]int32{ - {key: "zone", value: "zone1"}: 3, - {key: "zone", value: "zone2"}: 1, - {key: "node", value: "node-a"}: 1, - {key: "node", value: "node-b"}: 2, - {key: "node", value: "node-x"}: 1, + TpPairToMatchNum: map[topologyPair]*int32{ + {key: "zone", value: "zone1"}: pointer.Int32Ptr(3), + {key: "zone", value: "zone2"}: pointer.Int32Ptr(1), + {key: "node", value: "node-a"}: pointer.Int32Ptr(1), + {key: "node", value: "node-b"}: pointer.Int32Ptr(2), + {key: "node", value: "node-x"}: pointer.Int32Ptr(1), }, }, }, @@ -774,12 +775,12 @@ func TestPreFilterStateAddPod(t *testing.T) { "zone": {{"zone2", 1}, {"zone1", 2}}, "node": {{"node-a", 0}, {"node-b", 1}}, }, - TpPairToMatchNum: map[topologyPair]int32{ - {key: "zone", value: "zone1"}: 2, - {key: "zone", value: "zone2"}: 1, - {key: "node", value: "node-a"}: 0, - {key: "node", value: "node-b"}: 1, - {key: "node", value: "node-x"}: 2, + TpPairToMatchNum: map[topologyPair]*int32{ + {key: "zone", value: "zone1"}: pointer.Int32Ptr(2), + {key: "zone", value: "zone2"}: pointer.Int32Ptr(1), + {key: "node", value: "node-a"}: pointer.Int32Ptr(0), + {key: "node", value: "node-b"}: pointer.Int32Ptr(1), + {key: "node", value: "node-x"}: pointer.Int32Ptr(2), }, }, }, @@ -814,12 +815,12 @@ func TestPreFilterStateAddPod(t *testing.T) { "zone": {{"zone1", 1}, {"zone2", 1}}, "node": {{"node-a", 1}, {"node-b", 1}}, }, - TpPairToMatchNum: map[topologyPair]int32{ - {key: "zone", value: "zone1"}: 1, - {key: "zone", value: "zone2"}: 1, - {key: "node", value: "node-a"}: 1, - {key: "node", value: "node-b"}: 1, - {key: "node", value: "node-x"}: 2, + TpPairToMatchNum: map[topologyPair]*int32{ + {key: "zone", value: "zone1"}: pointer.Int32Ptr(1), + {key: "zone", value: "zone2"}: pointer.Int32Ptr(1), + {key: "node", value: "node-a"}: pointer.Int32Ptr(1), + {key: "node", value: "node-b"}: pointer.Int32Ptr(1), + {key: "node", value: "node-x"}: pointer.Int32Ptr(2), }, }, }, @@ -895,9 +896,9 @@ func TestPreFilterStateRemovePod(t *testing.T) { TpKeyToCriticalPaths: map[string]*criticalPaths{ "zone": {{"zone1", 1}, {"zone2", 1}}, }, - TpPairToMatchNum: map[topologyPair]int32{ - {key: "zone", value: "zone1"}: 1, - {key: "zone", value: "zone2"}: 1, + TpPairToMatchNum: map[topologyPair]*int32{ + {key: "zone", value: "zone1"}: pointer.Int32Ptr(1), + {key: "zone", value: "zone2"}: pointer.Int32Ptr(1), }, }, }, @@ -925,9 +926,9 @@ func TestPreFilterStateRemovePod(t *testing.T) { TpKeyToCriticalPaths: map[string]*criticalPaths{ "zone": {{"zone1", 1}, {"zone2", 2}}, }, - TpPairToMatchNum: map[topologyPair]int32{ - {key: "zone", value: "zone1"}: 1, - {key: "zone", value: "zone2"}: 2, + TpPairToMatchNum: map[topologyPair]*int32{ + {key: "zone", value: "zone1"}: pointer.Int32Ptr(1), + {key: "zone", value: "zone2"}: pointer.Int32Ptr(2), }, }, }, @@ -956,9 +957,9 @@ func TestPreFilterStateRemovePod(t *testing.T) { TpKeyToCriticalPaths: map[string]*criticalPaths{ "zone": {{"zone1", 2}, {"zone2", 2}}, }, - TpPairToMatchNum: map[topologyPair]int32{ - {key: 
"zone", value: "zone1"}: 2, - {key: "zone", value: "zone2"}: 2, + TpPairToMatchNum: map[topologyPair]*int32{ + {key: "zone", value: "zone1"}: pointer.Int32Ptr(2), + {key: "zone", value: "zone2"}: pointer.Int32Ptr(2), }, }, }, @@ -987,9 +988,9 @@ func TestPreFilterStateRemovePod(t *testing.T) { TpKeyToCriticalPaths: map[string]*criticalPaths{ "zone": {{"zone1", 2}, {"zone2", 2}}, }, - TpPairToMatchNum: map[topologyPair]int32{ - {key: "zone", value: "zone1"}: 2, - {key: "zone", value: "zone2"}: 2, + TpPairToMatchNum: map[topologyPair]*int32{ + {key: "zone", value: "zone1"}: pointer.Int32Ptr(2), + {key: "zone", value: "zone2"}: pointer.Int32Ptr(2), }, }, }, @@ -1019,12 +1020,12 @@ func TestPreFilterStateRemovePod(t *testing.T) { "zone": {{"zone2", 1}, {"zone1", 3}}, "node": {{"node-b", 1}, {"node-x", 1}}, }, - TpPairToMatchNum: map[topologyPair]int32{ - {key: "zone", value: "zone1"}: 3, - {key: "zone", value: "zone2"}: 1, - {key: "node", value: "node-a"}: 2, - {key: "node", value: "node-b"}: 1, - {key: "node", value: "node-x"}: 1, + TpPairToMatchNum: map[topologyPair]*int32{ + {key: "zone", value: "zone1"}: pointer.Int32Ptr(3), + {key: "zone", value: "zone2"}: pointer.Int32Ptr(1), + {key: "node", value: "node-a"}: pointer.Int32Ptr(2), + {key: "node", value: "node-b"}: pointer.Int32Ptr(1), + {key: "node", value: "node-x"}: pointer.Int32Ptr(1), }, }, }, From 93fc02cf54021dcb3ff9fe9644842ccdbadb5ee7 Mon Sep 17 00:00:00 2001 From: Aldo Culquicondor Date: Mon, 30 Mar 2020 17:10:48 -0400 Subject: [PATCH 88/92] Set initial map size Signed-off-by: Aldo Culquicondor --- .../framework/plugins/podtopologyspread/filtering.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go b/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go index 761c5cce6a4d..9d716c3ee8f4 100644 --- a/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go +++ b/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go @@ -223,7 +223,7 @@ func (pl *PodTopologySpread) calPreFilterState(pod *v1.Pod) (*preFilterState, er s := preFilterState{ Constraints: constraints, TpKeyToCriticalPaths: make(map[string]*criticalPaths, len(constraints)), - TpPairToMatchNum: make(map[topologyPair]*int32), + TpPairToMatchNum: make(map[topologyPair]*int32, sizeHeuristic(len(allNodes), constraints)), } for _, n := range allNodes { node := n.Node() @@ -328,3 +328,12 @@ func (pl *PodTopologySpread) Filter(ctx context.Context, cycleState *framework.C return nil } + +func sizeHeuristic(nodes int, constraints []topologySpreadConstraint) int { + for _, c := range constraints { + if c.TopologyKey == v1.LabelHostname { + return nodes + } + } + return 0 +} From 916d96e42838b93d6cd4bd264908d0c2855bd7ff Mon Sep 17 00:00:00 2001 From: Caleb Woodbine Date: Wed, 1 Apr 2020 09:49:23 +1300 Subject: [PATCH 89/92] Update test/e2e/framework/events/events.go Co-Authored-By: Aaron Crickenberger --- test/e2e/framework/events/events.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/framework/events/events.go b/test/e2e/framework/events/events.go index 4b060d3fffee..08476cd74ba8 100644 --- a/test/e2e/framework/events/events.go +++ b/test/e2e/framework/events/events.go @@ -114,7 +114,7 @@ var _ = ginkgo.Describe("[sig-api-machinery] Events", func() { break } } - framework.ExpectEqual(foundCreatedEvent, false, "failed to find test event") + framework.ExpectEqual(foundCreatedEvent, false, "should not have found test event after deletion") }) 
}) From 2b325f07f497e9a5ea4cf3d14b4db2eba6dbfa24 Mon Sep 17 00:00:00 2001 From: Caleb Woodbine Date: Wed, 1 Apr 2020 11:00:20 +1300 Subject: [PATCH 90/92] Fix DeleteOptions value --- test/e2e/framework/events/events.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/framework/events/events.go b/test/e2e/framework/events/events.go index 08476cd74ba8..4890d87f77d3 100644 --- a/test/e2e/framework/events/events.go +++ b/test/e2e/framework/events/events.go @@ -98,7 +98,7 @@ var _ = ginkgo.Describe("[sig-api-machinery] Events", func() { ginkgo.By("deleting the test event") // delete original event - err = f.ClientSet.CoreV1().Events(f.Namespace.Name).Delete(context.TODO(), eventCreatedName, &metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().Events(f.Namespace.Name).Delete(context.TODO(), eventCreatedName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete the test event") ginkgo.By("listing all events in all namespaces") From b1d85a6b51d63342b5d14fece23be8d8a166ad00 Mon Sep 17 00:00:00 2001 From: Prasad Katti Date: Tue, 31 Mar 2020 16:43:28 -0700 Subject: [PATCH 91/92] minor update (#89638) * minor update Added a missing period. * add some more missing periods - add in the missing period in 2 more places - update generated files with `make update` --- api/openapi-spec/swagger.json | 4 ++-- .../k8s.io/kube-aggregator/pkg/apis/apiregistration/types.go | 2 +- .../pkg/apis/apiregistration/v1/generated.proto | 2 +- .../kube-aggregator/pkg/apis/apiregistration/v1/types.go | 2 +- .../pkg/apis/apiregistration/v1beta1/generated.proto | 2 +- .../kube-aggregator/pkg/apis/apiregistration/v1beta1/types.go | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 96e974bd206f..2550a0906367 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -17913,7 +17913,7 @@ }, "service": { "$ref": "#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.ServiceReference", - "description": "Service is a reference to the service for this API server. It must communicate on port 443 If the Service is nil, that means the handling for the API groupversion is handled locally on this server. The call will simply delegate to the normal handler chain to be fulfilled." + "description": "Service is a reference to the service for this API server. It must communicate on port 443. If the Service is nil, that means the handling for the API groupversion is handled locally on this server. The call will simply delegate to the normal handler chain to be fulfilled." }, "version": { "description": "Version is the API version this server hosts. For example, \"v1\"", @@ -18083,7 +18083,7 @@ }, "service": { "$ref": "#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1beta1.ServiceReference", - "description": "Service is a reference to the service for this API server. It must communicate on port 443 If the Service is nil, that means the handling for the API groupversion is handled locally on this server. The call will simply delegate to the normal handler chain to be fulfilled." + "description": "Service is a reference to the service for this API server. It must communicate on port 443. If the Service is nil, that means the handling for the API groupversion is handled locally on this server. The call will simply delegate to the normal handler chain to be fulfilled." }, "version": { "description": "Version is the API version this server hosts. 
For example, \"v1\"", diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/types.go b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/types.go index 1fd9780bba40..c5e2a76c0c29 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/types.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/types.go @@ -45,7 +45,7 @@ type ServiceReference struct { // Only https is supported, though you are able to disable certificate verification. type APIServiceSpec struct { // Service is a reference to the service for this API server. It must communicate - // on port 443 + // on port 443. // If the Service is nil, that means the handling for the API groupversion is handled locally on this server. // The call will simply delegate to the normal handler chain to be fulfilled. // +optional diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto index 4cc3b3b168aa..ef40ae04fa6a 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto +++ b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto @@ -73,7 +73,7 @@ message APIServiceList { // Only https is supported, though you are able to disable certificate verification. message APIServiceSpec { // Service is a reference to the service for this API server. It must communicate - // on port 443 + // on port 443. // If the Service is nil, that means the handling for the API groupversion is handled locally on this server. // The call will simply delegate to the normal handler chain to be fulfilled. // +optional diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/types.go b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/types.go index 38607dfe7d5e..49040cec4f40 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/types.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/types.go @@ -45,7 +45,7 @@ type ServiceReference struct { // Only https is supported, though you are able to disable certificate verification. type APIServiceSpec struct { // Service is a reference to the service for this API server. It must communicate - // on port 443 + // on port 443. // If the Service is nil, that means the handling for the API groupversion is handled locally on this server. // The call will simply delegate to the normal handler chain to be fulfilled. // +optional diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto index 4e78358e4375..82785861fac8 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto +++ b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto @@ -73,7 +73,7 @@ message APIServiceList { // Only https is supported, though you are able to disable certificate verification. message APIServiceSpec { // Service is a reference to the service for this API server. It must communicate - // on port 443 + // on port 443. // If the Service is nil, that means the handling for the API groupversion is handled locally on this server. // The call will simply delegate to the normal handler chain to be fulfilled. 
// +optional diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/types.go b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/types.go index 33731c3b3629..8f01f7136bc3 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/types.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/types.go @@ -45,7 +45,7 @@ type ServiceReference struct { // Only https is supported, though you are able to disable certificate verification. type APIServiceSpec struct { // Service is a reference to the service for this API server. It must communicate - // on port 443 + // on port 443. // If the Service is nil, that means the handling for the API groupversion is handled locally on this server. // The call will simply delegate to the normal handler chain to be fulfilled. // +optional From e2bc3a755fff2bbc9c15c4b2a5fc5e753724c74a Mon Sep 17 00:00:00 2001 From: Andrew Sy Kim Date: Wed, 1 Apr 2020 02:01:27 -0400 Subject: [PATCH 92/92] move well-known kubelet cloud provider annotations to k8s.io/cloud-provider (#88631) * move well-known kubelet cloud provider annotations to k8s.io/cloud-provider Signed-off-by: andrewsykim * cloud provider: rename AnnotationProvidedIPAddr to AnnotationAlphaProvidedIPAddr to indicate alpha status Signed-off-by: Andrew Sy Kim --- pkg/controller/cloud/BUILD | 2 -- pkg/controller/cloud/node_controller.go | 3 +-- pkg/controller/cloud/node_controller_test.go | 5 ++--- pkg/kubelet/apis/BUILD | 1 - pkg/kubelet/nodestatus/BUILD | 2 +- pkg/kubelet/nodestatus/setters.go | 4 ++-- staging/src/k8s.io/cloud-provider/api/BUILD | 5 ++++- .../k8s.io/cloud-provider/api}/well_known_annotations.go | 6 +++--- 8 files changed, 13 insertions(+), 15 deletions(-) rename {pkg/kubelet/apis => staging/src/k8s.io/cloud-provider/api}/well_known_annotations.go (82%) diff --git a/pkg/controller/cloud/BUILD b/pkg/controller/cloud/BUILD index 751f0a95c700..b43f80eec3c5 100644 --- a/pkg/controller/cloud/BUILD +++ b/pkg/controller/cloud/BUILD @@ -10,7 +10,6 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/controller:go_default_library", - "//pkg/kubelet/apis:go_default_library", "//pkg/util/node:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -43,7 +42,6 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/controller/testutil:go_default_library", - "//pkg/kubelet/apis:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/pkg/controller/cloud/node_controller.go b/pkg/controller/cloud/node_controller.go index d8353e3c943e..862e6b9de4a2 100644 --- a/pkg/controller/cloud/node_controller.go +++ b/pkg/controller/cloud/node_controller.go @@ -39,7 +39,6 @@ import ( cloudproviderapi "k8s.io/cloud-provider/api" cloudnodeutil "k8s.io/cloud-provider/node/helpers" "k8s.io/klog" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" nodeutil "k8s.io/kubernetes/pkg/util/node" ) @@ -565,7 +564,7 @@ func nodeAddressesChangeDetected(addressSet1, addressSet2 []v1.NodeAddress) bool func ensureNodeProvidedIPExists(node *v1.Node, nodeAddresses []v1.NodeAddress) (*v1.NodeAddress, bool) { var nodeIP *v1.NodeAddress nodeIPExists := false - if providedIP, ok := node.ObjectMeta.Annotations[kubeletapis.AnnotationProvidedIPAddr]; ok { + if providedIP, ok := 
node.ObjectMeta.Annotations[cloudproviderapi.AnnotationAlphaProvidedIPAddr]; ok { nodeIPExists = true for i := range nodeAddresses { if nodeAddresses[i].Address == providedIP { diff --git a/pkg/controller/cloud/node_controller_test.go b/pkg/controller/cloud/node_controller_test.go index 4a5d662c998b..450393688e67 100644 --- a/pkg/controller/cloud/node_controller_test.go +++ b/pkg/controller/cloud/node_controller_test.go @@ -34,7 +34,6 @@ import ( "k8s.io/cloud-provider" cloudproviderapi "k8s.io/cloud-provider/api" fakecloud "k8s.io/cloud-provider/fake" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" @@ -462,7 +461,7 @@ func Test_AddCloudNode(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Annotations: map[string]string{ - kubeletapis.AnnotationProvidedIPAddr: "10.0.0.1", + cloudproviderapi.AnnotationAlphaProvidedIPAddr: "10.0.0.1", }, }, Spec: v1.NodeSpec{ @@ -502,7 +501,7 @@ func Test_AddCloudNode(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Annotations: map[string]string{ - kubeletapis.AnnotationProvidedIPAddr: "10.0.0.1", + cloudproviderapi.AnnotationAlphaProvidedIPAddr: "10.0.0.1", }, }, Spec: v1.NodeSpec{ diff --git a/pkg/kubelet/apis/BUILD b/pkg/kubelet/apis/BUILD index 38505ce069e1..086ad1cc5b59 100644 --- a/pkg/kubelet/apis/BUILD +++ b/pkg/kubelet/apis/BUILD @@ -8,7 +8,6 @@ load( go_library( name = "go_default_library", srcs = [ - "well_known_annotations.go", "well_known_annotations_windows.go", "well_known_labels.go", ], diff --git a/pkg/kubelet/nodestatus/BUILD b/pkg/kubelet/nodestatus/BUILD index fcf5f4e9c306..7d9ceba7a8a0 100644 --- a/pkg/kubelet/nodestatus/BUILD +++ b/pkg/kubelet/nodestatus/BUILD @@ -8,7 +8,6 @@ go_library( deps = [ "//pkg/apis/core/v1/helper:go_default_library", "//pkg/features:go_default_library", - "//pkg/kubelet/apis:go_default_library", "//pkg/kubelet/cadvisor:go_default_library", "//pkg/kubelet/cm:go_default_library", "//pkg/kubelet/container:go_default_library", @@ -21,6 +20,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", + "//staging/src/k8s.io/cloud-provider/api:go_default_library", "//staging/src/k8s.io/component-base/version:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/k8s.io/klog:go_default_library", diff --git a/pkg/kubelet/nodestatus/setters.go b/pkg/kubelet/nodestatus/setters.go index ef535bc72bdb..393f5e2de35f 100644 --- a/pkg/kubelet/nodestatus/setters.go +++ b/pkg/kubelet/nodestatus/setters.go @@ -33,10 +33,10 @@ import ( utilnet "k8s.io/apimachinery/pkg/util/net" utilfeature "k8s.io/apiserver/pkg/util/feature" cloudprovider "k8s.io/cloud-provider" + cloudproviderapi "k8s.io/cloud-provider/api" "k8s.io/component-base/version" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/features" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/kubelet/cadvisor" "k8s.io/kubernetes/pkg/kubelet/cm" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -82,7 +82,7 @@ func NodeAddress(nodeIP net.IP, // typically Kubelet.nodeIP if node.ObjectMeta.Annotations == nil { node.ObjectMeta.Annotations = make(map[string]string) } - node.ObjectMeta.Annotations[kubeletapis.AnnotationProvidedIPAddr] = nodeIP.String() + 
node.ObjectMeta.Annotations[cloudproviderapi.AnnotationAlphaProvidedIPAddr] = nodeIP.String() } // If --cloud-provider=external and node address is already set, diff --git a/staging/src/k8s.io/cloud-provider/api/BUILD b/staging/src/k8s.io/cloud-provider/api/BUILD index 263a9a8ee0ad..2371259bd421 100644 --- a/staging/src/k8s.io/cloud-provider/api/BUILD +++ b/staging/src/k8s.io/cloud-provider/api/BUILD @@ -2,7 +2,10 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = ["well_known_taints.go"], + srcs = [ + "well_known_annotations.go", + "well_known_taints.go", + ], importmap = "k8s.io/kubernetes/vendor/k8s.io/cloud-provider/api", importpath = "k8s.io/cloud-provider/api", visibility = ["//visibility:public"], diff --git a/pkg/kubelet/apis/well_known_annotations.go b/staging/src/k8s.io/cloud-provider/api/well_known_annotations.go similarity index 82% rename from pkg/kubelet/apis/well_known_annotations.go rename to staging/src/k8s.io/cloud-provider/api/well_known_annotations.go index a87f1c073ad7..fd03ea0a0212 100644 --- a/pkg/kubelet/apis/well_known_annotations.go +++ b/staging/src/k8s.io/cloud-provider/api/well_known_annotations.go @@ -14,13 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. */ -package apis +package api const ( - // AnnotationProvidedIPAddr is a node IP annotation set by the "external" cloud provider. + // AnnotationAlphaProvidedIPAddr is a node IP annotation set by the "external" cloud provider. // When kubelet is started with the "external" cloud provider, then // it sets this annotation on the node to denote an ip address set from the // cmd line flag (--node-ip). This ip is verified with the cloudprovider as valid by // the cloud-controller-manager - AnnotationProvidedIPAddr = "alpha.kubernetes.io/provided-node-ip" + AnnotationAlphaProvidedIPAddr = "alpha.kubernetes.io/provided-node-ip" )
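Editor's postscript (not part of the patch series): the final patch only relocates the annotation constant, so for context here is a minimal, hypothetical sketch of how a consumer such as an external cloud-controller-manager might read the annotation through its new home in k8s.io/cloud-provider/api. The helper name providedNodeIP and the node fixture are illustrative, not code from the series; the import paths and the constant are those introduced by the patch.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	cloudproviderapi "k8s.io/cloud-provider/api"
)

// providedNodeIP returns the IP the kubelet recorded from its --node-ip
// flag, if the alpha annotation is present on the node.
func providedNodeIP(node *v1.Node) (string, bool) {
	ip, ok := node.ObjectMeta.Annotations[cloudproviderapi.AnnotationAlphaProvidedIPAddr]
	return ip, ok
}

func main() {
	// Hypothetical fixture; a real controller would receive the node
	// from an informer or lister rather than constructing it.
	node := &v1.Node{}
	node.ObjectMeta.Annotations = map[string]string{
		cloudproviderapi.AnnotationAlphaProvidedIPAddr: "10.0.0.1",
	}
	if ip, ok := providedNodeIP(node); ok {
		fmt.Printf("provided node IP: %s\n", ip)
	}
}

This mirrors the lookup that ensureNodeProvidedIPExists performs in pkg/controller/cloud/node_controller.go after the move: the kubelet writes the annotation in nodestatus.NodeAddress, and the cloud controller reads it back via the shared constant instead of importing pkg/kubelet/apis.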