From 106fce6fae114710a599cd14becf19468bad28fb Mon Sep 17 00:00:00 2001
From: Patrick Ohly <patrick.ohly@intel.com>
Date: Wed, 1 Mar 2023 15:00:30 +0100
Subject: [PATCH 1/2] e2e dra: improve goroutine handling

There is an API now to wait for informer factory goroutine termination.
While at it, an incorrect comment for mutex locking gets removed.
---
 test/e2e/dra/deploy.go                     | 5 +----
 test/e2e/dra/test-driver/app/controller.go | 7 +++----
 2 files changed, 4 insertions(+), 8 deletions(-)

diff --git a/test/e2e/dra/deploy.go b/test/e2e/dra/deploy.go
index ffb80764404e..bf5cb8e1d82d 100644
--- a/test/e2e/dra/deploy.go
+++ b/test/e2e/dra/deploy.go
@@ -140,10 +140,7 @@ func (d *Driver) SetUp(nodes *Nodes, resources app.Resources) {
 	d.ctx = ctx
 	d.cleanup = append(d.cleanup, cancel)
 
-	// The controller is easy: we simply connect to the API server. It
-	// would be slightly nicer if we had a way to wait for all goroutines, but
-	// SharedInformerFactory has no API for that. At least we can wait
-	// for our own goroutine to stop once the context gets cancelled.
+	// The controller is easy: we simply connect to the API server.
 	d.Controller = app.NewController(d.f.ClientSet, d.Name, resources)
 	d.wg.Add(1)
 	go func() {
diff --git a/test/e2e/dra/test-driver/app/controller.go b/test/e2e/dra/test-driver/app/controller.go
index 243a62ffef0d..6fbae0aabddb 100644
--- a/test/e2e/dra/test-driver/app/controller.go
+++ b/test/e2e/dra/test-driver/app/controller.go
@@ -58,7 +58,6 @@ type ExampleController struct {
 	resources  Resources
 	driverName string
 
-	// mutex must be locked at the gRPC call level.
 	mutex sync.Mutex
 	// allocated maps claim.UID to the node (if network-attached) or empty (if not).
 	allocated map[types.UID]string
@@ -77,13 +76,13 @@ func NewController(clientset kubernetes.Interface, driverName string, resources
 	return c
 }
 
-func (c *ExampleController) Run(ctx context.Context, workers int) *ExampleController {
+func (c *ExampleController) Run(ctx context.Context, workers int) {
 	informerFactory := informers.NewSharedInformerFactory(c.clientset, 0 /* resync period */)
 	ctrl := controller.New(ctx, c.driverName, c, c.clientset, informerFactory)
 	informerFactory.Start(ctx.Done())
 	ctrl.Run(workers)
-
-	return c
+	// If we get here, the context was canceled and we can wait for informer factory goroutines.
+	informerFactory.Shutdown()
 }
 
 type parameters struct {
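For reviewers who have not used the new API yet: SharedInformerFactory gained
Shutdown() in client-go v0.26. Start() launches one goroutine per registered
informer, the stop channel passed to Start() stops them, and Shutdown() blocks
until all of those goroutines have returned. A minimal sketch of that
lifecycle, mirroring ExampleController.Run above; the package name and the
runInformers helper with its pod informer are invented for illustration, not
code from this series:

package example

import (
	"context"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

// runInformers demonstrates the Start/Shutdown pairing: Start launches
// the informer goroutines, cancellation of ctx stops them, and Shutdown
// waits until they have exited.
func runInformers(ctx context.Context, clientset kubernetes.Interface) {
	factory := informers.NewSharedInformerFactory(clientset, 0 /* resync period */)

	// Informers must be requested before Start; Start only launches
	// goroutines for informers that are already registered.
	_ = factory.Core().V1().Pods().Informer()

	factory.Start(ctx.Done())

	// Block until the caller cancels; the informers stop because they
	// watch the same ctx.Done() channel that was passed to Start.
	<-ctx.Done()

	// Shutdown blocks until every goroutine started by this factory has
	// terminated, so no goroutines leak past this point.
	factory.Shutdown()
}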
From 74785074c6a920f839747618b63f7ec1d67b7df4 Mon Sep 17 00:00:00 2001
From: Patrick Ohly <patrick.ohly@intel.com>
Date: Wed, 1 Mar 2023 15:02:03 +0100
Subject: [PATCH 2/2] e2e dra: update logging

When running as part of the scheduler_perf benchmark testing, we want
to print less information by default, so we should use V to limit
verbosity.

Pretty-printing doesn't belong in "application" code. I am moving that
into the ktesting formatting
(https://github.com/kubernetes/kubernetes/pull/116180).
---
 test/e2e/dra/test-driver/app/controller.go | 40 +++++++---------------
 1 file changed, 12 insertions(+), 28 deletions(-)

diff --git a/test/e2e/dra/test-driver/app/controller.go b/test/e2e/dra/test-driver/app/controller.go
index 6fbae0aabddb..22e12207a925 100644
--- a/test/e2e/dra/test-driver/app/controller.go
+++ b/test/e2e/dra/test-driver/app/controller.go
@@ -163,7 +163,7 @@ func (c *ExampleController) Allocate(ctx context.Context, claim *resourcev1alpha
 func (c *ExampleController) allocate(ctx context.Context, claim *resourcev1alpha1.ResourceClaim, claimParameters interface{}, class *resourcev1alpha1.ResourceClass, classParameters interface{}, selectedNode string) (result *resourcev1alpha1.AllocationResult, err error) {
 	logger := klog.LoggerWithValues(klog.LoggerWithName(klog.FromContext(ctx), "Allocate"), "claim", klog.KObj(claim), "uid", claim.UID)
 	defer func() {
-		logger.Info("done", "result", prettyPrint(result), "err", err)
+		logger.V(3).Info("done", "result", result, "err", err)
 	}()
 
 	c.mutex.Lock()
@@ -175,9 +175,9 @@ func (c *ExampleController) allocate(ctx context.Context, claim *resourcev1alpha
 		// Idempotent result - kind of. We don't check whether
 		// the parameters changed in the meantime. A real
 		// driver would have to do that.
-		logger.Info("already allocated")
+		logger.V(3).Info("already allocated")
 	} else {
-		logger.Info("starting", "selectedNode", selectedNode)
+		logger.V(3).Info("starting", "selectedNode", selectedNode)
 		if c.resources.NodeLocal {
 			node = selectedNode
 			if node == "" {
@@ -196,7 +196,7 @@ func (c *ExampleController) allocate(ctx context.Context, claim *resourcev1alpha
 				// Pick randomly. We could also prefer the one with the least
 				// number of allocations (even spreading) or the most (packing).
 				node = viableNodes[rand.Intn(len(viableNodes))]
-				logger.Info("picked a node ourselves", "selectedNode", selectedNode)
+				logger.V(3).Info("picked a node ourselves", "selectedNode", selectedNode)
 			} else if !contains(c.resources.Nodes, node) ||
 				c.resources.MaxAllocations > 0 &&
 					c.countAllocations(node) >= c.resources.MaxAllocations {
@@ -258,11 +258,11 @@ func (c *ExampleController) Deallocate(ctx context.Context, claim *resourcev1alp
 	defer c.mutex.Unlock()
 
 	if _, ok := c.allocated[claim.UID]; !ok {
-		logger.Info("already deallocated")
+		logger.V(3).Info("already deallocated")
 		return nil
 	}
 
-	logger.Info("done")
+	logger.V(3).Info("done")
 	c.numDeallocations++
 	delete(c.allocated, claim.UID)
 	return nil
@@ -270,11 +270,15 @@ func (c *ExampleController) Deallocate(ctx context.Context, claim *resourcev1alp
 
 func (c *ExampleController) UnsuitableNodes(ctx context.Context, pod *v1.Pod, claims []*controller.ClaimAllocation, potentialNodes []string) (finalErr error) {
 	logger := klog.LoggerWithValues(klog.LoggerWithName(klog.FromContext(ctx), "UnsuitableNodes"), "pod", klog.KObj(pod))
-	logger.Info("starting", "claim", prettyPrintSlice(claims), "potentialNodes", potentialNodes)
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	logger.V(3).Info("starting", "claims", claims, "potentialNodes", potentialNodes)
 	defer func() {
 		// UnsuitableNodes is the same for all claims.
-		logger.Info("done", "unsuitableNodes", claims[0].UnsuitableNodes, "err", finalErr)
+		logger.V(3).Info("done", "unsuitableNodes", claims[0].UnsuitableNodes, "err", finalErr)
 	}()
+
 	if c.resources.MaxAllocations == 0 {
 		// All nodes are suitable.
 		return nil
@@ -335,23 +339,3 @@ func contains[T comparable](list []T, value T) bool {
 
 	return false
 }
-
-func prettyPrint[T any](obj *T) interface{} {
-	if obj == nil {
-		return "<nil>"
-	}
-	return *obj
-}
-
-// prettyPrintSlice prints the values the slice points to, not the pointers.
-func prettyPrintSlice[T any](slice []*T) interface{} {
-	var values []interface{}
-	for _, v := range slice {
-		if v == nil {
-			values = append(values, "<nil>")
-		} else {
-			values = append(values, *v)
-		}
-	}
-	return values
-}
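A note on the V(3) gating for anyone tuning the benchmark output: klog only
emits a V(n) message when the process runs at verbosity n or higher, so all of
the calls converted above go quiet at the default level. A minimal sketch of
that behavior; the flag setup and the log messages are invented for
illustration, not taken from the test driver:

package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil) // registers -v and the other klog flags
	flag.Parse()

	logger := klog.Background()

	// Printed at any verbosity, including the default -v=0.
	logger.Info("controller starting")

	// Printed only when running with -v=3 or higher; at the default
	// verbosity this call is a cheap no-op, which is what keeps the
	// scheduler_perf benchmark output small.
	logger.V(3).Info("allocation done", "node", "worker-1")
}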