e2e: dra test driver update #116181

Merged: 2 commits, Mar 1, 2023
5 changes: 1 addition & 4 deletions test/e2e/dra/deploy.go
@@ -140,10 +140,7 @@ func (d *Driver) SetUp(nodes *Nodes, resources app.Resources) {
d.ctx = ctx
d.cleanup = append(d.cleanup, cancel)

// The controller is easy: we simply connect to the API server. It
// would be slightly nicer if we had a way to wait for all goroutines, but
// SharedInformerFactory has no API for that. At least we can wait
// for our own goroutine to stop once the context gets cancelled.
// The controller is easy: we simply connect to the API server.
d.Controller = app.NewController(d.f.ClientSet, d.Name, resources)
d.wg.Add(1)
go func() {
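The deploy.go change above only trims the comment: the test still runs the controller in a goroutine tracked by a WaitGroup and cancels its context during cleanup, but waiting for the informer goroutines is now handled inside Run itself (see controller.go below). A minimal, self-contained sketch of that setup/teardown pattern; runController here is a hypothetical stand-in for app.NewController(...).Run, and the worker count is made up:

```go
package main

import (
	"context"
	"sync"
)

// runController is a stand-in for (*ExampleController).Run; the real method
// connects to the API server and blocks until ctx is canceled.
func runController(ctx context.Context, workers int) {
	<-ctx.Done()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		runController(ctx, 5 /* workers, value chosen for illustration */)
	}()

	// Teardown, analogous to what deploy.go does via d.cleanup and d.wg:
	cancel()  // stop the controller ...
	wg.Wait() // ... and wait for its goroutine to return.
}
```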
47 changes: 15 additions & 32 deletions test/e2e/dra/test-driver/app/controller.go
@@ -58,7 +58,6 @@ type ExampleController struct {
resources Resources
driverName string

// mutex must be locked at the gRPC call level.
mutex sync.Mutex
// allocated maps claim.UID to the node (if network-attached) or empty (if not).
allocated map[types.UID]string
@@ -77,13 +76,13 @@ func NewController(clientset kubernetes.Interface, driverName string, resources
return c
}

func (c *ExampleController) Run(ctx context.Context, workers int) *ExampleController {
func (c *ExampleController) Run(ctx context.Context, workers int) {
informerFactory := informers.NewSharedInformerFactory(c.clientset, 0 /* resync period */)
ctrl := controller.New(ctx, c.driverName, c, c.clientset, informerFactory)
informerFactory.Start(ctx.Done())
ctrl.Run(workers)

return c
// If we get here, the context was canceled and we can wait for informer factory goroutines.
informerFactory.Shutdown()
}

type parameters struct {
@@ -164,7 +163,7 @@ func (c *ExampleController) Allocate(ctx context.Context, claim *resourcev1alpha
func (c *ExampleController) allocate(ctx context.Context, claim *resourcev1alpha1.ResourceClaim, claimParameters interface{}, class *resourcev1alpha1.ResourceClass, classParameters interface{}, selectedNode string) (result *resourcev1alpha1.AllocationResult, err error) {
logger := klog.LoggerWithValues(klog.LoggerWithName(klog.FromContext(ctx), "Allocate"), "claim", klog.KObj(claim), "uid", claim.UID)
defer func() {
logger.Info("done", "result", prettyPrint(result), "err", err)
logger.V(3).Info("done", "result", result, "err", err)
}()

c.mutex.Lock()
@@ -176,9 +175,9 @@ func (c *ExampleController) allocate(ctx context.Context, claim *resourcev1alpha
// Idempotent result - kind of. We don't check whether
// the parameters changed in the meantime. A real
// driver would have to do that.
logger.Info("already allocated")
logger.V(3).Info("already allocated")
} else {
logger.Info("starting", "selectedNode", selectedNode)
logger.V(3).Info("starting", "selectedNode", selectedNode)
if c.resources.NodeLocal {
node = selectedNode
if node == "" {
@@ -197,7 +196,7 @@ func (c *ExampleController) allocate(ctx context.Context, claim *resourcev1alpha
// Pick randomly. We could also prefer the one with the least
// number of allocations (even spreading) or the most (packing).
node = viableNodes[rand.Intn(len(viableNodes))]
logger.Info("picked a node ourselves", "selectedNode", selectedNode)
logger.V(3).Info("picked a node ourselves", "selectedNode", selectedNode)
} else if !contains(c.resources.Nodes, node) ||
c.resources.MaxAllocations > 0 &&
c.countAllocations(node) >= c.resources.MaxAllocations {
@@ -259,23 +258,27 @@ func (c *ExampleController) Deallocate(ctx context.Context, claim *resourcev1alp
defer c.mutex.Unlock()

if _, ok := c.allocated[claim.UID]; !ok {
logger.Info("already deallocated")
logger.V(3).Info("already deallocated")
return nil
}

logger.Info("done")
logger.V(3).Info("done")
c.numDeallocations++
delete(c.allocated, claim.UID)
return nil
}

func (c *ExampleController) UnsuitableNodes(ctx context.Context, pod *v1.Pod, claims []*controller.ClaimAllocation, potentialNodes []string) (finalErr error) {
logger := klog.LoggerWithValues(klog.LoggerWithName(klog.FromContext(ctx), "UnsuitableNodes"), "pod", klog.KObj(pod))
logger.Info("starting", "claim", prettyPrintSlice(claims), "potentialNodes", potentialNodes)
c.mutex.Lock()
defer c.mutex.Unlock()

logger.V(3).Info("starting", "claims", claims, "potentialNodes", potentialNodes)
defer func() {
// UnsuitableNodes is the same for all claims.
logger.Info("done", "unsuitableNodes", claims[0].UnsuitableNodes, "err", finalErr)
logger.V(3).Info("done", "unsuitableNodes", claims[0].UnsuitableNodes, "err", finalErr)
}()

if c.resources.MaxAllocations == 0 {
// All nodes are suitable.
return nil
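The hunk above also takes c.mutex for the duration of UnsuitableNodes, since countAllocations reads the shared allocated map that Allocate and Deallocate mutate. A hedged, self-contained sketch of that pattern; toyController, its fields, and the method signatures are simplified stand-ins, not the real driver code:

```go
package main

import (
	"fmt"
	"sync"
)

// toyController mirrors the relevant parts of ExampleController: a mutex that
// each gRPC-level call holds while it touches the shared allocation map.
type toyController struct {
	mutex     sync.Mutex
	allocated map[string]string // claim UID -> node
}

// countAllocations walks the shared map; the caller must hold c.mutex.
func (c *toyController) countAllocations(node string) int {
	count := 0
	for _, n := range c.allocated {
		if n == node {
			count++
		}
	}
	return count
}

// unsuitableNodes locks once per call, as the PR now does in UnsuitableNodes,
// so the reads below cannot race with concurrent Allocate/Deallocate updates.
func (c *toyController) unsuitableNodes(potentialNodes []string, max int) []string {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	var unsuitable []string
	for _, node := range potentialNodes {
		if max > 0 && c.countAllocations(node) >= max {
			unsuitable = append(unsuitable, node)
		}
	}
	return unsuitable
}

func main() {
	c := &toyController{allocated: map[string]string{"uid-1": "node-a"}}
	fmt.Println(c.unsuitableNodes([]string{"node-a", "node-b"}, 1)) // [node-a]
}
```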
@@ -336,23 +339,3 @@ func contains[T comparable](list []T, value T) bool {

return false
}

func prettyPrint[T any](obj *T) interface{} {
if obj == nil {
return "<nil>"
}
return *obj
}

// prettyPrintSlice prints the values the slice points to, not the pointers.
func prettyPrintSlice[T any](slice []*T) interface{} {
var values []interface{}
for _, v := range slice {
if v == nil {
values = append(values, "<nil>")
} else {
values = append(values, *v)
}
}
return values
}
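
The logging changes in this file follow one pattern: per-claim chatter moves from Info to V(3) so it only appears at higher verbosity, and values such as result and claims are passed directly as structured key/value pairs, which is why the prettyPrint and prettyPrintSlice helpers removed above are no longer needed. A small, self-contained illustration of the verbosity gate; the flag value and the keys are invented for the example:

```go
package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	flag.Set("v", "2") // example verbosity; at -v=2 the V(3) message below is suppressed
	flag.Parse()
	defer klog.Flush()

	logger := klog.Background()
	logger.Info("always emitted")
	logger.V(3).Info("only emitted at -v=3 or higher", "claim", "example", "node", "node-a")
}
```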