feat(qrm): implement a native policy for cpu qrm plugin #144

Merged · 9 commits · Jul 27, 2023
4 changes: 4 additions & 0 deletions .licenseignore
@@ -1,2 +1,6 @@
vendor
cmd/katalyst-scheduler/app/server.go
pkg/agent/qrm-plugins/cpu/nativepolicy/calculator/cpu_assignment.go
pkg/agent/qrm-plugins/cpu/nativepolicy/calculator/cpu_assignment_test.go
pkg/agent/qrm-plugins/cpu/nativepolicy/policy_allocation_handlers.go
pkg/agent/qrm-plugins/cpu/nativepolicy/policy_hint_handlers.go
40 changes: 26 additions & 14 deletions cmd/katalyst-agent/app/options/qrm/cpu_plugin.go
@@ -23,24 +23,28 @@ import (
)

type CPUOptions struct {
PolicyName string
EnableCPUAdvisor bool
ReservedCPUCores int
SkipCPUStateCorruption bool
EnableCPUPressureEviction bool
EnableSyncingCPUIdle bool
EnableCPUIdle bool
PolicyName string
EnableCPUAdvisor bool
ReservedCPUCores int
SkipCPUStateCorruption bool
EnableCPUPressureEviction bool
EnableSyncingCPUIdle bool
EnableCPUIdle bool
EnableFullPhysicalCPUsOnly bool
CPUAllocationOption string
}

func NewCPUOptions() *CPUOptions {
return &CPUOptions{
PolicyName: "dynamic",
EnableCPUAdvisor: false,
ReservedCPUCores: 0,
SkipCPUStateCorruption: false,
EnableCPUPressureEviction: false,
EnableSyncingCPUIdle: false,
EnableCPUIdle: false,
PolicyName: "dynamic",
EnableCPUAdvisor: false,
ReservedCPUCores: 0,
SkipCPUStateCorruption: false,
EnableCPUPressureEviction: false,
EnableSyncingCPUIdle: false,
EnableCPUIdle: false,
Review comment (Collaborator):

maybe it's better to separate options/configs for different policies, i.e.

  1. define general options/configs in CPUOptions/CPUConfigurations
  2. define native-policy-specific options/configs in CPUNativePolicyOptions/CPUNativePolicyConfigurations
  3. define dynamic-policy-specific options/configs in CPUDynamicPolicyOptions/CPUDynamicPolicyConfiguration

(a sketch of this split follows the hunk below)

Reply (Member Author):

done

EnableFullPhysicalCPUsOnly: false,
CPUAllocationOption: "packed",
}
}
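A minimal sketch of the reviewer's suggested split; the grouping and field placement are illustrative, not necessarily the shape that was merged:

// Illustrative sketch of the suggested option split; field placement is a guess.
type CPUOptions struct {
    PolicyName             string
    ReservedCPUCores       int
    SkipCPUStateCorruption bool
    CPUDynamicPolicyOptions
    CPUNativePolicyOptions
}

// dynamic-policy-specific knobs
type CPUDynamicPolicyOptions struct {
    EnableCPUAdvisor          bool
    EnableCPUPressureEviction bool
    EnableSyncingCPUIdle      bool
    EnableCPUIdle             bool
}

// native-policy-specific knobs, ported from kubelet's CPU manager options
type CPUNativePolicyOptions struct {
    EnableFullPhysicalCPUsOnly bool
    CPUAllocationOption        string
}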

@@ -62,6 +66,12 @@ func (o *CPUOptions) AddFlags(fss *cliflag.NamedFlagSets) {
fs.BoolVar(&o.EnableCPUIdle, "enable-cpu-idle", o.EnableCPUIdle,
"if set true, we will enable cpu idle for "+
"specific cgroup paths and it requires --enable-syncing-cpu-idle=true to make effect")
fs.StringVar(&o.CPUAllocationOption, "cpu-allocation-option",
o.CPUAllocationOption, "The allocation option of cpu (packed/distributed). The default value is packed."+
"in cases where more than one NUMA node is required to satisfy the allocation.")
fs.BoolVar(&o.EnableFullPhysicalCPUsOnly, "enable-full-physical-cpus-only",
o.EnableFullPhysicalCPUsOnly, "if set true, we will enable extra allocation restrictions to "+
"avoid different containers to possibly end up on the same core.")
}

func (o *CPUOptions) ApplyTo(conf *qrmconfig.CPUQRMPluginConfig) error {
@@ -72,5 +82,7 @@ func (o *CPUOptions) ApplyTo(conf *qrmconfig.CPUQRMPluginConfig) error {
conf.EnableCPUPressureEviction = o.EnableCPUPressureEviction
conf.EnableSyncingCPUIdle = o.EnableSyncingCPUIdle
conf.EnableCPUIdle = o.EnableCPUIdle
conf.EnableFullPhysicalCPUsOnly = o.EnableFullPhysicalCPUsOnly
conf.CPUAllocationOption = o.CPUAllocationOption
return nil
}
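Since cpu-allocation-option admits only two values, a validation step along these lines would be a natural companion. This is a hypothetical sketch, not part of the diff above, and it assumes the file's existing fmt import:

// Hypothetical guard; the PR diff shown here does not include one.
func (o *CPUOptions) Validate() error {
    switch o.CPUAllocationOption {
    case "packed", "distributed":
        return nil
    default:
        return fmt.Errorf("invalid --cpu-allocation-option %q: expected packed or distributed", o.CPUAllocationOption)
    }
}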
2 changes: 1 addition & 1 deletion go.mod
@@ -162,7 +162,7 @@ replace (
k8s.io/kube-proxy => k8s.io/kube-proxy v0.24.6
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.24.6
k8s.io/kubectl => k8s.io/kubectl v0.24.6
k8s.io/kubelet => github.com/kubewharf/kubelet v1.24.6-kubewharf.5
k8s.io/kubelet => github.com/kubewharf/kubelet v1.24.6-kubewharf.6
k8s.io/kubernetes => k8s.io/kubernetes v1.24.6
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.24.6
k8s.io/metrics => k8s.io/metrics v0.24.6
4 changes: 2 additions & 2 deletions go.sum
@@ -545,8 +545,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kubewharf/katalyst-api v0.1.12 h1:dmfXMzknvgAPL/DI5hUmU9JGbrI6X3TUs4M9a0jZxYg=
github.com/kubewharf/katalyst-api v0.1.12/go.mod h1:iVILS5UL5PRtkUPH2Iu1K/gFGTPMNItnth5fmQ80VGE=
github.com/kubewharf/kubelet v1.24.6-kubewharf.5 h1:i3BcfBY3fFTzPWi5BCYyhkiSZCrIGczaGNAwgUvga6U=
github.com/kubewharf/kubelet v1.24.6-kubewharf.5/go.mod h1:MxbSZUx3wXztFneeelwWWlX7NAAStJ6expqq7gY2J3c=
github.com/kubewharf/kubelet v1.24.6-kubewharf.6 h1:36IfOYzDL4Eb8uwJgpq2080lIn04Il+MbmFx5yi46UA=
github.com/kubewharf/kubelet v1.24.6-kubewharf.6/go.mod h1:MxbSZUx3wXztFneeelwWWlX7NAAStJ6expqq7gY2J3c=
github.com/kyoh86/exportloopref v0.1.7/go.mod h1:h1rDl2Kdj97+Kwh4gdz3ujE7XHmH51Q0lUiZ1z4NLj8=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc=
4 changes: 3 additions & 1 deletion pkg/agent/qrm-plugins/cpu/cpu.go
@@ -19,8 +19,10 @@ package cpu
import (
"github.com/kubewharf/katalyst-core/cmd/katalyst-agent/app/agent/qrm"
"github.com/kubewharf/katalyst-core/pkg/agent/qrm-plugins/cpu/dynamicpolicy"
"github.com/kubewharf/katalyst-core/pkg/consts"
)

func init() {
qrm.RegisterCPUPolicyInitializer(dynamicpolicy.CPUResourcePluginPolicyNameDynamic, dynamicpolicy.NewDynamicPolicy)
qrm.RegisterCPUPolicyInitializer(consts.CPUResourcePluginPolicyNameDynamic, dynamicpolicy.NewDynamicPolicy)
qrm.RegisterCPUPolicyInitializer(consts.CPUResourcePluginPolicyNameNative, dynamicpolicy.NewDynamicPolicy)
}
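Note that in this hunk both policy names map to dynamicpolicy.NewDynamicPolicy. The registration itself is a table keyed by policy name; below is an assumed sketch of that registry's shape, where PolicyInitFunc is a stand-in name rather than the actual qrm package type:

// Assumed registry shape; the real qrm package may differ.
// PolicyInitFunc stands in for the real initializer signature
// (NewDynamicPolicy's, whose full parameter list is truncated in this diff).
type PolicyInitFunc func(agentCtx *agent.GenericContext, conf *config.Configuration) (bool, agent.Component, error)

var cpuPolicyInitializers = make(map[string]PolicyInitFunc)

func RegisterCPUPolicyInitializer(policyName string, initFunc PolicyInitFunc) {
    cpuPolicyInitializers[policyName] = initFunc
}

// At startup, the configured PolicyName (see CPUOptions above) selects the constructor.
func getCPUPolicyInitializer(policyName string) (PolicyInitFunc, error) {
    initFunc, ok := cpuPolicyInitializers[policyName]
    if !ok {
        return nil, fmt.Errorf("unsupported cpu resource plugin policy: %s", policyName)
    }
    return initFunc, nil
}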
13 changes: 7 additions & 6 deletions pkg/agent/qrm-plugins/cpu/dynamicpolicy/policy.go
@@ -46,6 +46,7 @@ import (
dynamicconfig "github.com/kubewharf/katalyst-core/pkg/config/agent/dynamic"
"github.com/kubewharf/katalyst-core/pkg/config/agent/dynamic/crd"
"github.com/kubewharf/katalyst-core/pkg/config/generic"
coreconsts "github.com/kubewharf/katalyst-core/pkg/consts"
"github.com/kubewharf/katalyst-core/pkg/metaserver"
"github.com/kubewharf/katalyst-core/pkg/metrics"
"github.com/kubewharf/katalyst-core/pkg/util/general"
@@ -138,7 +139,7 @@ func NewDynamicPolicy(agentCtx *agent.GenericContext, conf *config.Configuration
general.Infof("take reservedCPUs: %s by reservedCPUsNum: %d", reservedCPUs.String(), reservedCPUsNum)

stateImpl, stateErr := state.NewCheckpointState(conf.GenericQRMPluginConfiguration.StateFileDirectory, cpuPluginStateFileName,
CPUResourcePluginPolicyNameDynamic, agentCtx.CPUTopology, conf.SkipCPUStateCorruption)
coreconsts.CPUResourcePluginPolicyNameDynamic, agentCtx.CPUTopology, conf.SkipCPUStateCorruption)
if stateErr != nil {
return false, agent.ComponentStub{}, fmt.Errorf("NewCheckpointState failed with error: %v", stateErr)
}
@@ -149,7 +150,7 @@ func NewDynamicPolicy(agentCtx *agent.GenericContext, conf *config.Configuration

wrappedEmitter := agentCtx.EmitterPool.GetDefaultMetricsEmitter().WithTags(agentName, metrics.MetricTag{
Key: util.QRMPluginPolicyTagName,
Val: CPUResourcePluginPolicyNameDynamic,
Val: coreconsts.CPUResourcePluginPolicyNameDynamic,
})

var (
@@ -169,7 +170,7 @@ func NewDynamicPolicy(agentCtx *agent.GenericContext, conf *config.Configuration
// for those pods that have already been allocated reservedCPUs,
// we won't touch them and will wait for them to be deleted in the next update.
policyImplement := &DynamicPolicy{
name: fmt.Sprintf("%s_%s", agentName, CPUResourcePluginPolicyNameDynamic),
name: fmt.Sprintf("%s_%s", agentName, coreconsts.CPUResourcePluginPolicyNameDynamic),
stopCh: make(chan struct{}),

machineInfo: agentCtx.KatalystMachineInfo,
@@ -823,7 +824,7 @@ func (p *DynamicPolicy) removePod(podUID string) error {
}
delete(podEntries, podUID)

updatedMachineState, err := state.GenerateMachineStateFromPodEntries(p.machineInfo.CPUTopology, podEntries)
updatedMachineState, err := generateMachineStateFromPodEntries(p.machineInfo.CPUTopology, podEntries)
if err != nil {
return fmt.Errorf("GenerateMachineStateFromPodEntries failed with error: %v", err)
}
@@ -840,7 +841,7 @@ func (p *DynamicPolicy) removeContainer(podUID, containerName string) error {
}
delete(podEntries[podUID], containerName)

updatedMachineState, err := state.GenerateMachineStateFromPodEntries(p.machineInfo.CPUTopology, podEntries)
updatedMachineState, err := generateMachineStateFromPodEntries(p.machineInfo.CPUTopology, podEntries)
if err != nil {
return fmt.Errorf("GenerateMachineStateFromPodEntries failed with error: %v", err)
}
@@ -903,7 +904,7 @@ func (p *DynamicPolicy) cleanPools() error {
delete(podEntries, poolName)
}

machineState, err := state.GenerateMachineStateFromPodEntries(p.machineInfo.CPUTopology, podEntries)
machineState, err := generateMachineStateFromPodEntries(p.machineInfo.CPUTopology, podEntries)
if err != nil {
return fmt.Errorf("calculate machineState by podEntries failed with error: %v", err)
}
@@ -597,7 +597,7 @@ func (p *DynamicPolicy) applyBlocks(blockCPUSet advisorapi.BlockCPUSet, resp *ad
}

// use pod entries generated above to generate machine state info, and store in local state
newMachineState, err := state.GenerateMachineStateFromPodEntries(p.machineInfo.CPUTopology, newEntries)
newMachineState, err := generateMachineStateFromPodEntries(p.machineInfo.CPUTopology, newEntries)
if err != nil {
return fmt.Errorf("calculate machineState by newPodEntries failed with error: %v", err)
}
@@ -122,7 +122,7 @@ func (p *DynamicPolicy) sharedCoresAllocationHandler(_ context.Context,
p.state.SetAllocationInfo(allocationInfo.PodUid, allocationInfo.ContainerName, allocationInfo)
podEntries := p.state.GetPodEntries()

updatedMachineState, err := state.GenerateMachineStateFromPodEntries(p.machineInfo.CPUTopology, podEntries)
updatedMachineState, err := generateMachineStateFromPodEntries(p.machineInfo.CPUTopology, podEntries)
if err != nil {
general.Errorf("pod: %s/%s, container: %s GenerateMachineStateFromPodEntries failed with error: %v",
req.PodNamespace, req.PodName, req.ContainerName, err)
@@ -131,7 +131,7 @@ func (p *DynamicPolicy) sharedCoresAllocationHandler(_ context.Context,
p.state.SetMachineState(updatedMachineState)
}

resp, err := packAllocationResponse(allocationInfo, string(v1.ResourceCPU), util.OCIPropertyNameCPUSetCPUs, false, true, req)
resp, err := util.PackAllocationResponse(allocationInfo, string(v1.ResourceCPU), util.OCIPropertyNameCPUSetCPUs, false, true, req)
if err != nil {
general.Errorf("pod: %s/%s, container: %s packAllocationResponse failed with error: %v",
req.PodNamespace, req.PodName, req.ContainerName, err)
@@ -200,14 +200,14 @@ func (p *DynamicPolicy) reclaimedCoresAllocationHandler(_ context.Context,
p.state.SetAllocationInfo(allocationInfo.PodUid, allocationInfo.ContainerName, allocationInfo)
podEntries := p.state.GetPodEntries()

updatedMachineState, err := state.GenerateMachineStateFromPodEntries(p.machineInfo.CPUTopology, podEntries)
updatedMachineState, err := generateMachineStateFromPodEntries(p.machineInfo.CPUTopology, podEntries)
if err != nil {
general.Errorf("pod: %s/%s, container: %s GenerateMachineStateFromPodEntries failed with error: %v",
req.PodNamespace, req.PodName, req.ContainerName, err)
return nil, fmt.Errorf("GenerateMachineStateFromPodEntries failed with error: %v", err)
}

resp, err := packAllocationResponse(allocationInfo, string(v1.ResourceCPU), util.OCIPropertyNameCPUSetCPUs, false, true, req)
resp, err := util.PackAllocationResponse(allocationInfo, string(v1.ResourceCPU), util.OCIPropertyNameCPUSetCPUs, false, true, req)
if err != nil {
general.Errorf("pod: %s/%s, container: %s packAllocationResponse failed with error: %v",
req.PodNamespace, req.PodName, req.ContainerName, err)
@@ -254,7 +254,7 @@ func (p *DynamicPolicy) dedicatedCoresWithNUMABindingAllocationHandler(ctx conte
podEntries := p.state.GetPodEntries()

var err error
machineState, err = state.GenerateMachineStateFromPodEntries(p.machineInfo.CPUTopology, podEntries)
machineState, err = generateMachineStateFromPodEntries(p.machineInfo.CPUTopology, podEntries)
if err != nil {
general.Errorf("pod: %s/%s, container: %s GenerateMachineStateFromPodEntries failed with error: %v",
req.PodNamespace, req.PodName, req.ContainerName, err)
@@ -322,7 +322,7 @@ func (p *DynamicPolicy) dedicatedCoresWithNUMABindingAllocationHandler(ctx conte
p.state.SetAllocationInfo(allocationInfo.PodUid, allocationInfo.ContainerName, allocationInfo)
podEntries := p.state.GetPodEntries()

updatedMachineState, err := state.GenerateMachineStateFromPodEntries(p.machineInfo.CPUTopology, podEntries)
updatedMachineState, err := generateMachineStateFromPodEntries(p.machineInfo.CPUTopology, podEntries)
if err != nil {
general.Errorf("pod: %s/%s, container: %s GenerateMachineStateFromPodEntries failed with error: %v",
req.PodNamespace, req.PodName, req.ContainerName, err)
@@ -337,7 +337,7 @@ func (p *DynamicPolicy) dedicatedCoresWithNUMABindingAllocationHandler(ctx conte
return nil, fmt.Errorf("adjustAllocationEntries failed with error: %v", err)
}

resp, err := packAllocationResponse(allocationInfo, string(v1.ResourceCPU), util.OCIPropertyNameCPUSetCPUs, false, true, req)
resp, err := util.PackAllocationResponse(allocationInfo, string(v1.ResourceCPU), util.OCIPropertyNameCPUSetCPUs, false, true, req)
if err != nil {
general.Errorf("pod: %s/%s, container: %s PackResourceAllocationResponseByAllocationInfo failed with error: %v",
req.PodNamespace, req.PodName, req.ContainerName, err)
@@ -395,15 +395,15 @@ func (p *DynamicPolicy) dedicatedCoresWithNUMABindingAllocationSidecarHandler(_
p.state.SetAllocationInfo(allocationInfo.PodUid, allocationInfo.ContainerName, allocationInfo)
podEntries = p.state.GetPodEntries()

updatedMachineState, err := state.GenerateMachineStateFromPodEntries(p.machineInfo.CPUTopology, podEntries)
updatedMachineState, err := generateMachineStateFromPodEntries(p.machineInfo.CPUTopology, podEntries)
if err != nil {
general.Errorf("pod: %s/%s, container: %s GenerateMachineStateFromPodEntries failed with error: %v",
req.PodNamespace, req.PodName, req.ContainerName, err)
return nil, fmt.Errorf("GenerateMachineStateFromPodEntries failed with error: %v", err)
}
p.state.SetMachineState(updatedMachineState)

resp, err := packAllocationResponse(allocationInfo, string(v1.ResourceCPU), util.OCIPropertyNameCPUSetCPUs, false, true, req)
resp, err := util.PackAllocationResponse(allocationInfo, string(v1.ResourceCPU), util.OCIPropertyNameCPUSetCPUs, false, true, req)
if err != nil {
general.Errorf("pod: %s/%s, container: %s packAllocationResponse failed with error: %v",
req.PodNamespace, req.PodName, req.ContainerName, err)
@@ -799,7 +799,7 @@ func (p *DynamicPolicy) applyPoolsAndIsolatedInfo(poolsCPUSet map[string]machine
}

// use pod entries generated above to generate machine state info, and store in local state
machineState, err = state.GenerateMachineStateFromPodEntries(p.machineInfo.CPUTopology, newPodEntries)
machineState, err = generateMachineStateFromPodEntries(p.machineInfo.CPUTopology, newPodEntries)
if err != nil {
return fmt.Errorf("calculate machineState by newPodEntries failed with error: %v", err)
}
@@ -1048,43 +1048,3 @@ func (p *DynamicPolicy) takeCPUsForContainers(containersQuantityMap map[string]m
}
return containersCPUSet, availableCPUs, nil
}

// packAllocationResponse fills pluginapi.ResourceAllocationResponse with information from AllocationInfo and pluginapi.ResourceRequest
func packAllocationResponse(allocationInfo *state.AllocationInfo, resourceName, ociPropertyName string,
isNodeResource, isScalarResource bool, req *pluginapi.ResourceRequest) (*pluginapi.ResourceAllocationResponse, error) {
if allocationInfo == nil {
return nil, fmt.Errorf("packAllocationResponse got nil allocationInfo")
} else if req == nil {
return nil, fmt.Errorf("packAllocationResponse got nil request")
}

return &pluginapi.ResourceAllocationResponse{
PodUid: req.PodUid,
PodNamespace: req.PodNamespace,
PodName: req.PodName,
ContainerName: req.ContainerName,
ContainerType: req.ContainerType,
ContainerIndex: req.ContainerIndex,
PodRole: req.PodRole,
PodType: req.PodType,
ResourceName: resourceName,
AllocationResult: &pluginapi.ResourceAllocation{
ResourceAllocation: map[string]*pluginapi.ResourceAllocationInfo{
resourceName: {
OciPropertyName: ociPropertyName,
IsNodeResource: isNodeResource,
IsScalarResource: isScalarResource,
AllocatedQuantity: float64(allocationInfo.AllocationResult.Size()),
AllocationResult: allocationInfo.AllocationResult.String(),
ResourceHints: &pluginapi.ListOfTopologyHints{
Hints: []*pluginapi.TopologyHint{
req.Hint,
},
},
},
},
},
Labels: general.DeepCopyMap(req.Labels),
Annotations: general.DeepCopyMap(req.Annotations),
}, nil
}
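The private packAllocationResponse above is deleted in favor of a shared helper, invoked at the call sites earlier in this diff as util.PackAllocationResponse. A sketch of such a call with the parameters annotated, matching the removed signature (the util version's exact signature is assumed):

// Annotated call, matching the call sites above.
resp, err := util.PackAllocationResponse(allocationInfo,
    string(v1.ResourceCPU),         // resource name echoed in the response
    util.OCIPropertyNameCPUSetCPUs, // OCI property the cpuset string binds to
    false,                          // isNodeResource
    true,                           // isScalarResource
    req)                            // request supplying pod/container identity and the admitted hint
if err != nil {
    return nil, fmt.Errorf("PackAllocationResponse failed with error: %v", err)
}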
@@ -211,7 +211,7 @@ func (p *DynamicPolicy) clearResidualState() {
delete(podEntries, podUID)
}

updatedMachineState, err := state.GenerateMachineStateFromPodEntries(p.machineInfo.CPUTopology, podEntries)
updatedMachineState, err := generateMachineStateFromPodEntries(p.machineInfo.CPUTopology, podEntries)
if err != nil {
general.Errorf("GenerateMachineStateFromPodEntries failed with error: %v", err)
return
43 changes: 2 additions & 41 deletions pkg/agent/qrm-plugins/cpu/dynamicpolicy/policy_hint_handlers.go
@@ -85,7 +85,7 @@ func (p *DynamicPolicy) dedicatedCoresWithNUMABindingHintHandler(_ context.Conte

allocationInfo := p.state.GetAllocationInfo(req.PodUid, req.ContainerName)
if allocationInfo != nil {
hints = regenerateHints(allocationInfo, reqInt)
hints = util.RegenerateHints(allocationInfo, reqInt)

// regenerateHints failed. need to clear container record and re-calculate.
if hints == nil {
@@ -96,7 +96,7 @@ }
}

var err error
machineState, err = state.GenerateMachineStateFromPodEntries(p.machineInfo.CPUTopology, podEntries)
machineState, err = generateMachineStateFromPodEntries(p.machineInfo.CPUTopology, podEntries)
if err != nil {
general.Errorf("pod: %s/%s, container: %s GenerateMachineStateFromPodEntries failed with error: %v",
req.PodNamespace, req.PodName, req.ContainerName, err)
@@ -221,42 +221,3 @@ func (p *DynamicPolicy) calculateHints(reqInt int, machineState state.NUMANodeMa

return hints, nil
}

// regenerateHints regenerates hints for a container that has already been allocated cpus;
// it assembles hints based on the already-existing AllocationInfo,
// without any calculation logic at all
func regenerateHints(allocationInfo *state.AllocationInfo, reqInt int) map[string]*pluginapi.ListOfTopologyHints {
hints := map[string]*pluginapi.ListOfTopologyHints{}

if allocationInfo.OriginalAllocationResult.Size() < reqInt {
general.ErrorS(nil, "cpus already allocated with smaller quantity than requested",
"podUID", allocationInfo.PodUid,
"containerName", allocationInfo.ContainerName,
"requestedResource", reqInt,
"allocatedSize", allocationInfo.OriginalAllocationResult.Size())

return nil
}

allocatedNumaNodes := make([]uint64, 0, len(allocationInfo.TopologyAwareAssignments))
for numaNode, cset := range allocationInfo.TopologyAwareAssignments {
if cset.Size() > 0 {
allocatedNumaNodes = append(allocatedNumaNodes, uint64(numaNode))
}
}

general.InfoS("regenerating machineInfo hints, cpus was already allocated to pod",
"podNamespace", allocationInfo.PodNamespace,
"podName", allocationInfo.PodName,
"containerName", allocationInfo.ContainerName,
"hint", allocatedNumaNodes)
hints[string(v1.ResourceCPU)] = &pluginapi.ListOfTopologyHints{
Hints: []*pluginapi.TopologyHint{
{
Nodes: allocatedNumaNodes,
Preferred: true,
},
},
}
return hints
}
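regenerateHints is likewise promoted to the shared util package and invoked above as util.RegenerateHints, presumably so the new native policy's hint handlers can reuse it. A rough sketch of such a caller follows; the nativePolicy receiver and its fields are hypothetical:

// Hypothetical native-policy caller; the type and its fields are illustrative.
func (p *nativePolicy) getTopologyHints(req *pluginapi.ResourceRequest, reqInt int) map[string]*pluginapi.ListOfTopologyHints {
    // If the container was already admitted, rebuild hints from the recorded
    // allocation instead of recalculating.
    if allocationInfo := p.state.GetAllocationInfo(req.PodUid, req.ContainerName); allocationInfo != nil {
        if hints := util.RegenerateHints(allocationInfo, reqInt); hints != nil {
            return hints
        }
        // Regeneration failed: the record is stale, so clear it and fall
        // through to a fresh calculation, as the dynamic policy does above.
    }
    // ... fresh hint calculation for the native policy ...
    return nil
}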