all: switch to k8s.io/utils/cpuset.
Instead of importing cpuset.CPUSet directly from the kubelet,
switch to using k8s.io/utils/cpuset. However, rather than importing
it directly throughout the codebase, wrap it in a single place
(pkg/utils/cpuset) and import it from there everywhere else.

Signed-off-by: Krisztian Litkey <krisztian.litkey@intel.com>
klihub committed Aug 14, 2023
1 parent ea08011 commit 68a8ade
Showing 35 changed files with 187 additions and 179 deletions.
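
The new wrapper package itself (pkg/utils/cpuset) is not visible in the diff below, so the following is only a sketch of what such a thin alias layer could look like. The type alias and the New/Parse forwarders mirror the k8s.io/utils/cpuset API; the MustParse helper is an assumption inferred from the test changes further down, since k8s.io/utils/cpuset itself only exposes Parse.

```go
// Package cpuset is a thin wrapper around k8s.io/utils/cpuset so the rest of
// the tree has a single import path for CPU set handling.
//
// Sketch only; the actual contents of pkg/utils/cpuset are not part of this diff.
package cpuset

import (
	"fmt"

	"k8s.io/utils/cpuset"
)

// CPUSet is re-exported so callers never import k8s.io/utils/cpuset directly.
type CPUSet = cpuset.CPUSet

// New creates a CPUSet from the given CPU IDs.
func New(cpus ...int) CPUSet { return cpuset.New(cpus...) }

// Parse parses a Linux cpuset string (e.g. "2,3,10-14") into a CPUSet.
func Parse(s string) (CPUSet, error) { return cpuset.Parse(s) }

// MustParse is like Parse but panics on error. A helper with this name is
// assumed here because the test changes below call cpuset.MustParse.
func MustParse(s string) CPUSet {
	cset, err := Parse(s)
	if err != nil {
		panic(fmt.Sprintf("failed to parse cpuset %q: %v", s, err))
	}
	return cset
}
```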
4 changes: 2 additions & 2 deletions go.mod
@@ -25,8 +25,9 @@ require (
k8s.io/apimachinery v0.25.12
k8s.io/client-go v0.25.12
k8s.io/cri-api v0.25.12
k8s.io/klog/v2 v2.70.1
k8s.io/klog/v2 v2.80.1
k8s.io/kubernetes v1.25.12
k8s.io/utils v0.0.0-20230505201702-9f6742963106
sigs.k8s.io/yaml v1.3.0
)

@@ -119,7 +120,6 @@ require (
k8s.io/kube-scheduler v0.24.1 // indirect
k8s.io/kubelet v0.24.1 // indirect
k8s.io/mount-utils v0.24.1 // indirect
k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
)
9 changes: 4 additions & 5 deletions go.sum
@@ -373,7 +373,6 @@ github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q=
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
@@ -758,8 +757,8 @@ k8s.io/csi-translation-lib v0.25.12 h1:U7V/1al+tN39BtcBMjCtMLFEwpvKjDSoZ9WipBgVb
k8s.io/csi-translation-lib v0.25.12/go.mod h1:y5jczv+eEJY26/8p/idJfAkUdxD7WjmEFRfN+rgVAwU=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ=
k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA=
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU=
k8s.io/kube-scheduler v0.25.12 h1:uCJmiTr5TedKyYg1tsoLDZ2GBBl/2ERSuRtLX0ikiyA=
@@ -771,8 +770,8 @@ k8s.io/kubernetes v1.25.12/go.mod h1:CjSm5tJyKxHyGhK5aAID68YMErzDhAjWLL7Nd7NMonU
k8s.io/mount-utils v0.25.12 h1:wHRywLgHq4VxEFpTDrPsrMNjtnrdIHTggGyReU9wIHc=
k8s.io/mount-utils v0.25.12/go.mod h1:IM9QOFh15E1a4Nb6Rcn8FJ9Z1PbBpuyAPCty/qvKSAw=
k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4=
k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20230505201702-9f6742963106 h1:EObNQ3TW2D+WptiYXlApGNLVy0zm/JIBVY9i+M4wpAU=
k8s.io/utils v0.0.0-20230505201702-9f6742963106/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
27 changes: 13 additions & 14 deletions pkg/cpuallocator/allocator.go
@@ -18,11 +18,10 @@ import (
"fmt"
"sort"

"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"

logger "github.com/intel/cri-resource-manager/pkg/log"
"github.com/intel/cri-resource-manager/pkg/sysfs"
"github.com/intel/cri-resource-manager/pkg/utils"
"github.com/intel/cri-resource-manager/pkg/utils/cpuset"
"github.com/intel/goresctrl/pkg/sst"
idset "github.com/intel/goresctrl/pkg/utils"
)
@@ -192,7 +191,7 @@ func (a *allocatorHelper) takeIdleCores() {
if cset.IsEmpty() {
return false
}
return cset.Intersection(a.from).Equals(cset) && cset.ToSlice()[0] == int(id)
return cset.Intersection(a.from).Equals(cset) && cset.List()[0] == int(id)
})

// sorted by id
@@ -271,8 +270,8 @@ func (a *allocatorHelper) takeIdleThreads() {
return iPkg < jPkg
}

iCset := cpuset.NewCPUSet(int(cores[i]))
jCset := cpuset.NewCPUSet(int(cores[j]))
iCset := cpuset.New(int(cores[i]))
jCset := cpuset.New(int(cores[j]))
if res := a.topology.cpuPriorities.cmpCPUSet(iCset, jCset, a.prefer, 0); res != 0 {
return res > 0
}
@@ -298,7 +297,7 @@ func (a *allocatorHelper) takeIdleThreads() {
for _, id := range cores {
cset := a.topology.core[id].Difference(offline)
a.Debug(" => considering thread %v (#%s)...", id, cset)
cset = cpuset.NewCPUSet(int(id))
cset = cpuset.New(int(id))
a.result = a.result.Union(cset)
a.from = a.from.Difference(cset)
a.cnt -= cset.Size()
@@ -313,10 +312,10 @@ func (a *allocatorHelper) takeIdleThreads() {
func (a *allocatorHelper) takeAny() {
a.Debug("* takeAnyCores()...")

cpus := a.from.ToSlice()
cpus := a.from.List()

if len(cpus) >= a.cnt {
cset := cpuset.NewCPUSet(cpus[0:a.cnt]...)
cset := cpuset.New(cpus[0:a.cnt]...)
a.result = a.result.Union(cset)
a.from = a.from.Difference(cset)
a.cnt = 0
@@ -342,7 +341,7 @@ func (a *allocatorHelper) allocate() cpuset.CPUSet {
return a.result
}

return cpuset.NewCPUSet()
return cpuset.New()
}

func (ca *cpuAllocator) allocateCpus(from *cpuset.CPUSet, cnt int, prefer CPUPriority) (cpuset.CPUSet, error) {
@@ -351,9 +350,9 @@ func (ca *cpuAllocator) allocateCpus(from *cpuset.CPUSet, cnt int, prefer CPUPri

switch {
case from.Size() < cnt:
result, err = cpuset.NewCPUSet(), fmt.Errorf("cpuset %s does not have %d CPUs", from, cnt)
result, err = cpuset.New(), fmt.Errorf("cpuset %s does not have %d CPUs", from, cnt)
case from.Size() == cnt:
result, err, *from = from.Clone(), nil, cpuset.NewCPUSet()
result, err, *from = from.Clone(), nil, cpuset.New()
default:
a := newAllocatorHelper(ca.sys, ca.topologyCache)
a.from = from.Clone()
@@ -436,7 +435,7 @@ func (c *topologyCache) discoverSstCPUPriority(sys sysfs.System, pkgID idset.ID)

pkg := sys.Package(pkgID)
sst := pkg.SstInfo()
cpuIDs := c.pkg[pkgID].ToSlice()
cpuIDs := c.pkg[pkgID].List()
prios := make(map[idset.ID]CPUPriority, len(cpuIDs))

// Determine SST-based priority. Based on experimentation there is some
@@ -514,7 +513,7 @@ func (c *topologyCache) sstClosPriority(sys sysfs.System, pkgID idset.ID) map[in
// Get a list of unique CLOS proportional priority values
closPps := make(map[int]int)
closIds := make(map[int]int)
for _, cpuID := range c.pkg[pkgID].ToSlice() {
for _, cpuID := range c.pkg[pkgID].List() {
clos := sys.CPU(idset.ID(cpuID)).SstClos()
pp := sstinfo.ClosInfo[clos].ProportionalPriority
closPps[pp] = clos
@@ -558,7 +557,7 @@ func (c *topologyCache) discoverCpufreqPriority(sys sysfs.System, pkgID idset.ID
// Group cpus by base frequency and energy performance profile
freqs := map[uint64][]idset.ID{}
epps := map[sysfs.EPP][]idset.ID{}
cpuIDs := c.pkg[pkgID].ToSlice()
cpuIDs := c.pkg[pkgID].List()
for _, num := range cpuIDs {
id := idset.ID(num)
cpu := sys.CPU(id)
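
Most of the changes in this file, and in the files that follow, are mechanical renames from the old kubelet cpuset API to the k8s.io/utils/cpuset one: NewCPUSet becomes New, ToSlice becomes List, and ToSliceNoSort becomes UnsortedList. A small standalone example, not part of the commit, showing the new spellings next to the old ones:

```go
package main

import (
	"fmt"

	"k8s.io/utils/cpuset"
)

func main() {
	// Old kubelet API (before this commit):
	//   cset := cpuset.NewCPUSet(2, 3, 15, 17)
	//   ids  := cset.ToSlice()        // sorted slice
	//   raw  := cset.ToSliceNoSort()  // unsorted slice
	//
	// New k8s.io/utils/cpuset API (after this commit):
	cset := cpuset.New(2, 3, 15, 17)
	fmt.Println(cset.List())         // sorted: [2 3 15 17]
	fmt.Println(cset.UnsortedList()) // same elements, unspecified order
	fmt.Println(cset.Size())         // 4
	fmt.Println(cset.String())       // canonical form, e.g. "2-3,15,17"
}
```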
7 changes: 3 additions & 4 deletions pkg/cpuallocator/cpuallocator_test.go
@@ -19,10 +19,9 @@ import (
"path"
"testing"

"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"

"github.com/intel/cri-resource-manager/pkg/sysfs"
"github.com/intel/cri-resource-manager/pkg/utils"
"github.com/intel/cri-resource-manager/pkg/utils/cpuset"
)

func TestAllocatorHelper(t *testing.T) {
@@ -66,7 +65,7 @@ func TestAllocatorHelper(t *testing.T) {
from: cpuset.MustParse("2,3,10-14,20"),
prefer: PriorityNormal,
cnt: 9,
expected: cpuset.NewCPUSet(),
expected: cpuset.New(),
},
{
description: "request all available CPUs",
@@ -80,7 +79,7 @@ func TestAllocatorHelper(t *testing.T) {
from: cpuset.MustParse("2,3,10-25"),
prefer: PriorityHigh,
cnt: 4,
expected: cpuset.NewCPUSet(2, 3, 15, 17),
expected: cpuset.New(2, 3, 15, 17),
},
}

2 changes: 1 addition & 1 deletion pkg/cri/resource-manager/cache/cache.go
@@ -26,13 +26,13 @@ import (

v1 "k8s.io/api/core/v1"
criv1 "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"

"github.com/intel/cri-resource-manager/pkg/apis/resmgr"
"github.com/intel/cri-resource-manager/pkg/cri/resource-manager/config"
"github.com/intel/cri-resource-manager/pkg/cri/resource-manager/kubernetes"
logger "github.com/intel/cri-resource-manager/pkg/log"
"github.com/intel/cri-resource-manager/pkg/topology"
"github.com/intel/cri-resource-manager/pkg/utils/cpuset"
idset "github.com/intel/goresctrl/pkg/utils"
)

7 changes: 3 additions & 4 deletions pkg/cri/resource-manager/control/cpu/cpu.go
@@ -17,14 +17,13 @@ package cpu
import (
"fmt"

"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"

pkgcfg "github.com/intel/cri-resource-manager/pkg/config"
"github.com/intel/cri-resource-manager/pkg/cri/client"
"github.com/intel/cri-resource-manager/pkg/cri/resource-manager/cache"
"github.com/intel/cri-resource-manager/pkg/cri/resource-manager/control"
logger "github.com/intel/cri-resource-manager/pkg/log"
"github.com/intel/cri-resource-manager/pkg/sysfs"
"github.com/intel/cri-resource-manager/pkg/utils/cpuset"
"github.com/intel/goresctrl/pkg/utils"
)

@@ -155,7 +154,7 @@ func (ctl *cpuctl) enforceUncore(assignments cpuClassAssignments, affectedCPUs .
return nil
}

cpus := cpuset.NewCPUSet(affectedCPUs...)
cpus := cpuset.New(affectedCPUs...)

for _, cpuPkgID := range ctl.system.PackageIDs() {
cpuPkg := ctl.system.Package(cpuPkgID)
@@ -164,7 +163,7 @@

// Check if this die is affected by the specified cpuset
if cpus.Size() == 0 || dieCPUs.Intersection(cpus).Size() > 0 {
min, max, minCls, maxCls := effectiveUncoreFreqs(utils.NewIDSet(dieCPUs.ToSlice()...), ctl.config.Classes, assignments)
min, max, minCls, maxCls := effectiveUncoreFreqs(utils.NewIDSet(dieCPUs.List()...), ctl.config.Classes, assignments)

if min == 0 && max == 0 {
log.Debug("no uncore frequency limits for cpu package/die %d/%d", cpuPkgID, cpuDieID)
20 changes: 10 additions & 10 deletions pkg/cri/resource-manager/policy/builtin/balloons/balloons-policy.go
@@ -21,7 +21,6 @@ import (
corev1 "k8s.io/api/core/v1"
resapi "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"

pkgcfg "github.com/intel/cri-resource-manager/pkg/config"
"github.com/intel/cri-resource-manager/pkg/cpuallocator"
@@ -34,6 +33,7 @@ import (
policyapi "github.com/intel/cri-resource-manager/pkg/cri/resource-manager/policy"
logger "github.com/intel/cri-resource-manager/pkg/log"
"github.com/intel/cri-resource-manager/pkg/utils"
"github.com/intel/cri-resource-manager/pkg/utils/cpuset"
idset "github.com/intel/goresctrl/pkg/utils"
)

@@ -159,7 +159,7 @@ func CreateBalloonsPolicy(policyOptions *policy.BackendOptions) policy.Backend {
p.allowed = policyOptions.System.CPUSet().Difference(policyOptions.System.Offlined())
}
// p.reserved: CPUs reserved for kube-system pods, subset of p.allowed.
p.reserved = cpuset.NewCPUSet()
p.reserved = cpuset.New()
if reserved, ok := p.options.Reserved[policyapi.DomainCPU]; ok {
switch v := reserved.(type) {
case cpuset.CPUSet:
@@ -484,7 +484,7 @@ func (p *balloons) resetCpuClass() error {
// containers on the balloon, including the reserved balloon.
//
// TODO: don't depend on cpu controller directly
cpucontrol.Assign(p.cch, p.bpoptions.IdleCpuClass, p.allowed.ToSliceNoSort()...)
cpucontrol.Assign(p.cch, p.bpoptions.IdleCpuClass, p.allowed.UnsortedList()...)
log.Debugf("resetCpuClass available: %s; reserved: %s", p.allowed, p.reserved)
return nil
}
@@ -509,7 +509,7 @@ func (p *balloons) useCpuClass(bln *Balloon) error {
// - User-defined CPU AllocatorPriority: bln.Def.AllocatorPriority.
// - All existing balloon instances: p.balloons.
// - CPU configurations by user: bln.Def.CpuClass (for bln in p.balloons)
cpucontrol.Assign(p.cch, bln.Def.CpuClass, bln.Cpus.ToSliceNoSort()...)
cpucontrol.Assign(p.cch, bln.Def.CpuClass, bln.Cpus.UnsortedList()...)
log.Debugf("useCpuClass Cpus: %s; CpuClass: %s", bln.Cpus, bln.Def.CpuClass)
return nil
}
@@ -518,7 +518,7 @@ func (p *balloons) useCpuClass(bln *Balloon) error {
func (p *balloons) forgetCpuClass(bln *Balloon) {
// Use p.IdleCpuClass for bln.Cpus.
// Usual inputs: see useCpuClass
cpucontrol.Assign(p.cch, p.bpoptions.IdleCpuClass, bln.Cpus.ToSliceNoSort()...)
cpucontrol.Assign(p.cch, p.bpoptions.IdleCpuClass, bln.Cpus.UnsortedList()...)
log.Debugf("forgetCpuClass Cpus: %s; CpuClass: %s", bln.Cpus, bln.Def.CpuClass)
}

@@ -551,7 +551,7 @@ func (p *balloons) newBalloon(blnDef *BalloonDef, confCpus bool) (*Balloon, erro
// So does the default balloon unless its CPU counts are tweaked.
cpus = p.reserved
} else {
addFromCpus, _, err := p.cpuTreeAllocator.ResizeCpus(cpuset.NewCPUSet(), p.freeCpus, blnDef.MinCpus)
addFromCpus, _, err := p.cpuTreeAllocator.ResizeCpus(cpuset.New(), p.freeCpus, blnDef.MinCpus)
if err != nil {
return nil, balloonsError("failed to choose a cpuset for allocating first %d CPUs from %#s", blnDef.MinCpus, p.freeCpus)
}
@@ -566,7 +566,7 @@ func (p *balloons) newBalloon(blnDef *BalloonDef, confCpus bool) (*Balloon, erro
Instance: freeInstance,
PodIDs: make(map[string][]string),
Cpus: cpus,
SharedIdleCpus: cpuset.NewCPUSet(),
SharedIdleCpus: cpuset.New(),
Mems: p.closestMems(cpus),
}
if confCpus {
@@ -1086,7 +1086,7 @@ func (p *balloons) setConfig(bpoptions *BalloonsOptions) error {
}
// No errors in balloon creation, take new configuration into use.
p.bpoptions = *bpoptions
p.updatePinning(p.shareIdleCpus(p.freeCpus, cpuset.NewCPUSet())...)
p.updatePinning(p.shareIdleCpus(p.freeCpus, cpuset.New())...)
// (Re)configures all CPUs in balloons.
p.resetCpuClass()
for _, bln := range p.balloons {
@@ -1179,7 +1179,7 @@ func (p *balloons) resizeBalloon(bln *Balloon, newMilliCpus int) error {
log.Debugf("- old freeCpus: %#s, old bln.Cpus: %#s, releasing: %#s", p.freeCpus, bln.Cpus, removeFromCpus)
p.freeCpus = p.freeCpus.Union(removeFromCpus)
bln.Cpus = bln.Cpus.Difference(removeFromCpus)
p.updatePinning(p.shareIdleCpus(removeFromCpus, cpuset.NewCPUSet())...)
p.updatePinning(p.shareIdleCpus(removeFromCpus, cpuset.New())...)
}
log.Debugf("- resize successful: %s, freecpus: %#s", bln, p.freeCpus)
p.updatePinning(bln)
@@ -1217,7 +1217,7 @@ func (p *balloons) shareIdleCpus(addCpus, removeCpus cpuset.CPUSet) []*Balloon {
if topoLevel == CPUTopologyLevelUndefined {
continue
}
idleCpusInTopoLevel := cpuset.NewCPUSet()
idleCpusInTopoLevel := cpuset.New()
p.cpuTree.DepthFirstWalk(func(t *cpuTreeNode) error {
// Dive in correct topology level.
if t.level != topoLevel {
14 changes: 7 additions & 7 deletions pkg/cri/resource-manager/policy/builtin/balloons/cputree.go
@@ -22,7 +22,7 @@ import (
"strings"

system "github.com/intel/cri-resource-manager/pkg/sysfs"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
"github.com/intel/cri-resource-manager/pkg/utils/cpuset"
)

type CPUTopologyLevel int
@@ -142,7 +142,7 @@ func (tna cpuTreeNodeAttributes) String() string {
func NewCpuTree(name string) *cpuTreeNode {
return &cpuTreeNode{
name: name,
cpus: cpuset.NewCPUSet(),
cpus: cpuset.New(),
}
}

@@ -236,17 +236,17 @@ func NewCpuTreeFromSystem() (*cpuTreeNode, error) {
nodeTree.level = CPUTopologyLevelNuma
dieTree.AddChild(nodeTree)
node := sys.Node(nodeID)
for _, cpuID := range node.CPUSet().ToSlice() {
for _, cpuID := range node.CPUSet().List() {
cpuTree := NewCpuTree(fmt.Sprintf("p%dd%dn%dcpu%d", packageID, dieID, nodeID, cpuID))

cpuTree.level = CPUTopologyLevelCore
nodeTree.AddChild(cpuTree)
cpu := sys.CPU(cpuID)
for _, threadID := range cpu.ThreadCPUSet().ToSlice() {
for _, threadID := range cpu.ThreadCPUSet().List() {
threadTree := NewCpuTree(fmt.Sprintf("p%dd%dn%dcpu%dt%d", packageID, dieID, nodeID, cpuID, threadID))
threadTree.level = CPUTopologyLevelThread
cpuTree.AddChild(threadTree)
threadTree.AddCpus(cpuset.NewCPUSet(threadID))
threadTree.AddCpus(cpuset.New(threadID))
}
}
}
@@ -414,8 +414,8 @@ func (ta *cpuTreeAllocator) ResizeCpus(currentCpus, freeCpus cpuset.CPUSet, delt
// In multi-CPU removal, remove CPUs one by one instead of
// trying to find a single topology element from which all of
// them could be removed.
removeFrom := cpuset.NewCPUSet()
addFrom := cpuset.NewCPUSet()
removeFrom := cpuset.New()
addFrom := cpuset.New()
for n := 0; n < -delta; n++ {
_, removeSingleFrom, err := ta.resizeCpus(currentCpus, freeCpus, -1)
if err != nil {
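
The remove-one-CPU-at-a-time loop in ResizeCpus above works because CPUSet values are immutable: Union and Difference return new sets rather than mutating the receiver, so the code starts from empty sets and accumulates into them. A toy sketch, not part of the commit, of that accumulation pattern with the topology-aware choice replaced by simply picking the lowest free CPU:

```go
package main

import (
	"fmt"

	"k8s.io/utils/cpuset"
)

func main() {
	free := cpuset.New(0, 1, 2, 3, 4, 5)
	removeFrom := cpuset.New()

	for n := 0; n < 2; n++ {
		// Stand-in for the topology-aware pick done by resizeCpus:
		// just take the lowest remaining CPU.
		picked := cpuset.New(free.List()[0])
		removeFrom = removeFrom.Union(picked)
		free = free.Difference(picked)
	}

	fmt.Println(removeFrom) // "0-1"
	fmt.Println(free)       // "2-5"
}
```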
