Merge pull request #281 from dcantah/cg2-reduce-allocs
Cgroup2: Reduce allocations for `manager.Stat`
estesp committed May 10, 2023
2 parents 474049b + a8621bd commit a6075af
Showing 5 changed files with 130 additions and 52 deletions.
7 changes: 5 additions & 2 deletions cgroup2/manager.go
@@ -525,7 +525,10 @@ func (c *Manager) Stat() (*stats.Metrics, error) {
if err != nil {
return nil, err
}
out := make(map[string]uint64)
// Sizing this avoids an allocation to increase the map at runtime;
// currently the default bucket size is 8 and we put 40+ elements
// in it so we'd always end up allocating.
out := make(map[string]uint64, 50)
for _, controller := range controllers {
switch controller {
case "cpu", "memory":
@@ -543,8 +546,8 @@ func (c *Manager) Stat() (*stats.Metrics, error) {
return nil, err
}
}
var metrics stats.Metrics

var metrics stats.Metrics
metrics.Pids = &stats.PidsStat{
Current: getStatFileContentUint64(filepath.Join(c.path, "pids.current")),
Limit: getStatFileContentUint64(filepath.Join(c.path, "pids.max")),
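The size hint above is the whole point of this hunk: a Go map created without a hint starts from a single 8-slot bucket and reallocates its bucket array several times on the way to 40+ entries, while `make(map[string]uint64, 50)` sizes it once up front. A stand-alone sketch of the effect (illustrative only, not part of this commit; the file name, benchmark names, and key count are made up):

	// presize_bench_test.go: illustrative only, not part of this commit.
	package main

	import (
		"strconv"
		"testing"
	)

	const nKeys = 45 // roughly the number of entries Stat() ends up storing

	// keys are precomputed so the benchmarks measure only map allocations.
	var keys = func() []string {
		k := make([]string, nKeys)
		for i := range k {
			k[i] = strconv.Itoa(i)
		}
		return k
	}()

	func fill(m map[string]uint64) {
		for i, k := range keys {
			m[k] = uint64(i)
		}
	}

	func BenchmarkMapNoHint(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			fill(make(map[string]uint64)) // grows in steps while inserting
		}
	}

	func BenchmarkMapPresized(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			fill(make(map[string]uint64, 50)) // bucket array allocated once
		}
	}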
17 changes: 17 additions & 0 deletions cgroup2/manager_test.go
@@ -228,3 +228,20 @@ func TestCgroupType(t *testing.T) {
require.NoError(t, err)
require.Equal(t, cgType, Threaded)
}

func BenchmarkStat(b *testing.B) {
checkCgroupMode(b)
group := "/stat-test-cg"
groupPath := fmt.Sprintf("%s-%d", group, os.Getpid())
c, err := NewManager(defaultCgroup2Path, groupPath, &Resources{})
require.NoErrorf(b, err, "failed to init new cgroup manager")
b.Cleanup(func() {
_ = c.Delete()
})

b.ReportAllocs()
for i := 0; i < b.N; i++ {
_, err := c.Stat()
require.NoError(b, err)
}
}
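On a cgroup2 (unified) host the new benchmark can be run with the standard tooling, for example `go test -run '^$' -bench BenchmarkStat -benchmem ./cgroup2/` (typically as root, since it creates a cgroup); `-benchmem` adds the B/op and allocs/op columns that this PR is trying to drive down.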
6 changes: 3 additions & 3 deletions cgroup2/testutils_test.go
@@ -27,14 +27,14 @@ import (
"golang.org/x/sys/unix"
)

func checkCgroupMode(t *testing.T) {
func checkCgroupMode(tb testing.TB) {
var st unix.Statfs_t
err := unix.Statfs(defaultCgroup2Path, &st)
require.NoError(t, err, "cannot statfs cgroup root")
require.NoError(tb, err, "cannot statfs cgroup root")

isUnified := st.Type == unix.CGROUP2_SUPER_MAGIC
if !isUnified {
t.Skip("System running in hybrid or cgroupv1 mode")
tb.Skip("System running in hybrid or cgroupv1 mode")
}
}

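testing.TB is the interface implemented by both *testing.T and *testing.B, so widening the helper's parameter is what lets the new BenchmarkStat above reuse the same cgroup-mode skip logic.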
144 changes: 97 additions & 47 deletions cgroup2/utils.go
@@ -18,13 +18,15 @@ package cgroup2

import (
"bufio"
"errors"
"fmt"
"io"
"math"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"unsafe"

@@ -236,18 +238,28 @@ func ToResources(spec *specs.LinuxResources) *Resources {

// Gets uint64 parsed content of single value cgroup stat file
func getStatFileContentUint64(filePath string) uint64 {
contents, err := os.ReadFile(filePath)
f, err := os.Open(filePath)
if err != nil {
return 0
}
trimmed := strings.TrimSpace(string(contents))
defer f.Close()

// We expect an unsigned 64 bit integer, or a "max" string
// in some cases.
buf := make([]byte, 32)
n, err := f.Read(buf)
if err != nil {
return 0
}

trimmed := strings.TrimSpace(string(buf[:n]))
if trimmed == "max" {
return math.MaxUint64
}

res, err := parseUint(trimmed, 10, 64)
if err != nil {
logrus.Errorf("unable to parse %q as a uint from Cgroup file %q", string(contents), filePath)
logrus.Errorf("unable to parse %q as a uint from Cgroup file %q", trimmed, filePath)
return res
}
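For context, the difference between the two read patterns can be measured in isolation. The sketch below is illustrative and not part of the diff; it hoists the small buffer out of the loop, unlike the helper above, simply to contrast `os.ReadFile` (which returns a freshly allocated []byte on every call) with reading into one reused buffer. /proc/self/loginuid is used only because it is a convenient single-value file, the same trick the new BenchmarkGetStatFileContentUint64 further down relies on.

	// readpattern_bench_test.go: illustrative only, not part of this commit.
	package main

	import (
		"os"
		"testing"
	)

	const smallFile = "/proc/self/loginuid" // any tiny single-value file will do

	func BenchmarkReadFile(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			if _, err := os.ReadFile(smallFile); err != nil { // new []byte each call
				b.Fatal(err)
			}
		}
	}

	func BenchmarkReadIntoBuffer(b *testing.B) {
		b.ReportAllocs()
		buf := make([]byte, 32)
		for i := 0; i < b.N; i++ {
			f, err := os.Open(smallFile)
			if err != nil {
				b.Fatal(err)
			}
			if _, err := f.Read(buf); err != nil { // reuses the same 32-byte buffer
				f.Close()
				b.Fatal(err)
			}
			f.Close()
		}
	}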

@@ -377,56 +389,94 @@ func systemdUnitFromPath(path string) string {
}

// Removed in this commit:
func readHugeTlbStats(path string) []*stats.HugeTlbStat {
	usage := []*stats.HugeTlbStat{}
	keyUsage := make(map[string]*stats.HugeTlbStat)
	f, err := os.Open(path)
	if err != nil {
		return usage
	}
	files, err := f.Readdir(-1)
	f.Close()
	if err != nil {
		return usage
	}

	for _, file := range files {
		if strings.Contains(file.Name(), "hugetlb") &&
			(strings.HasSuffix(file.Name(), "max") || strings.HasSuffix(file.Name(), "current")) {
			var hugeTlb *stats.HugeTlbStat
			var ok bool
			fileName := strings.Split(file.Name(), ".")
			pageSize := fileName[1]
			if hugeTlb, ok = keyUsage[pageSize]; !ok {
				hugeTlb = &stats.HugeTlbStat{}
			}
			hugeTlb.Pagesize = pageSize
			out, err := os.ReadFile(filepath.Join(path, file.Name()))
			if err != nil {
				continue
			}
			var value uint64
			stringVal := strings.TrimSpace(string(out))
			if stringVal == "max" {
				value = math.MaxUint64
			} else {
				value, err = strconv.ParseUint(stringVal, 10, 64)
			}
			if err != nil {
				continue
			}
			switch fileName[2] {
			case "max":
				hugeTlb.Max = value
			case "current":
				hugeTlb.Current = value
			}
			keyUsage[pageSize] = hugeTlb
		}
	}
	for _, entry := range keyUsage {
		usage = append(usage, entry)
	}
	return usage
}

// Added in this commit:
func readHugeTlbStats(path string) []*stats.HugeTlbStat {
	hpSizes := hugePageSizes()
	usage := make([]*stats.HugeTlbStat, len(hpSizes))
	for idx, pagesize := range hpSizes {
		usage[idx] = &stats.HugeTlbStat{
			Max:      getStatFileContentUint64(filepath.Join(path, "hugetlb."+pagesize+".max")),
			Current:  getStatFileContentUint64(filepath.Join(path, "hugetlb."+pagesize+".current")),
			Pagesize: pagesize,
		}
	}
	return usage
}

var (
	hPageSizes  []string
	initHPSOnce sync.Once
)

// The following idea and implementation is taken pretty much line for line from
// runc. Because the hugetlb files are well known, and the only variable thrown in
// the mix is what huge page sizes you have on your host, this lends itself well
// to doing the work to find the files present once, and then re-using this. This
// saves a os.Readdirnames(0) call to search for hugeltb files on every `manager.Stat`
// call.
// https://github.com/opencontainers/runc/blob/3a2c0c2565644d8a7e0f1dd594a060b21fa96cf1/libcontainer/cgroups/utils.go#L301
func hugePageSizes() []string {
	initHPSOnce.Do(func() {
		dir, err := os.OpenFile("/sys/kernel/mm/hugepages", unix.O_DIRECTORY|unix.O_RDONLY, 0)
		if err != nil {
			return
		}
		files, err := dir.Readdirnames(0)
		dir.Close()
		if err != nil {
			return
		}

		hPageSizes, err = getHugePageSizeFromFilenames(files)
		if err != nil {
			logrus.Warnf("hugePageSizes: %s", err)
		}
	})

	return hPageSizes
}

func getHugePageSizeFromFilenames(fileNames []string) ([]string, error) {
	pageSizes := make([]string, 0, len(fileNames))
	var warn error

	for _, file := range fileNames {
		// example: hugepages-1048576kB
		val := strings.TrimPrefix(file, "hugepages-")
		if len(val) == len(file) {
			// Unexpected file name: no prefix found, ignore it.
			continue
		}
		// In all known versions of Linux up to 6.3 the suffix is always
		// "kB". If we find something else, produce an error but keep going.
		eLen := len(val) - 2
		val = strings.TrimSuffix(val, "kB")
		if len(val) != eLen {
			// Highly unlikely.
			if warn == nil {
				warn = errors.New(file + `: invalid suffix (expected "kB")`)
			}
			continue
		}
		size, err := strconv.Atoi(val)
		if err != nil {
			// Highly unlikely.
			if warn == nil {
				warn = fmt.Errorf("%s: %w", file, err)
			}
			continue
		}
		// Model after https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/mm/hugetlb_cgroup.c?id=eff48ddeab782e35e58ccc8853f7386bbae9dec4#n574
		// but in our case the size is in KB already.
		if size >= (1 << 20) {
			val = strconv.Itoa(size>>20) + "GB"
		} else if size >= (1 << 10) {
			val = strconv.Itoa(size>>10) + "MB"
		} else {
			val += "KB"
		}
		pageSizes = append(pageSizes, val)
	}

	return pageSizes, warn
}
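The kB-to-label conversion at the end of getHugePageSizeFromFilenames is easiest to sanity-check with a few concrete inputs. A sketch of such a check (illustrative; the test name is made up, and it assumes it sits in the cgroup2 package next to the code above, using the same testify/require helpers as the existing tests):

	func TestHugePageSizeFromFilenamesExamples(t *testing.T) {
		got, err := getHugePageSizeFromFilenames([]string{
			"hugepages-64kB",      // 64 < 1<<10        -> "64KB"
			"hugepages-2048kB",    // 2048>>10 = 2      -> "2MB"
			"hugepages-1048576kB", // 1048576>>20 = 1   -> "1GB"
		})
		require.NoError(t, err)
		require.Equal(t, []string{"64KB", "2MB", "1GB"}, got)
	}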

func getSubreaper() (int, error) {
8 changes: 8 additions & 0 deletions cgroup2/utils_test.go
@@ -58,3 +58,11 @@ func TestToResources(t *testing.T) {
v2resources2 := ToResources(&res2)
assert.Equal(t, CPUMax("max 10000"), v2resources2.CPU.Max)
}

func BenchmarkGetStatFileContentUint64(b *testing.B) {
b.ReportAllocs()

for i := 0; i < b.N; i++ {
_ = getStatFileContentUint64("/proc/self/loginuid")
}
}
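/proc/self/loginuid is a convenient target here: on typical kernels it holds a single numeric value and is readable by an unprivileged process, so the helper's allocation behaviour can be benchmarked without setting up a cgroup.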
