From f76ba4865da3784d7206aa1c7e95cd0cee7d2b9b Mon Sep 17 00:00:00 2001
From: Your Name
Date: Fri, 15 Sep 2023 17:04:19 +0200
Subject: [PATCH 1/2] container/libcontainer: Add limit parsing benchmark

---
 container/libcontainer/handler_test.go | 15 +++++++++++++++
 container/libcontainer/testdata/limits | 17 +++++++++++++++++
 2 files changed, 32 insertions(+)
 create mode 100644 container/libcontainer/testdata/limits

diff --git a/container/libcontainer/handler_test.go b/container/libcontainer/handler_test.go
index 826aaefd16..84410745e5 100644
--- a/container/libcontainer/handler_test.go
+++ b/container/libcontainer/handler_test.go
@@ -296,3 +296,18 @@ func TestClearReferencedBytesWhenClearRefsMissing(t *testing.T) {
 	err := clearReferencedBytes(pids, 0, 1)
 	assert.Nil(t, err)
 }
+
+var ulimits []info.UlimitSpec
+
+func BenchmarkProcessLimitsFile(b *testing.B) {
+	content, err := os.ReadFile("testdata/limits")
+	assert.Nil(b, err)
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		ulimits = processLimitsFile(string(content))
+	}
+
+	// Ensure the compiler doesn't optimize away the benchmark
+	_ = ulimits
+}
diff --git a/container/libcontainer/testdata/limits b/container/libcontainer/testdata/limits
new file mode 100644
index 0000000000..03b5a16c87
--- /dev/null
+++ b/container/libcontainer/testdata/limits
@@ -0,0 +1,17 @@
+Limit                     Soft Limit           Hard Limit           Units
+Max cpu time              unlimited            unlimited            seconds
+Max file size             unlimited            unlimited            bytes
+Max data size             unlimited            unlimited            bytes
+Max stack size            8388608              unlimited            bytes
+Max core file size        unlimited            unlimited            bytes
+Max resident set          unlimited            unlimited            bytes
+Max processes             119958               119958               processes
+Max open files            1073741816           1073741816           files
+Max locked memory         3932852224           3932852224           bytes
+Max address space         unlimited            unlimited            bytes
+Max file locks            unlimited            unlimited            locks
+Max pending signals       119958               119958               signals
+Max msgqueue size         819200               819200               bytes
+Max nice priority         0                    0
+Max realtime priority     0                    0
+Max realtime timeout      unlimited            unlimited            us

From 4c90b9077ce23da48ffae0ec9e001b1fe6f33a5b Mon Sep 17 00:00:00 2001
From: Your Name
Date: Fri, 15 Sep 2023 17:30:39 +0200
Subject: [PATCH 2/2] container/libcontainer: Improve limits file parsing perf
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Only the max open files limit is ever read from the limits file, so this
change optimizes for that single case.

Benchmark:

```
$ benchstat old.txt new.txt
goos: linux
goarch: amd64
pkg: github.com/google/cadvisor/container/libcontainer
cpu: AMD Ryzen 5 3400GE with Radeon Vega Graphics
                    │   old.txt   │              new.txt               │
                    │   sec/op    │    sec/op     vs base              │
ProcessLimitsFile-8   85.012µ ± 1%   1.324µ ± 0%  -98.44% (p=0.000 n=10)
```

On a GKE v1.27.4 production cluster, this code path accounted for roughly
1.5% of total kubelet CPU usage. With a 98.44% improvement, this change
should cut kubelet CPU usage by roughly 1.5%, and possibly more, since the
new code also produces less garbage for the GC to collect.
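
For reference, a comparison like the one above can be reproduced along these
lines (the exact flags and iteration count are illustrative, not the original
invocation):

```
$ go test -run '^$' -bench BenchmarkProcessLimitsFile -count 10 \
      ./container/libcontainer > old.txt   # before this patch
$ go test -run '^$' -bench BenchmarkProcessLimitsFile -count 10 \
      ./container/libcontainer > new.txt   # with this patch applied
$ benchstat old.txt new.txt
```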
---
 container/libcontainer/handler.go      | 71 +++++++++++++-------
 container/libcontainer/handler_test.go | 10 ++++
 2 files changed, 46 insertions(+), 35 deletions(-)

diff --git a/container/libcontainer/handler.go b/container/libcontainer/handler.go
index 5a7f694776..24405ccff4 100644
--- a/container/libcontainer/handler.go
+++ b/container/libcontainer/handler.go
@@ -39,7 +39,6 @@ import (
 )
 
 var (
-	whitelistedUlimits = [...]string{"max_open_files"}
 	referencedResetInterval = flag.Uint64("referenced_reset_interval", 0,
 		"Reset interval for referenced bytes (container_referenced_bytes metric), number of measurement cycles after which referenced bytes are cleared, if set to 0 referenced bytes are never cleared (default: 0)")
 
@@ -205,51 +204,53 @@ func parseUlimit(value string) (int64, error) {
 	return num, nil
 }
 
-func isUlimitWhitelisted(name string) bool {
-	for _, whitelist := range whitelistedUlimits {
-		if name == whitelist {
-			return true
-		}
-	}
-	return false
-}
-
 func processLimitsFile(fileData string) []info.UlimitSpec {
+	const maxOpenFilesLinePrefix = "Max open files"
+
 	limits := strings.Split(fileData, "\n")
 	ulimits := make([]info.UlimitSpec, 0, len(limits))
 	for _, lim := range limits {
 		// Skip any headers/footers
-		if strings.HasPrefix(lim, "Max") {
-
-			// Line format: Max open files            16384                16384                files
-			fields := regexp.MustCompile(`[\s]{2,}`).Split(lim, -1)
-			name := strings.Replace(strings.ToLower(strings.TrimSpace(fields[0])), " ", "_", -1)
-
-			found := isUlimitWhitelisted(name)
-			if !found {
-				continue
-			}
-
-			soft := strings.TrimSpace(fields[1])
-			softNum, softErr := parseUlimit(soft)
-
-			hard := strings.TrimSpace(fields[2])
-			hardNum, hardErr := parseUlimit(hard)
-
-			// Omit metric if there were any parsing errors
-			if softErr == nil && hardErr == nil {
-				ulimitSpec := info.UlimitSpec{
-					Name:      name,
-					SoftLimit: int64(softNum),
-					HardLimit: int64(hardNum),
-				}
-				ulimits = append(ulimits, ulimitSpec)
+		if strings.HasPrefix(lim, maxOpenFilesLinePrefix) {
+			// Strip the "Max open files" prefix and parse the rest of the line
+			ulimit, err := processMaxOpenFileLimitLine(
+				"max_open_files",
+				lim[len(maxOpenFilesLinePrefix):],
+			)
+			if err == nil {
+				ulimits = append(ulimits, ulimit)
 			}
 		}
 	}
 	return ulimits
 }
 
+// processMaxOpenFileLimitLine parses a single line from the limits file whose
+// "Max open files" name prefix has already been stripped by the caller.
+func processMaxOpenFileLimitLine(name, line string) (info.UlimitSpec, error) {
+	// Remove leading and trailing whitespace
+	line = strings.TrimSpace(line)
+	// Split the remaining fields on whitespace
+	fields := strings.Fields(line)
+	if len(fields) != 3 {
+		return info.UlimitSpec{}, fmt.Errorf("unable to parse max open files line: %s", line)
+	}
+	// The first field is the soft limit, the second is the hard limit
+	soft, err := parseUlimit(fields[0])
+	if err != nil {
+		return info.UlimitSpec{}, err
+	}
+	hard, err := parseUlimit(fields[1])
+	if err != nil {
+		return info.UlimitSpec{}, err
+	}
+	return info.UlimitSpec{
+		Name:      name,
+		SoftLimit: soft,
+		HardLimit: hard,
+	}, nil
+}
+
 func processRootProcUlimits(rootFs string, rootPid int) []info.UlimitSpec {
 	filePath := path.Join(rootFs, "/proc", strconv.Itoa(rootPid), "limits")
 	out, err := os.ReadFile(filePath)
diff --git a/container/libcontainer/handler_test.go b/container/libcontainer/handler_test.go
index 84410745e5..82da0b3e67 100644
--- a/container/libcontainer/handler_test.go
+++ b/container/libcontainer/handler_test.go
@@ -311,3 +311,13 @@ func BenchmarkProcessLimitsFile(b *testing.B) {
 	// Ensure the compiler doesn't optimize away the benchmark
 	_ = ulimits
 }
+
+func TestProcessMaxOpenFileLimitLine(t *testing.T) {
+	line := "            1073741816           1073741816           files     "
+
+	ulimit, err := processMaxOpenFileLimitLine("max_open_files", line)
+	assert.Nil(t, err)
+	assert.Equal(t, "max_open_files", ulimit.Name)
+	assert.Equal(t, int64(1073741816), ulimit.SoftLimit)
+	assert.Equal(t, int64(1073741816), ulimit.HardLimit)
+}