Skip to content

Commit

Permalink
libcontainer: set pids limit to max when set to 0
Browse files Browse the repository at this point in the history
Signed-off-by: Peter Hunt <pehunt@redhat.com>
  • Loading branch information
haircommander committed Sep 14, 2023
1 parent cb44958 commit 3eb8ea5
Show file tree
Hide file tree
Showing 11 changed files with 62 additions and 39 deletions.
5 changes: 3 additions & 2 deletions libcontainer/cgroups/devices/systemd_test.go
Expand Up @@ -28,12 +28,13 @@ func TestPodSkipDevicesUpdate(t *testing.T) {
}

podName := "system-runc_test_pod" + t.Name() + ".slice"
l := int64(42)
podConfig := &configs.Cgroup{
Systemd: true,
Parent: "system.slice",
Name: podName,
Resources: &configs.Resources{
PidsLimit: 42,
PidsLimit: &l,
Memory: 32 * 1024 * 1024,
SkipDevices: true,
},
Expand Down Expand Up @@ -97,7 +98,7 @@ func TestPodSkipDevicesUpdate(t *testing.T) {

// Now update the pod a few times.
for i := 0; i < 42; i++ {
podConfig.Resources.PidsLimit++
*podConfig.Resources.PidsLimit++
podConfig.Resources.Memory += 1024 * 1024
if err := pm.Set(podConfig.Resources); err != nil {
t.Fatal(err)
Expand Down
19 changes: 8 additions & 11 deletions libcontainer/cgroups/fs/pids.go
Expand Up @@ -20,20 +20,17 @@ func (s *PidsGroup) Apply(path string, _ *configs.Resources, pid int) error {
}

func (s *PidsGroup) Set(path string, r *configs.Resources) error {
if r.PidsLimit != 0 {
// "max" is the fallback value.
limit := "max"

if r.PidsLimit > 0 {
limit = strconv.FormatInt(r.PidsLimit, 10)
}
if r.PidsLimit == nil {
return nil
}
// "max" is the fallback value.
limit := "max"

if err := cgroups.WriteFile(path, "pids.max", limit); err != nil {
return err
}
if *r.PidsLimit > 0 {
limit = strconv.FormatInt(*r.PidsLimit, 10)
}

return nil
return cgroups.WriteFile(path, "pids.max", limit)
}

func (s *PidsGroup) GetStats(path string, stats *cgroups.Stats) error {
Expand Down
44 changes: 35 additions & 9 deletions libcontainer/cgroups/fs/pids_test.go
Expand Up @@ -9,9 +9,10 @@ import (
"github.com/opencontainers/runc/libcontainer/configs"
)

// Shared pids.max fixtures; pointers to these are used as
// configs.Resources.PidsLimit values in the tests below.
var (
	maxUnlimited int64 = -1
	// maxZero deliberately relies on the implicit zero value: revive's
	// var-declaration check flags an explicit "= 0" as redundant.
	maxZero    int64
	maxLimited int64 = 1024
)

func TestPidsSetMax(t *testing.T) {
Expand All @@ -22,7 +23,7 @@ func TestPidsSetMax(t *testing.T) {
})

r := &configs.Resources{
PidsLimit: maxLimited,
PidsLimit: &maxLimited,
}
pids := &PidsGroup{}
if err := pids.Set(path, r); err != nil {
Expand All @@ -33,20 +34,45 @@ func TestPidsSetMax(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if value != maxLimited {
// Only done for comparison
if value != uint64(maxLimited) {
t.Fatalf("Expected %d, got %d for setting pids.max - limited", maxLimited, value)
}
}

// TestPidsSetUnlimitedWhenZero verifies that a PidsLimit of 0 is
// translated to the kernel's "max" (unlimited) sentinel in pids.max.
func TestPidsSetUnlimitedWhenZero(t *testing.T) {
	path := tempDir(t, "pids")
	writeFileContents(t, path, map[string]string{
		"pids.max": "max",
	})

	res := &configs.Resources{PidsLimit: &maxZero}
	if err := (&PidsGroup{}).Set(path, res); err != nil {
		t.Fatal(err)
	}

	got, err := fscommon.GetCgroupParamString(path, "pids.max")
	if err != nil {
		t.Fatal(err)
	}
	if got != "max" {
		t.Fatalf("Expected %s, got %s for setting pids.max - unlimited", "max", got)
	}
}

func TestPidsSetUnlimited(t *testing.T) {
path := tempDir(t, "pids")

writeFileContents(t, path, map[string]string{
"pids.max": strconv.Itoa(maxLimited),
"pids.max": strconv.FormatInt(maxLimited, 10),
})

r := &configs.Resources{
PidsLimit: maxUnlimited,
PidsLimit: &maxUnlimited,
}
pids := &PidsGroup{}
if err := pids.Set(path, r); err != nil {
Expand All @@ -67,7 +93,7 @@ func TestPidsStats(t *testing.T) {

writeFileContents(t, path, map[string]string{
"pids.current": strconv.Itoa(1337),
"pids.max": strconv.Itoa(maxLimited),
"pids.max": strconv.FormatInt(maxLimited, 10),
})

pids := &PidsGroup{}
Expand All @@ -80,7 +106,7 @@ func TestPidsStats(t *testing.T) {
t.Fatalf("Expected %d, got %d for pids.current", 1337, stats.PidsStats.Current)
}

if stats.PidsStats.Limit != maxLimited {
if stats.PidsStats.Limit != uint64(maxLimited) {
t.Fatalf("Expected %d, got %d for pids.max", maxLimited, stats.PidsStats.Limit)
}
}
Expand Down
2 changes: 1 addition & 1 deletion libcontainer/cgroups/fs2/create.go
Expand Up @@ -39,7 +39,7 @@ func needAnyControllers(r *configs.Resources) (bool, error) {
return ok
}

if isPidsSet(r) && have("pids") {
if r.PidsLimit != nil && have("pids") {
return true, nil
}
if isMemorySet(r) && have("memory") {
Expand Down
8 changes: 2 additions & 6 deletions libcontainer/cgroups/fs2/pids.go
Expand Up @@ -13,15 +13,11 @@ import (
"github.com/opencontainers/runc/libcontainer/configs"
)

func isPidsSet(r *configs.Resources) bool {
return r.PidsLimit != 0
}

func setPids(dirPath string, r *configs.Resources) error {
if !isPidsSet(r) {
if r.PidsLimit == nil {
return nil
}
if val := numToStr(r.PidsLimit); val != "" {
if val := numToStr(*r.PidsLimit); val != "" {
if err := cgroups.WriteFile(dirPath, "pids.max", val); err != nil {
return err
}
Expand Down
4 changes: 2 additions & 2 deletions libcontainer/cgroups/systemd/v1.go
Expand Up @@ -98,9 +98,9 @@ func genV1ResourcesProperties(r *configs.Resources, cm *dbusConnManager) ([]syst
newProp("BlockIOWeight", uint64(r.BlkioWeight)))
}

if r.PidsLimit > 0 || r.PidsLimit == -1 {
if r.PidsLimit != nil && (*r.PidsLimit > 0 || *r.PidsLimit == -1) {
properties = append(properties,
newProp("TasksMax", uint64(r.PidsLimit)))
newProp("TasksMax", uint64(*r.PidsLimit)))
}

err = addCpuset(cm, &properties, r.CpusetCpus, r.CpusetMems)
Expand Down
4 changes: 2 additions & 2 deletions libcontainer/cgroups/systemd/v2.go
Expand Up @@ -257,9 +257,9 @@ func genV2ResourcesProperties(dirPath string, r *configs.Resources, cm *dbusConn

addCpuQuota(cm, &properties, r.CpuQuota, r.CpuPeriod)

if r.PidsLimit > 0 || r.PidsLimit == -1 {
if r.PidsLimit != nil && (*r.PidsLimit > 0 || *r.PidsLimit == -1) {
properties = append(properties,
newProp("TasksMax", uint64(r.PidsLimit)))
newProp("TasksMax", uint64(*r.PidsLimit)))
}

err = addCpuset(cm, &properties, r.CpusetCpus, r.CpusetMems)
Expand Down
2 changes: 1 addition & 1 deletion libcontainer/configs/cgroup_linux.go
Expand Up @@ -88,7 +88,7 @@ type Resources struct {
CPUIdle *int64 `json:"cpu_idle,omitempty"`

// Process limit; set <= `0' to disable limit.
PidsLimit int64 `json:"pids_limit"`
PidsLimit *int64 `json:"pids_limit"`

// Specifies per cgroup weight, range is from 10 to 1000.
BlkioWeight uint16 `json:"blkio_weight"`
Expand Down
7 changes: 4 additions & 3 deletions libcontainer/integration/exec_test.go
Expand Up @@ -535,14 +535,15 @@ func testPids(t *testing.T, systemd bool) {
}

config := newTemplateConfig(t, &tParam{systemd: systemd})
config.Cgroups.Resources.PidsLimit = -1
l := int64(-1)
config.Cgroups.Resources.PidsLimit = &l

// Running multiple processes, expecting it to succeed with no pids limit.
_ = runContainerOk(t, config, "/bin/sh", "-c", "/bin/true | /bin/true | /bin/true | /bin/true")

// Enforce a permissive limit. This needs to be fairly hand-wavey due to the
// issues with running Go binaries with pids restrictions (see below).
config.Cgroups.Resources.PidsLimit = 64
*config.Cgroups.Resources.PidsLimit = 64
_ = runContainerOk(t, config, "/bin/sh", "-c", `
/bin/true | /bin/true | /bin/true | /bin/true | /bin/true | /bin/true | bin/true | /bin/true |
/bin/true | /bin/true | /bin/true | /bin/true | /bin/true | /bin/true | bin/true | /bin/true |
Expand All @@ -551,7 +552,7 @@ func testPids(t *testing.T, systemd bool) {

// Enforce a restrictive limit. 64 * /bin/true + 1 * shell should cause
// this to fail reliably.
config.Cgroups.Resources.PidsLimit = 64
*config.Cgroups.Resources.PidsLimit = 64
out, _, err := runContainer(t, config, "/bin/sh", "-c", `
/bin/true | /bin/true | /bin/true | /bin/true | /bin/true | /bin/true | bin/true | /bin/true |
/bin/true | /bin/true | /bin/true | /bin/true | /bin/true | /bin/true | bin/true | /bin/true |
Expand Down
3 changes: 2 additions & 1 deletion libcontainer/specconv/spec_linux.go
Expand Up @@ -768,7 +768,8 @@ func CreateCgroupConfig(opts *CreateOpts, defaultDevs []*devices.Device) (*confi
c.Resources.CPUIdle = r.CPU.Idle
}
if r.Pids != nil {
c.Resources.PidsLimit = r.Pids.Limit
l := r.Pids.Limit
c.Resources.PidsLimit = &l
}
if r.BlockIO != nil {
if r.BlockIO.Weight != nil {
Expand Down
3 changes: 2 additions & 1 deletion update.go
Expand Up @@ -310,8 +310,9 @@ other options are ignored.
config.Cgroups.Resources.MemoryReservation = *r.Memory.Reservation
config.Cgroups.Resources.MemorySwap = *r.Memory.Swap
config.Cgroups.Resources.MemoryCheckBeforeUpdate = *r.Memory.CheckBeforeUpdate
config.Cgroups.Resources.PidsLimit = r.Pids.Limit
config.Cgroups.Resources.Unified = r.Unified
l := r.Pids.Limit
config.Cgroups.Resources.PidsLimit = &l

// Update Intel RDT
l3CacheSchema := context.String("l3-cache-schema")
Expand Down

0 comments on commit 3eb8ea5

Please sign in to comment.