Promote PidLimits to GA #94140

Merged
merged 1 commit on Sep 9, 2020
16 changes: 7 additions & 9 deletions cmd/kubelet/app/server.go
@@ -1248,16 +1248,14 @@ func parseResourceList(m map[string]string) (v1.ResourceList, error) {
 		switch v1.ResourceName(k) {
 		// CPU, memory, local storage, and PID resources are supported.
 		case v1.ResourceCPU, v1.ResourceMemory, v1.ResourceEphemeralStorage, pidlimit.PIDs:
-			if v1.ResourceName(k) != pidlimit.PIDs || utilfeature.DefaultFeatureGate.Enabled(features.SupportNodePidsLimit) {
-				q, err := resource.ParseQuantity(v)
-				if err != nil {
-					return nil, err
-				}
-				if q.Sign() == -1 {
-					return nil, fmt.Errorf("resource quantity for %q cannot be negative: %v", k, v)
-				}
-				rl[v1.ResourceName(k)] = q
+			q, err := resource.ParseQuantity(v)
+			if err != nil {
+				return nil, err
 			}
+			if q.Sign() == -1 {
+				return nil, fmt.Errorf("resource quantity for %q cannot be negative: %v", k, v)
+			}
+			rl[v1.ResourceName(k)] = q
 		default:
 			return nil, fmt.Errorf("cannot reserve %q resource", k)
 		}
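For context: the effect of this hunk is that a reservation such as --system-reserved=pid=1000 now takes the same parse-and-validate path as cpu and memory, with no feature-gate branch. A minimal stdlib-only sketch of that flow, with strconv standing in for resource.ParseQuantity and all names being illustrative stand-ins rather than the kubelet's actual API:

```go
package main

import (
	"fmt"
	"strconv"
)

// parseReserved mirrors the post-PR behavior: every supported key,
// including "pid", goes through the same parse-and-validate path,
// with no feature-gate branch.
func parseReserved(m map[string]string) (map[string]int64, error) {
	rl := map[string]int64{}
	for k, v := range m {
		switch k {
		case "cpu", "memory", "ephemeral-storage", "pid":
			q, err := strconv.ParseInt(v, 10, 64)
			if err != nil {
				return nil, err
			}
			if q < 0 {
				return nil, fmt.Errorf("resource quantity for %q cannot be negative: %v", k, v)
			}
			rl[k] = q
		default:
			return nil, fmt.Errorf("cannot reserve %q resource", k)
		}
	}
	return rl, nil
}

func main() {
	rl, err := parseReserved(map[string]string{"pid": "1000"})
	fmt.Println(rl, err) // map[pid:1000] <nil>
}
```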
8 changes: 5 additions & 3 deletions pkg/features/kube_features.go
@@ -203,6 +203,7 @@ const (
 	// owner: @dims, @derekwaynecarr
 	// alpha: v1.10
 	// beta: v1.14
+	// GA: v1.20
 	//
 	// Implement support for limiting pids in pods
 	SupportPodPidsLimit featuregate.Feature = "SupportPodPidsLimit"
@@ -447,8 +448,9 @@ const (
 	// a volume in a Pod.
 	ConfigurableFSGroupPolicy featuregate.Feature = "ConfigurableFSGroupPolicy"
 
-	// owner: @RobertKrawitz
+	// owner: @RobertKrawitz, @derekwaynecarr
 	// beta: v1.15
+	// GA: v1.20
 	//
 	// Implement support for limiting pids in nodes
 	SupportNodePidsLimit featuregate.Feature = "SupportNodePidsLimit"
@@ -680,8 +682,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
 	BlockVolume:                  {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.20
 	StorageObjectInUseProtection: {Default: true, PreRelease: featuregate.GA},
 	SupportIPVSProxyMode:         {Default: true, PreRelease: featuregate.GA},
-	SupportPodPidsLimit:          {Default: true, PreRelease: featuregate.Beta},
-	SupportNodePidsLimit:         {Default: true, PreRelease: featuregate.Beta},
+	SupportPodPidsLimit:          {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.21
+	SupportNodePidsLimit:         {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.21
 	HyperVContainer:              {Default: false, PreRelease: featuregate.Alpha},
 	TokenRequest:                 {Default: true, PreRelease: featuregate.Beta},
 	TokenRequestProjection:       {Default: true, PreRelease: featuregate.Beta},
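For context on the GA + LockToDefault pattern used above: a GA-locked gate stays registered for one more release so existing --feature-gates flags don't break kubelet startup, but it can no longer be switched off. A minimal sketch using k8s.io/component-base/featuregate (the gate name is reused here purely for illustration):

```go
package main

import (
	"fmt"

	"k8s.io/component-base/featuregate"
)

func main() {
	gate := featuregate.NewFeatureGate()
	// The promotion pattern from this diff: GA, default-on, locked.
	if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		"SupportPodPidsLimit": {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
	}); err != nil {
		panic(err)
	}

	fmt.Println(gate.Enabled("SupportPodPidsLimit")) // true

	// Re-asserting the default is still accepted, so existing
	// --feature-gates=SupportPodPidsLimit=true flags keep working...
	fmt.Println(gate.Set("SupportPodPidsLimit=true")) // <nil>
	// ...but flipping a locked gate away from its default is rejected.
	fmt.Println(gate.Set("SupportPodPidsLimit=false")) // non-nil error
}
```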
27 changes: 7 additions & 20 deletions pkg/kubelet/cm/cgroup_manager_linux.go
@@ -36,8 +36,6 @@ import (
 
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
-	kubefeatures "k8s.io/kubernetes/pkg/features"
 	cmutil "k8s.io/kubernetes/pkg/kubelet/cm/util"
 	"k8s.io/kubernetes/pkg/kubelet/metrics"
 )
@@ -275,11 +273,8 @@ func (m *cgroupManagerImpl) Exists(name CgroupName) bool {
 	// scoped to the set control groups it understands. this is being discussed
 	// in https://github.com/opencontainers/runc/issues/1440
 	// once resolved, we can remove this code.
-	whitelistControllers := sets.NewString("cpu", "cpuacct", "cpuset", "memory", "systemd")
+	whitelistControllers := sets.NewString("cpu", "cpuacct", "cpuset", "memory", "systemd", "pids")
 
-	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportPodPidsLimit) || utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportNodePidsLimit) {
-		whitelistControllers.Insert("pids")
-	}
 	if _, ok := m.subsystems.MountPoints["hugetlb"]; ok {
 		whitelistControllers.Insert("hugetlb")
 	}
@@ -352,13 +347,10 @@ func getSupportedSubsystems() map[subsystem]bool {
 	supportedSubsystems := map[subsystem]bool{
 		&cgroupfs.MemoryGroup{}: true,
 		&cgroupfs.CpuGroup{}:    true,
-		&cgroupfs.PidsGroup{}:   false,
+		&cgroupfs.PidsGroup{}:   true,
 	}
 	// not all hosts support hugetlb cgroup, and in the absent of hugetlb, we will fail silently by reporting no capacity.
 	supportedSubsystems[&cgroupfs.HugetlbGroup{}] = false
-	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportPodPidsLimit) || utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportNodePidsLimit) {
-		supportedSubsystems[&cgroupfs.PidsGroup{}] = true
-	}
 	return supportedSubsystems
 }

@@ -417,10 +409,7 @@ var (
 // getSupportedUnifiedControllers returns a set of supported controllers when running on cgroup v2
 func getSupportedUnifiedControllers() sets.String {
 	// This is the set of controllers used by the Kubelet
-	supportedControllers := sets.NewString("cpu", "cpuset", "memory", "hugetlb")
-	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportPodPidsLimit) || utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportNodePidsLimit) {
-		supportedControllers.Insert("pids")
-	}
+	supportedControllers := sets.NewString("cpu", "cpuset", "memory", "hugetlb", "pids")
 	// Memoize the set of controllers that are present in the root cgroup
 	availableRootControllersOnce.Do(func() {
 		var err error
@@ -547,10 +536,8 @@ func (m *cgroupManagerImpl) toResources(resourceConfig *ResourceConfig) *libcont
 	if resourceConfig.CpuPeriod != nil {
 		resources.CpuPeriod = *resourceConfig.CpuPeriod
 	}
-	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportPodPidsLimit) || utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportNodePidsLimit) {
-		if resourceConfig.PidsLimit != nil {
-			resources.PidsLimit = *resourceConfig.PidsLimit
-		}
+	if resourceConfig.PidsLimit != nil {
+		resources.PidsLimit = *resourceConfig.PidsLimit
 	}
 	// if huge pages are enabled, we set them in libcontainer
 	// for each page size enumerated, set that value
@@ -608,7 +595,7 @@ func (m *cgroupManagerImpl) Update(cgroupConfig *CgroupConfig) error {
 		updateSystemdCgroupInfo(libcontainerCgroupConfig, cgroupConfig.Name)
 	}
 
-	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportPodPidsLimit) && cgroupConfig.ResourceParameters != nil && cgroupConfig.ResourceParameters.PidsLimit != nil {
+	if cgroupConfig.ResourceParameters != nil && cgroupConfig.ResourceParameters.PidsLimit != nil {
 		libcontainerCgroupConfig.PidsLimit = *cgroupConfig.ResourceParameters.PidsLimit
 	}
 
@@ -648,7 +635,7 @@ func (m *cgroupManagerImpl) Create(cgroupConfig *CgroupConfig) error {
 		}
 	}
 
-	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportPodPidsLimit) && cgroupConfig.ResourceParameters != nil && cgroupConfig.ResourceParameters.PidsLimit != nil {
+	if cgroupConfig.ResourceParameters != nil && cgroupConfig.ResourceParameters.PidsLimit != nil {
 		libcontainerCgroupConfig.PidsLimit = *cgroupConfig.ResourceParameters.PidsLimit
 	}
 
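What the now-unconditional pids wiring amounts to at the cgroup level: libcontainer writes the configured limit into the pids controller's pids.max file. A rough stdlib sketch under a cgroup v1 layout (the mount point and pod cgroup name below are illustrative, not paths the kubelet hardcodes):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// setPidsMax writes a limit to the pids controller, which is what
// libcontainer ends up doing with resources.PidsLimit above.
func setPidsMax(cgroup string, limit int64) error {
	path := filepath.Join("/sys/fs/cgroup/pids", cgroup, "pids.max")
	return os.WriteFile(path, []byte(fmt.Sprintf("%d\n", limit)), 0o644)
}

func main() {
	if err := setPidsMax("kubepods/pod-example", 1024); err != nil {
		fmt.Fprintln(os.Stderr, err) // needs root and an existing cgroup
	}
}
```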
4 changes: 1 addition & 3 deletions pkg/kubelet/cm/pod_container_manager_linux.go
@@ -26,10 +26,8 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/klog/v2"
 	v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
-	kubefeatures "k8s.io/kubernetes/pkg/features"
 )
 
 const (
@@ -86,7 +84,7 @@ func (m *podContainerManagerImpl) EnsureExists(pod *v1.Pod) error {
 		Name:               podContainerName,
 		ResourceParameters: ResourceConfigForPod(pod, m.enforceCPULimits, m.cpuCFSQuotaPeriod),
 	}
-	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportPodPidsLimit) && m.podPidsLimit > 0 {
+	if m.podPidsLimit > 0 {
 		containerConfig.ResourceParameters.PidsLimit = &m.podPidsLimit
 	}
 	if err := m.cgroupManager.Create(containerConfig); err != nil {
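The m.podPidsLimit value guarded here comes from the kubelet configuration (the podPidsLimit field, or the --pod-max-pids flag). A minimal sketch of setting it via the v1beta1 config API, with the value being an example only:

```go
package main

import (
	"fmt"

	kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
)

func main() {
	limit := int64(1024) // example value, matching the e2e test below
	cfg := kubeletconfigv1beta1.KubeletConfiguration{
		PodPidsLimit: &limit,
	}
	// Post-GA, no FeatureGates entry is needed alongside this field.
	fmt.Println(*cfg.PodPidsLimit)
}
```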
3 changes: 1 addition & 2 deletions test/e2e_node/pids_test.go
@@ -118,14 +118,13 @@ func runPodPidsLimitTests(f *framework.Framework) {
 }
 
 // Serial because the test updates kubelet configuration.
-var _ = SIGDescribe("PodPidsLimit [Serial] [Feature:SupportPodPidsLimit][NodeFeature:SupportPodPidsLimit]", func() {
+var _ = SIGDescribe("PodPidsLimit [Serial]", func() {
 	f := framework.NewDefaultFramework("pids-limit-test")
 	ginkgo.Context("With config updated with pids feature enabled", func() {
 		tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
 			if initialConfig.FeatureGates == nil {
 				initialConfig.FeatureGates = make(map[string]bool)
 			}
-			initialConfig.FeatureGates["SupportPodPidsLimit"] = true
 			initialConfig.PodPidsLimit = int64(1024)
 		})
 		runPodPidsLimitTests(f)
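With the gate gone, the test only needs to set PodPidsLimit, and what it ultimately verifies is that the pod's pids cgroup carries that value. A rough sketch of such a check under a cgroup v1 layout (the mount point and pod cgroup name are illustrative, not what the framework computes):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// readPidsMax reads the effective limit from a pod's pids cgroup,
// essentially the property a pod-pids-limit test asserts after the
// kubelet applies PodPidsLimit.
func readPidsMax(cgroup string) (string, error) {
	b, err := os.ReadFile(filepath.Join("/sys/fs/cgroup/pids", cgroup, "pids.max"))
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(b)), nil
}

func main() {
	v, err := readPidsMax("kubepods/pod-example")
	fmt.Println(v, err) // expect "1024" under the configuration above
}
```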