factory: strip Managed Fields to reduce memory usage
We don't care about them, so why cache them and use a ton
of memory?

Inspired by kubernetes/kubernetes#118455

Signed-off-by: Dan Williams <dcbw@redhat.com>
dcbw committed Nov 15, 2023
1 parent 4f50535 commit d9a7732
Showing 2 changed files with 30 additions and 5 deletions.
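
For orientation before the diff: client-go's informers.WithTransform registers a cache.TransformFunc that runs on every object before it is stored in the informer cache, so anything the function strips never occupies cache memory. Below is a minimal, self-contained sketch of that mechanism, not code from this commit; the fake client, pod name, and trim function are illustrative assumptions.

package main

import (
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// A pod that arrives carrying managedFields metadata, as it would
	// from a real apiserver.
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name:          "demo",
		Namespace:     "default",
		ManagedFields: []metav1.ManagedFieldsEntry{{Manager: "kubectl"}},
	}}
	client := fake.NewSimpleClientset(pod)

	// The transform runs before the object is stored, so the trimmed
	// managedFields never consume informer-cache memory.
	trim := func(obj interface{}) (interface{}, error) {
		if accessor, err := meta.Accessor(obj); err == nil {
			accessor.SetManagedFields(nil)
		}
		return obj, nil
	}

	factory := informers.NewSharedInformerFactoryWithOptions(
		client, 12*time.Hour, informers.WithTransform(trim))
	podInformer := factory.Core().V1().Pods()
	informer := podInformer.Informer() // instantiate before Start

	stopCh := make(chan struct{})
	defer close(stopCh)
	factory.Start(stopCh)
	cache.WaitForCacheSync(stopCh, informer.HasSynced)

	// The cached copy has been trimmed.
	cached, err := podInformer.Lister().Pods("default").Get("demo")
	if err == nil {
		fmt.Println("managedFields in cache:", cached.ManagedFields) // nil
	}
}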
28 changes: 26 additions & 2 deletions go-controller/pkg/factory/factory.go

@@ -57,6 +57,7 @@ import (
 	kapi "k8s.io/api/core/v1"
 	discovery "k8s.io/api/discovery/v1"
 	knet "k8s.io/api/networking/v1"
+	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/labels"
@@ -191,6 +192,29 @@ func NewMasterWatchFactory(ovnClientset *util.OVNMasterClientset) (*WatchFactory
 	return wf, nil
 }
 
+// Informer transform to trim object fields for memory efficiency.
+func informerObjectTrim(obj interface{}) (interface{}, error) {
+	if accessor, err := meta.Accessor(obj); err == nil {
+		accessor.SetManagedFields(nil)
+	}
+	if pod, ok := obj.(*kapi.Pod); ok {
+		pod.Spec.Volumes = []kapi.Volume{}
+		for i := range pod.Spec.Containers {
+			pod.Spec.Containers[i].Command = nil
+			pod.Spec.Containers[i].Args = nil
+			pod.Spec.Containers[i].Env = nil
+			pod.Spec.Containers[i].VolumeMounts = nil
+		}
+	} else if node, ok := obj.(*kapi.Node); ok {
+		node.Status.Images = nil
+		node.Status.VolumesInUse = nil
+		node.Status.VolumesAttached = nil
+		node.Status.Capacity = nil
+		node.Status.Allocatable = nil
+	}
+	return obj, nil
+}
+
 // NewOVNKubeControllerWatchFactory initializes a new watch factory for the ovnkube controller process
 func NewOVNKubeControllerWatchFactory(ovnClientset *util.OVNKubeControllerClientset) (*WatchFactory, error) {
 	// resync time is 12 hours, none of the resources being watched in ovn-kubernetes have
@@ -200,7 +224,7 @@ func NewOVNKubeControllerWatchFactory(ovnClientset *util.OVNKubeControllerClient
 	// the downside of making it tight (like 10 minutes) is needless spinning on all resources
 	// However, AddEventHandlerWithResyncPeriod can specify a per handler resync period
 	wf := &WatchFactory{
-		iFactory:   informerfactory.NewSharedInformerFactory(ovnClientset.KubeClient, resyncInterval),
+		iFactory:   informerfactory.NewSharedInformerFactoryWithOptions(ovnClientset.KubeClient, resyncInterval, informerfactory.WithTransform(informerObjectTrim)),
 		anpFactory: anpinformerfactory.NewSharedInformerFactory(ovnClientset.ANPClient, resyncInterval),
 		eipFactory: egressipinformerfactory.NewSharedInformerFactory(ovnClientset.EgressIPClient, resyncInterval),
 		efFactory:  egressfirewallinformerfactory.NewSharedInformerFactory(ovnClientset.EgressFirewallClient, resyncInterval),
@@ -427,7 +451,7 @@ func (wf *WatchFactory) Start() error {
 	// of the localPodSelector or figure out how to deal with selecting all pods everywhere.
 func NewNodeWatchFactory(ovnClientset *util.OVNNodeClientset, nodeName string) (*WatchFactory, error) {
 	wf := &WatchFactory{
-		iFactory:             informerfactory.NewSharedInformerFactory(ovnClientset.KubeClient, resyncInterval),
+		iFactory:             informerfactory.NewSharedInformerFactoryWithOptions(ovnClientset.KubeClient, resyncInterval, informerfactory.WithTransform(informerObjectTrim)),
 		egressServiceFactory: egressserviceinformerfactory.NewSharedInformerFactory(ovnClientset.EgressServiceClient, resyncInterval),
 		eipFactory:           egressipinformerfactory.NewSharedInformerFactory(ovnClientset.EgressIPClient, resyncInterval),
 		apbRouteFactory:      adminbasedpolicyinformerfactory.NewSharedInformerFactory(ovnClientset.AdminPolicyRouteClient, resyncInterval),
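
The factory-level option used above applies to every informer the factory creates; client-go also offers a per-informer hook, SharedInformer.SetTransform, which the factory option effectively invokes for each informer. A small sketch under that reading; the helper and its name are mine, not from this code.

package main

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/client-go/tools/cache"
)

// stripManagedFields (a hypothetical helper) attaches a managed-fields trim to
// a single informer via SetTransform. SetTransform returns an error if the
// informer has already started, so wire it up before Start.
func stripManagedFields(informer cache.SharedIndexInformer) error {
	return informer.SetTransform(func(obj interface{}) (interface{}, error) {
		if accessor, err := meta.Accessor(obj); err == nil {
			accessor.SetManagedFields(nil)
		}
		return obj, nil
	})
}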
7 changes: 4 additions & 3 deletions go-controller/pkg/factory/factory_test.go

@@ -2085,6 +2085,8 @@ var _ = Describe("Watch Factory Operations", func() {
 			})
 
 			pods = append(pods, pod)
+			podCopy := pod.DeepCopy()
+			podCopy2 := pod.DeepCopy()
 
 			// Pod doesn't pass filter; shouldn't be added
 			podWatch.Add(pod)
@@ -2093,16 +2095,15 @@
 			// Update pod to pass filter; should be treated as add. Need
 			// to deep-copy pod when modifying because it's a pointer all
 			// the way through when using FakeClient
-			podCopy := pod.DeepCopy()
 			podCopy.ObjectMeta.Labels["blah"] = "foobar"
 			pods = []*v1.Pod{podCopy}
 			equalPod = podCopy
 			podWatch.Modify(podCopy)
 			Eventually(c.getAdded, 2).Should(Equal(1))
 
 			// Update pod to fail filter; should be treated as delete
-			pod.ObjectMeta.Labels["blah"] = "baz"
-			podWatch.Modify(pod)
+			podCopy2.ObjectMeta.Labels["blah"] = "baz"
+			podWatch.Modify(podCopy2)
 			Eventually(c.getDeleted, 2).Should(Equal(1))
 			Consistently(c.getAdded, 2).Should(Equal(1))
 			Consistently(c.getUpdated, 2).Should(Equal(0))
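
The test tweak guards the pitfall its own comment names: with a FakeClient the object is a pointer all the way through, so mutating `pod` after handing it to the watcher (as the old lines did) would also rewrite state delivered earlier, presumably now trimmed in place by the new transform. Both copies are taken before any watch events for that reason. A hedged sketch of the safe pattern; the helper and its name are illustrative, not from the test.

package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/watch"
)

// modifyViaCopy (an illustrative helper) relabels a pod and delivers the
// change through a fake watcher without mutating the caller's object in
// place, since that same pointer may already live in an informer cache.
func modifyViaCopy(w *watch.FakeWatcher, pod *corev1.Pod, key, value string) *corev1.Pod {
	podCopy := pod.DeepCopy()
	if podCopy.Labels == nil {
		podCopy.Labels = map[string]string{}
	}
	podCopy.Labels[key] = value
	w.Modify(podCopy)
	return podCopy
}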
