Put chatty logs on a diet
While running OVN-IC on a scaling cluster, we identified some
chatty logs that we can do without. This PR addresses that.

Reference build that generated these numbers:
the 4.14.0-0.nightly-2023-07-16-230237 ovn-kubernetes image (pullspec) came
from brew build ose-ovn-kubernetes-container-v4.14.0-202307141530.p0.ge7779d2.assembly.stream,
built from commit e7779d2.

Extracted from a 330 MB log file:

kube.go:484                130M
services_controller.go:416  37M
kube.go:501                 33M <-- also among longest lines
services_controller.go:537  26M
services_controller.go:326  26M
services_controller.go:433  24M
services_controller.go:322  23M
kube.go:462                 22M
client.go:783                6.9M <-- also among longest lines
obj_retry.go:623             1.5M

See: https://issues.redhat.com/browse/SDN-3507?focusedId=22659623&page=com.atlassian.jira.plugin.system.issuetabpanels%3Acomment-tabpanel#comment-22659623

Signed-off-by: Flavio Fernandes <flaviof@redhat.com>
(cherry picked from commit 94f0783)
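The commit message does not say how the per-callsite totals above were produced. A minimal sketch of one way to compute them from a klog-formatted log file; the program, its output format, and the regex are illustrative assumptions, not something from this commit:

// logvolume.go: sum bytes logged per klog callsite ("file.go:line").
// Illustrative sketch only; not part of this commit.
package main

import (
    "bufio"
    "fmt"
    "os"
    "regexp"
    "sort"
)

func main() {
    // klog header lines look like:
    // I0716 12:34:56.789012   12345 kube.go:484] message...
    callsite := regexp.MustCompile(`\s([\w./-]+\.go:\d+)\]`)

    f, err := os.Open(os.Args[1])
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    defer f.Close()

    bytesPer := map[string]int{}
    sc := bufio.NewScanner(f)
    sc.Buffer(make([]byte, 0, 64*1024), 4*1024*1024) // some log lines are very long
    for sc.Scan() {
        if m := callsite.FindStringSubmatch(sc.Text()); m != nil {
            bytesPer[m[1]] += len(sc.Text()) + 1 // +1 for the newline
        }
    }

    // Print the top ten offenders, largest first.
    sites := make([]string, 0, len(bytesPer))
    for s := range bytesPer {
        sites = append(sites, s)
    }
    sort.Slice(sites, func(i, j int) bool { return bytesPer[sites[i]] > bytesPer[sites[j]] })
    for i, s := range sites {
        if i == 10 {
            break
        }
        fmt.Printf("%-28s %6.1fM\n", s, float64(bytesPer[s])/(1<<20))
    }
}

Running something like "go run logvolume.go ovnkube.log" would print a table in the shape of the one above.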
flavio-fernandes authored and dcbw committed Jul 21, 2023
1 parent 8acfdcb commit baab8dd
Showing 4 changed files with 10 additions and 9 deletions.
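Every hunk below applies the same one-line fix: promote a chatty call site to klog.V(5), which is silent unless the component runs with -v=5 or higher. A minimal sketch of the gating behavior, assuming the k8s.io/klog/v2 package that these files already import (the -v value chosen here is illustrative):

package main

import (
    "flag"

    "k8s.io/klog/v2"
)

func main() {
    klog.InitFlags(nil) // registers -v, -logtostderr, etc. on flag.CommandLine
    flag.Set("v", "2")  // a typical production verbosity, below 5
    flag.Parse()

    klog.Infof("always printed, regardless of -v")      // what the chattiest sites did
    klog.V(2).Infof("printed when -v >= 2")
    klog.V(5).Infof("debug only: printed when -v >= 5") // what they do after this commit
    klog.Flush()
}

Nothing is deleted: anyone debugging can still recover these messages by raising the verbosity flag to 5.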
10 changes: 5 additions & 5 deletions go-controller/pkg/ovn/controller/services/services_controller.go
@@ -319,11 +319,11 @@ func (c *Controller) syncService(key string) error {
 	if err != nil {
 		return err
 	}
-	klog.Infof("Processing sync for service %s/%s", namespace, name)
+	klog.V(5).Infof("Processing sync for service %s/%s", namespace, name)
 	metrics.MetricSyncServiceCount.Inc()

 	defer func() {
-		klog.V(4).Infof("Finished syncing service %s on namespace %s : %v", name, namespace, time.Since(startTime))
+		klog.V(5).Infof("Finished syncing service %s on namespace %s : %v", name, namespace, time.Since(startTime))
 		metrics.MetricSyncServiceLatency.Observe(time.Since(startTime).Seconds())
 	}()

@@ -413,7 +413,7 @@ func (c *Controller) syncService(key string) error {
 	klog.V(5).Infof("Built service %s cluster-wide LB %#v", key, clusterLBs)
 	klog.V(5).Infof("Built service %s per-node LB %#v", key, perNodeLBs)
 	klog.V(5).Infof("Built service %s template LB %#v", key, templateLBs)
-	klog.V(3).Infof("Service %s has %d cluster-wide, %d per-node configs, %d template configs, making %d (cluster) %d (per node) and %d (template) load balancers",
+	klog.V(5).Infof("Service %s has %d cluster-wide, %d per-node configs, %d template configs, making %d (cluster) %d (per node) and %d (template) load balancers",
 		key, len(clusterConfigs), len(perNodeConfigs), len(templateConfigs),
 		len(clusterLBs), len(perNodeLBs), len(templateLBs))
 	lbs := append(clusterLBs, templateLBs...)
@@ -430,7 +430,7 @@ func (c *Controller) syncService(key string) error {
 	c.alreadyAppliedRWLock.RUnlock()

 	if alreadyAppliedKeyExists && LoadBalancersEqualNoUUID(existingLBs, lbs) {
-		klog.V(3).Infof("Skipping no-op change for service %s", key)
+		klog.V(5).Infof("Skipping no-op change for service %s", key)
 	} else {
 		klog.V(5).Infof("Services do not match, existing lbs: %#v, built lbs: %#v", existingLBs, lbs)
 		// Actually apply load-balancers to OVN.
@@ -534,7 +534,7 @@ func (c *Controller) onServiceAdd(obj interface{}) {
 		utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err))
 		return
 	}
-	klog.V(4).Infof("Adding service %s", key)
+	klog.V(5).Infof("Adding service %s", key)
 	service := obj.(*v1.Service)
 	metrics.GetConfigDurationRecorder().Start("service", service.Namespace, service.Name)
 	c.queue.Add(key)
2 changes: 1 addition & 1 deletion go-controller/pkg/retry/obj_retry.go
@@ -620,7 +620,7 @@ func (r *RetryFramework) WatchResourceFiltered(namespaceForFilteredHandler string,
 			if found {
 				existingCacheEntry = retryEntryOrNil.config
 			}
-			klog.Infof("Deleting old %s of type %s during update", oldKey, r.ResourceHandler.ObjType)
+			klog.V(5).Infof("Deleting old %s of type %s during update", oldKey, r.ResourceHandler.ObjType)
 			if err := r.ResourceHandler.DeleteResource(old, existingCacheEntry); err != nil {
 				klog.Errorf("Failed to delete %s %s, during update: %v",
 					r.ResourceHandler.ObjType, oldKey, err)
6 changes: 3 additions & 3 deletions go-controller/pkg/util/kube.go
@@ -459,7 +459,7 @@ func GetLbEndpoints(slices []*discovery.EndpointSlice, svcPort kapi.ServicePort,
 	}

 	for _, slice := range slices {
-		klog.V(4).Infof("Getting endpoints for slice %s/%s", slice.Namespace, slice.Name)
+		klog.V(5).Infof("Getting endpoints for slice %s/%s", slice.Namespace, slice.Name)

 		// build the list of valid endpoints in the slice
 		for _, port := range slice.Ports {
@@ -481,7 +481,7 @@ func GetLbEndpoints(slices []*discovery.EndpointSlice, svcPort kapi.ServicePort,
 			out.Port = *port.Port
 			ForEachEligibleEndpoint(slice, service, func(endpoint discovery.Endpoint, shortcut *bool) {
 				for _, ip := range endpoint.Addresses {
-					klog.V(4).Infof("Adding slice %s endpoint: %v, port: %d", slice.Name, endpoint.Addresses, *port.Port)
+					klog.V(5).Infof("Adding slice %s endpoint: %v, port: %d", slice.Name, endpoint.Addresses, *port.Port)
 					ipStr := utilnet.ParseIPSloppy(ip).String()
 					switch slice.AddressType {
 					case discovery.AddressTypeIPv4:
@@ -498,7 +498,7 @@ func GetLbEndpoints(slices []*discovery.EndpointSlice, svcPort kapi.ServicePort,

 	out.V4IPs = v4ips.List()
 	out.V6IPs = v6ips.List()
-	klog.V(4).Infof("LB Endpoints for %s/%s are: %v / %v on port: %d",
+	klog.V(5).Infof("LB Endpoints for %s/%s are: %v / %v on port: %d",
 		slices[0].Namespace, slices[0].Labels[discovery.LabelServiceName],
 		out.V4IPs, out.V6IPs, out.Port)
 	return out
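One caveat with this pattern: klog.V(5).Infof skips the formatting when the level is disabled, but its arguments are still evaluated and boxed, so arguments that are expensive to compute cost CPU on every call regardless. A hedged sketch of the usual Enabled() guard for such cases; this commit does not add it, and expensiveDump is a made-up stand-in:

package main

import (
    "flag"
    "fmt"

    "k8s.io/klog/v2"
)

// expensiveDump stands in for building a big %#v-style string; hypothetical.
func expensiveDump() string {
    return fmt.Sprintf("%#v", map[string][]string{"svc": {"10.0.0.1", "10.0.0.2"}})
}

func main() {
    klog.InitFlags(nil)
    flag.Set("v", "2") // below 5: the guarded block is skipped entirely
    flag.Parse()

    // klog.V(5).Infof alone would skip the formatting, but expensiveDump()
    // would still run to produce the argument. The Enabled() guard
    // avoids that work when the level is off.
    if klogV := klog.V(5); klogV.Enabled() {
        klogV.Infof("state dump: %s", expensiveDump())
    }
    klog.Flush()
}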

Some generated files are not rendered by default.
