
Commit

Make log statements easier to read
This is a cosmetic change that breaks up long log statements into multiple shorter lines.

Signed-off-by: yulng <wei.yang@daocloud.io>
Signed-off-by: Chris Tarazi <chris@isovalent.com>
yulng authored and ldelossa committed Jan 25, 2023
1 parent 15ff0dc commit ce152da
Showing 8 changed files with 70 additions and 27 deletions.
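
The pattern applied in every file below is the same: a logrus WithFields call whose fields were packed onto one long line is reformatted so that each field sits on its own line and the closing brace carries the level call. The following is a minimal, self-contained sketch of the before and after; the logger setup, field keys, and values are placeholders for illustration and are not taken from the changed files.

package main

import "github.com/sirupsen/logrus"

func main() {
	log := logrus.New()
	nodeName, namespace := "node-1", "kube-system" // placeholder values

	// Before: all fields crammed onto a single long line.
	log.WithFields(logrus.Fields{"nodeName": nodeName, "k8sNamespace": namespace}).
		Info("Cilium pod not running for node")

	// After: one field per line; the closing brace and the level call stay together.
	log.WithFields(logrus.Fields{
		"nodeName":     nodeName,
		"k8sNamespace": namespace,
	}).Info("Cilium pod not running for node")
}

Both forms emit identical log output; only the source layout changes.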
6 changes: 4 additions & 2 deletions daemon/cmd/ciliumendpoints.go
@@ -103,8 +103,10 @@ func (d *Daemon) deleteCiliumEndpoint(
// This may occur for various reasons:
// * Pod was restarted while Cilium was not running (likely prior to CNI conf being installed).
// * Local endpoint was deleted (i.e. due to reboot + temporary filesystem) and Cilium or the Pod were restarted.
log.WithFields(logrus.Fields{logfields.CEPName: cepName, logfields.K8sNamespace: cepNamespace}).
Info("Found stale ciliumendpoint for local pod that is not being managed, deleting.")
log.WithFields(logrus.Fields{
logfields.CEPName: cepName,
logfields.K8sNamespace: cepNamespace,
}).Info("Found stale ciliumendpoint for local pod that is not being managed, deleting.")
if err := ciliumClient.CiliumEndpoints(cepNamespace).Delete(ctx, cepName, metav1.DeleteOptions{
Preconditions: &metav1.Preconditions{
UID: cepUID,
8 changes: 6 additions & 2 deletions operator/watchers/node_taint.go
@@ -99,10 +99,14 @@ func checkAndMarkNode(c kubernetes.Interface, nodeGetter slimNodeGetter, nodeNam
if isCiliumPodRunning(node.GetName()) {
markNode(c, nodeGetter, node.GetName(), options)
} else {
log.WithFields(logrus.Fields{logfields.NodeName: node.GetName()}).Debug("Cilium pod not running for node")
log.WithFields(logrus.Fields{
logfields.NodeName: node.GetName(),
}).Debug("Cilium pod not running for node")
}
} else {
log.WithFields(logrus.Fields{logfields.NodeName: node.GetName()}).Debug("Node without taint and with CiliumIsUp condition")
log.WithFields(logrus.Fields{
logfields.NodeName: node.GetName(),
}).Debug("Node without taint and with CiliumIsUp condition")
}
return true
}
18 changes: 12 additions & 6 deletions pkg/bpf/bpffs_migrate.go
@@ -101,8 +101,10 @@ func repinMap(bpffsPath string, name string, spec *ebpf.MapSpec) error {

dest := file + bpffsPending

log.WithFields(logrus.Fields{logfields.BPFMapName: name, logfields.BPFMapPath: file}).
Infof("New version of map has different properties, re-pinning with '%s' suffix", bpffsPending)
log.WithFields(logrus.Fields{
logfields.BPFMapName: name,
logfields.BPFMapPath: file,
}).Infof("New version of map has different properties, re-pinning with '%s' suffix", bpffsPending)

// Atomically re-pin the map to its new path.
if err := pinned.Pin(dest); err != nil {
@@ -133,8 +135,10 @@ func finalizeMap(bpffsPath, name string, revert bool) error {
// Pending Map was found on bpffs and needs to be reverted.
if revert {
dest := filepath.Join(bpffsPath, name)
log.WithFields(logrus.Fields{logfields.BPFMapPath: dest, logfields.BPFMapName: name}).
Infof("Repinning without '%s' suffix after failed migration", bpffsPending)
log.WithFields(logrus.Fields{
logfields.BPFMapPath: dest,
logfields.BPFMapName: name,
}).Infof("Repinning without '%s' suffix after failed migration", bpffsPending)

// Atomically re-pin the map to its original path.
if err := pending.Pin(dest); err != nil {
@@ -144,8 +148,10 @@ return nil
return nil
}

log.WithFields(logrus.Fields{logfields.BPFMapPath: file, logfields.BPFMapName: name}).
Info("Unpinning map after successful recreation")
log.WithFields(logrus.Fields{
logfields.BPFMapPath: file,
logfields.BPFMapName: name,
}).Info("Unpinning map after successful recreation")

// Pending Map found on bpffs and its replacement was successfully loaded.
// Unpin the old map since it no longer needs to be interacted with from userspace.
5 changes: 4 additions & 1 deletion pkg/hubble/metrics/api/api.go
@@ -99,7 +99,10 @@ func NewHandlers(log logrus.FieldLogger, registry *prometheus.Registry, in []Nam
return nil, fmt.Errorf("unable to initialize metric '%s': %s", item.Name, err)
}

log.WithFields(logrus.Fields{"name": item.Name, "status": item.Handler.Status()}).Info("Configured metrics plugin")
log.WithFields(logrus.Fields{
"name": item.Name,
"status": item.Handler.Status(),
}).Info("Configured metrics plugin")
}
return &handlers, nil
}
14 changes: 10 additions & 4 deletions pkg/ipam/allocator/group.go
@@ -59,12 +59,18 @@ func (g *PoolGroupAllocator) ReserveAddresses(iterator AddressIterator) {
ip := net.ParseIP(ipString)
if ip != nil {
if err := g.Allocate(types.PoolID(poolID), ip); err != nil {
log.WithFields(logrus.Fields{"instance": instanceID, "interface": interfaceID, "ip": ipString}).
WithError(err).Warning("Unable to allocate IP in internal allocator")
log.WithFields(logrus.Fields{
"instance": instanceID,
"interface": interfaceID,
"ip": ipString,
}).WithError(err).Warning("Unable to allocate IP in internal allocator")
}
} else {
log.WithFields(logrus.Fields{"instance": instanceID, "interface": interfaceID, "ip": ipString}).
Warning("Unable to parse IP")
log.WithFields(logrus.Fields{
"instance": instanceID,
"interface": interfaceID,
"ip": ipString,
}).Warning("Unable to parse IP")
}
return nil
})
23 changes: 16 additions & 7 deletions pkg/ipcache/kvstore.go
@@ -236,7 +236,10 @@ restart:
}

if option.Config.Debug {
scopedLog = log.WithFields(logrus.Fields{"kvstore-event": event.Typ.String(), "key": event.Key})
scopedLog = log.WithFields(logrus.Fields{
"kvstore-event": event.Typ.String(),
"key": event.Key,
})
scopedLog.Debug("Received event")
}

@@ -271,8 +274,10 @@ restart:
var ipIDPair identity.IPIdentityPair
err := json.Unmarshal(event.Value, &ipIDPair)
if err != nil {
log.WithFields(logrus.Fields{"kvstore-event": event.Typ.String(), "key": event.Key}).
WithError(err).Error("Not adding entry to ip cache; error unmarshaling data from key-value store")
log.WithFields(logrus.Fields{
"kvstore-event": event.Typ.String(),
"key": event.Key,
}).WithError(err).Error("Not adding entry to ip cache; error unmarshaling data from key-value store")
continue
}
ip := ipIDPair.PrefixString()
@@ -292,8 +297,10 @@
for _, np := range ipIDPair.NamedPorts {
err = k8sMeta.NamedPorts.AddPort(np.Name, int(np.Port), np.Protocol)
if err != nil {
log.WithFields(logrus.Fields{"kvstore-event": event.Typ.String(), "key": event.Key}).
WithError(err).Error("Parsing named port failed")
log.WithFields(logrus.Fields{
"kvstore-event": event.Typ.String(),
"key": event.Key,
}).WithError(err).Error("Parsing named port failed")
}
}
}
@@ -325,8 +332,10 @@ restart:
// need to convert kvstore key to IP.
ipnet, isHost, err := keyToIPNet(event.Key)
if err != nil {
log.WithFields(logrus.Fields{"kvstore-event": event.Typ.String(), "key": event.Key}).
WithError(err).Error("Error parsing IP from key")
log.WithFields(logrus.Fields{
"kvstore-event": event.Typ.String(),
"key": event.Key,
}).WithError(err).Error("Error parsing IP from key")
continue
}
var ip string
5 changes: 4 additions & 1 deletion pkg/k8s/service.go
@@ -703,7 +703,10 @@ func CreateCustomDialer(b ServiceIPGetter, log *logrus.Entry) func(ctx context.C
log.Debug("Service not found in the service IP getter")
}
} else {
log.WithFields(logrus.Fields{"url-host": u.Host, "url": s}).Debug("Unable to parse etcd service URL into a service ID")
log.WithFields(logrus.Fields{
"url-host": u.Host,
"url": s,
}).Debug("Unable to parse etcd service URL into a service ID")
}
log.Debugf("custom dialer based on k8s service backend is dialing to %q", s)
} else {
18 changes: 14 additions & 4 deletions pkg/policy/selectorcache.go
@@ -993,10 +993,15 @@ func (sc *SelectorCache) UpdateIdentities(added, deleted cache.IdentityCache, wg
// prepopulated with all matching numeric identities.
for numericID := range deleted {
if old, exists := sc.idCache[numericID]; exists {
log.WithFields(logrus.Fields{logfields.Identity: numericID, logfields.Labels: old.lbls}).Debug("UpdateIdentities: Deleting identity")
log.WithFields(logrus.Fields{
logfields.Identity: numericID,
logfields.Labels: old.lbls,
}).Debug("UpdateIdentities: Deleting identity")
delete(sc.idCache, numericID)
} else {
log.WithFields(logrus.Fields{logfields.Identity: numericID}).Warning("UpdateIdentities: Skipping Delete of a non-existing identity")
log.WithFields(logrus.Fields{
logfields.Identity: numericID,
}).Warning("UpdateIdentities: Skipping Delete of a non-existing identity")
delete(deleted, numericID)
}
}
@@ -1007,7 +1012,9 @@
// sorted for the kv-store, so there should
// not be too many false negatives.
if lbls.Equals(old.lbls) {
log.WithFields(logrus.Fields{logfields.Identity: numericID}).Debug("UpdateIdentities: Skipping add of an existing identical identity")
log.WithFields(logrus.Fields{
logfields.Identity: numericID,
}).Debug("UpdateIdentities: Skipping add of an existing identical identity")
delete(added, numericID)
continue
}
@@ -1027,7 +1034,10 @@ func (sc *SelectorCache) UpdateIdentities(added, deleted cache.IdentityCache, wg
scopedLog.Warning(msg)
}
} else {
log.WithFields(logrus.Fields{logfields.Identity: numericID, logfields.Labels: lbls}).Debug("UpdateIdentities: Adding a new identity")
log.WithFields(logrus.Fields{
logfields.Identity: numericID,
logfields.Labels: lbls,
}).Debug("UpdateIdentities: Adding a new identity")
}
sc.idCache[numericID] = newIdentity(numericID, lbls)
}
