Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

refactor: use go-logr/logr with log/slog backend #1791

Closed
wants to merge 9 commits into from
6 changes: 3 additions & 3 deletions api/v1alpha1/event.go
Original file line number Diff line number Diff line change
Expand Up @@ -102,23 +102,23 @@ func NewPromotionEventAnnotations(
if len(f.Commits) > 0 {
data, err := json.Marshal(f.Commits)
if err != nil {
log.WithError(err).Error("marshal freight commits in JSON")
log.Error(err, "marshal freight commits in JSON")
} else {
annotations[AnnotationKeyEventFreightCommits] = string(data)
}
}
if len(f.Images) > 0 {
data, err := json.Marshal(f.Images)
if err != nil {
log.WithError(err).Error("marshal freight images in JSON")
log.Error(err, "marshal freight images in JSON")
} else {
annotations[AnnotationKeyEventFreightImages] = string(data)
}
}
if len(f.Charts) > 0 {
data, err := json.Marshal(f.Charts)
if err != nil {
log.WithError(err).Error("marshal freight charts in JSON")
log.Error(err, "marshal freight charts in JSON")
} else {
annotations[AnnotationKeyEventFreightCharts] = string(data)
}
Expand Down
29 changes: 16 additions & 13 deletions cmd/controlplane/api.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ import (
"fmt"
"net"

log "github.com/sirupsen/logrus"
"github.com/go-logr/logr"
"github.com/spf13/cobra"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
Expand All @@ -22,6 +22,7 @@ import (
"github.com/akuity/kargo/internal/api/kubernetes"
rollouts "github.com/akuity/kargo/internal/controller/rollouts/api/v1alpha1"
"github.com/akuity/kargo/internal/kubeclient"
"github.com/akuity/kargo/internal/logging"
"github.com/akuity/kargo/internal/os"
versionpkg "github.com/akuity/kargo/internal/version"
)
Expand All @@ -32,12 +33,12 @@ type apiOptions struct {
Host string
Port string

Logger *log.Logger
Logger logr.Logger
}

func newAPICommand() *cobra.Command {
cmdOpts := &apiOptions{
Logger: log.StandardLogger(),
Logger: logging.DefaultLogger(),
}

cmd := &cobra.Command{
Expand All @@ -64,10 +65,11 @@ func (o *apiOptions) complete() {

func (o *apiOptions) run(ctx context.Context) error {
version := versionpkg.GetVersion()
o.Logger.WithFields(log.Fields{
"version": version.Version,
"commit": version.GitCommit,
}).Info("Starting Kargo API Server")
o.Logger.Info(
"Starting Kargo API Server",
"version", version.Version,
"commit", version.GitCommit,
)

cfg := config.ServerConfigFromEnv()

Expand All @@ -84,7 +86,7 @@ func (o *apiOptions) run(ctx context.Context) error {
case !cfg.RolloutsIntegrationEnabled:
o.Logger.Info("Argo Rollouts integration is disabled")
case !argoRolloutsExists(ctx, clientCfg):
o.Logger.Warn(
o.Logger.Info(
krancour marked this conversation as resolved.
Show resolved Hide resolved
"Argo Rollouts integration was enabled, but no Argo Rollouts " +
"CRDs were found. Proceeding without Argo Rollouts integration.",
)
Expand All @@ -97,11 +99,12 @@ func (o *apiOptions) run(ctx context.Context) error {
o.Logger.Info("admin account is enabled")
}
if cfg.OIDCConfig != nil {
o.Logger.WithFields(log.Fields{
"issuerURL": cfg.OIDCConfig.IssuerURL,
"clientID": cfg.OIDCConfig.ClientID,
"cliClientID": cfg.OIDCConfig.CLIClientID,
}).Info("SSO via OpenID Connect is enabled")
o.Logger.Info(
"SSO via OpenID Connect is enabled",
"issuerURL", cfg.OIDCConfig.IssuerURL,
"clientID", cfg.OIDCConfig.ClientID,
"cliClientID", cfg.OIDCConfig.CLIClientID,
)
}

srv := api.NewServer(cfg, kubeClient, internalClient, recorder)
Expand Down
22 changes: 10 additions & 12 deletions cmd/controlplane/controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ import (
"fmt"
"sync"

log "github.com/sirupsen/logrus"
"github.com/go-logr/logr"
"github.com/spf13/cobra"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
Expand All @@ -26,6 +26,7 @@ import (
"github.com/akuity/kargo/internal/controller/stages"
"github.com/akuity/kargo/internal/controller/warehouses"
"github.com/akuity/kargo/internal/credentials"
"github.com/akuity/kargo/internal/logging"
"github.com/akuity/kargo/internal/os"
"github.com/akuity/kargo/internal/types"
versionpkg "github.com/akuity/kargo/internal/version"
Expand All @@ -41,12 +42,12 @@ type controllerOptions struct {
ArgoCDKubeConfig string
ArgoCDNamespaceOnly bool

Logger *log.Logger
Logger logr.Logger
}

func newControllerCommand() *cobra.Command {
cmdOpts := &controllerOptions{
Logger: log.StandardLogger(),
Logger: logging.DefaultLogger(),
}

cmd := &cobra.Command{
Expand Down Expand Up @@ -75,14 +76,11 @@ func (o *controllerOptions) complete() {
func (o *controllerOptions) run(ctx context.Context) error {
version := versionpkg.GetVersion()

startupLogEntry := o.Logger.WithFields(log.Fields{
"version": version.Version,
"commit": version.GitCommit,
})
keyVals := []any{"version", version.Version, "commit", version.GitCommit}
if o.ShardName != "" {
startupLogEntry = startupLogEntry.WithField("shard", o.ShardName)
keyVals = append(keyVals, "shard", o.ShardName)
}
startupLogEntry.Info("Starting Kargo Controller")
o.Logger.Info("Starting Kargo Controller")

promotionsReconcilerCfg := promotions.ReconcilerConfigFromEnv()
stagesReconcilerCfg := stages.ReconcilerConfigFromEnv()
Expand Down Expand Up @@ -149,15 +147,15 @@ func (o *controllerOptions) setupKargoManager(
}
if stagesReconcilerCfg.RolloutsIntegrationEnabled {
if argoRolloutsExists(ctx, restCfg) {
log.Info("Argo Rollouts integration is enabled")
o.Logger.Info("Argo Rollouts integration is enabled")
if err = rollouts.AddToScheme(scheme); err != nil {
return nil, fmt.Errorf(
"error adding Argo Rollouts API to Kargo controller manager scheme: %w",
err,
)
}
} else {
log.Warn(
o.Logger.Info(
"Argo Rollouts integration was enabled, but no Argo Rollouts " +
"CRDs were found. Proceeding without Argo Rollouts integration.",
)
Expand Down Expand Up @@ -217,7 +215,7 @@ func (o *controllerOptions) setupArgoCDManager(ctx context.Context) (manager.Man
// Application resources in a single namespace, so we will use that
// namespace when attempting to determine if Argo CD CRDs are installed.
if !argoCDExists(ctx, restCfg, argocdNamespace) {
o.Logger.Warn(
o.Logger.Info(
"Argo CD integration was enabled, but no Argo CD CRDs were found. " +
"Proceeding without Argo CD integration.",
)
Expand Down
16 changes: 9 additions & 7 deletions cmd/controlplane/garbage_collector.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ import (
"errors"
"fmt"

log "github.com/sirupsen/logrus"
"github.com/go-logr/logr"
"github.com/spf13/cobra"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
Expand All @@ -17,19 +17,20 @@ import (
"github.com/akuity/kargo/internal/api/kubernetes"
"github.com/akuity/kargo/internal/garbage"
"github.com/akuity/kargo/internal/kubeclient"
"github.com/akuity/kargo/internal/logging"
"github.com/akuity/kargo/internal/os"
versionpkg "github.com/akuity/kargo/internal/version"
)

type garbageCollectorOptions struct {
KubeConfig string

Logger *log.Logger
Logger logr.Logger
}

func newGarbageCollectorCommand() *cobra.Command {
cmdOpts := &garbageCollectorOptions{
Logger: log.StandardLogger(),
Logger: logging.DefaultLogger(),
}

cmd := &cobra.Command{
Expand All @@ -54,10 +55,11 @@ func (o *garbageCollectorOptions) complete() {
func (o *garbageCollectorOptions) run(ctx context.Context) error {
version := versionpkg.GetVersion()

o.Logger.WithFields(log.Fields{
"version": version.Version,
"commit": version.GitCommit,
}).Info("Starting Kargo Garbage Collector")
o.Logger.Info(
"Starting Kargo Garbage Collector",
"version", version.Version,
"commit", version.GitCommit,
)

mgr, err := o.setupManager(ctx)
if err != nil {
Expand Down
2 changes: 1 addition & 1 deletion cmd/controlplane/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ import (
func main() {
ctx := signals.SetupSignalHandler()
if err := Execute(ctx); err != nil {
logging.LoggerFromContext(ctx).Error(err)
logging.LoggerFromContext(ctx).Error(err, "couldn't execute command")
os.Exit(1)
}
}
16 changes: 9 additions & 7 deletions cmd/controlplane/management_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ import (
"context"
"fmt"

log "github.com/sirupsen/logrus"
"github.com/go-logr/logr"
"github.com/spf13/cobra"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
Expand All @@ -17,19 +17,20 @@ import (
"github.com/akuity/kargo/internal/api/kubernetes"
"github.com/akuity/kargo/internal/controller/management/namespaces"
"github.com/akuity/kargo/internal/controller/management/projects"
"github.com/akuity/kargo/internal/logging"
"github.com/akuity/kargo/internal/os"
versionpkg "github.com/akuity/kargo/internal/version"
)

type managementControllerOptions struct {
KubeConfig string

Logger *log.Logger
Logger logr.Logger
}

func newManagementControllerCommand() *cobra.Command {
cmdOpts := &managementControllerOptions{
Logger: log.StandardLogger(),
Logger: logging.DefaultLogger(),
}

cmd := &cobra.Command{
Expand All @@ -54,10 +55,11 @@ func (o *managementControllerOptions) complete() {
func (o *managementControllerOptions) run(ctx context.Context) error {
version := versionpkg.GetVersion()

o.Logger.WithFields(log.Fields{
"version": version.Version,
"commit": version.GitCommit,
}).Info("Starting Kargo Management Controller")
o.Logger.Info(
"Starting Kargo Management Controller",
"version", version.Version,
"commit", version.GitCommit,
)

kargoMgr, err := o.setupManager(ctx)
if err != nil {
Expand Down
16 changes: 9 additions & 7 deletions cmd/controlplane/webhooks.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ import (
"context"
"fmt"

log "github.com/sirupsen/logrus"
"github.com/go-logr/logr"
"github.com/spf13/cobra"
authzv1 "k8s.io/api/authorization/v1"
corev1 "k8s.io/api/core/v1"
Expand All @@ -17,6 +17,7 @@ import (
kargoapi "github.com/akuity/kargo/api/v1alpha1"
"github.com/akuity/kargo/internal/api/kubernetes"
"github.com/akuity/kargo/internal/kubeclient"
"github.com/akuity/kargo/internal/logging"
"github.com/akuity/kargo/internal/os"
versionpkg "github.com/akuity/kargo/internal/version"
libWebhook "github.com/akuity/kargo/internal/webhook"
Expand All @@ -30,12 +31,12 @@ import (
type webhooksServerOptions struct {
KubeConfig string

Logger *log.Logger
Logger logr.Logger
}

func newWebhooksServerCommand() *cobra.Command {
cmdOpts := &webhooksServerOptions{
Logger: log.StandardLogger(),
Logger: logging.DefaultLogger(),
}

cmd := &cobra.Command{
Expand All @@ -59,10 +60,11 @@ func (o *webhooksServerOptions) complete() {

func (o *webhooksServerOptions) run(ctx context.Context) error {
version := versionpkg.GetVersion()
o.Logger.WithFields(log.Fields{
"version": version.Version,
"commit": version.GitCommit,
}).Info("Starting Kargo Webhooks Server")
o.Logger.Info(
"Starting Kargo Webhooks Server",
"version", version.Version,
"commit", version.GitCommit,
)

webhookCfg := libWebhook.ConfigFromEnv()

Expand Down
9 changes: 5 additions & 4 deletions go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,6 @@ require (
github.com/Masterminds/semver/v3 v3.2.1
github.com/adrg/xdg v0.4.0
github.com/bacongobbler/browser v1.1.0
github.com/bombsimon/logrusr/v4 v4.1.0
github.com/coreos/go-oidc/v3 v3.10.0
github.com/distribution/distribution/v3 v3.0.0-20230722181636-7b502560cad4
github.com/evanphx/json-patch/v5 v5.9.0
Expand All @@ -28,7 +27,6 @@ require (
github.com/opencontainers/image-spec v1.1.0
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/rs/cors v1.10.1
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.8.0
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.9.0
Expand Down Expand Up @@ -56,7 +54,10 @@ require (
sigs.k8s.io/yaml v1.4.0
)

require github.com/go-jose/go-jose/v4 v4.0.1 // indirect
require (
github.com/go-jose/go-jose/v4 v4.0.1 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
)

require (
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
Expand All @@ -82,7 +83,7 @@ require (
github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/logr v1.4.1
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
Expand Down
2 changes: 0 additions & 2 deletions go.sum
Original file line number Diff line number Diff line change
Expand Up @@ -34,8 +34,6 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bombsimon/logrusr/v4 v4.1.0 h1:uZNPbwusB0eUXlO8hIUwStE6Lr5bLN6IgYgG+75kuh4=
github.com/bombsimon/logrusr/v4 v4.1.0/go.mod h1:pjfHC5e59CvjTBIU3V3sGhFWFAnsnhOR03TRc6im0l8=
github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70=
github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng=
Expand Down
4 changes: 2 additions & 2 deletions internal/api/kubernetes/client.go
Original file line number Diff line number Diff line change
Expand Up @@ -578,15 +578,15 @@ func GetRestConfig(ctx context.Context, path string) (*rest.Config, error) {
// is empty, but will issue a warning that we can suppress by checking for
// that condition ourselves and calling rest.InClusterConfig() directly.
if path == "" {
logger.Debug("loading in-cluster REST config")
logger.V(1).Info("loading in-cluster REST config")
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I feel uncertain about anywhere where we see something like logger.V(1). Doesn't the call to V() change the verbosity level? Except for in the startup logs, (as mentioned here) we want to use the level specified by whoever installed Kargo. i.e. If obtained from logging.LoggerFromContext, it should already be configured to the correct verbosity.

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

As far as I understand it, based on the docs here logr doesn't provide named levels (and therefore an API like logr.Debug). So, logr.Info is equivalent to logr.V(0).Info at verbosity level 0.

If we, for example, wanted to map logr levels directly to slog.Level as defined here, the Info level would remain as logr.Info or logr.V(0).Info and Debug level would become logr.V(4).Info, since logr negates the verbosity level to match slog docs.

Because we customise names of the level keys here, logr.V(1).Info() is equivalent to a Debug log, with verbosity 1.

The call to V() does change the verbosity level of that particular log, but has nothing to do with slog.HandlerOptions.Level. If slog.HandlerOptions.Level is set to LevelInfo (slog.Level(0)), logs with V(1) effectively won't be printed.

Sorry, I'm finding this quite tricky to explain well. I tried to give some examples under the "Level names" heading in the PR description. Let me know if this message helps at all, or just makes everything more confusing.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🤦‍♂️ You're right. Although I'm comfortable with the notion of an abstraction over the underlying logger (mainly from my Java days), having not used logr before, I wasn't at all prepared for how wildly different the interface is from what I am accustomed to. My initial reaction is that needing to remember numeric levels as I code seems painful in comparison to .Debug() or .Warn(). Let me cozy up with the logr docs and try to wrap my head around why this is supposed to be better before commenting further.

Copy link
Member

@krancour krancour Apr 18, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@hiddeco pointed out that realistically, we'd use constants we defined ourselves for the levels.

I'm still left feeling that logger.V(logging.LevelDebug).Info(...) is all at once wordier and less expressive than logger.Debug(...).

I'm totally bought into the benefits of logr, but just cannot get over the interface.

Wondering what @jessesuen and @hiddeco might think about a thin wrapper around logr.Logger in our existing logging package. logging.DefaultLogger() and logging.LoggerFromContext(ctx) could be modified to return something that looks like this:

type Logger struct {
	*logr.Logger
}

func (l *Logger) Debug(msg string, keysAndValues ...any) {
	l.Logger.V(LevelDebug).Info(msg, keysAndValues...)
}

// Similar for trace, warn, error, etc.

func (l *Logger) WithValues(keysAndValues ...any) *Logger {
	newLogger := l.Logger.WithValues(keysAndValues...)
	return &Logger{Logger: &newLogger}
}

To be clear... I'm not proposing an "abstraction over an abstraction." I'm proposing fully taking advantage of the logr abstraction (i.e. we can still swap logging back ends no problem), but merely enhancing their interface without obscuring access in case we sometimes need to "drop-down" to it.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

On one hand, I feel averse of this idea because it does not allow you to log errors at debug level, which a real logr.Logger implementation does allow you to do.

I also think (and know based on experience), the problem can largely be worked around by using a modular approach in your application architecture. Allowing you to e.g. inject a logr.Logger instance at a predefined level, so that you have to think less about the level which you are logging at.

On the other hand, is the question how often you really have the desire to log an error at debug level, and isn't this approach really uncommon in our landscape.

Copy link
Member

@krancour krancour Apr 18, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

it does not allow you to log errors at debug level

Keep in mind, we wouldn't lose access to the underlying logr.Logger.

Allowing you to e.g. inject a logr.Logger instance at a predefined level

To be clear, my concern isn't so much about obtaining a logger that logs at a certain level of verbosity. My concern is the logr interface makes it harder to express the relative importance of an individual message.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

And lol at that link being almost exactly what I described.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't have a philosophical attachment to the principles of go-logr. A lot of the stuff in the go-logr FAQ is quite principled. I'm not a go-logr purist so we don't have to fully accept their interface as-is. The main use case I was concerned with was that we didn't have a way of setting a logger to sigs.k8s.io/controller-runtime/pkg/log, which wants a logr.Logger.

I'm fine with creating the wrapper that embeds logr.Logger, especially if it maintains our code readability while also addressing the initial use case. The underlying logr.Logger is always available if we wish to use the lower interface.

FWIW logger.Debug(...) is definitely preferable over logger.V(logging.LevelDebug).Info(...) and so if we had to switch all our logging statements to the 2nd version, that might be reason enough to create our wrapper.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks @jessesuen.

@bakunowski are you game for adding a wrapper like the one @hiddeco linked to in our existing logging package?

cfg, err := rest.InClusterConfig()
if err != nil {
return cfg, fmt.Errorf("error loading in-cluster REST config: %w", err)
}
return cfg, nil
}

logger.WithField("path", path).Debug("loading REST config from path")
logger.V(1).Info("loading REST config from path", "path", path)
cfg, err := clientcmd.BuildConfigFromFlags("", path)
if err != nil {
return cfg, fmt.Errorf("error loading REST config from %q: %w", path, err)
Expand Down