diff --git a/api/v1alpha1/event.go b/api/v1alpha1/event.go index 5405bfc28..86408a664 100644 --- a/api/v1alpha1/event.go +++ b/api/v1alpha1/event.go @@ -102,7 +102,7 @@ func NewPromotionEventAnnotations( if len(f.Commits) > 0 { data, err := json.Marshal(f.Commits) if err != nil { - log.WithError(err).Error("marshal freight commits in JSON") + log.Error(err, "marshal freight commits in JSON") } else { annotations[AnnotationKeyEventFreightCommits] = string(data) } @@ -110,7 +110,7 @@ func NewPromotionEventAnnotations( if len(f.Images) > 0 { data, err := json.Marshal(f.Images) if err != nil { - log.WithError(err).Error("marshal freight images in JSON") + log.Error(err, "marshal freight images in JSON") } else { annotations[AnnotationKeyEventFreightImages] = string(data) } @@ -118,7 +118,7 @@ func NewPromotionEventAnnotations( if len(f.Charts) > 0 { data, err := json.Marshal(f.Charts) if err != nil { - log.WithError(err).Error("marshal freight charts in JSON") + log.Error(err, "marshal freight charts in JSON") } else { annotations[AnnotationKeyEventFreightCharts] = string(data) } diff --git a/cmd/controlplane/api.go b/cmd/controlplane/api.go index ee7d8552c..02b8198db 100644 --- a/cmd/controlplane/api.go +++ b/cmd/controlplane/api.go @@ -5,7 +5,7 @@ import ( "fmt" "net" - log "github.com/sirupsen/logrus" + "github.com/go-logr/logr" "github.com/spf13/cobra" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -22,6 +22,7 @@ import ( "github.com/akuity/kargo/internal/api/kubernetes" rollouts "github.com/akuity/kargo/internal/controller/rollouts/api/v1alpha1" "github.com/akuity/kargo/internal/kubeclient" + "github.com/akuity/kargo/internal/logging" "github.com/akuity/kargo/internal/os" versionpkg "github.com/akuity/kargo/internal/version" ) @@ -32,12 +33,12 @@ type apiOptions struct { Host string Port string - Logger *log.Logger + Logger logr.Logger } func newAPICommand() *cobra.Command { cmdOpts := &apiOptions{ - Logger: log.StandardLogger(), + Logger: logging.DefaultLogger(), } cmd := &cobra.Command{ @@ -64,10 +65,11 @@ func (o *apiOptions) complete() { func (o *apiOptions) run(ctx context.Context) error { version := versionpkg.GetVersion() - o.Logger.WithFields(log.Fields{ - "version": version.Version, - "commit": version.GitCommit, - }).Info("Starting Kargo API Server") + o.Logger.Info( + "Starting Kargo API Server", + "version", version.Version, + "commit", version.GitCommit, + ) cfg := config.ServerConfigFromEnv() @@ -84,7 +86,7 @@ func (o *apiOptions) run(ctx context.Context) error { case !cfg.RolloutsIntegrationEnabled: o.Logger.Info("Argo Rollouts integration is disabled") case !argoRolloutsExists(ctx, clientCfg): - o.Logger.Warn( + o.Logger.Info( "Argo Rollouts integration was enabled, but no Argo Rollouts " + "CRDs were found. 
Proceeding without Argo Rollouts integration.", ) @@ -97,11 +99,12 @@ func (o *apiOptions) run(ctx context.Context) error { o.Logger.Info("admin account is enabled") } if cfg.OIDCConfig != nil { - o.Logger.WithFields(log.Fields{ - "issuerURL": cfg.OIDCConfig.IssuerURL, - "clientID": cfg.OIDCConfig.ClientID, - "cliClientID": cfg.OIDCConfig.CLIClientID, - }).Info("SSO via OpenID Connect is enabled") + o.Logger.Info( + "SSO via OpenID Connect is enabled", + "issuerURL", cfg.OIDCConfig.IssuerURL, + "clientID", cfg.OIDCConfig.ClientID, + "cliClientID", cfg.OIDCConfig.CLIClientID, + ) } srv := api.NewServer(cfg, kubeClient, internalClient, recorder) diff --git a/cmd/controlplane/controller.go b/cmd/controlplane/controller.go index 4d2d94850..1611a880c 100644 --- a/cmd/controlplane/controller.go +++ b/cmd/controlplane/controller.go @@ -5,7 +5,7 @@ import ( "fmt" "sync" - log "github.com/sirupsen/logrus" + "github.com/go-logr/logr" "github.com/spf13/cobra" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" @@ -26,6 +26,7 @@ import ( "github.com/akuity/kargo/internal/controller/stages" "github.com/akuity/kargo/internal/controller/warehouses" "github.com/akuity/kargo/internal/credentials" + "github.com/akuity/kargo/internal/logging" "github.com/akuity/kargo/internal/os" "github.com/akuity/kargo/internal/types" versionpkg "github.com/akuity/kargo/internal/version" @@ -41,12 +42,12 @@ type controllerOptions struct { ArgoCDKubeConfig string ArgoCDNamespaceOnly bool - Logger *log.Logger + Logger logr.Logger } func newControllerCommand() *cobra.Command { cmdOpts := &controllerOptions{ - Logger: log.StandardLogger(), + Logger: logging.DefaultLogger(), } cmd := &cobra.Command{ @@ -75,14 +76,11 @@ func (o *controllerOptions) complete() { func (o *controllerOptions) run(ctx context.Context) error { version := versionpkg.GetVersion() - startupLogEntry := o.Logger.WithFields(log.Fields{ - "version": version.Version, - "commit": version.GitCommit, - }) + keyVals := []any{"version", version.Version, "commit", version.GitCommit} if o.ShardName != "" { - startupLogEntry = startupLogEntry.WithField("shard", o.ShardName) + keyVals = append(keyVals, "shard", o.ShardName) } - startupLogEntry.Info("Starting Kargo Controller") + o.Logger.Info("Starting Kargo Controller", keyVals...) promotionsReconcilerCfg := promotions.ReconcilerConfigFromEnv() stagesReconcilerCfg := stages.ReconcilerConfigFromEnv() @@ -149,7 +147,7 @@ func (o *controllerOptions) setupKargoManager( } if stagesReconcilerCfg.RolloutsIntegrationEnabled { if argoRolloutsExists(ctx, restCfg) { - log.Info("Argo Rollouts integration is enabled") + o.Logger.Info("Argo Rollouts integration is enabled") if err = rollouts.AddToScheme(scheme); err != nil { return nil, fmt.Errorf( "error adding Argo Rollouts API to Kargo controller manager scheme: %w", @@ -157,7 +155,7 @@ ) } } else { - log.Warn( + o.Logger.Info( "Argo Rollouts integration was enabled, but no Argo Rollouts " + "CRDs were found. Proceeding without Argo Rollouts integration.", ) @@ -217,7 +215,7 @@ func (o *controllerOptions) setupArgoCDManager(ctx context.Context) (manager.Man // Application resources in a single namespace, so we will use that // namespace when attempting to determine if Argo CD CRDs are installed. if !argoCDExists(ctx, restCfg, argocdNamespace) { - o.Logger.Warn( + o.Logger.Info( "Argo CD integration was enabled, but no Argo CD CRDs were found. 
" + "Proceeding without Argo CD integration.", ) diff --git a/cmd/controlplane/garbage_collector.go b/cmd/controlplane/garbage_collector.go index d7625d3c2..8d08f035e 100644 --- a/cmd/controlplane/garbage_collector.go +++ b/cmd/controlplane/garbage_collector.go @@ -5,7 +5,7 @@ import ( "errors" "fmt" - log "github.com/sirupsen/logrus" + "github.com/go-logr/logr" "github.com/spf13/cobra" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -17,6 +17,7 @@ import ( "github.com/akuity/kargo/internal/api/kubernetes" "github.com/akuity/kargo/internal/garbage" "github.com/akuity/kargo/internal/kubeclient" + "github.com/akuity/kargo/internal/logging" "github.com/akuity/kargo/internal/os" versionpkg "github.com/akuity/kargo/internal/version" ) @@ -24,12 +25,12 @@ import ( type garbageCollectorOptions struct { KubeConfig string - Logger *log.Logger + Logger logr.Logger } func newGarbageCollectorCommand() *cobra.Command { cmdOpts := &garbageCollectorOptions{ - Logger: log.StandardLogger(), + Logger: logging.DefaultLogger(), } cmd := &cobra.Command{ @@ -54,10 +55,11 @@ func (o *garbageCollectorOptions) complete() { func (o *garbageCollectorOptions) run(ctx context.Context) error { version := versionpkg.GetVersion() - o.Logger.WithFields(log.Fields{ - "version": version.Version, - "commit": version.GitCommit, - }).Info("Starting Kargo Garbage Collector") + o.Logger.Info( + "Starting Kargo Garbage Collector", + "version", version.Version, + "commit", version.GitCommit, + ) mgr, err := o.setupManager(ctx) if err != nil { diff --git a/cmd/controlplane/main.go b/cmd/controlplane/main.go index ab063a9a7..68411621f 100644 --- a/cmd/controlplane/main.go +++ b/cmd/controlplane/main.go @@ -11,7 +11,7 @@ import ( func main() { ctx := signals.SetupSignalHandler() if err := Execute(ctx); err != nil { - logging.LoggerFromContext(ctx).Error(err) + logging.LoggerFromContext(ctx).Error(err, "couldn't execute command") os.Exit(1) } } diff --git a/cmd/controlplane/management_controller.go b/cmd/controlplane/management_controller.go index 3137cbd3e..fcfd4d7a2 100644 --- a/cmd/controlplane/management_controller.go +++ b/cmd/controlplane/management_controller.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - log "github.com/sirupsen/logrus" + "github.com/go-logr/logr" "github.com/spf13/cobra" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" @@ -17,6 +17,7 @@ import ( "github.com/akuity/kargo/internal/api/kubernetes" "github.com/akuity/kargo/internal/controller/management/namespaces" "github.com/akuity/kargo/internal/controller/management/projects" + "github.com/akuity/kargo/internal/logging" "github.com/akuity/kargo/internal/os" versionpkg "github.com/akuity/kargo/internal/version" ) @@ -24,12 +25,12 @@ import ( type managementControllerOptions struct { KubeConfig string - Logger *log.Logger + Logger logr.Logger } func newManagementControllerCommand() *cobra.Command { cmdOpts := &managementControllerOptions{ - Logger: log.StandardLogger(), + Logger: logging.DefaultLogger(), } cmd := &cobra.Command{ @@ -54,10 +55,11 @@ func (o *managementControllerOptions) complete() { func (o *managementControllerOptions) run(ctx context.Context) error { version := versionpkg.GetVersion() - o.Logger.WithFields(log.Fields{ - "version": version.Version, - "commit": version.GitCommit, - }).Info("Starting Kargo Management Controller") + o.Logger.Info( + "Starting Kargo Management Controller", + "version", version.Version, + "commit", version.GitCommit, + ) kargoMgr, err := o.setupManager(ctx) if err != nil { diff --git 
a/cmd/controlplane/webhooks.go b/cmd/controlplane/webhooks.go index 50b6be4e8..cfda5e702 100644 --- a/cmd/controlplane/webhooks.go +++ b/cmd/controlplane/webhooks.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - log "github.com/sirupsen/logrus" + "github.com/go-logr/logr" "github.com/spf13/cobra" authzv1 "k8s.io/api/authorization/v1" corev1 "k8s.io/api/core/v1" @@ -17,6 +17,7 @@ import ( kargoapi "github.com/akuity/kargo/api/v1alpha1" "github.com/akuity/kargo/internal/api/kubernetes" "github.com/akuity/kargo/internal/kubeclient" + "github.com/akuity/kargo/internal/logging" "github.com/akuity/kargo/internal/os" versionpkg "github.com/akuity/kargo/internal/version" libWebhook "github.com/akuity/kargo/internal/webhook" @@ -30,12 +31,12 @@ import ( type webhooksServerOptions struct { KubeConfig string - Logger *log.Logger + Logger logr.Logger } func newWebhooksServerCommand() *cobra.Command { cmdOpts := &webhooksServerOptions{ - Logger: log.StandardLogger(), + Logger: logging.DefaultLogger(), } cmd := &cobra.Command{ @@ -59,10 +60,11 @@ func (o *webhooksServerOptions) complete() { func (o *webhooksServerOptions) run(ctx context.Context) error { version := versionpkg.GetVersion() - o.Logger.WithFields(log.Fields{ - "version": version.Version, - "commit": version.GitCommit, - }).Info("Starting Kargo Webhooks Server") + o.Logger.Info( + "Starting Kargo Webhooks Server", + "version", version.Version, + "commit", version.GitCommit, + ) webhookCfg := libWebhook.ConfigFromEnv() diff --git a/go.mod b/go.mod index 6a84575d2..299e9c4e5 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,6 @@ require ( github.com/Masterminds/semver/v3 v3.2.1 github.com/adrg/xdg v0.4.0 github.com/bacongobbler/browser v1.1.0 - github.com/bombsimon/logrusr/v4 v4.1.0 github.com/coreos/go-oidc/v3 v3.10.0 github.com/distribution/distribution/v3 v3.0.0-20230722181636-7b502560cad4 github.com/evanphx/json-patch/v5 v5.9.0 @@ -28,7 +27,6 @@ require ( github.com/opencontainers/image-spec v1.1.0 github.com/patrickmn/go-cache v2.1.0+incompatible github.com/rs/cors v1.10.1 - github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 @@ -56,7 +54,10 @@ require ( sigs.k8s.io/yaml v1.4.0 ) -require github.com/go-jose/go-jose/v4 v4.0.1 // indirect +require ( + github.com/go-jose/go-jose/v4 v4.0.1 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect +) require ( github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect @@ -82,7 +83,7 @@ require ( github.com/felixge/httpsnoop v1.0.3 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-errors/errors v1.4.2 // indirect - github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/logr v1.4.1 github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect diff --git a/go.sum b/go.sum index 761c50147..7e27198c4 100644 --- a/go.sum +++ b/go.sum @@ -34,8 +34,6 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bombsimon/logrusr/v4 v4.1.0 h1:uZNPbwusB0eUXlO8hIUwStE6Lr5bLN6IgYgG+75kuh4= -github.com/bombsimon/logrusr/v4 v4.1.0/go.mod h1:pjfHC5e59CvjTBIU3V3sGhFWFAnsnhOR03TRc6im0l8= 
github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng= diff --git a/internal/api/kubernetes/client.go b/internal/api/kubernetes/client.go index 0114b92b0..a396e2721 100644 --- a/internal/api/kubernetes/client.go +++ b/internal/api/kubernetes/client.go @@ -578,7 +578,7 @@ func GetRestConfig(ctx context.Context, path string) (*rest.Config, error) { // is empty, but will issue a warning that we can suppress by checking for // that condition ourselves and calling rest.InClusterConfig() directly. if path == "" { - logger.Debug("loading in-cluster REST config") + logger.V(1).Info("loading in-cluster REST config") cfg, err := rest.InClusterConfig() if err != nil { return cfg, fmt.Errorf("error loading in-cluster REST config: %w", err) @@ -586,7 +586,7 @@ func GetRestConfig(ctx context.Context, path string) (*rest.Config, error) { return cfg, nil } - logger.WithField("path", path).Debug("loading REST config from path") + logger.V(1).Info("loading REST config from path", "path", path) cfg, err := clientcmd.BuildConfigFromFlags("", path) if err != nil { return cfg, fmt.Errorf("error loading REST config from %q: %w", path, err) } diff --git a/internal/api/option/log.go b/internal/api/option/log.go index 391e66312..b66d45c21 100644 --- a/internal/api/option/log.go +++ b/internal/api/option/log.go @@ -6,7 +6,7 @@ import ( "time" "connectrpc.com/connect" - log "github.com/sirupsen/logrus" + "github.com/go-logr/logr" "github.com/akuity/kargo/internal/logging" ) @@ -23,12 +23,12 @@ var ( ) type logInterceptor struct { - logger *log.Entry + logger logr.Logger ignorableMethods map[string]bool } func newLogInterceptor( - logger *log.Entry, + logger logr.Logger, ignorableMethods map[string]bool, ) connect.Interceptor { return &logInterceptor{ @@ -49,18 +49,21 @@ func (i *logInterceptor) WrapUnary(next connect.UnaryFunc) connect.UnaryFunc { } res, err := next(ctx, req) - fields := log.Fields{ - "connect.duration": time.Since(start).String(), - } + fields := []any{ + "connect.duration", time.Since(start).String(), + } - level := log.InfoLevel if err != nil { - level = log.ErrorLevel - fields["connect.code"] = connect.CodeOf(err).String() + fields = append(fields, "connect.code", connect.CodeOf(err).String()) + logging. + LoggerFromContext(ctx). + WithValues(fields...). + Error(err, "finished unary call") + return res, err } logging. LoggerFromContext(ctx). - WithFields(fields). - Log(level, "finished unary call") + WithValues(fields...). + Info("finished unary call") return res, err } } @@ -81,18 +84,22 @@ func (i *logInterceptor) WrapStreamingHandler( } err := next(ctx, conn) - fields := log.Fields{ - "connect.duration": time.Since(start), - } + fields := []any{ + "connect.duration", time.Since(start), + } - level := log.InfoLevel if err != nil { - level = log.ErrorLevel - fields["connect.code"] = connect.CodeOf(err).String() + fields = append(fields, "connect.code", connect.CodeOf(err).String()) + logging. + LoggerFromContext(ctx). + WithValues(fields...). + Error(err, "finished streaming call") + return err } + logging. LoggerFromContext(ctx). - WithFields(fields). - Log(level, "finished streaming call") + WithValues(fields...). 
+ Info("finished streaming call") return err } } @@ -101,11 +108,11 @@ func (i *logInterceptor) newLogger( ctx context.Context, procedure string, start time.Time) context.Context { service := path.Dir(procedure)[1:] method := path.Base(procedure) - logger := i.logger.WithFields(log.Fields{ - "connect.service": service, - "connect.method": method, - "connect.start_time": start.Format(time.RFC3339), - }) + logger := i.logger.WithValues( + "connect.service", service, + "connect.method", method, + "connect.start_time", start.Format(time.RFC3339), + ) return logging.ContextWithLogger(ctx, logger) } diff --git a/internal/api/option/log_test.go b/internal/api/option/log_test.go index f5c451e67..7e5fd11e0 100644 --- a/internal/api/option/log_test.go +++ b/internal/api/option/log_test.go @@ -1,14 +1,16 @@ package option import ( + "bytes" "context" + "log/slog" "net/http" "net/http/httptest" "testing" "connectrpc.com/connect" grpchealth "connectrpc.com/grpchealth" - testlog "github.com/sirupsen/logrus/hooks/test" + "github.com/go-logr/logr" "github.com/stretchr/testify/require" "google.golang.org/grpc/health/grpc_health_v1" ) @@ -30,10 +32,11 @@ func TestUnaryServerLogging(t *testing.T) { } for name, testSet := range testSets { t.Run(name, func(t *testing.T) { - logger, hook := testlog.NewNullLogger() + tmpWriteBuffer := bytes.NewBuffer(nil) + logger := logr.FromSlogHandler(slog.NewTextHandler(tmpWriteBuffer, nil)) opt := connect.WithInterceptors( - newLogInterceptor(logger.WithFields(nil), testSet.ignorableMethods)) + newLogInterceptor(logger.WithValues(nil), testSet.ignorableMethods)) mux := http.NewServeMux() mux.Handle(grpchealth.NewHandler(grpchealth.NewStaticChecker(), opt)) srv := httptest.NewServer(mux) @@ -53,7 +56,7 @@ func TestUnaryServerLogging(t *testing.T) { require.NoError(t, err) if testSet.logExpected { - entry := hook.LastEntry() + entry := tmpWriteBuffer.String() require.NotNil(t, entry) for _, field := range []string{ "connect.service", @@ -61,10 +64,10 @@ func TestUnaryServerLogging(t *testing.T) { "connect.start_time", "connect.duration", } { - require.Contains(t, entry.Data, field) + require.Contains(t, entry, field) } } else { - require.Nil(t, hook.LastEntry()) + require.Empty(t, tmpWriteBuffer.String()) } }) } diff --git a/internal/api/option/option.go b/internal/api/option/option.go index 1bdc200b2..4c2840ad1 100644 --- a/internal/api/option/option.go +++ b/internal/api/option/option.go @@ -6,7 +6,6 @@ import ( "net/http" "connectrpc.com/connect" - log "github.com/sirupsen/logrus" libClient "sigs.k8s.io/controller-runtime/pkg/client" "github.com/akuity/kargo/internal/api/config" @@ -33,7 +32,7 @@ func NewHandlerOption( connect.WithInterceptors(interceptors...), connect.WithRecover( func(ctx context.Context, _ connect.Spec, _ http.Header, r any) error { - logging.LoggerFromContext(ctx).Log(log.ErrorLevel, takeStacktrace(defaultStackLength, 3)) + logging.LoggerFromContext(ctx).Error(nil, takeStacktrace(defaultStackLength, 3)) return connect.NewError( connect.CodeInternal, fmt.Errorf("panic: %v", r)) }), diff --git a/internal/api/server.go b/internal/api/server.go index 4f2461426..41d48a58b 100644 --- a/internal/api/server.go +++ b/internal/api/server.go @@ -11,7 +11,6 @@ import ( "connectrpc.com/grpchealth" "github.com/rs/cors" - "github.com/sirupsen/logrus" "golang.org/x/net/http2" "golang.org/x/net/http2/h2c" "k8s.io/apimachinery/pkg/runtime/schema" @@ -232,9 +231,11 @@ func (s *server) Serve(ctx context.Context, l net.Listener) error { } }() - 
log.WithFields(logrus.Fields{ - "tls": s.cfg.TLSConfig != nil, - }).Infof("Server is listening on %q", l.Addr().String()) + log.Info( + "Server is listening", + "bindAddress", l.Addr().String(), + "tls", s.cfg.TLSConfig != nil, + ) select { case <-ctx.Done(): diff --git a/internal/controller/management/namespaces/namespaces.go b/internal/controller/management/namespaces/namespaces.go index 02c3734d5..12a2a6c30 100644 --- a/internal/controller/management/namespaces/namespaces.go +++ b/internal/controller/management/namespaces/namespaces.go @@ -4,7 +4,6 @@ import ( "context" "fmt" - log "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -81,11 +80,11 @@ func (r *reconciler) Reconcile( ctx context.Context, req ctrl.Request, ) (ctrl.Result, error) { - logger := logging.LoggerFromContext(ctx).WithFields(log.Fields{ - "project": req.NamespacedName.Name, - }) + logger := logging.LoggerFromContext(ctx).WithValues( + "project", req.NamespacedName.Name, + ) ctx = logging.ContextWithLogger(ctx, logger) - logger.Debug("reconciling Namespace") + logger.V(1).Info("reconciling Namespace") // Find the Namespace ns := &corev1.Namespace{} @@ -99,12 +98,12 @@ func (r *reconciler) Reconcile( if ns.DeletionTimestamp == nil { return ctrl.Result{}, nil } - logger.Debug("Namespace is being deleted") + logger.V(1).Info("Namespace is being deleted") if !controllerutil.ContainsFinalizer(ns, kargoapi.FinalizerName) { return ctrl.Result{}, nil } - logger.Debug("Namespace needs finalizing") + logger.V(1).Info("Namespace needs finalizing") // Ignore not found errors to keep this idempotent. if err := client.IgnoreNotFound( @@ -124,6 +123,6 @@ func (r *reconciler) Reconcile( return ctrl.Result{}, fmt.Errorf("error removing finalizer: %w", err) } } - logger.Debug("done reconciling Namespace") + logger.V(1).Info("done reconciling Namespace") return ctrl.Result{}, nil } diff --git a/internal/controller/management/projects/projects.go b/internal/controller/management/projects/projects.go index de4ad54cc..ed44b5bf6 100644 --- a/internal/controller/management/projects/projects.go +++ b/internal/controller/management/projects/projects.go @@ -5,7 +5,6 @@ import ( "fmt" "github.com/kelseyhightower/envconfig" - log "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -135,11 +134,11 @@ func (r *reconciler) Reconcile( ctx context.Context, req ctrl.Request, ) (ctrl.Result, error) { - logger := logging.LoggerFromContext(ctx).WithFields(log.Fields{ - "project": req.NamespacedName.Name, - }) + logger := logging.LoggerFromContext(ctx).WithValues( + "project", req.NamespacedName.Name, + ) ctx = logging.ContextWithLogger(ctx, logger) - logger.Debug("reconciling Project") + logger.V(1).Info("reconciling Project") // Find the Project project, err := r.getProjectFn(ctx, r.client, req.NamespacedName.Name) @@ -153,19 +152,19 @@ func (r *reconciler) Reconcile( } if project.DeletionTimestamp != nil { - logger.Debug("Project is being deleted; nothing to do") + logger.V(1).Info("Project is being deleted; nothing to do") return ctrl.Result{}, nil } if project.Status.Phase.IsTerminal() { - logger.Debugf("Project is %s; nothing to do", project.Status.Phase) + logger.V(1).Info("nothing to do", "projectPhase", project.Status.Phase) return ctrl.Result{}, nil } newStatus, err := r.syncProjectFn(ctx, project) if err != nil { newStatus.Message = err.Error() - logger.Errorf("error syncing 
Project: %s", err) + logger.Error(err, "error syncing Project") } else { // Be sure to blank this out in case there's an error in this field from // the previous reconciliation @@ -174,7 +173,7 @@ func (r *reconciler) Reconcile( patchErr := r.patchProjectStatusFn(ctx, project, newStatus) if patchErr != nil { - logger.Errorf("error updating Project status: %s", patchErr) + logger.Error(patchErr, "error updating Project status") } // If we had no error, but couldn't patch, then we DO have an error. But we @@ -183,7 +182,7 @@ func (r *reconciler) Reconcile( if err == nil { err = patchErr } - logger.Debug("done reconciling Project") + logger.V(1).Info("done reconciling Project") // Controller runtime automatically gives us a progressive backoff if err is // not nil @@ -213,9 +212,9 @@ func (r *reconciler) ensureNamespace( ) (kargoapi.ProjectStatus, error) { status := *project.Status.DeepCopy() - logger := logging.LoggerFromContext(ctx).WithFields(log.Fields{ - "project": project.Name, - }) + logger := logging.LoggerFromContext(ctx).WithValues( + "project", project.Name, + ) ownerRef := metav1.NewControllerRef( project, @@ -233,14 +232,14 @@ func (r *reconciler) ensureNamespace( // We found an existing namespace with the same name as the Project. for _, ownerRef := range ns.OwnerReferences { if ownerRef.UID == project.UID { - logger.Debug("namespace exists and is owned by this Project") + logger.V(1).Info("namespace exists and is owned by this Project") return status, nil } } if ns.Labels != nil && ns.Labels[kargoapi.ProjectLabelKey] == kargoapi.LabelTrueValue && len(ns.OwnerReferences) == 0 { - logger.Debug( + logger.V(1).Info( "namespace exists, but is not owned by this Project, but has the " + "project label; Project will adopt it", ) @@ -249,7 +248,7 @@ func (r *reconciler) ensureNamespace( if err = r.updateNamespaceFn(ctx, ns); err != nil { return status, fmt.Errorf("error updating namespace %q: %w", project.Name, err) } - logger.Debug("updated namespace with Project as owner") + logger.V(1).Info("updated namespace with Project as owner") return status, nil } status.Phase = kargoapi.ProjectPhaseInitializationFailed @@ -263,7 +262,7 @@ func (r *reconciler) ensureNamespace( return status, fmt.Errorf("error getting namespace %q: %w", project.Name, err) } - logger.Debug("namespace does not exist yet; creating namespace") + logger.V(1).Info("namespace does not exist yet; creating namespace") ns = &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ @@ -283,7 +282,7 @@ func (r *reconciler) ensureNamespace( if err := r.createNamespaceFn(ctx, ns); err != nil { return status, fmt.Errorf("error creating namespace %q: %w", project.Name, err) } - logger.Debug("created namespace") + logger.V(1).Info("created namespace") return status, nil } @@ -294,12 +293,12 @@ func (r *reconciler) ensureSecretPermissions( ) error { const roleBindingName = "kargo-api-server-manage-project-secrets" - logger := logging.LoggerFromContext(ctx).WithFields(log.Fields{ - "project": project.Name, - "name": project.Name, - "namespace": project.Name, - "roleBinding": roleBindingName, - }) + logger := logging.LoggerFromContext(ctx).WithValues( + "project", project.Name, + "name", project.Name, + "namespace", project.Name, + "roleBinding", roleBindingName, + ) roleBinding := &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ @@ -326,7 +325,7 @@ func (r *reconciler) ensureSecretPermissions( } if err := r.createRoleBindingFn(ctx, roleBinding); err != nil { if apierrors.IsAlreadyExists(err) { - logger.Debug("role binding already 
exists in project namespace") + logger.V(1).Info("role binding already exists in project namespace") return nil } return fmt.Errorf( @@ -336,7 +335,7 @@ func (r *reconciler) ensureSecretPermissions( err, ) } - logger.Debug("granted API server access to manage project secrets") + logger.V(1).Info("granted API server access to manage project secrets") return nil } diff --git a/internal/controller/promotion/argocd.go b/internal/controller/promotion/argocd.go index 3a468f86f..fa3cfe578 100644 --- a/internal/controller/promotion/argocd.go +++ b/internal/controller/promotion/argocd.go @@ -91,7 +91,7 @@ func (a *argoCDMechanism) Promote( } logger := logging.LoggerFromContext(ctx) - logger.Debug("executing Argo CD-based promotion mechanisms") + logger.V(1).Info("executing Argo CD-based promotion mechanisms") for _, update := range updates { if err := a.doSingleUpdateFn( @@ -104,7 +104,7 @@ func (a *argoCDMechanism) Promote( } } - logger.Debug("done executing Argo CD-based promotion mechanisms") + logger.V(1).Info("done executing Argo CD-based promotion mechanisms") return promo.Status.WithPhase(kargoapi.PromotionPhaseSucceeded), newFreight, nil } @@ -213,8 +213,7 @@ func (a *argoCDMechanism) doSingleUpdate( ); err != nil { return fmt.Errorf("error patching Argo CD Application %q: %w", app.Name, err) } - logging.LoggerFromContext(ctx).WithField("app", app.Name). - Debug("patched Argo CD Application") + logging.LoggerFromContext(ctx).V(1).Info("patched Argo CD Application", "app", app.Name) return nil } diff --git a/internal/controller/promotion/composite.go b/internal/controller/promotion/composite.go index e1ad4f032..b7e7ad32f 100644 --- a/internal/controller/promotion/composite.go +++ b/internal/controller/promotion/composite.go @@ -54,7 +54,7 @@ func (c *compositeMechanism) Promote( newFreight = *newFreight.DeepCopy() logger := logging.LoggerFromContext(ctx) - logger.Debugf("executing %s", c.name) + logger.V(1).Info("executing", "name", c.name) for _, childMechanism := range c.childMechanisms { var err error @@ -76,7 +76,7 @@ func (c *compositeMechanism) Promote( } } - logger.Debug("done executing promotion mechanisms. 
aggregated status: ", newStatus.Phase) + logger.V(1).Info("done executing promotion mechanisms", "aggregated status", newStatus.Phase) return newStatus, newFreight, nil } diff --git a/internal/controller/promotion/git.go b/internal/controller/promotion/git.go index 57111d1e4..48ddc8b1e 100644 --- a/internal/controller/promotion/git.go +++ b/internal/controller/promotion/git.go @@ -106,7 +106,7 @@ func (g *gitMechanism) Promote( newFreight = *newFreight.DeepCopy() logger := logging.LoggerFromContext(ctx) - logger.Debugf("executing %s", g.name) + logger.V(1).Info("executing", "name", g.name) for _, update := range updates { var err error @@ -122,7 +122,7 @@ func (g *gitMechanism) Promote( newStatus = aggregateGitPromoStatus(newStatus, *otherStatus) } - logger.Debugf("done executing %s", g.name) + logger.V(1).Info("done executing", "name", g.name) return newStatus, newFreight, nil } @@ -267,12 +267,12 @@ func getRepoCredentialsFn( if err != nil { return nil, fmt.Errorf("error obtaining credentials for git repo %q: %w", repoURL, err) } - logger := logging.LoggerFromContext(ctx).WithField("repo", repoURL) + logger := logging.LoggerFromContext(ctx).WithValues("repo", repoURL) if !ok { - logger.Debug("found no credentials for git repo") + logger.V(1).Info("found no credentials for git repo") return nil, nil } - logger.Debug("obtained credentials for git repo") + logger.V(1).Info("obtained credentials for git repo") return &git.RepoCredentials{ Username: creds.Username, Password: creds.Password, diff --git a/internal/controller/promotions/promoqueues.go b/internal/controller/promotions/promoqueues.go index 1f066aa6a..c3ae121f0 100644 --- a/internal/controller/promotions/promoqueues.go +++ b/internal/controller/promotions/promoqueues.go @@ -4,7 +4,6 @@ import ( "context" "sync" - log "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" @@ -68,20 +67,22 @@ func (pqs *promoQueues) initializeQueues(ctx context.Context, promos kargoapi.Pr continue } pq.Push(&promo) - logger.WithFields(log.Fields{ - "promotion": promo.Name, - "namespace": promo.Namespace, - "stage": promo.Spec.Stage, - "phase": promo.Status.Phase, - }).Debug("pushed Promotion onto Stage-specific Promotion queue") + logger.V(1).Info( + "pushed Promotion onto Stage-specific Promotion queue", + "promotion", promo.Name, + "namespace", promo.Namespace, + "stage", promo.Spec.Stage, + "phase", promo.Status.Phase, + ) } - if logger.Logger.IsLevelEnabled(log.DebugLevel) { + if logger.V(1).Enabled() { for stage, pq := range pqs.pendingPromoQueuesByStage { - logger.WithFields(log.Fields{ - "stage": stage.Name, - "namespace": stage.Namespace, - "depth": pq.Depth(), - }).Debug("Stage-specific Promotion queue initialized") + logger.V(1).Info( + "Stage-specific Promotion queue initialized", + "stage", stage.Name, + "namespace", stage.Namespace, + "depth", pq.Depth(), + ) } } } @@ -118,7 +119,7 @@ func (pqs *promoQueues) tryBegin(ctx context.Context, promo *kargoapi.Promotion) // Push this promo to the queue in case it doesn't exist in the queue. Note that we // deduplicate pushes on the same object, so this is safe to call repeatedly if pq.Push(promo) { - logger.Debug("promo added to priority queue") + logger.V(1).Info("promo added to priority queue") } if activePromoName == "" { // If we get here, the Stage does not have any active Promotions Running against it. 
@@ -129,7 +130,7 @@ func (pqs *promoQueues) tryBegin(ctx context.Context, promo *kargoapi.Promotion) // This promo is the first in the queue. Mark it as active and pop it off the pending queue. popped := pq.Pop() pqs.activePromoByStage[stageKey] = popped.GetName() - logger.Debug("begin promo") + logger.V(1).Info("begin promo") return true } } @@ -142,11 +143,13 @@ func (pqs *promoQueues) conclude(ctx context.Context, stageKey types.NamespacedN pqs.promoQueuesByStageMu.RLock() defer pqs.promoQueuesByStageMu.RUnlock() if pqs.activePromoByStage[stageKey] == promoName { - logger := logging.LoggerFromContext(ctx).WithFields(log.Fields{ - "namespace": stageKey.Namespace, - "promotion": promoName, - }) + logger := logging.LoggerFromContext(ctx) + delete(pqs.activePromoByStage, stageKey) - logger.Debug("conclude promo") + logger.V(1).Info( + "conclude promo", + "namespace", stageKey.Namespace, + "promotion", promoName, + ) } } diff --git a/internal/controller/promotions/promotions.go b/internal/controller/promotions/promotions.go index 109638f3c..9f32d819e 100644 --- a/internal/controller/promotions/promotions.go +++ b/internal/controller/promotions/promotions.go @@ -8,7 +8,6 @@ import ( "time" "github.com/kelseyhightower/envconfig" - log "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" @@ -173,12 +172,12 @@ func (r *reconciler) Reconcile( req ctrl.Request, ) (ctrl.Result, error) { logger := logging.LoggerFromContext(ctx). - WithFields(log.Fields{ - "namespace": req.NamespacedName.Namespace, - "promotion": req.NamespacedName.Name, - }) + WithValues( + "namespace", req.NamespacedName.Namespace, + "promotion", req.NamespacedName.Name, + ) ctx = logging.ContextWithLogger(ctx, logger) - logger.Debug("reconciling Promotion") + logger.V(1).Info("reconciling Promotion") // Note that initialization occurs here because we basically know that the // controller runtime client's cache is ready at this point. We cannot attempt @@ -190,7 +189,7 @@ func (r *reconciler) Reconcile( err = fmt.Errorf("error listing promotions: %w", err) } else { r.pqs.initializeQueues(ctx, promos) - logger.Debug( + logger.V(1).Info( "initialized Stage-specific Promotion queues from list of existing Promotions", ) } @@ -223,16 +222,16 @@ func (r *reconciler) Reconcile( ) } - logger = logger.WithFields(log.Fields{ - "namespace": req.NamespacedName.Namespace, - "promotion": req.NamespacedName.Name, - "stage": promo.Spec.Stage, - "freight": promo.Spec.Freight, - }) + logger = logger.WithValues( + "namespace", req.NamespacedName.Namespace, + "promotion", req.NamespacedName.Name, + "stage", promo.Spec.Stage, + "freight", promo.Spec.Freight, + ) if promo.Status.Phase == kargoapi.PromotionPhaseRunning { // anything we've already marked Running, we allow it to continue to reconcile - logger.Debug("continuing Promotion") + logger.V(1).Info("continuing Promotion") } else { // promo is Pending. Try to begin it. if !r.pqs.tryBegin(ctx, promo) { @@ -245,7 +244,7 @@ func (r *reconciler) Reconcile( } return ctrl.Result{}, nil } - logger.Infof("began promotion") + logger.Info("began promotion") } // Update promo status as Running to give visibility in UI. 
Also, a promo which @@ -268,7 +267,7 @@ func (r *reconciler) Reconcile( func() { defer func() { if err := recover(); err != nil { - logger.Errorf("Promotion panic: %v", err) + logger.Error(nil, "Promotion panic") newStatus.Phase = kargoapi.PromotionPhaseErrored newStatus.Message = fmt.Sprintf("%v", err) } @@ -281,21 +280,21 @@ func (r *reconciler) Reconcile( if promoteErr != nil { newStatus.Phase = kargoapi.PromotionPhaseErrored newStatus.Message = promoteErr.Error() - logger.Errorf("error executing Promotion: %s", promoteErr) + logger.Error(promoteErr, "error executing Promotion") } else { newStatus = otherStatus } }() if newStatus.Phase.IsTerminal() { - logger.Infof("promotion %s", newStatus.Phase) + logger.Info("reporting promotion phase", "phase", newStatus.Phase) } err = kubeclient.PatchStatus(ctx, r.kargoClient, promo, func(status *kargoapi.PromotionStatus) { *status = *newStatus }) if err != nil { - logger.Errorf("error updating Promotion status: %s", err) + logger.Error(err, "error updating Promotion status") } // Record event after patching status if new phase is terminal @@ -351,7 +350,7 @@ func (r *reconciler) Reconcile( promo, kargoapi.AnnotationKeyRefresh, ); clearRefreshErr != nil { - logger.Errorf("error clearing Promotion refresh annotation: %s", clearRefreshErr) + logger.Error(clearRefreshErr, "error clearing Promotion refresh annotation") } if err != nil { @@ -393,7 +392,7 @@ func (r *reconciler) promote( if stage == nil { return nil, fmt.Errorf("could not find Stage %q in namespace %q", stageName, stageNamespace) } - logger.Debug("found associated Stage") + logger.V(1).Info("found associated Stage") if targetFreight == nil { return nil, fmt.Errorf("Freight %q not found in namespace %q", promo.Spec.Freight, promo.Namespace) @@ -411,7 +410,7 @@ func (r *reconciler) promote( ) } - logger = logger.WithField("targetFreight", targetFreight.Name) + logger = logger.WithValues("targetFreight", targetFreight.Name) targetFreightRef := kargoapi.FreightReference{ Name: targetFreight.Name, @@ -437,7 +436,7 @@ func (r *reconciler) promote( return nil, err } - logger.Debugf("promotion %s", newStatus.Phase) + logger.V(1).Info("reporting promotion phase", "phase", newStatus.Phase) if newStatus.Phase.IsTerminal() { // The assumption is that controller does not process multiple promotions in one stage @@ -463,7 +462,7 @@ func (r *reconciler) promote( ); err != nil { // Log the error, but don't let failure to initiate re-verification // prevent the promotion from succeeding. 
- logger.Errorf("error triggering re-verification: %s", err) + logger.Error(err, "error triggering re-verification") } } else if stage.Spec.PromotionMechanisms != nil { status.CurrentFreight = &nextFreight diff --git a/internal/controller/promotions/watches.go b/internal/controller/promotions/watches.go index cccfc7031..76469e5b0 100644 --- a/internal/controller/promotions/watches.go +++ b/internal/controller/promotions/watches.go @@ -3,7 +3,7 @@ package promotions import ( "context" - log "github.com/sirupsen/logrus" + "github.com/go-logr/logr" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" @@ -16,7 +16,7 @@ import ( // EnqueueHighestPriorityPromotionHandler is an event handler that enqueues the next // highest priority Promotion for reconciliation when an active Promotion becomes terminal type EnqueueHighestPriorityPromotionHandler struct { - logger *log.Entry + logger logr.Logger ctx context.Context pqs *promoQueues kargoClient client.Client @@ -65,12 +65,12 @@ func (e *EnqueueHighestPriorityPromotionHandler) Update( wq workqueue.RateLimitingInterface, ) { if evt.ObjectNew == nil { - e.logger.Errorf("Update event has no new object to update: %v", evt) + e.logger.Error(nil, "Update event has no new object to update", "object", evt) return } promo, ok := evt.ObjectNew.(*kargoapi.Promotion) if !ok { - e.logger.Errorf("Failed to convert new Promotion: %v", evt.ObjectNew) + e.logger.Error(nil, "Failed to convert new Promotion", "promotion", evt.ObjectNew) return } if promo.Status.Phase.IsTerminal() { @@ -116,7 +116,7 @@ func (e *EnqueueHighestPriorityPromotionHandler) enqueueNext( firstKey := types.NamespacedName{Namespace: first.GetNamespace(), Name: first.GetName()} promo, err := kargoapi.GetPromotion(e.ctx, e.kargoClient, firstKey) if err != nil { - e.logger.Errorf("Failed to get next highest priority Promotion (%s) for enqueue: %v", firstKey, err) + e.logger.Error(err, "Failed to get next highest priority Promotion for enqueue", "promotion", firstKey) return } if promo == nil || promo.Status.Phase.IsTerminal() { @@ -133,11 +133,12 @@ func (e *EnqueueHighestPriorityPromotionHandler) enqueueNext( }, }, ) - e.logger.WithFields(log.Fields{ - "promotion": promo.Name, - "namespace": promo.Namespace, - "stage": promo.Spec.Stage, - }).Debug("enqueued promo") + e.logger.V(1).Info( + "enqueued promo", + "promotion", promo.Name, + "namespace", promo.Namespace, + "stage", promo.Spec.Stage, + ) return } } diff --git a/internal/controller/stages/stages.go b/internal/controller/stages/stages.go index e9c543428..56f2d4c71 100644 --- a/internal/controller/stages/stages.go +++ b/internal/controller/stages/stages.go @@ -8,7 +8,6 @@ import ( "time" "github.com/kelseyhightower/envconfig" - log "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" @@ -499,12 +498,12 @@ func (r *reconciler) Reconcile( ctx context.Context, req ctrl.Request, ) (ctrl.Result, error) { - logger := logging.LoggerFromContext(ctx).WithFields(log.Fields{ - "namespace": req.NamespacedName.Namespace, - "stage": req.NamespacedName.Name, - }) + logger := logging.LoggerFromContext(ctx).WithValues( + "namespace", req.NamespacedName.Namespace, + "stage", req.NamespacedName.Name, + ) ctx = logging.ContextWithLogger(ctx, logger) - logger.Debug("reconciling Stage") + logger.V(1).Info("reconciling Stage") // Find the Stage stage, err := kargoapi.GetStage(ctx, r.kargoClient, req.NamespacedName) @@ -521,7 
+520,7 @@ func (r *reconciler) Reconcile( // Ignore if stage does not belong to given shard return ctrl.Result{}, err } - logger.Debug("found Stage") + logger.V(1).Info("found Stage") var newStatus kargoapi.StageStatus if stage.DeletionTimestamp != nil { @@ -545,7 +544,7 @@ func (r *reconciler) Reconcile( } if err != nil { newStatus.Message = err.Error() - logger.Errorf("error syncing Stage: %s", stage.Status.Message) + logger.Error(err, "error syncing Stage", "message", stage.Status.Message) } else { // Be sure to blank this out in case there's an error in this field from // the previous reconciliation @@ -556,7 +555,7 @@ func (r *reconciler) Reconcile( *status = newStatus }) if updateErr != nil { - logger.Errorf("error updating Stage status: %s", updateErr) + logger.Error(updateErr, "error updating Stage status") } clearErr := kargoapi.ClearAnnotations( ctx, @@ -568,7 +567,7 @@ func (r *reconciler) Reconcile( kargoapi.AnnotationKeyAbort, ) if clearErr != nil { - logger.Errorf("error clearing Stage annotations: %s", clearErr) + logger.Error(clearErr, "error clearing Stage annotations") } // If we had no error, but couldn't update, then we DO have an error. But we @@ -577,7 +576,7 @@ func (r *reconciler) Reconcile( if err == nil { err = errors.Join(updateErr, clearErr) } - logger.Debug("done reconciling Stage") + logger.V(1).Info("done reconciling Stage") // If we do have an error at this point, return it so controller runtime // retries with a progressive backoff. @@ -713,7 +712,7 @@ func (r *reconciler) syncNormalStage( ); err != nil { return status, err } else if hasNonTerminalPromos { - logger.Debug( + logger.V(1).Info( "Stage has one or more Promotions in a non-terminal phase; skipping " + "this reconciliation loop", ) @@ -726,11 +725,11 @@ func (r *reconciler) syncNormalStage( if status.CurrentFreight == nil { status.Phase = kargoapi.StagePhaseNotApplicable - logger.Debug( + logger.V(1).Info( "Stage has no current Freight; no health checks or verification to perform", ) } else { - freightLogger := logger.WithField("freight", status.CurrentFreight.Name) + freightLogger := logger.WithValues("freight", status.CurrentFreight.Name) shouldRecordFreightVerificationEvent := false // Push the latest state of the current Freight to the history at the @@ -745,10 +744,12 @@ func (r *reconciler) syncNormalStage( *status.CurrentFreight, stage.Spec.PromotionMechanisms.ArgoCDAppUpdates, ); status.Health != nil { - freightLogger.WithField("health", status.Health.Status). 
- Debug("Stage health assessed") + freightLogger.V(1).Info( + "Stage health assessed", + "health", status.Health.Status, + ) } else { - freightLogger.Debug("Stage health deemed not applicable") + freightLogger.V(1).Info("Stage health deemed not applicable") } // If the Stage is healthy and no verification process is defined, then the @@ -774,7 +775,7 @@ func (r *reconciler) syncNormalStage( info := status.CurrentFreight.VerificationInfo if info != nil && info.ID != "" && info.Phase.IsTerminal() { if v, ok := stage.GetAnnotations()[kargoapi.AnnotationKeyReverify]; ok && v == info.ID { - logger.Debug("rerunning verification") + logger.V(1).Info("rerunning verification") status.Phase = kargoapi.StagePhaseVerifying status.CurrentFreight.VerificationInfo = nil } @@ -787,7 +788,7 @@ func (r *reconciler) syncNormalStage( if status.Phase == kargoapi.StagePhaseVerifying || status.Phase == kargoapi.StagePhaseNotApplicable { if !status.CurrentFreight.VerificationInfo.HasAnalysisRun() { if status.Health == nil || status.Health.Status == kargoapi.HealthStateHealthy { - log.Debug("starting verification") + logger.V(1).Info("starting verification") var err error if status.CurrentFreight.VerificationInfo, err = r.startVerificationFn( ctx, @@ -800,7 +801,7 @@ func (r *reconciler) syncNormalStage( } } } else { - log.Debug("checking verification results") + logger.V(1).Info("checking verification results") var err error if status.CurrentFreight.VerificationInfo, err = r.getVerificationInfoFn( ctx, @@ -817,23 +818,23 @@ func (r *reconciler) syncNormalStage( newInfo := status.CurrentFreight.VerificationInfo if newInfo.ID != "" && !newInfo.Phase.IsTerminal() { if v, ok := stage.GetAnnotations()[kargoapi.AnnotationKeyAbort]; ok && v == newInfo.ID { - log.Debug("aborting verification") + logger.V(1).Info("aborting verification") status.CurrentFreight.VerificationInfo = r.abortVerificationFn(ctx, stage) } } } if status.CurrentFreight.VerificationInfo != nil { - log.Debugf( - "verification phase is %s", - status.CurrentFreight.VerificationInfo.Phase, + logger.V(1).Info( + "verification phase", + "phase", status.CurrentFreight.VerificationInfo.Phase, ) if status.CurrentFreight.VerificationInfo.Phase.IsTerminal() { // Verification is complete shouldRecordFreightVerificationEvent = true status.Phase = kargoapi.StagePhaseSteady - log.Debug("verification is complete") + logger.V(1).Info("verification is complete") } // Add latest verification info to history. @@ -922,14 +923,14 @@ func (r *reconciler) syncNormalStage( // Stop here if we have no chance of finding any Freight to promote. if stage.Spec.Subscriptions == nil || (stage.Spec.Subscriptions.Warehouse == "" && len(stage.Spec.Subscriptions.UpstreamStages) == 0) { - logger.Warn( + logger.V(1).Info( "Stage has no subscriptions. 
This may indicate an issue with resource" + "validation logic.", ) return status, nil } - logger.Debug("checking if auto-promotion is permitted...") + logger.V(1).Info("checking if auto-promotion is permitted...") if permitted, err := r.isAutoPromotionPermittedFn(ctx, stage.Namespace, stage.Name); err != nil { return status, fmt.Errorf( @@ -939,7 +940,7 @@ func (r *reconciler) syncNormalStage( err, ) } else if !permitted { - logger.Debug("auto-promotion is not permitted for the Stage") + logger.V(1).Info("auto-promotion is not permitted for the Stage") return status, nil } @@ -958,16 +959,16 @@ func (r *reconciler) syncNormalStage( } if latestFreight == nil { - logger.Debug("no Freight found") + logger.V(1).Info("no Freight found") return status, nil } - logger = logger.WithField("freight", latestFreight.Name) + logger = logger.WithValues("freight", latestFreight.Name) // Only proceed if nextFreight isn't the one we already have if stage.Status.CurrentFreight != nil && stage.Status.CurrentFreight.Name == latestFreight.Name { - logger.Debug("Stage already has latest available Freight") + logger.V(1).Info("Stage already has latest available Freight") return status, nil } @@ -996,11 +997,11 @@ func (r *reconciler) syncNormalStage( } if len(promos.Items) > 0 { - logger.Debug("Promotion already exists for Freight") + logger.V(1).Info("Promotion already exists for Freight") return status, nil } - logger.Debug("auto-promotion will proceed") + logger.V(1).Info("auto-promotion will proceed") promo := kargo.NewPromotion(*stage, latestFreight.Name) if err := @@ -1028,7 +1029,7 @@ func (r *reconciler) syncNormalStage( promo.Spec.Stage, ) - logger.WithField("promotion", promo.Name).Debug("created Promotion resource") + logger.V(1).Info("created Promotion resource", "promotion", promo.Name) return status, nil } @@ -1213,7 +1214,7 @@ func (r *reconciler) verifyFreightInStage( freightName string, stageName string, ) (bool, error) { - logger := logging.LoggerFromContext(ctx).WithField("freight", freightName) + logger := logging.LoggerFromContext(ctx).WithValues("freight", freightName) // Find the Freight freight, err := r.getFreightFn( @@ -1247,7 +1248,7 @@ func (r *reconciler) verifyFreightInStage( // Only try to mark as verified in this Stage if not already the case. if _, ok := newStatus.VerifiedIn[stageName]; ok { - logger.Debug("Freight already marked as verified in Stage") + logger.V(1).Info("Freight already marked as verified in Stage") return false, nil } @@ -1256,7 +1257,7 @@ func (r *reconciler) verifyFreightInStage( return false, err } - logger.Debug("marked Freight as verified in Stage") + logger.V(1).Info("marked Freight as verified in Stage") return true, nil } @@ -1297,13 +1298,16 @@ func (r *reconciler) isAutoPromotionPermitted( return false, fmt.Errorf("Project %q not found", namespace) } if project.Spec == nil || len(project.Spec.PromotionPolicies) == 0 { - logger.Debug("found no PromotionPolicy associated with the Stage") + logger.V(1).Info("found no PromotionPolicy associated with the Stage") return false, nil } for _, policy := range project.Spec.PromotionPolicies { if policy.Stage == stageName { - logger.WithField("autoPromotionEnabled", policy.AutoPromotionEnabled). 
- Debug("found PromotionPolicy associated with the Stage") + logger.V(1).Info( + "found PromotionPolicy associated with the Stage", + "autoPromotionEnabled", policy.AutoPromotionEnabled, + ) + return policy.AutoPromotionEnabled, nil } } @@ -1332,8 +1336,10 @@ func (r *reconciler) getLatestAvailableFreight( ) } if latestFreight == nil { - logger.WithField("warehouse", stage.Spec.Subscriptions.Warehouse). - Debug("no Freight found from Warehouse") + logger.V(1).Info( + "no Freight found from Warehouse", + "warehouse", stage.Spec.Subscriptions.Warehouse, + ) } return latestFreight, nil } @@ -1352,7 +1358,7 @@ func (r *reconciler) getLatestAvailableFreight( ) } if latestVerifiedFreight == nil { - logger.Debug("no verified Freight found upstream from Stage") + logger.V(1).Info("no verified Freight found upstream from Stage") } latestApprovedFreight, err := r.getLatestApprovedFreightFn( @@ -1369,7 +1375,7 @@ func (r *reconciler) getLatestAvailableFreight( ) } if latestVerifiedFreight == nil { - logger.Debug("no approved Freight found for Stage") + logger.V(1).Info("no approved Freight found for Stage") } if latestVerifiedFreight == nil && latestApprovedFreight == nil { diff --git a/internal/controller/stages/verification.go b/internal/controller/stages/verification.go index 568ab2304..3af7d7377 100644 --- a/internal/controller/stages/verification.go +++ b/internal/controller/stages/verification.go @@ -86,7 +86,7 @@ func (r *reconciler) startVerification( return analysisRuns.Items[j].CreationTimestamp.Before(&analysisRuns.Items[i].CreationTimestamp) }) - logger.Debug("AnalysisRun already exists for Freight") + logger.V(1).Info("AnalysisRun already exists for Freight") latestAnalysisRun := analysisRuns.Items[0] return &kargoapi.VerificationInfo{ ID: uuid.NewString(), diff --git a/internal/controller/stages/watches.go b/internal/controller/stages/watches.go index 62b747805..906be9866 100644 --- a/internal/controller/stages/watches.go +++ b/internal/controller/stages/watches.go @@ -4,7 +4,6 @@ import ( "context" "fmt" - log "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" @@ -63,17 +62,17 @@ func (v *verifiedFreightEventHandler) Update( ) { logger := logging.LoggerFromContext(ctx) if evt.ObjectOld == nil || evt.ObjectNew == nil { - logger.Errorf("Update event has no old or new object to update: %v", evt) + logger.Error(nil, "Update event has no old or new object to update", "event", evt) return } oldFreight, ok := evt.ObjectOld.(*kargoapi.Freight) if !ok { - logger.Errorf("Failed to convert old Freight: %v", evt.ObjectOld) + logger.Error(nil, "Failed to convert old Freight", "object", evt.ObjectOld) return } newFreight, ok := evt.ObjectNew.(*kargoapi.Freight) if !ok { - logger.Errorf("Failed to convert new Freight: %v", evt.ObjectNew) + logger.Error(nil, "Failed to convert new Freight", "object", evt.ObjectNew) return } newlyVerifiedStages := getNewlyVerifiedStages(oldFreight, newFreight) @@ -92,10 +91,11 @@ func (v *verifiedFreightEventHandler) Update( LabelSelector: v.shardSelector, }, ); err != nil { - logger.Errorf( - "Failed list Stages downstream from Stage %v in namespace %q", - evt.ObjectOld, - newFreight.Namespace, + logger.Error( + err, + "Failed list Stages downstream from Stage", + "stage", evt.ObjectOld, + "namespace", newFreight.Namespace, ) return } @@ -112,10 +112,11 @@ func (v *verifiedFreightEventHandler) Update( }, }, ) - logger.WithFields(log.Fields{ - "namespace": 
newFreight.Namespace, - "stage": downStreamStage, - }).Debug("enqueued downstream Stage for reconciliation") + logger.V(1).Info( + "enqueued downstream Stage for reconciliation", + "namespace", newFreight.Namespace, + "stage", downStreamStage, + ) } } @@ -171,17 +172,17 @@ func (a *approvedFreightEventHandler) Update( ) { logger := logging.LoggerFromContext(ctx) if evt.ObjectOld == nil || evt.ObjectNew == nil { - logger.Errorf("Update event has no old or new object to update: %v", evt) + logger.Error(nil, "Update event has no old or new object to update", "event", evt) return } oldFreight, ok := evt.ObjectOld.(*kargoapi.Freight) if !ok { - logger.Errorf("Failed to convert old Freight: %v", evt.ObjectOld) + logger.Error(nil, "Failed to convert old Freight", "object", evt.ObjectOld) return } newFreight, ok := evt.ObjectNew.(*kargoapi.Freight) if !ok { - logger.Errorf("Failed to convert new Freight: %v", evt.ObjectNew) + logger.Error(nil, "Failed to convert new Freight", "object", evt.ObjectNew) return } newlyApprovedStages := getNewlyApprovedStages(oldFreight, newFreight) @@ -194,10 +195,11 @@ func (a *approvedFreightEventHandler) Update( }, }, ) - logger.WithFields(log.Fields{ - "namespace": newFreight.Namespace, - "stage": stage, - }).Debug("enqueued Stage for reconciliation") + logger.V(1).Info( + "enqueued Stage for reconciliation", + "namespace", newFreight.Namespace, + "stage", stage, + ) } } @@ -241,10 +243,11 @@ func (c *createdFreightEventHandler) Create( LabelSelector: c.shardSelector, }, ); err != nil { - logger.Errorf( - "Failed list Stages subscribed to Warehouse %q in namespace %q", - freight.Warehouse, - freight.Namespace, + logger.Error( + nil, + "Failed list Stages subscribed to Warehouse", + "warehouse", freight.Warehouse, + "namespace", freight.Namespace, ) return } @@ -257,10 +260,11 @@ func (c *createdFreightEventHandler) Create( }, }, ) - logger.WithFields(log.Fields{ - "namespace": freight.Namespace, - "stage": stage.Name, - }).Debug("enqueued Stage for reconciliation") + logger.V(1).Info( + "enqueued Stage for reconciliation", + "namespace", freight.Namespace, + "stage", stage.Name, + ) } } @@ -350,11 +354,11 @@ func (u *updatedArgoCDAppHandler) Update( LabelSelector: u.shardSelector, }, ); err != nil { - logger.Errorf( - "error listing Stages for Application %q in namespace %q: %s", - e.ObjectNew.GetNamespace(), - e.ObjectNew.GetName(), + logger.Error( err, + "error listing Stages for Application", + "namespace", e.ObjectNew.GetNamespace(), + "application", e.ObjectNew.GetName(), ) } for _, stage := range stages.Items { @@ -366,11 +370,12 @@ func (u *updatedArgoCDAppHandler) Update( }, }, ) - logger.WithFields(log.Fields{ - "namespace": stage.Namespace, - "stage": stage.Name, - "app": e.ObjectNew.GetName(), - }).Debug("enqueued Stage for reconciliation") + logger.V(1).Info( + "enqueued Stage for reconciliation", + "namespace", stage.Namespace, + "stage", stage.Name, + "app", e.ObjectNew.GetName(), + ) } } } @@ -378,18 +383,18 @@ func (u *updatedArgoCDAppHandler) Update( func appHealthOrSyncStatusChanged(ctx context.Context, e event.UpdateEvent) bool { logger := logging.LoggerFromContext(ctx) if e.ObjectOld == nil { - logger.Errorf("Update event has no old object to update: %v", e) + logger.Error(nil, "Update event has no old object to update", "event", e) } if e.ObjectNew == nil { - logger.Errorf("Update event has no new object for update: %v", e) + logger.Error(nil, "Update event has no new object for update", "event", e) } newUn, err := 
runtime.DefaultUnstructuredConverter.ToUnstructured(e.ObjectNew) if err != nil { - logger.Errorf("Failed to convert new app: %v", e.ObjectNew) + logger.Error(err, "Failed to convert new app", "object", e.ObjectNew) } oldUn, err := runtime.DefaultUnstructuredConverter.ToUnstructured(e.ObjectOld) if err != nil { - logger.Errorf("Failed to convert old app: %v", e.ObjectOld) + logger.Error(err, "Failed to convert old app", "object", e.ObjectOld) } oldHealth, _, _ := unstructured.NestedString(oldUn, "status", "health", "status") newHealth, _, _ := unstructured.NestedString(newUn, "status", "health", "status") @@ -463,11 +468,11 @@ func (p *phaseChangedAnalysisRunHandler) Update( LabelSelector: p.shardSelector, }, ); err != nil { - logger.Errorf( - "error listing Stages for AnalysisRun %q in namespace %q: %s", - e.ObjectNew.GetNamespace(), - e.ObjectNew.GetName(), + logger.Error( err, + "error listing Stages for AnalysisRun", + "namespace", e.ObjectNew.GetNamespace(), + "analysisRun", e.ObjectNew.GetName(), ) } for _, stage := range stages.Items { @@ -479,11 +484,12 @@ func (p *phaseChangedAnalysisRunHandler) Update( }, }, ) - logger.WithFields(log.Fields{ - "namespace": stage.Namespace, - "stage": stage.Name, - "analysisRun": e.ObjectNew.GetName(), - }).Debug("enqueued Stage for reconciliation") + logger.V(1).Info( + "enqueued Stage for reconciliation", + "namespace", stage.Namespace, + "stage", stage.Name, + "analysisRun", e.ObjectNew.GetName(), + ) } } } @@ -491,18 +497,18 @@ func analysisRunPhaseChanged(ctx context.Context, e event.UpdateEvent) bool { logger := logging.LoggerFromContext(ctx) if e.ObjectOld == nil { - logger.Errorf("Update event has no old object to update: %v", e) + logger.Error(nil, "Update event has no old object to update", "event", e) } if e.ObjectNew == nil { - logger.Errorf("Update event has no new object for update: %v", e) + logger.Error(nil, "Update event has no new object for update", "event", e) } newUn, err := runtime.DefaultUnstructuredConverter.ToUnstructured(e.ObjectNew) if err != nil { - logger.Errorf("Failed to convert new AnalysisRun: %v", e.ObjectNew) + logger.Error(err, "Failed to convert new AnalysisRun", "object", e.ObjectNew) } oldUn, err := runtime.DefaultUnstructuredConverter.ToUnstructured(e.ObjectOld) if err != nil { - logger.Errorf("Failed to convert old AnalysisRun: %v", e.ObjectOld) + logger.Error(err, "Failed to convert old AnalysisRun", "object", e.ObjectOld) } oldPhase, _, _ := unstructured.NestedString(oldUn, "status", "phase") newPhase, _, _ := unstructured.NestedString(newUn, "status", "phase") diff --git a/internal/controller/warehouses/git.go b/internal/controller/warehouses/git.go index f5e535ba9..7b58c772f 100644 --- a/internal/controller/warehouses/git.go +++ b/internal/controller/warehouses/git.go @@ -48,7 +48,7 @@ func (r *reconciler) selectCommits( continue } sub := s.Git - logger := logging.LoggerFromContext(ctx).WithField("repo", sub.RepoURL) + logger := logging.LoggerFromContext(ctx).WithValues("repo", sub.RepoURL) creds, ok, err := r.credentialsDB.Get(ctx, namespace, credentials.TypeGit, sub.RepoURL) if err != nil { @@ -65,9 +65,9 @@ func (r *reconciler) selectCommits( Password: creds.Password, SSHPrivateKey: creds.SSHPrivateKey, } - logger.Debug("obtained credentials for git repo") + logger.V(1).Info("obtained credentials for git repo") } else { - logger.Debug("found no credentials for git repo") + logger.V(1).Info("found no credentials for git repo") } baseCommit :=
repoCommitMappings[sub.RepoURL+"#"+sub.Branch] @@ -80,8 +80,7 @@ func (r *reconciler) selectCommits( err, ) } - logger.WithField("commit", gm.Commit). - Debug("found latest commit from repo") + logger.V(1).Info("found latest commit from repo", "commit", gm.Commit) latestCommits = append( latestCommits, kargoapi.GitCommit{ @@ -105,7 +104,7 @@ func (r *reconciler) selectCommitMeta( creds *git.RepoCredentials, baseCommit string, ) (*gitMeta, error) { - logger := logging.LoggerFromContext(ctx).WithField("repo", sub.RepoURL) + logger := logging.LoggerFromContext(ctx).WithValues("repo", sub.RepoURL) if creds == nil { creds = &git.RepoCredentials{} } @@ -142,7 +141,7 @@ func (r *reconciler) selectCommitMeta( msg, err := repo.CommitMessage(selectedCommit) if err != nil { // This is best effort, so just log the error - logger.Warnf("failed to get message from commit %q: %v", selectedCommit, err) + logger.Error(err, "failed to get message from commit", "commit", selectedCommit) } return &gitMeta{ Commit: selectedCommit, diff --git a/internal/controller/warehouses/helm.go b/internal/controller/warehouses/helm.go index d5d0e5e9e..755606851 100644 --- a/internal/controller/warehouses/helm.go +++ b/internal/controller/warehouses/helm.go @@ -24,9 +24,9 @@ func (r *reconciler) selectCharts( sub := s.Chart - logger := logging.LoggerFromContext(ctx).WithField("repoURL", sub.RepoURL) + logger := logging.LoggerFromContext(ctx).WithValues("repoURL", sub.RepoURL) if sub.Name != "" { - logger = logger.WithField("chart", sub.Name) + logger = logger.WithValues("chart", sub.Name) } creds, ok, err := @@ -45,9 +45,9 @@ func (r *reconciler) selectCharts( Username: creds.Username, Password: creds.Password, } - logger.Debug("obtained credentials for chart repo") + logger.V(1).Info("obtained credentials for chart repo") } else { - logger.Debug("found no credentials for chart repo") + logger.V(1).Info("found no credentials for chart repo") } vers, err := r.selectChartVersionFn( @@ -74,7 +74,7 @@ func (r *reconciler) selectCharts( } if vers == "" { - logger.Error("found no suitable chart version") + logger.Error(nil, "found no suitable chart version") if sub.Name == "" { return nil, fmt.Errorf( "found no suitable version of chart in repository %q", sub.RepoURL, ) } return nil, fmt.Errorf( "found no suitable version %q of chart in repository %q", sub.Name, sub.RepoURL, ) } - logger.WithField("version", vers).
- Debug("found latest suitable chart version") + logger.V(1).Info( + "found latest suitable chart version", + "version", vers, + ) charts = append( charts, diff --git a/internal/controller/warehouses/images.go b/internal/controller/warehouses/images.go index 27aab8eef..1d186168f 100644 --- a/internal/controller/warehouses/images.go +++ b/internal/controller/warehouses/images.go @@ -5,8 +5,6 @@ import ( "fmt" "strings" - log "github.com/sirupsen/logrus" - kargoapi "github.com/akuity/kargo/api/v1alpha1" "github.com/akuity/kargo/internal/credentials" "github.com/akuity/kargo/internal/git" @@ -27,7 +25,7 @@ func (r *reconciler) selectImages( sub := s.Image - logger := logging.LoggerFromContext(ctx).WithField("repo", sub.RepoURL) + logger := logging.LoggerFromContext(ctx).WithValues("repo", sub.RepoURL) creds, ok, err := r.credentialsDB.Get(ctx, namespace, credentials.TypeImage, sub.RepoURL) @@ -44,9 +42,9 @@ func (r *reconciler) selectImages( Username: creds.Username, Password: creds.Password, } - logger.Debug("obtained credentials for image repo") + logger.V(1).Info("obtained credentials for image repo") } else { - logger.Debug("found no credentials for image repo") + logger.V(1).Info("found no credentials for image repo") } tag, digest, err := r.getImageRefsFn(ctx, *sub, regCreds) @@ -66,10 +64,11 @@ func (r *reconciler) selectImages( Digest: digest, }, ) - logger.WithFields(log.Fields{ - "tag": tag, - "digest": digest, - }).Debug("found latest suitable image") + logger.V(1).Info( + "found latest suitable image", + "tag", tag, + "digest", digest, + ) } return imgs, nil } diff --git a/internal/controller/warehouses/warehouses.go b/internal/controller/warehouses/warehouses.go index 775d9b152..cefc392ec 100644 --- a/internal/controller/warehouses/warehouses.go +++ b/internal/controller/warehouses/warehouses.go @@ -5,7 +5,6 @@ import ( "fmt" "time" - log "github.com/sirupsen/logrus" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ctrl "sigs.k8s.io/controller-runtime" @@ -169,12 +168,12 @@ func (r *reconciler) Reconcile( ) (ctrl.Result, error) { logger := logging.LoggerFromContext(ctx) - logger = logger.WithFields(log.Fields{ - "namespace": req.NamespacedName.Namespace, - "warehouse": req.NamespacedName.Name, - }) + logger = logger.WithValues( + "namespace", req.NamespacedName.Namespace, + "warehouse", req.NamespacedName.Name, + ) ctx = logging.ContextWithLogger(ctx, logger) - logger.Debug("reconciling Warehouse") + logger.V(1).Info("reconciling Warehouse") // Find the Warehouse warehouse, err := kargoapi.GetWarehouse(ctx, r.client, req.NamespacedName) @@ -190,7 +189,7 @@ func (r *reconciler) Reconcile( newStatus, err := r.syncWarehouse(ctx, warehouse) if err != nil { newStatus.Message = err.Error() - logger.Errorf("error syncing Warehouse: %s", err) + logger.Error(err, "error syncing Warehouse") } updateErr := kubeclient.PatchStatus( @@ -202,7 +201,7 @@ func (r *reconciler) Reconcile( }, ) if updateErr != nil { - logger.Errorf("error updating Warehouse status: %s", updateErr) + logger.Error(updateErr, "error updating Warehouse status") } if clearRefreshErr := kargoapi.ClearAnnotations( ctx, @@ -210,7 +209,7 @@ func (r *reconciler) Reconcile( warehouse, kargoapi.AnnotationKeyRefresh, ); clearRefreshErr != nil { - logger.Errorf("error clearing Warehouse refresh annotation: %s", clearRefreshErr) + logger.Error(clearRefreshErr, "error clearing Warehouse refresh annotation") } // If we had no error, but couldn't update, then we DO have an error. 
But we @@ -219,7 +218,7 @@ func (r *reconciler) Reconcile( if err == nil { err = updateErr } - logger.Debug("done reconciling Warehouse") + logger.V(1).Info("done reconciling Warehouse") // Controller runtime automatically gives us a progressive backoff if err is // not nil @@ -248,17 +247,17 @@ func (r *reconciler) syncWarehouse( return status, fmt.Errorf("error getting latest Freight from repositories: %w", err) } if freight == nil { - logger.Debug("found no Freight from repositories") + logger.V(1).Info("found no Freight from repositories") return status, nil } - logger.Debug("got latest Freight from repositories") + logger.V(1).Info("got latest Freight from repositories") if err = r.createFreightFn(ctx, freight); err != nil { if apierrors.IsAlreadyExists(err) { - logger.Debugf( - "Freight %q in namespace %q already exists", - freight.Name, - freight.Namespace, + logger.V(1).Info( + "Freight already exists", + "freight", freight.Name, + "namespace", freight.Namespace, ) return status, nil } @@ -269,10 +268,10 @@ func (r *reconciler) syncWarehouse( err, ) } - log.Debugf( - "created Freight %q in namespace %q", - freight.Name, - freight.Namespace, + logger.V(1).Info( + "created Freight", + "freight", freight.Name, + "namespace", freight.Namespace, ) status.LastFreight = &kargoapi.FreightReference{ Name: freight.Name, @@ -299,7 +298,7 @@ func (r *reconciler) getLatestFreightFromRepos( if err != nil { return nil, fmt.Errorf("error syncing git repo subscriptions: %w", err) } - logger.Debug("synced git repo subscriptions") + logger.V(1).Info("synced git repo subscriptions") selectedImages, err := r.selectImagesFn( ctx, @@ -309,7 +308,7 @@ func (r *reconciler) getLatestFreightFromRepos( if err != nil { return nil, fmt.Errorf("error syncing image repo subscriptions: %w", err) } - logger.Debug("synced image repo subscriptions") + logger.V(1).Info("synced image repo subscriptions") selectedCharts, err := r.selectChartsFn( ctx, @@ -319,7 +318,7 @@ func (r *reconciler) getLatestFreightFromRepos( if err != nil { return nil, fmt.Errorf("error syncing chart repo subscriptions: %w", err) } - logger.Debug("synced chart repo subscriptions") + logger.V(1).Info("synced chart repo subscriptions") freight := &kargoapi.Freight{ ObjectMeta: metav1.ObjectMeta{ diff --git a/internal/credentials/credentials.go b/internal/credentials/credentials.go index c1e4aff85..dcf107ad6 100644 --- a/internal/credentials/credentials.go +++ b/internal/credentials/credentials.go @@ -7,7 +7,6 @@ import ( "strings" "github.com/kelseyhightower/envconfig" - log "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" "sigs.k8s.io/controller-runtime/pkg/client" @@ -110,8 +109,11 @@ func (k *kubernetesDatabase) Get( // If we are dealing with an insecure HTTP endpoint (of any type), // refuse to return any credentials if strings.HasPrefix(repoURL, "http://") { - logger := logging.LoggerFromContext(ctx).WithField("repoURL", repoURL) - logger.Warnf("refused to get credentials for insecure HTTP endpoint") + logger := logging.LoggerFromContext(ctx) + logger.Info( + "refused to get credentials for insecure HTTP endpoint", + "repoURL", repoURL, + ) return creds, false, nil } @@ -220,10 +222,11 @@ func (k *kubernetesDatabase) getCredentialsSecret( } regex, err := regexp.Compile(string(patternBytes)) if err != nil { - logger.WithFields(log.Fields{ - "namespace": namespace, - "secret": secret.Name, - }).Warn("failed to compile regex for credential secret") + logger.Info( + "failed to compile regex for credential 
secret", + "namespace", namespace, + "secret", secret.Name, + ) continue } if regex.MatchString(repoURL) { diff --git a/internal/garbage/freight.go b/internal/garbage/freight.go index 50bbcef8f..f6d6480ce 100644 --- a/internal/garbage/freight.go +++ b/internal/garbage/freight.go @@ -6,7 +6,6 @@ import ( "sort" "time" - "github.com/sirupsen/logrus" "sigs.k8s.io/controller-runtime/pkg/client" kargoapi "github.com/akuity/kargo/api/v1alpha1" @@ -20,7 +19,7 @@ import ( // Freight (from the same Warehouse) that remains in use. // - Older than some configurable minimum age. func (c *collector) cleanProjectFreight(ctx context.Context, project string) error { - logger := logging.LoggerFromContext(ctx).WithField("project", project) + logger := logging.LoggerFromContext(ctx).WithValues("project", project) warehouses := &kargoapi.WarehouseList{} if err := c.listWarehousesFn( @@ -33,13 +32,13 @@ func (c *collector) cleanProjectFreight(ctx context.Context, project string) err var cleanErrCount int for _, warehouse := range warehouses.Items { - warehouseLogger := logger.WithField("warehouse", warehouse.Name) + warehouseLogger := logger.WithValues("warehouse", warehouse.Name) if err := c.cleanWarehouseFreightFn(ctx, project, warehouse.Name); err != nil { - warehouseLogger.Error("error cleaning Freight from Warehouse") + warehouseLogger.Error(err, "error cleaning Freight from Warehouse") cleanErrCount++ continue } - warehouseLogger.Debug("cleaned Freight from Warehouse") + warehouseLogger.V(1).Info("cleaned Freight from Warehouse") } if cleanErrCount > 0 { @@ -62,10 +61,10 @@ func (c *collector) cleanWarehouseFreight( project string, warehouse string, ) error { - logger := logging.LoggerFromContext(ctx).WithFields(logrus.Fields{ - "project": project, - "warehouse": warehouse, - }) + logger := logging.LoggerFromContext(ctx).WithValues( + "project", project, + "warehouse", warehouse, + ) freight := kargoapi.FreightList{} if err := c.listFreightFn( @@ -103,7 +102,7 @@ func (c *collector) cleanWarehouseFreight( kubeclient.StagesByFreightIndexField: f.Name, }, ); err != nil { - logger.WithField("freight", f).Error("error listing Stages using Freight") + logger.Error(err, "error listing Stages using Freight", "freight", f) return fmt.Errorf( "error listing Stages in Project %q using Freight %q: %w", project, @@ -127,12 +126,12 @@ func (c *collector) cleanWarehouseFreight( if time.Since(f.CreationTimestamp.Time) < c.cfg.MinFreightDeletionAge { continue // Not old enough } - freightLogger := logger.WithField("freight", f.Name) + freightLogger := logger.WithValues("freight", f.Name) if err := c.deleteFreightFn(ctx, &f); err != nil { - freightLogger.Errorf("error deleting Freight: %s", err) + freightLogger.Error(err, "error deleting Freight") deleteErrCount++ } else { - freightLogger.Debug("deleted Freight") + freightLogger.V(1).Info("deleted Freight") } } diff --git a/internal/garbage/projects.go b/internal/garbage/projects.go index 564e3448e..0673d33fc 100644 --- a/internal/garbage/projects.go +++ b/internal/garbage/projects.go @@ -23,16 +23,16 @@ func (c *collector) cleanProjects( if !ok { return // Channel was closed } - projectLogger := logger.WithField("project", project) + projectLogger := logger.WithValues("project", project) if err := c.cleanProjectFn(ctx, project); err != nil { - projectLogger.Errorf("error cleaning Project: %s", err) + projectLogger.Error(err, "error cleaning Project") select { case errCh <- struct{}{}: case <-ctx.Done(): return } } else { - projectLogger.Debug("cleaned Project") + 
projectLogger.V(1).Info("cleaned Project") } case <-ctx.Done(): return diff --git a/internal/garbage/promotions.go b/internal/garbage/promotions.go index a36f9af7f..ace4fe3d2 100644 --- a/internal/garbage/promotions.go +++ b/internal/garbage/promotions.go @@ -6,7 +6,6 @@ import ( "sort" "time" - "github.com/sirupsen/logrus" "sigs.k8s.io/controller-runtime/pkg/client" kargoapi "github.com/akuity/kargo/api/v1alpha1" @@ -20,7 +19,7 @@ import ( // Promotion (from the same Stage) in a non-terminal phase. // - Older than some configurable minimum age. func (c *collector) cleanProjectPromotions(ctx context.Context, project string) error { - logger := logging.LoggerFromContext(ctx).WithField("project", project) + logger := logging.LoggerFromContext(ctx).WithValues("project", project) stages := &kargoapi.StageList{} if err := c.listStagesFn( @@ -33,13 +32,13 @@ func (c *collector) cleanProjectPromotions(ctx context.Context, project string) var cleanErrCount int for _, stage := range stages.Items { - stageLogger := logger.WithField("stage", stage.Name) + stageLogger := logger.WithValues("stage", stage.Name) if err := c.cleanStagePromotionsFn(ctx, project, stage.Name); err != nil { - stageLogger.Error("error cleaning Promotions to Stage") + stageLogger.Error(err, "error cleaning Promotions to Stage") cleanErrCount++ continue } - stageLogger.Debug("cleaned Promotions to Stage") + stageLogger.V(1).Info("cleaned Promotions to Stage") } if cleanErrCount > 0 { @@ -57,10 +56,10 @@ func (c *collector) cleanStagePromotions( project string, stage string, ) error { - logger := logging.LoggerFromContext(ctx).WithFields(logrus.Fields{ - "project": project, - "stage": stage, - }) + logger := logging.LoggerFromContext(ctx).WithValues( + "project", project, + "stage", stage, + ) promos := kargoapi.PromotionList{} if err := c.listPromotionsFn( @@ -105,12 +104,12 @@ func (c *collector) cleanStagePromotions( if time.Since(promo.CreationTimestamp.Time) < c.cfg.MinPromotionDeletionAge { continue // Not old enough } - promoLogger := logger.WithField("promotion", promo.Name) + promoLogger := logger.WithValues("promotion", promo.Name) if err := c.deletePromotionFn(ctx, &promo); err != nil { - promoLogger.Errorf("error deleting Promotion: %s", err) + promoLogger.Error(err, "error deleting Promotion") deleteErrCount++ } else { - promoLogger.Debug("deleted Promotion") + promoLogger.V(1).Info("deleted Promotion") } } diff --git a/internal/image/digest_selector.go b/internal/image/digest_selector.go index 263662b12..fed95eef9 100644 --- a/internal/image/digest_selector.go +++ b/internal/image/digest_selector.go @@ -5,8 +5,6 @@ import ( "errors" "fmt" - log "github.com/sirupsen/logrus" - "github.com/akuity/kargo/internal/logging" ) @@ -36,13 +34,13 @@ func newDigestSelector( // Select implements the Selector interface. 
func (d *digestSelector) Select(ctx context.Context) (*Image, error) { - logger := logging.LoggerFromContext(ctx).WithFields(log.Fields{ - "registry": d.repoClient.registry.name, - "image": d.repoClient.image, - "selectionStrategy": SelectionStrategyDigest, - "platformConstrained": d.platform != nil, - }) - logger.Trace("selecting image") + logger := logging.LoggerFromContext(ctx).WithValues( + "registry", d.repoClient.registry.name, + "image", d.repoClient.image, + "selectionStrategy", SelectionStrategyDigest, + "platformConstrained", d.platform != nil, + ) + logger.V(2).Info("selecting image") ctx = logging.ContextWithLogger(ctx, logger) @@ -51,10 +49,10 @@ func (d *digestSelector) Select(ctx context.Context) (*Image, error) { return nil, fmt.Errorf("error listing tags: %w", err) } if len(tags) == 0 { - logger.Trace("found no tags") + logger.V(2).Info("found no tags") return nil, nil } - logger.Trace("got all tags") + logger.V(2).Info("got all tags") for _, tag := range tags { if tag != d.constraint { @@ -65,19 +63,20 @@ func (d *digestSelector) Select(ctx context.Context) (*Image, error) { return nil, fmt.Errorf("error retrieving image with tag %q: %w", tag, err) } if image == nil { - logger.Tracef( - "image with tag %q was found, but did not match platform constraint", - tag, + logger.V(2).Info( + "image was found, but did not match platform constraint", + "tag", tag, ) return nil, nil } - logger.WithFields(log.Fields{ - "tag": image.Tag, - "digest": image.Digest.String(), - }).Trace("found image") + logger.V(2).Info( + "found image", + "tag", image.Tag, + "digest", image.Digest.String(), + ) return image, nil } - logger.Trace("no images matched criteria") + logger.V(2).Info("no images matched criteria") return nil, nil } diff --git a/internal/image/lexical_selector.go b/internal/image/lexical_selector.go index dc99fa742..802b0aadd 100644 --- a/internal/image/lexical_selector.go +++ b/internal/image/lexical_selector.go @@ -6,8 +6,6 @@ import ( "regexp" "sort" - log "github.com/sirupsen/logrus" - "github.com/akuity/kargo/internal/logging" ) @@ -38,13 +36,13 @@ func newLexicalSelector( // Select implements the Selector interface. 
func (l *lexicalSelector) Select(ctx context.Context) (*Image, error) { - logger := logging.LoggerFromContext(ctx).WithFields(log.Fields{ - "registry": l.repoClient.registry.name, - "image": l.repoClient.image, - "selectionStrategy": SelectionStrategyLexical, - "platformConstrained": l.platform != nil, - }) - logger.Trace("selecting image") + logger := logging.LoggerFromContext(ctx).WithValues( + "registry", l.repoClient.registry.name, + "image", l.repoClient.image, + "selectionStrategy", SelectionStrategyLexical, + "platformConstrained", l.platform != nil, + ) + logger.V(2).Info("selecting image") ctx = logging.ContextWithLogger(ctx, logger) @@ -53,10 +51,10 @@ func (l *lexicalSelector) Select(ctx context.Context) (*Image, error) { return nil, fmt.Errorf("error listing tags: %w", err) } if len(tags) == 0 { - logger.Trace("found no tags") + logger.V(2).Info("found no tags") return nil, nil } - logger.Trace("got all tags") + logger.V(2).Info("got all tags") if l.allowRegex != nil || len(l.ignore) > 0 { matchedTags := make([]string, 0, len(tags)) @@ -66,14 +64,14 @@ func (l *lexicalSelector) Select(ctx context.Context) (*Image, error) { } } if len(matchedTags) == 0 { - logger.Trace("no tags matched criteria") + logger.V(2).Info("no tags matched criteria") return nil, nil } tags = matchedTags } - logger.Tracef("%d tags matched criteria", len(tags)) + logger.V(2).Info("tags matched criteria", "numberOfTags", len(tags)) - logger.Trace("sorting tags lexically") + logger.V(2).Info("sorting tags lexically") sortTagsLexically(tags) tag := tags[0] @@ -82,17 +80,18 @@ func (l *lexicalSelector) Select(ctx context.Context) (*Image, error) { return nil, fmt.Errorf("error retrieving image with tag %q: %w", tag, err) } if image == nil { - logger.Tracef( - "image with tag %q was found, but did not match platform constraint", - tag, + logger.V(2).Info( + "image was found, but did not match platform constraint", + "tag", tag, ) return nil, nil } - logger.WithFields(log.Fields{ - "tag": image.Tag, - "digest": image.Digest.String(), - }).Trace("found image") + logger.V(2).Info( + "found image", + "tag", image.Tag, + "digest", image.Digest.String(), + ) return image, nil } diff --git a/internal/image/newest_build_selector.go b/internal/image/newest_build_selector.go index 491a17bc6..a9dd9fa03 100644 --- a/internal/image/newest_build_selector.go +++ b/internal/image/newest_build_selector.go @@ -7,8 +7,6 @@ import ( "sort" "sync" - log "github.com/sirupsen/logrus" - "github.com/akuity/kargo/internal/logging" ) @@ -39,13 +37,13 @@ func newNewestBuildSelector( // Select implements the Selector interface. 
func (n *newestBuildSelector) Select(ctx context.Context) (*Image, error) { - logger := logging.LoggerFromContext(ctx).WithFields(log.Fields{ - "registry": n.repoClient.registry.name, - "image": n.repoClient.image, - "selectionStrategy": SelectionStrategyNewestBuild, - "platformConstrained": n.platform != nil, - }) - logger.Trace("selecting image") + logger := logging.LoggerFromContext(ctx).WithValues( + "registry", n.repoClient.registry.name, + "image", n.repoClient.image, + "selectionStrategy", SelectionStrategyNewestBuild, + "platformConstrained", n.platform != nil, + ) + logger.V(2).Info("selecting image") ctx = logging.ContextWithLogger(ctx, logger) @@ -54,10 +52,10 @@ func (n *newestBuildSelector) Select(ctx context.Context) (*Image, error) { return nil, fmt.Errorf("error listing tags: %w", err) } if len(tags) == 0 { - logger.Trace("found no tags") + logger.V(2).Info("found no tags") return nil, nil } - logger.Trace("got all tags") + logger.V(2).Info("got all tags") if n.allowRegex != nil || len(n.ignore) > 0 { matchedTags := make([]string, 0, len(tags)) @@ -67,14 +65,14 @@ func (n *newestBuildSelector) Select(ctx context.Context) (*Image, error) { } } if len(matchedTags) == 0 { - logger.Trace("no tags matched criteria") + logger.V(2).Info("no tags matched criteria") return nil, nil } tags = matchedTags } - logger.Tracef("%d tags matched criteria", len(tags)) + logger.V(2).Info("some tags matched criteria", "tags", len(tags)) - logger.Trace("retrieving images for all tags that matched criteria") + logger.V(2).Info("retrieving images for all tags that matched criteria") images, err := n.getImagesByTags(ctx, tags) if err != nil { return nil, fmt.Errorf("error retrieving images for all matched tags: %w", err) @@ -84,15 +82,16 @@ func (n *newestBuildSelector) Select(ctx context.Context) (*Image, error) { return nil, nil } - logger.Trace("sorting images by date") + logger.V(2).Info("sorting images by date") sortImagesByDate(images) if n.platform == nil { image := images[0] - logger.WithFields(log.Fields{ - "tag": image.Tag, - "digest": image.Digest.String(), - }).Trace("found image") + logger.V(2).Info( + "found image", + "tag", image.Tag, + "digest", image.Digest.String(), + ) return &image, nil } @@ -103,18 +102,19 @@ func (n *newestBuildSelector) Select(ctx context.Context) (*Image, error) { return nil, fmt.Errorf("error retrieving image with digest %q: %w", digest.String(), err) } if image == nil { - logger.Tracef( - "image with digest %q was found, but did not match platform constraint", - digest.String(), + logger.V(2).Info( + "image with a digest was found, but did not match platform constraint", + "digest", digest.String(), ) return nil, nil } image.Tag = tag - logger.WithFields(log.Fields{ - "tag": image.Tag, - "digest": image.Digest.String(), - }).Trace("found image") + logger.V(2).Info( + "found image", + "tag", image.Tag, + "digest", image.Digest.String(), + ) return image, nil } diff --git a/internal/image/repository_client.go b/internal/image/repository_client.go index f8337e3ed..e52411327 100644 --- a/internal/image/repository_client.go +++ b/internal/image/repository_client.go @@ -244,7 +244,7 @@ var getChallengeManager = func( // getTags retrieves a list of all tags from the repository. 
func (r *repositoryClient) getTags(ctx context.Context) ([]string, error) { logger := logging.LoggerFromContext(ctx) - logger.Trace("retrieving tags for image") + logger.V(2).Info("retrieving tags for image") tagSvc := r.repo.Tags(ctx) tags, err := tagSvc.All(ctx) if err != nil { @@ -282,14 +282,14 @@ func (r *repositoryClient) getImageByDigest( platform *platformConstraint, ) (*Image, error) { logger := logging.LoggerFromContext(ctx) - logger.Tracef("retrieving image for manifest %s", d) + logger.V(2).Info("retrieving image for manifest", "digest", d) if entry, exists := r.registry.imageCache.Get(d.String()); exists { image := entry.(Image) // nolint: forcetypeassert return &image, nil } - logger.Tracef("image for manifest %s NOT found in cache", d) + logger.V(2).Info("image for manifest NOT found in cache", "digest", d) manifest, err := r.getManifestByDigestFn(ctx, d) if err != nil { @@ -303,7 +303,7 @@ func (r *repositoryClient) getImageByDigest( if image != nil { // Cache the image r.registry.imageCache.Set(d.String(), *image, cache.DefaultExpiration) - logger.Tracef("cached image for manifest %s", d) + logger.V(2).Info("cached image for manifest", "digest", d) } return image, nil @@ -315,7 +315,7 @@ func (r *repositoryClient) getManifestByTag( tag string, ) (distribution.Manifest, error) { logger := logging.LoggerFromContext(ctx) - logger.Tracef("retrieving manifest for tag %q from repository", tag) + logger.V(2).Info("retrieving manifest from repository", "tag", tag) manifestSvc, err := r.repo.Manifests(ctx) if err != nil { return nil, fmt.Errorf("error getting manifest service: %w", err) @@ -338,7 +338,7 @@ func (r *repositoryClient) getManifestByDigest( d digest.Digest, ) (distribution.Manifest, error) { logger := logging.LoggerFromContext(ctx) - logger.Tracef("retrieving manifest for digest %q from repository", d.String()) + logger.V(2).Info("retrieving manifest from repository", "digest", d.String()) manifestSvc, err := r.repo.Manifests(ctx) if err != nil { return nil, fmt.Errorf("error getting manifest service: %w", err) @@ -399,7 +399,7 @@ func (r *repositoryClient) extractImageFromV1Manifest( digest := digest.FromBytes(manifestBytes) logger := logging.LoggerFromContext(context.Background()) - logger.Tracef("extracting image from V1 manifest %s", digest) + logger.V(2).Info("extracting image from V1 manifest", "digest", digest) if len(manifest.History) == 0 { return nil, fmt.Errorf("no history information found in V1 manifest %s", digest) @@ -449,7 +449,7 @@ func (r *repositoryClient) extractImageFromV2Manifest( digest := digest.FromBytes(manifestBytes) logger := logging.LoggerFromContext(ctx) - logger.Tracef("extracting image from V2 manifest %s", digest) + logger.V(2).Info("extracting image from V2 manifest", "digest", digest) // This referenced config object has platform information and creation // timestamp @@ -509,7 +509,7 @@ func (r *repositoryClient) extractImageFromOCIManifest( digest := digest.FromBytes(manifestBytes) logger := logging.LoggerFromContext(ctx) - logger.Tracef("extracting image from OCI manifest %s", digest) + logger.V(2).Info("extracting image from OCI manifest", "digest", digest) // This referenced config object has platform information and creation // timestamp @@ -577,9 +577,9 @@ func (r *repositoryClient) extractImageFromCollection( digest := digest.FromBytes(manifestBytes) logger := logging.LoggerFromContext(ctx) - logger.Tracef( - "extracting image from V2 manifest list or OCI index %s", - digest, + logger.V(2).Info( + "extracting image from V2 
manifest list or OCI index", + "digest", digest, ) refs := make([]distribution.Descriptor, 0, len(collection.References())) @@ -688,7 +688,7 @@ func (r *repositoryClient) getBlob( digest digest.Digest, ) ([]byte, error) { logger := logging.LoggerFromContext(ctx) - logger.Tracef("retrieving blob for digest %q", digest.String()) + logger.V(2).Info("retrieving blob for digest", "digest", digest.String()) return r.repo.Blobs(ctx).Get(ctx, digest) } diff --git a/internal/image/selector_docker_hub_test.go b/internal/image/selector_docker_hub_test.go index d3a7af396..54e5b9c5c 100644 --- a/internal/image/selector_docker_hub_test.go +++ b/internal/image/selector_docker_hub_test.go @@ -8,7 +8,6 @@ import ( "testing" "github.com/Masterminds/semver" - log "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" "github.com/akuity/kargo/internal/logging" @@ -28,7 +27,7 @@ func TestSelectImageDockerHub(t *testing.T) { ctx := context.Background() logger := logging.LoggerFromContext(ctx) - logger.Logger.SetLevel(log.TraceLevel) + logging.SetLevel(logging.LevelTrace) ctx = logging.ContextWithLogger(ctx, logger) t.Run("digest strategy miss", func(t *testing.T) { diff --git a/internal/image/selector_ghcr_test.go b/internal/image/selector_ghcr_test.go index 4b027b8f1..080b17d99 100644 --- a/internal/image/selector_ghcr_test.go +++ b/internal/image/selector_ghcr_test.go @@ -8,7 +8,6 @@ import ( "testing" "github.com/Masterminds/semver" - log "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" "github.com/akuity/kargo/internal/logging" @@ -23,7 +22,7 @@ func TestSelectImageGHCR(t *testing.T) { ctx := context.Background() logger := logging.LoggerFromContext(ctx) - logger.Logger.SetLevel(log.TraceLevel) + logging.SetLevel(logging.LevelTrace) ctx = logging.ContextWithLogger(ctx, logger) t.Run("digest strategy", func(t *testing.T) { diff --git a/internal/image/semver_selector.go b/internal/image/semver_selector.go index 91d04bcdc..6bc4129ef 100644 --- a/internal/image/semver_selector.go +++ b/internal/image/semver_selector.go @@ -7,7 +7,6 @@ import ( "sort" "github.com/Masterminds/semver/v3" - log "github.com/sirupsen/logrus" "github.com/akuity/kargo/internal/logging" ) @@ -52,13 +51,13 @@ func newSemVerSelector( // Select implements the Selector interface. 
func (s *semVerSelector) Select(ctx context.Context) (*Image, error) { - logger := logging.LoggerFromContext(ctx).WithFields(log.Fields{ - "registry": s.repoClient.registry.name, - "image": s.repoClient.image, - "selectionStrategy": SelectionStrategySemVer, - "platformConstrained": s.platform != nil, - }) - logger.Trace("selecting image") + logger := logging.LoggerFromContext(ctx).WithValues( + "registry", s.repoClient.registry.name, + "image", s.repoClient.image, + "selectionStrategy", SelectionStrategySemVer, + "platformConstrained", s.platform != nil, + ) + logger.V(2).Info("selecting image") ctx = logging.ContextWithLogger(ctx, logger) @@ -67,10 +66,10 @@ func (s *semVerSelector) Select(ctx context.Context) (*Image, error) { return nil, fmt.Errorf("error listing tags: %w", err) } if len(tags) == 0 { - logger.Trace("found no tags") + logger.V(2).Info("found no tags") return nil, nil } - logger.Trace("got all tags") + logger.V(2).Info("got all tags") images := make([]Image, 0, len(tags)) for _, tag := range tags { @@ -92,12 +91,12 @@ func (s *semVerSelector) Select(ctx context.Context) (*Image, error) { } } if len(images) == 0 { - logger.Trace("no tags matched criteria") + logger.V(2).Info("no tags matched criteria") return nil, nil } - logger.Tracef("%d tags matched criteria", len(images)) + logger.V(2).Info("tags matched criteria", "numberOfTags", len(images)) - logger.Trace("sorting images by semantic version") + logger.V(2).Info("sorting images by semantic version") sortImagesBySemVer(images) tag := images[0].Tag @@ -106,17 +105,18 @@ func (s *semVerSelector) Select(ctx context.Context) (*Image, error) { return nil, fmt.Errorf("error retrieving image with tag %q: %w", tag, err) } if image == nil { - logger.Tracef( - "image with tag %q was found, but did not match platform constraint", - tag, + logger.V(2).Info( + "image was found, but did not match platform constraint", + "tag", tag, ) return nil, nil } - logger.WithFields(log.Fields{ - "tag": image.Tag, - "digest": image.Digest.String(), - }).Trace("found image") + logger.V(2).Info( + "found image", + "tag", image.Tag, + "digest", image.Digest.String(), + ) return image, nil } diff --git a/internal/kargo/kargo.go b/internal/kargo/kargo.go index 0b3816daf..ee9a04d63 100644 --- a/internal/kargo/kargo.go +++ b/internal/kargo/kargo.go @@ -4,8 +4,8 @@ import ( "fmt" "strings" + "github.com/go-logr/logr" "github.com/oklog/ulid/v2" - log "github.com/sirupsen/logrus" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" @@ -49,7 +49,7 @@ func NewPromotion(stage kargoapi.Stage, freight string) kargoapi.Promotion { return promotion } -func NewPromoWentTerminalPredicate(logger *log.Entry) PromoWentTerminal { +func NewPromoWentTerminalPredicate(logger logr.Logger) PromoWentTerminal { return PromoWentTerminal{ logger: logger, } @@ -60,7 +60,7 @@ func NewPromoWentTerminalPredicate(logger *log.Entry) PromoWentTerminal { // Also used by promo reconciler to enqueue the next highest priority promotion. 
type PromoWentTerminal struct { predicate.Funcs - logger *log.Entry + logger logr.Logger } func (p PromoWentTerminal) Create(_ event.CreateEvent) bool { @@ -83,21 +83,21 @@ func (p PromoWentTerminal) Generic(_ event.GenericEvent) bool { // Update implements default UpdateEvent filter for checking if a promotion went terminal func (p PromoWentTerminal) Update(e event.UpdateEvent) bool { if e.ObjectOld == nil { - p.logger.Errorf("Update event has no old object to update: %v", e) + p.logger.Error(nil, "Update event has no old object to update", "event", e) return false } if e.ObjectNew == nil { - p.logger.Errorf("Update event has no new object for update: %v", e) + p.logger.Error(nil, "Update event has no new object for update", "event", e) return false } newPromo, ok := e.ObjectNew.(*kargoapi.Promotion) if !ok { - p.logger.Errorf("Failed to convert new promo: %v", e.ObjectNew) + p.logger.Error(nil, "Failed to convert new promo", "object", e.ObjectNew) return false } oldPromo, ok := e.ObjectOld.(*kargoapi.Promotion) if !ok { - p.logger.Errorf("Failed to convert old promo: %v", e.ObjectOld) + p.logger.Error(nil, "Failed to convert old promo", "object", e.ObjectOld) return false } if newPromo.Status.Phase.IsTerminal() && !oldPromo.Status.Phase.IsTerminal() { diff --git a/internal/logging/context.go b/internal/logging/context.go index dbc97196b..631e32670 100644 --- a/internal/logging/context.go +++ b/internal/logging/context.go @@ -3,10 +3,12 @@ package logging import ( "context" "flag" + "fmt" + "log/slog" "strconv" + "strings" - "github.com/bombsimon/logrusr/v4" - log "github.com/sirupsen/logrus" + "github.com/go-logr/logr" "k8s.io/klog/v2" runtimelog "sigs.k8s.io/controller-runtime/pkg/log" @@ -15,32 +17,85 @@ import ( type loggerContextKey struct{} -var globalLogger *log.Entry +var ( + globalLogger logr.Logger + + // programLevel allows for setting the logging level dynamically + // via SetLevel(). + programLevel = new(slog.LevelVar) +) + +const ( + LevelTrace = slog.Level(-2) + LevelDebug = slog.Level(-1) + LevelInfo = slog.Level(0) + LevelError = slog.Level(8) + + // These constants contain logging level strings, + // purely for the performance benefit. + TRACE = "TRACE" + DEBUG = "DEBUG" + INFO = "INFO" + ERROR = "ERROR" +) func init() { - globalLogger = log.New().WithFields(nil) - level, err := log.ParseLevel(os.GetEnv("LOG_LEVEL", "INFO")) + level, err := parseLevel(os.GetEnv("LOG_LEVEL", "INFO")) if err != nil { panic(err) } - globalLogger.Logger.SetLevel(level) + programLevel.Set(level) + + opts := &slog.HandlerOptions{ + Level: programLevel, + ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr { + // Customize the name of the level key and the output string, + // including custom level values. Meaning: + // log.V(1).Info() becomes level=DEBUG instead of level=DEBUG+3 + // log.V(2).Info() becomes level=TRACE instead of level=DEBUG+2 + if a.Key == slog.LevelKey { + // Handle custom level values. 
+ level := a.Value.Any().(slog.Level) + + switch { + case level < LevelDebug: + a.Value = slog.StringValue(TRACE) + case level < LevelInfo: + a.Value = slog.StringValue(DEBUG) + case level < LevelError: + a.Value = slog.StringValue(INFO) + default: + a.Value = slog.StringValue(ERROR) + } + } + return a + }, + } + + globalLogger = logr.FromSlogHandler(slog.NewTextHandler(os.Stderr, opts)) + SetKLogLevel(os.GetEnvInt("KLOG_LEVEL", 0)) - runtimelog.SetLogger(logrusr.New(globalLogger)) + runtimelog.SetLogger(globalLogger) +} + +// DefaultLogger returns a logr.Logger with slog as the backend. It writes to +// stderr and is hardcoded to Info level. +func DefaultLogger() logr.Logger { + return logr.FromSlogHandler(slog.NewTextHandler(os.Stderr, nil)) } // ContextWithLogger returns a context.Context that has been augmented with -// the provided log.Entry. -func ContextWithLogger(ctx context.Context, logger *log.Entry) context.Context { +// the provided logr.Logger. +func ContextWithLogger(ctx context.Context, logger logr.Logger) context.Context { return context.WithValue(ctx, loggerContextKey{}, logger) } -// LoggerFromContext extracts a *log.Entry from the provided context.Context and -// returns it. If no *log.Entry is found, a global, error-level *log.Entry is -// returned. -func LoggerFromContext(ctx context.Context) *log.Entry { +// LoggerFromContext extracts a logr.Logger from the provided context.Context and +// returns it. If no logr.Logger is found, a global logr.Logger is returned. +func LoggerFromContext(ctx context.Context) logr.Logger { if logger := ctx.Value(loggerContextKey{}); logger != nil { - return ctx.Value(loggerContextKey{}).(*log.Entry) // nolint: forcetypeassert + return ctx.Value(loggerContextKey{}).(logr.Logger) // nolint: forcetypeassert } return globalLogger } @@ -48,6 +103,27 @@ func LoggerFromContext(ctx context.Context) *log.Entry { // SetKLogLevel set the klog level for the k8s go-client func SetKLogLevel(klogLevel int) { klog.InitFlags(nil) - klog.SetOutput(globalLogger.Writer()) + klog.SetOutput(os.Stderr) _ = flag.Set("v", strconv.Itoa(klogLevel)) } + +// SetLevel dynamically modifies the level of the global logr.Logger. +func SetLevel(l slog.Level) { + programLevel.Set(l) +} + +func parseLevel(lvl string) (slog.Level, error) { + switch strings.ToLower(lvl) { + case "error": + return LevelError, nil + case "info": + return LevelInfo, nil + case "debug": + return LevelDebug, nil + case "trace": + return LevelTrace, nil + } + + var l slog.Level + return l, fmt.Errorf("not a valid log level: %q", lvl) +} diff --git a/internal/logging/context_test.go b/internal/logging/context_test.go index 999127975..f3048842f 100644 --- a/internal/logging/context_test.go +++ b/internal/logging/context_test.go @@ -4,14 +4,14 @@ import ( "context" "testing" - log "github.com/sirupsen/logrus" + "github.com/go-logr/logr" "github.com/stretchr/testify/require" ) func TestContextWithLogger(t *testing.T) { - testLogger := log.New().WithFields(nil) + testLogger := logr.New(nil) ctx := ContextWithLogger(context.Background(), testLogger) - require.Same(t, testLogger, ctx.Value(loggerContextKey{})) + require.Equal(t, testLogger, ctx.Value(loggerContextKey{})) } func TestLoggerFromContext(t *testing.T) { @@ -19,10 +19,11 @@ func TestLoggerFromContext(t *testing.T) { // This should give us the global logger if one was never explicitly added to // the context.
require.NotNil(t, logger) - require.IsType(t, &log.Entry{}, logger) - require.Equal(t, log.InfoLevel, logger.Logger.Level) + require.IsType(t, logr.Logger{}, logger) + require.Equal(t, true, logger.V(0).Enabled()) // INFO level enabled + require.Equal(t, false, logger.V(1).Enabled()) // DEBUG level disabled - testLogger := log.New().WithFields(nil) + testLogger := logr.New(nil) ctx := context.WithValue(context.Background(), loggerContextKey{}, testLogger) - require.Same(t, testLogger, LoggerFromContext(ctx)) + require.Equal(t, testLogger, LoggerFromContext(ctx)) } diff --git a/internal/os/env.go b/internal/os/env.go index 5c110fa40..deb5e7b1b 100644 --- a/internal/os/env.go +++ b/internal/os/env.go @@ -29,3 +29,5 @@ func GetEnvInt(key string, defaultValue int) int { } return int(value) } + +var Stderr = os.Stderr diff --git a/internal/webhook/project/webhook.go b/internal/webhook/project/webhook.go index ff2ec368d..4f3b5a54d 100644 --- a/internal/webhook/project/webhook.go +++ b/internal/webhook/project/webhook.go @@ -5,7 +5,6 @@ import ( "fmt" "github.com/kelseyhightower/envconfig" - log "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -199,10 +198,10 @@ func (w *webhook) ensureNamespace( ctx context.Context, project *kargoapi.Project, ) error { - logger := logging.LoggerFromContext(ctx).WithFields(log.Fields{ - "project": project.Name, - "name": project.Name, - }) + logger := logging.LoggerFromContext(ctx).WithValues( + "project", project.Name, + "name", project.Name, + ) ns := &corev1.Namespace{} err := w.getNamespaceFn( @@ -231,7 +230,7 @@ func (w *webhook) ensureNamespace( ), ) } - logger.Debug("namespace exists but no conflict was found") + logger.V(1).Info("namespace exists but no conflict was found") return nil } if !apierrors.IsNotFound(err) { @@ -240,7 +239,7 @@ func (w *webhook) ensureNamespace( ) } - logger.Debug("namespace does not exist; creating it") + logger.V(1).Info("namespace does not exist; creating it") ns = &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ @@ -268,7 +267,7 @@ func (w *webhook) ensureNamespace( fmt.Errorf("error creating namespace %q: %w", project.Name, err), ) } - logger.Debug("created namespace") + logger.V(1).Info("created namespace") return nil } @@ -279,12 +278,12 @@ func (w *webhook) ensureSecretPermissions( ) error { const roleBindingName = "kargo-api-server-manage-project-secrets" - logger := logging.LoggerFromContext(ctx).WithFields(log.Fields{ - "project": project.Name, - "name": project.Name, - "namespace": project.Name, - "roleBinding": roleBindingName, - }) + logger := logging.LoggerFromContext(ctx).WithValues( + "project", project.Name, + "name", project.Name, + "namespace", project.Name, + "roleBinding", roleBindingName, + ) roleBinding := &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ @@ -311,7 +310,7 @@ func (w *webhook) ensureSecretPermissions( } if err := w.createRoleBindingFn(ctx, roleBinding); err != nil { if apierrors.IsAlreadyExists(err) { - logger.Debug("role binding already exists in project namespace") + logger.V(1).Info("role binding already exists in project namespace") return nil } return apierrors.NewInternalError( @@ -323,7 +322,7 @@ func (w *webhook) ensureSecretPermissions( ), ) } - logger.Debug("granted API server access to manage project secrets") + logger.V(1).Info("granted API server access to manage project secrets") return nil } diff --git a/internal/webhook/promotion/webhook.go b/internal/webhook/promotion/webhook.go index 
62c25f937..7adfa1075 100644 --- a/internal/webhook/promotion/webhook.go +++ b/internal/webhook/promotion/webhook.go @@ -238,7 +238,7 @@ func (w *webhook) authorize( req, err := w.admissionRequestFromContextFn(ctx) if err != nil { - logger.Error(err) + logger.Error(err, "error getting admission request from context") return apierrors.NewForbidden( promotionGroupResource, promo.Name, @@ -264,7 +264,7 @@ func (w *webhook) authorize( }, } if err := w.createSubjectAccessReviewFn(ctx, accessReview); err != nil { - logger.Error(err) + logger.Error(err, "error creating SubjectAccessReview") return apierrors.NewForbidden( promotionGroupResource, promo.Name,