This repository has been archived by the owner on Nov 9, 2022. It is now read-only.

Make log level configurable #98

Closed · wants to merge 1 commit
5 changes: 5 additions & 0 deletions Makefile
@@ -27,12 +27,17 @@ LD_FLAGS := "-w $(shell EFFECTIVE_VERSION=$(EFFECTIVE_VERSION) $(REPO_ROOT)/vend
 # Rules for local development scenarios #
 #########################################
 
+ZAP_DEVEL := true
+ZAP_LOG_LEVEL := debug
+
 .PHONY: start
 start:
 	@GO111MODULE=on go run \
 		-mod=vendor \
 		-ldflags $(LD_FLAGS) \
 		./cmd/gardener-resource-manager \
+		--zap-devel=$(ZAP_DEVEL) \
+		--zap-log-level=$(ZAP_LOG_LEVEL) \
Contributor:
I know that this isn't your implementation, but I somehow don't like the inconsistency between the named log levels here and the integer levels here.

  1. The user doesn't know which log levels exist (this could be part of the command help).
  2. As a developer using log.V(1).Info or log.Info, it's unclear at which log level each entry is printed.

Do you think it makes sense to either (1) mask --zap-log-level and expose --v with an integer that maps back to a zap log level, or (2) use constants instead of integers in the code, e.g. log.V(1).Info --> log.V(Debug).Info?
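For illustration, a minimal sketch of option (2); the constant names and values are hypothetical, chosen to match zapr's convention that logr's V(n) is emitted at zap level -n:

```go
package log

// Hypothetical named verbosity levels for use with logr's V(); not part of this PR.
const (
	// Info entries are always printed; plain log.Info is equivalent to log.V(Info).Info.
	Info = 0
	// Debug entries are only printed when verbosity >= 1, which zapr maps
	// to zap's DebugLevel (-1).
	Debug = 1
)
```

A call site then reads log.V(Debug).Info(...), making the intended level explicit without requiring the caller to know the zap mapping.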

Contributor Author:
Discussed this with @timuthy on the phone:
We both find the current implementation a bit confusing, especially the level inversion that happens in the zap options in controller-runtime and in zapr (see the sketch below).

We think it makes sense to check how other logging implementations handle their log levels, e.g. the new klog implementation.
It probably makes sense to add our own flags (like --v) to a) stay flexible enough to change the underlying logging library one day without breaking compatibility and b) align more closely with other Kubernetes components.

In general, it might also make sense to do similar refactorings in the extensions and in gardener itself so that everything is consistent.
So let's look into this a bit and derive a guideline first, before we end up with a zoo of different ways to handle logging in our components :D
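A minimal sketch of the inversion in question (assuming controller-runtime's logzap.Level option and zapr's V-to-zap mapping; not part of this PR):

```go
package main

import (
	"go.uber.org/zap/zapcore"
	logzap "sigs.k8s.io/controller-runtime/pkg/log/zap"
)

func main() {
	// logr verbosity counts upwards (V(0), V(1), ...) while zap severity
	// counts upwards from DebugLevel = -1, so zapr emits logr's V(n) at
	// zap level -n. Enabling zapcore.Level(-1) therefore enables V(1) logs.
	log := logzap.New(logzap.Level(zapcore.Level(-1)))
	log.Info("emitted at zap InfoLevel (0)")        // logr V(0)
	log.V(1).Info("emitted at zap DebugLevel (-1)") // visible only with level -1 enabled
}
```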

/author-action
/needs changes

 		--leader-election=false \
 		--sync-period=60s \
 		--max-concurrent-workers=10 \
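Since ZAP_DEVEL and ZAP_LOG_LEVEL are ordinary make variables, the new defaults can be overridden per invocation, e.g. `make start ZAP_DEVEL=false ZAP_LOG_LEVEL=info` (a usage sketch; any value accepted by --zap-log-level should work).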
65 changes: 48 additions & 17 deletions cmd/gardener-resource-manager/app/app.go
@@ -16,51 +16,79 @@ package app
 
 import (
 	"context"
+	"flag"
 	"fmt"
 	"sync"
 
+	"github.com/go-logr/logr"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
 	runtimelog "sigs.k8s.io/controller-runtime/pkg/log"
+	logzap "sigs.k8s.io/controller-runtime/pkg/log/zap"
 	"sigs.k8s.io/controller-runtime/pkg/manager"
 
 	resourcemanagercmd "github.com/gardener/gardener-resource-manager/pkg/cmd"
 	healthcontroller "github.com/gardener/gardener-resource-manager/pkg/controller/health"
 	resourcecontroller "github.com/gardener/gardener-resource-manager/pkg/controller/managedresource"
 	secretcontroller "github.com/gardener/gardener-resource-manager/pkg/controller/secret"
 	"github.com/gardener/gardener-resource-manager/pkg/healthz"
-	logpkg "github.com/gardener/gardener-resource-manager/pkg/log"
 	"github.com/gardener/gardener-resource-manager/pkg/version"
 )
 
-var log = runtimelog.Log.WithName("gardener-resource-manager")
-
 // NewResourceManagerCommand creates a new command for running a gardener resource manager controllers.
 func NewResourceManagerCommand() *cobra.Command {
-	runtimelog.SetLogger(logpkg.ZapLogger(false))
-	entryLog := log.WithName("entrypoint")
-
-	managerOpts := &resourcemanagercmd.ManagerOptions{}
-	sourceClientOpts := &resourcemanagercmd.SourceClientOptions{}
-	targetClientOpts := &resourcemanagercmd.TargetClientOptions{}
+	var (
+		zapOpts = &logzap.Options{}
+
+		managerOpts      = &resourcemanagercmd.ManagerOptions{}
+		sourceClientOpts = &resourcemanagercmd.SourceClientOptions{}
+		targetClientOpts = &resourcemanagercmd.TargetClientOptions{}
 
-	resourceControllerOpts := &resourcecontroller.ControllerOptions{}
-	secretControllerOpts := &secretcontroller.ControllerOptions{}
-	healthControllerOpts := &healthcontroller.ControllerOptions{}
+		resourceControllerOpts = &resourcecontroller.ControllerOptions{}
+		secretControllerOpts   = &secretcontroller.ControllerOptions{}
+		healthControllerOpts   = &healthcontroller.ControllerOptions{}
+
+		log logr.Logger
+	)
 
 	cmd := &cobra.Command{
 		Use:     "gardener-resource-manager",
 		Version: version.Get().GitVersion,
 
+		PersistentPreRun: func(cmd *cobra.Command, args []string) {
+			runtimelog.SetLogger(logzap.New(
+				// use configuration passed via flags
+				logzap.UseFlagOptions(zapOpts),
+				// and overwrite some stuff
+				func(o *logzap.Options) {
+					if !o.Development {
+						encCfg := zap.NewProductionEncoderConfig()
+						// overwrite time encoding to human readable format for production logs
+						encCfg.EncodeTime = zapcore.ISO8601TimeEncoder
+						o.Encoder = zapcore.NewJSONEncoder(encCfg)
+					}
+
+					// don't print stacktrace for warning level logs
+					o.StacktraceLevel = zapcore.ErrorLevel
+				},
+			))
+
+			log = runtimelog.Log
+			log.Info("Starting gardener-resource-manager...", "version", version.Get().GitVersion)
+			cmd.Flags().VisitAll(func(flag *pflag.Flag) {
+				log.Info(fmt.Sprintf("FLAG: --%s=%s", flag.Name, flag.Value))
+			})
+
+			cmd.SilenceUsage = true
+			cmd.SilenceErrors = true
+		},
+
 		RunE: func(cmd *cobra.Command, args []string) error {
 			ctx, cancel := context.WithCancel(cmd.Context())
 			defer cancel()
 
-			entryLog.Info("Starting gardener-resource-manager...", "version", version.Get().GitVersion)
-			cmd.Flags().VisitAll(func(flag *pflag.Flag) {
-				entryLog.Info(fmt.Sprintf("FLAG: --%s=%s", flag.Name, flag.Value))
-			})
-
 			if err := resourcemanagercmd.CompleteAll(
 				managerOpts,
 				sourceClientOpts,
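A note on the encoder override in PersistentPreRun above: zap's production encoder config defaults to epoch-based timestamps, so the hunk swaps in zapcore.ISO8601TimeEncoder to keep production JSON logs human-readable while leaving the development encoder untouched.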
@@ -133,7 +161,7 @@ func NewResourceManagerCommand() *cobra.Command {
 				return err
 
 			case <-cmd.Context().Done():
-				entryLog.Info("Stop signal received, shutting down.")
+				log.Info("Stop signal received, shutting down.")
 				wg.Wait()
 				return nil
 			}
@@ -150,5 +178,8 @@ func NewResourceManagerCommand() *cobra.Command {
 		healthControllerOpts,
 	)
 
+	zapOpts.BindFlags(flag.CommandLine)
+	cmd.PersistentFlags().AddGoFlagSet(flag.CommandLine)
+
 	return cmd
 }
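With zapOpts.BindFlags wired into the persistent flag set, controller-runtime's zap flags become part of the CLI, so an invocation like `gardener-resource-manager --zap-devel=false --zap-log-level=debug` should work (the exact flag set depends on the vendored controller-runtime version).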
18 changes: 9 additions & 9 deletions cmd/gardener-resource-manager/main.go
@@ -16,28 +16,28 @@ package main
 
 import (
 	"context"
+	"fmt"
 	"os"
 
-	"github.com/gardener/gardener-resource-manager/cmd/gardener-resource-manager/app"
-	"github.com/gardener/gardener-resource-manager/pkg/log"
-
 	runtimelog "sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/manager/signals"
+
+	"github.com/gardener/gardener-resource-manager/cmd/gardener-resource-manager/app"
 )
 
 func main() {
-	runtimelog.SetLogger(log.ZapLogger(false))
-
 	ctx, cancel := context.WithCancel(context.Background())
 	go func() {
 		defer cancel()
 		<-signals.SetupSignalHandler()
 	}()
 
-	cmd := app.NewResourceManagerCommand()
-
-	if err := cmd.ExecuteContext(ctx); err != nil {
-		runtimelog.Log.Error(err, "error executing the main controller command")
+	if err := app.NewResourceManagerCommand().ExecuteContext(ctx); err != nil {
+		if log := runtimelog.Log; log.Enabled() {
+			log.Error(err, "error running gardener-resource-manager")
+		} else {
+			fmt.Printf("error running gardener-resource-manager: %v", err)
+		}
 		os.Exit(1)
 	}
 }
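The log.Enabled() guard presumably covers the case where flag parsing fails: Cobra then returns an error before PersistentPreRun has set a logger on runtimelog.Log, so the error is printed via fmt.Printf instead of being dropped by the still-unset delegating logger.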
11 changes: 7 additions & 4 deletions pkg/controller/health/reconciler.go
@@ -66,7 +66,7 @@ func (r *Reconciler) InjectLogger(l logr.Logger) error {
 
 // Reconcile performs health checks.
 func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
-	log := r.log.WithValues("object", req)
+	log := r.log.WithValues("managedresource", req)
 	log.Info("Starting ManagedResource health checks")
 
 	mr := &resourcesv1alpha1.ManagedResource{}
@@ -116,8 +116,8 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
 		// objects, so we create a new object of the object's type to use the caching client
 		obj, err := r.targetScheme.New(ref.GroupVersionKind())
 		if err != nil {
-			log.Info("could not create new object of kind for health checks (probably not registered in the used scheme), falling back to unstructured request",
-				"GroupVersionKind", ref.GroupVersionKind().String(), "error", err.Error())
+			log.V(1).Info("could not create new object of kind for health checks (probably not registered in the used scheme), falling back to unstructured request",
+				"GroupVersionKind", ref.GroupVersionKind().String(), "err", err.Error())
 
 			// fallback to unstructured requests if the object's type is not registered in the scheme
 			unstructuredObj := &unstructured.Unstructured{}
@@ -126,9 +126,10 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
 			obj = unstructuredObj
 		}
 
+		resourceLog := log.WithValues("resource", utils.ObjectReferceLogWrapper{ObjectReference: ref.ObjectReference})
 		if err := r.targetClient.Get(r.ctx, client.ObjectKey{Namespace: ref.Namespace, Name: ref.Name}, obj); err != nil {
 			if apierrors.IsNotFound(err) {
-				log.Info("Could not get object", "namespace", ref.Namespace, "name", ref.Name)
+				resourceLog.Info("Health check failed: resource is missing")
 
 				var (
 					reason = ref.Kind + "Missing"
@@ -147,6 +148,8 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
 		}
 
 		if err := CheckHealth(r.targetScheme, obj); err != nil {
+			resourceLog.Info("Health check failed: resource is unhealthy")
+
 			var (
 				reason  = ref.Kind + "Unhealthy"
 				message = fmt.Sprintf("Required %s %q in namespace %q is unhealthy: %v", ref.Kind, ref.Name, ref.Namespace, err.Error())
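utils.ObjectReferceLogWrapper itself is not shown in this diff; a hypothetical sketch of what it might look like, assuming it simply renders the reference compactly via fmt.Stringer:

```go
package utils

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// ObjectReferceLogWrapper is a hypothetical reconstruction (the name is taken
// from the diff above): it wraps an ObjectReference so that structured loggers
// print it as one compact value instead of separate kind/namespace/name keys.
type ObjectReferceLogWrapper struct {
	corev1.ObjectReference
}

// String implements fmt.Stringer, e.g. "apps/v1/Deployment/default/my-app".
func (w ObjectReferceLogWrapper) String() string {
	if w.Namespace == "" {
		return fmt.Sprintf("%s/%s/%s", w.APIVersion, w.Kind, w.Name)
	}
	return fmt.Sprintf("%s/%s/%s/%s", w.APIVersion, w.Kind, w.Namespace, w.Name)
}
```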