Commit ea1ff11

Merge pull request #98 from asalkeld/klog

bug 1906143: Use k8s.io/klog instead of zap

openshift-merge-robot committed Feb 3, 2021
2 parents 5ca873d + 23ba16c commit ea1ff11
Showing 8 changed files with 236 additions and 27 deletions.
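In short, the reconciler's logr.Logger field (r.Log) and the zap-backed controller-runtime logger are removed: calls now go directly through k8s.io/klog/v2 (klog.Info, klog.ErrorS), and controller-runtime's own logging is pointed at klog via klogr.New(). The file-by-file diff follows, with short illustrative usage sketches after the relevant files.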
5 changes: 3 additions & 2 deletions controllers/clusteroperator.go
@@ -11,6 +11,7 @@ import (
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

osconfigv1 "github.com/openshift/api/config/v1"
@@ -190,7 +191,7 @@ func (r *ProvisioningReconciler) syncStatus(co *osconfigv1.ClusterOperator, cond
}

if len(co.Status.Versions) < 1 {
r.Log.Info("updating ClusterOperator Status Versions field")
klog.Info("updating ClusterOperator Status Versions field")
co.Status.Versions = operandVersions(r.ReleaseVersion)
}

@@ -201,7 +202,7 @@ func (r *ProvisioningReconciler) syncStatus(co *osconfigv1.ClusterOperator, cond
func (r *ProvisioningReconciler) updateCOStatus(newReason StatusReason, msg, progressMsg string) error {
co, err := r.OSClient.ConfigV1().ClusterOperators().Get(context.Background(), clusterOperatorName, metav1.GetOptions{})
if err != nil {
r.Log.Error(err, "failed to get or create ClusterOperator")
klog.ErrorS(err, "failed to get or create ClusterOperator")
return fmt.Errorf("failed to get clusterOperator %q: %v", clusterOperatorName, err)
}
conds := defaultStatusConditions()
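For reference, klog.ErrorS (used above in place of r.Log.Error) takes the error first, then a constant message and optional key/value pairs. A minimal, self-contained sketch of that pattern; the operator name and error below are illustrative, not taken from the operator:

```go
package main

import (
	"fmt"

	"k8s.io/klog/v2"
)

func main() {
	name := "baremetal" // illustrative ClusterOperator name
	err := fmt.Errorf("clusteroperator %q not found", name)

	// Error first, then a constant message, then optional key/value pairs.
	klog.ErrorS(err, "failed to get or create ClusterOperator", "name", name)

	// Make sure buffered entries are written before the program exits.
	klog.Flush()
}
```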
13 changes: 6 additions & 7 deletions controllers/provisioning_controller.go
@@ -19,7 +19,6 @@ import (
"context"
"fmt"

"github.com/go-logr/logr"
"github.com/pkg/errors"
"github.com/stretchr/stew/slice"
appsv1 "k8s.io/api/apps/v1"
@@ -30,6 +29,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
@@ -56,7 +56,6 @@ type ProvisioningReconciler struct {
// that reads objects from the cache and writes to the apiserver
Client client.Client
Scheme *runtime.Scheme
Log logr.Logger
OSClient osclientset.Interface
EventRecorder record.EventRecorder
KubeClient kubernetes.Interface
@@ -109,7 +108,7 @@ func (r *ProvisioningReconciler) readProvisioningCR(namespacedName types.Namespa

// provisioning.metal3.io is a singleton
if namespacedName.Name != BaremetalProvisioningCR {
r.Log.V(1).Info("ignoring invalid CR", "name", namespacedName.Name)
klog.Info("ignoring invalid CR", "name", namespacedName.Name)
return nil, nil
}
// Fetch the Provisioning instance
@@ -157,7 +156,7 @@ func (r *ProvisioningReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error
if baremetalConfig == nil {
// Provisioning configuration not available at this time.
// Cannot proceed with metal3 deployment.
r.Log.V(1).Info("Provisioning CR not found")
klog.Info("Provisioning CR not found")
return ctrl.Result{}, nil
}

@@ -173,7 +172,7 @@ func (r *ProvisioningReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error
// Images config map is not valid
// Provisioning configuration is not valid.
// Requeue request.
r.Log.Error(err, "invalid contents in images Config Map")
klog.ErrorS(err, "invalid contents in images Config Map")
co_err := r.updateCOStatus(ReasonInvalidConfiguration, err.Error(), "invalid contents in images Config Map")
if co_err != nil {
return ctrl.Result{}, fmt.Errorf("unable to put %q ClusterOperator in Degraded state: %w", clusterOperatorName, co_err)
@@ -212,7 +211,7 @@ func (r *ProvisioningReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error
if err := provisioning.ValidateBaremetalProvisioningConfig(baremetalConfig); err != nil {
// Provisioning configuration is not valid.
// Requeue request.
r.Log.Error(err, "invalid config in Provisioning CR")
klog.ErrorS(err, "invalid config in Provisioning CR")
err = r.updateCOStatus(ReasonInvalidConfiguration, err.Error(), "Unable to apply Provisioning CR: invalid configuration")
if err != nil {
return ctrl.Result{}, fmt.Errorf("unable to put %q ClusterOperator in Degraded state: %w", clusterOperatorName, err)
@@ -234,7 +233,7 @@ func (r *ProvisioningReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error
}

if maoOwned {
r.Log.V(1).Info("Adding annotation for CBO to take ownership of metal3 deployment created by MAO")
klog.Info("Adding annotation for CBO to take ownership of metal3 deployment created by MAO")
}

for _, ensureResource := range []ensureFunc{
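klog offers both print-style and structured calls, and verbosity is gated with klog.V(n) rather than logr's Log.V(n). A minimal sketch of the variants the new calls above draw on; the CR name is an illustrative value:

```go
package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	// Register klog's flags (-v, -logtostderr, ...) and parse them.
	klog.InitFlags(nil)
	flag.Parse()

	name := "example-provisioning" // illustrative CR name

	// Print-style: arguments are formatted in the manner of fmt.Sprint.
	klog.Info("ignoring invalid CR: ", name)

	// Structured: a constant message followed by key/value pairs.
	klog.InfoS("ignoring invalid CR", "name", name)

	// Verbosity-gated output, emitted only when run with -v=1 or higher.
	klog.V(1).Info("Provisioning CR not found")

	klog.Flush()
}
```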
1 change: 0 additions & 1 deletion controllers/provisioning_controller_test.go
@@ -28,7 +28,6 @@ func setUpSchemeForReconciler() *runtime.Scheme {
func newFakeProvisioningReconciler(scheme *runtime.Scheme, object runtime.Object) *ProvisioningReconciler {
return &ProvisioningReconciler{
Client: fakeclient.NewFakeClientWithScheme(scheme, object),
Log: ctrl.Log.WithName("controllers").WithName("Provisioning"),
Scheme: scheme,
OSClient: fakeconfigclientset.NewSimpleClientset(),
}
3 changes: 2 additions & 1 deletion go.mod
@@ -4,7 +4,7 @@ go 1.15

require (
github.com/go-bindata/go-bindata v3.1.2+incompatible
github.com/go-logr/logr v0.2.1-0.20200730175230-ee2de8da5be6
github.com/go-logr/logr v0.2.1-0.20200730175230-ee2de8da5be6 // indirect
github.com/go-logr/zapr v0.2.0 // indirect
github.com/golangci/golangci-lint v1.32.0
github.com/google/go-cmp v0.5.2
@@ -18,6 +18,7 @@ require (
k8s.io/api v0.20.0
k8s.io/apimachinery v0.20.0
k8s.io/client-go v0.20.0
k8s.io/klog/v2 v2.4.0
k8s.io/utils v0.0.0-20201110183641-67b214c5f920
sigs.k8s.io/controller-runtime v0.6.0
sigs.k8s.io/controller-tools v0.3.0
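In go.mod, github.com/go-logr/logr is now marked // indirect, since nothing in the operator imports it directly any more, while k8s.io/klog/v2 v2.4.0 is added as a direct requirement.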
30 changes: 14 additions & 16 deletions main.go
@@ -26,8 +26,9 @@ import (
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
"k8s.io/klog/v2/klogr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/log/zap"

// +kubebuilder:scaffold:imports

@@ -38,30 +39,29 @@ import (
)

var (
scheme = runtime.NewScheme()
setupLog = ctrl.Log.WithName("setup")
scheme = runtime.NewScheme()
)

func init() {
if err := clientgoscheme.AddToScheme(scheme); err != nil {
setupLog.Error(err, "Error adding k8s client to scheme.")
klog.ErrorS(err, "Error adding k8s client to scheme.")
os.Exit(1)
}

if err := metal3iov1alpha1.AddToScheme(scheme); err != nil {
setupLog.Error(err, "Error adding k8s client to scheme.")
klog.ErrorS(err, "Error adding k8s client to scheme.")
os.Exit(1)
}

if err := osconfigv1.AddToScheme(scheme); err != nil {
setupLog.Error(err, "Error adding k8s client to scheme.")
klog.ErrorS(err, "Error adding k8s client to scheme.")
os.Exit(1)
}

// +kubebuilder:scaffold:scheme
// The following is needed to read the Infrastructure CR
if err := osconfigv1.Install(scheme); err != nil {
setupLog.Error(err, "")
klog.ErrorS(err, "")
os.Exit(1)
}
}
@@ -71,20 +71,19 @@ func main() {
var enableLeaderElection bool
var imagesJSONFilename string

klog.InitFlags(nil)
flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
flag.StringVar(&imagesJSONFilename, "images-json", "/etc/cluster-baremetal-operator/images/images.json",
"The location of the file containing the images to use for our operands.")
flag.Parse()

ctrl.SetLogger(zap.New(func(o *zap.Options) {
o.Development = true
}))
ctrl.SetLogger(klogr.New())

releaseVersion := os.Getenv("RELEASE_VERSION")
if releaseVersion == "" {
ctrl.Log.Info("Environment variable RELEASE_VERSION not provided")
klog.Info("Environment variable RELEASE_VERSION not provided")
}

config := ctrl.GetConfigOrDie()
@@ -95,7 +94,7 @@ func main() {
Port: 9443,
})
if err != nil {
setupLog.Error(err, "unable to start manager")
klog.ErrorS(err, "unable to start manager")
os.Exit(1)
}

@@ -106,22 +105,21 @@

if err = (&controllers.ProvisioningReconciler{
Client: mgr.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("Provisioning"),
Scheme: mgr.GetScheme(),
OSClient: osClient,
EventRecorder: recorder,
KubeClient: kubeClient,
ReleaseVersion: releaseVersion,
ImagesFilename: imagesJSONFilename,
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Provisioning")
klog.ErrorS(err, "unable to create controller", "controller", "Provisioning")
os.Exit(1)
}
// +kubebuilder:scaffold:builder

setupLog.Info("starting manager")
klog.Info("starting manager")
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
setupLog.Error(err, "problem running manager")
klog.ErrorS(err, "problem running manager")
os.Exit(1)
}
}
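The main.go change wires klog in twice: klog.InitFlags(nil) registers klog's flags alongside the operator's own, and ctrl.SetLogger(klogr.New()) routes controller-runtime's logr output through klog. A minimal, self-contained sketch of that wiring; the manager options are trimmed to the essentials here (the real main.go also sets the scheme, metrics address, port, and leader election):

```go
package main

import (
	"flag"
	"os"

	"k8s.io/klog/v2"
	"k8s.io/klog/v2/klogr"
	ctrl "sigs.k8s.io/controller-runtime"
)

func main() {
	// Register klog's flags (-v, -logtostderr, ...) on the default flag set,
	// then parse them together with any operator-specific flags.
	klog.InitFlags(nil)
	flag.Parse()

	// Back controller-runtime's logr.Logger with klog.
	ctrl.SetLogger(klogr.New())

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
	if err != nil {
		klog.ErrorS(err, "unable to start manager")
		os.Exit(1)
	}

	klog.Info("starting manager")
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		klog.ErrorS(err, "problem running manager")
		os.Exit(1)
	}
}
```

Because klogr wraps klog, the operator's own klog calls and controller-runtime's internal logging end up in the same output stream and honor the same -v verbosity setting.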
8 changes: 8 additions & 0 deletions vendor/k8s.io/klog/v2/klogr/README.md

Some generated files are not rendered by default.
