Bug 2038275: *: Use --v=2 logging to drop client-side throttling noise #723

Merged 1 commit on Jan 10, 2022
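The change itself is mechanical: the operator manifests below now pass --v=2 instead of --v=5, and the per-resource reconcile messages move from klog.V(4)/klog.V(5) down to klog.V(2) so they remain visible at the new threshold. klog only emits a V(n) message when n is at or below the configured verbosity, which is what drops the client-side throttling chatter that client-go logs at V(3). A minimal standalone sketch of that gating (illustrative, not part of this PR):

package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)    // registers -v and the other klog flags
	_ = flag.Set("v", "2") // same effect as the --v=2 container argument below
	flag.Parse()

	klog.V(2).Info("emitted: 2 <= threshold 2, so reconcile diffs stay visible")
	klog.V(3).Info("dropped: client-go logs its throttling waits at V(3)")
	klog.V(4).Info("dropped: the old V(4) call sites would be invisible at --v=2")
	klog.Flush()
}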
2 changes: 1 addition & 1 deletion bootstrap/bootstrap-pod.yaml
@@ -16,7 +16,7 @@ spec:
- "--enable-auto-update=false"
- "--enable-default-cluster-version=false"
- "--listen="
- "--v=5"
- "--v=2"
- "--kubeconfig=/etc/kubernetes/kubeconfig"
securityContext:
privileged: true
@@ -33,7 +33,7 @@ spec:
- "--listen=0.0.0.0:9099"
- "--serving-cert-file=/etc/tls/serving-cert/tls.crt"
- "--serving-key-file=/etc/tls/serving-cert/tls.key"
- "--v=5"
- "--v=2"
resources:
requests:
cpu: 20m
2 changes: 1 addition & 1 deletion lib/resourceapply/apiext.go
@@ -35,7 +35,7 @@ func ApplyCustomResourceDefinitionv1(ctx context.Context, client apiextclientv1.
}

if reconciling {
klog.V(4).Infof("Updating CRD %s due to diff: %v", required.Name, cmp.Diff(existing, required))
klog.V(2).Infof("Updating CRD %s due to diff: %v", required.Name, cmp.Diff(existing, required))
}

actual, err := client.CustomResourceDefinitions().Update(ctx, existing, metav1.UpdateOptions{})
4 changes: 2 additions & 2 deletions lib/resourceapply/apps.go
@@ -35,7 +35,7 @@ func ApplyDeploymentv1(ctx context.Context, client appsclientv1.DeploymentsGette
return existing, false, nil
}
if reconciling {
klog.V(4).Infof("Updating Deployment %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
klog.V(2).Infof("Updating Deployment %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
}

actual, err := client.Deployments(required.Namespace).Update(ctx, existing, metav1.UpdateOptions{})
@@ -65,7 +65,7 @@ func ApplyDaemonSetv1(ctx context.Context, client appsclientv1.DaemonSetsGetter,
}

if reconciling {
klog.V(4).Infof("Updating DaemonSet %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
klog.V(2).Infof("Updating DaemonSet %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
}

actual, err := client.DaemonSets(required.Namespace).Update(ctx, existing, metav1.UpdateOptions{})
2 changes: 1 addition & 1 deletion lib/resourceapply/batch.go
@@ -36,7 +36,7 @@ func ApplyJobv1(ctx context.Context, client batchclientv1.JobsGetter, required *
}

if reconciling {
klog.V(4).Infof("Updating Job %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
klog.V(2).Infof("Updating Job %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
}

actual, err := client.Jobs(required.Namespace).Update(ctx, existing, metav1.UpdateOptions{})
8 changes: 4 additions & 4 deletions lib/resourceapply/core.go
@@ -38,7 +38,7 @@ func ApplyNamespacev1(ctx context.Context, client coreclientv1.NamespacesGetter,
return existing, false, nil
}
if reconciling {
klog.V(4).Infof("Updating Namespace %s due to diff: %v", required.Name, cmp.Diff(existing, required))
klog.V(2).Infof("Updating Namespace %s due to diff: %v", required.Name, cmp.Diff(existing, required))
}

actual, err := client.Namespaces().Update(ctx, existing, metav1.UpdateOptions{})
@@ -75,7 +75,7 @@ func ApplyServicev1(ctx context.Context, client coreclientv1.ServicesGetter, req
existing.Spec.Selector = required.Spec.Selector

if reconciling {
klog.V(4).Infof("Updating Service %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
klog.V(2).Infof("Updating Service %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
}

actual, err := client.Services(required.Namespace).Update(ctx, existing, metav1.UpdateOptions{})
@@ -105,7 +105,7 @@ func ApplyServiceAccountv1(ctx context.Context, client coreclientv1.ServiceAccou
}

if reconciling {
klog.V(4).Infof("Updating ServiceAccount %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
klog.V(2).Infof("Updating ServiceAccount %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
}

actual, err := client.ServiceAccounts(required.Namespace).Update(ctx, existing, metav1.UpdateOptions{})
@@ -135,7 +135,7 @@ func ApplyConfigMapv1(ctx context.Context, client coreclientv1.ConfigMapsGetter,
}

if reconciling {
klog.V(4).Infof("Updating ConfigMap %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
klog.V(2).Infof("Updating ConfigMap %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
}

actual, err := client.ConfigMaps(required.Namespace).Update(ctx, existing, metav1.UpdateOptions{})
2 changes: 1 addition & 1 deletion lib/resourceapply/cv.go
@@ -36,7 +36,7 @@ func ApplyClusterVersionFromCache(ctx context.Context, lister configlistersv1.Cl
return existing, false, nil
}

klog.V(4).Infof("Updating ClusterVersion %s due to diff: %v", required.Name, cmp.Diff(existing, required))
klog.V(2).Infof("Updating ClusterVersion %s due to diff: %v", required.Name, cmp.Diff(existing, required))

actual, err := client.ClusterVersions().Update(ctx, existing, metav1.UpdateOptions{})
return actual, true, err
2 changes: 1 addition & 1 deletion lib/resourceapply/imagestream.go
@@ -34,7 +34,7 @@ func ApplyImageStreamv1(ctx context.Context, client imageclientv1.ImageStreamsGe
}

if reconciling {
klog.V(4).Infof("Updating Namespace %s due to diff: %v", required.Name, cmp.Diff(existing, required))
klog.V(2).Infof("Updating Namespace %s due to diff: %v", required.Name, cmp.Diff(existing, required))
}
actual, err := client.ImageStreams(required.Namespace).Update(ctx, existing, metav1.UpdateOptions{})
return actual, true, err
8 changes: 4 additions & 4 deletions lib/resourceapply/rbac.go
@@ -35,7 +35,7 @@ func ApplyClusterRoleBindingv1(ctx context.Context, client rbacclientv1.ClusterR
return existing, false, nil
}
if reconciling {
klog.V(4).Infof("Updating ClusterRoleBinding %s due to diff: %v", required.Name, cmp.Diff(existing, required))
klog.V(2).Infof("Updating ClusterRoleBinding %s due to diff: %v", required.Name, cmp.Diff(existing, required))
}

actual, err := client.ClusterRoleBindings().Update(ctx, existing, metav1.UpdateOptions{})
@@ -64,7 +64,7 @@ func ApplyClusterRolev1(ctx context.Context, client rbacclientv1.ClusterRolesGet
return existing, false, nil
}
if reconciling {
klog.V(4).Infof("Updating ClusterRole %s due to diff: %v", required.Name, cmp.Diff(existing, required))
klog.V(2).Infof("Updating ClusterRole %s due to diff: %v", required.Name, cmp.Diff(existing, required))
}

actual, err := client.ClusterRoles().Update(ctx, existing, metav1.UpdateOptions{})
@@ -93,7 +93,7 @@ func ApplyRoleBindingv1(ctx context.Context, client rbacclientv1.RoleBindingsGet
return existing, false, nil
}
if reconciling {
klog.V(4).Infof("Updating RoleBinding %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
klog.V(2).Infof("Updating RoleBinding %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
}

actual, err := client.RoleBindings(required.Namespace).Update(ctx, existing, metav1.UpdateOptions{})
@@ -122,7 +122,7 @@ func ApplyRolev1(ctx context.Context, client rbacclientv1.RolesGetter, required
return existing, false, nil
}
if reconciling {
klog.V(4).Infof("Updating Role %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
klog.V(2).Infof("Updating Role %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
}

actual, err := client.Roles(required.Namespace).Update(ctx, existing, metav1.UpdateOptions{})
2 changes: 1 addition & 1 deletion lib/resourceapply/security.go
@@ -36,7 +36,7 @@ func ApplySecurityContextConstraintsv1(ctx context.Context, client securityclien
}

if reconciling {
klog.V(4).Infof("Updating SCC %s due to diff: %v", required.Name, cmp.Diff(existing, required))
klog.V(2).Infof("Updating SCC %s due to diff: %v", required.Name, cmp.Diff(existing, required))
}

actual, err := client.SecurityContextConstraints().Update(ctx, existing, metav1.UpdateOptions{})
6 changes: 3 additions & 3 deletions lib/resourcebuilder/batch.go
@@ -19,7 +19,7 @@ func WaitForJobCompletion(ctx context.Context, client batchclientv1.JobsGetter,
klog.Error(err)
return false, nil
} else if !done {
klog.V(4).Infof("Job %s in namespace %s is not ready, continuing to wait.", job.ObjectMeta.Name, job.ObjectMeta.Namespace)
klog.V(2).Infof("Job %s in namespace %s is not ready, continuing to wait.", job.ObjectMeta.Name, job.ObjectMeta.Namespace)
return false, nil
}
return true, nil
@@ -50,7 +50,7 @@ func checkJobHealth(ctx context.Context, client batchclientv1.JobsGetter, job *b
// the Job will 'Active == 0' if and only if it exceeds the deadline or if the update image could not be pulled.
// Failed jobs will be recreated in the next run.
if j.Status.Active == 0 {
klog.V(4).Infof("No active pods for job %s in namespace %s", job.Name, job.Namespace)
klog.V(2).Infof("No active pods for job %s in namespace %s", job.Name, job.Namespace)
failed, reason, message := hasJobFailed(job)
// If there is more than one failed job pod then get the cause for failure
if j.Status.Failed > 0 {
@@ -68,7 +68,7 @@
if reason == "DeadlineExceeded" {
return false, fmt.Errorf("deadline exceeded, reason: %q, message: %q", reason, message)
} else {
klog.V(4).Infof("Ignoring job %s in namespace %s with condition Failed=True because %s: %s", job.Name, job.Namespace, reason, message)
klog.V(2).Infof("Ignoring job %s in namespace %s with condition Failed=True because %s: %s", job.Name, job.Namespace, reason, message)
}
}
}
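checkJobHealth above calls a hasJobFailed helper that sits outside this diff. A plausible shape for it, inferred from the call site (signature and field access assumed, not confirmed by the PR):

import (
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
)

// Assumed sketch: scan the Job's conditions for Failed=True and surface
// the reason and message that checkJobHealth logs or turns into an error.
func hasJobFailed(job *batchv1.Job) (failed bool, reason, message string) {
	for _, condition := range job.Status.Conditions {
		if condition.Type == batchv1.JobFailed && condition.Status == corev1.ConditionTrue {
			return true, condition.Reason, condition.Message
		}
	}
	return false, "", ""
}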
8 changes: 4 additions & 4 deletions lib/resourcedelete/helper.go
@@ -62,7 +62,7 @@ func SetDeleteRequested(obj metav1.Object, resource Resource) {
deletedResources.lock.Lock()
deletedResources.m[resource] = times
deletedResources.lock.Unlock()
klog.V(4).Infof("Delete requested for %s.", resource)
klog.V(2).Infof("Delete requested for %s.", resource)
}

// SetDeleteVerified updates map entry to indicate resource deletion has been completed.
@@ -73,7 +73,7 @@ func SetDeleteVerified(resource Resource) {
deletedResources.lock.Lock()
deletedResources.m[resource] = times
deletedResources.lock.Unlock()
klog.V(4).Infof("Delete of %s completed.", resource)
klog.V(2).Infof("Delete of %s completed.", resource)
}

// getDeleteTimes returns map entry for given resource.
@@ -111,9 +111,9 @@ func GetDeleteProgress(resource Resource, getError error) (bool, error) {
SetDeleteVerified(resource)
} else {
if deletionTimes.Expected != nil {
klog.V(4).Infof("Delete of %s is expected by %s.", resource, deletionTimes.Expected.String())
klog.V(2).Infof("Delete of %s is expected by %s.", resource, deletionTimes.Expected.String())
} else {
klog.V(4).Infof("Delete of %s has already been requested.", resource)
klog.V(2).Infof("Delete of %s has already been requested.", resource)
}
}
}
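The helpers above all mutate a package-level deletedResources registry guarded by a mutex. Its exact definition is outside this diff; a rough sketch consistent with the accesses shown (type and field names assumed):

import (
	"sync"
	"time"
)

// Assumed sketch of the registry: delete timestamps per tracked resource.
type deleteTimes struct {
	Requested time.Time  // when the delete was first requested
	Expected  *time.Time // optional deadline by which the delete should finish
	Verified  time.Time  // when the delete was confirmed complete
}

var deletedResources = struct {
	lock sync.Mutex
	m    map[Resource]deleteTimes
}{m: map[Resource]deleteTimes{}}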
4 changes: 2 additions & 2 deletions pkg/autoupdate/autoupdate.go
@@ -153,9 +153,9 @@ func (ctrl *Controller) handleErr(err error, key interface{}) {

func (ctrl *Controller) sync(ctx context.Context, key string) error {
startTime := time.Now()
klog.V(4).Infof("Started syncing auto-updates %q (%v)", key, startTime)
klog.V(2).Infof("Started syncing auto-updates %q (%v)", key, startTime)
defer func() {
klog.V(4).Infof("Finished syncing auto-updates %q (%v)", key, time.Since(startTime))
klog.V(2).Infof("Finished syncing auto-updates %q (%v)", key, time.Since(startTime))
}()

clusterversion, err := ctrl.cvLister.Get(ctrl.name)
6 changes: 3 additions & 3 deletions pkg/cincinnati/cincinnati.go
@@ -80,16 +80,16 @@ func (c Client) GetUpdates(ctx context.Context, uri *url.URL, arch string, chann
req.Header.Add("Accept", GraphMediaType)
if c.transport != nil && c.transport.TLSClientConfig != nil {
if c.transport.TLSClientConfig.ClientCAs == nil {
klog.V(5).Infof("Using a root CA pool with 0 root CA subjects to request updates from %s", uri)
klog.V(2).Infof("Using a root CA pool with 0 root CA subjects to request updates from %s", uri)
} else {
klog.V(5).Infof("Using a root CA pool with %n root CA subjects to request updates from %s", len(c.transport.TLSClientConfig.RootCAs.Subjects()), uri)
klog.V(2).Infof("Using a root CA pool with %n root CA subjects to request updates from %s", len(c.transport.TLSClientConfig.RootCAs.Subjects()), uri)
}
}

if c.transport != nil && c.transport.Proxy != nil {
proxy, err := c.transport.Proxy(req)
if err == nil && proxy != nil {
klog.V(5).Infof("Using proxy %s to request updates from %s", proxy.Host, uri)
klog.V(2).Infof("Using proxy %s to request updates from %s", proxy.Host, uri)
}
}

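The proxy message above only fires when the transport actually resolves a proxy for this particular request; http.Transport.Proxy is a per-request callback. A small standalone illustration (the update-server URL is a placeholder):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Proxy is consulted per request, here honoring HTTPS_PROXY/NO_PROXY.
	transport := &http.Transport{Proxy: http.ProxyFromEnvironment}
	req, err := http.NewRequest(http.MethodGet, "https://updates.example.com/api/upgrades_info/v1/graph", nil)
	if err != nil {
		panic(err)
	}
	if proxyURL, err := transport.Proxy(req); err == nil && proxyURL != nil {
		fmt.Printf("requests to %s would go via proxy %s\n", req.URL.Host, proxyURL.Host)
	} else {
		fmt.Printf("no proxy for requests to %s\n", req.URL.Host)
	}
}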
2 changes: 1 addition & 1 deletion pkg/cvo/availableupdates.go
@@ -39,7 +39,7 @@ func (optr *Operator) syncAvailableUpdates(ctx context.Context, config *configv1
// updates are only checked at most once per minimumUpdateCheckInterval or if the generation changes
u := optr.getAvailableUpdates()
if u != nil && u.Upstream == upstream && u.Channel == channel && u.RecentlyChanged(optr.minimumUpdateCheckInterval) {
klog.V(4).Infof("Available updates were recently retrieved, will try later.")
klog.V(2).Infof("Available updates were recently retrieved, will try later.")
return nil
}

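The early return above hinges on RecentlyChanged, defined elsewhere in the package. A minimal sketch of such a time-window guard (struct fields assumed from the calls visible here):

import "time"

// Assumed sketch: the cached result counts as recent when it was
// retrieved within the minimum update-check interval.
type availableUpdates struct {
	Upstream string
	Channel  string
	At       time.Time // when the update graph was last fetched
}

func (u *availableUpdates) RecentlyChanged(interval time.Duration) bool {
	return u.At.After(time.Now().Add(-interval))
}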
20 changes: 10 additions & 10 deletions pkg/cvo/cvo.go
@@ -504,9 +504,9 @@ func handleErr(ctx context.Context, queue workqueue.RateLimitingInterface, err e
// It returns an error if it could not update the cluster version object.
func (optr *Operator) sync(ctx context.Context, key string) error {
startTime := time.Now()
klog.V(4).Infof("Started syncing cluster version %q (%v)", key, startTime)
klog.V(2).Infof("Started syncing cluster version %q (%v)", key, startTime)
defer func() {
klog.V(4).Infof("Finished syncing cluster version %q (%v)", key, time.Since(startTime))
klog.V(2).Infof("Finished syncing cluster version %q (%v)", key, time.Since(startTime))
}()

// ensure the cluster version exists, that the object is valid, and that
@@ -516,11 +516,11 @@ func (optr *Operator) sync(ctx context.Context, key string) error {
return err
}
if changed {
klog.V(4).Infof("Cluster version changed, waiting for newer event")
klog.V(2).Infof("Cluster version changed, waiting for newer event")
return nil
}
if original == nil {
klog.V(4).Infof("No ClusterVersion object and defaulting not enabled, waiting for one")
klog.V(2).Infof("No ClusterVersion object and defaulting not enabled, waiting for one")
return nil
}

@@ -535,14 +535,14 @@
// identify the desired next version
desired, ok := findUpdateFromConfig(config)
if ok {
klog.V(4).Infof("Desired version from spec is %#v", desired)
klog.V(2).Infof("Desired version from spec is %#v", desired)
} else {
currentVersion := optr.currentVersion()
desired = configv1.Update{
Version: currentVersion.Version,
Image: currentVersion.Image,
}
klog.V(4).Infof("Desired version from operator is %#v", desired)
klog.V(2).Infof("Desired version from operator is %#v", desired)
}

// handle the case of a misconfigured CVO by doing nothing
@@ -577,9 +577,9 @@ func (optr *Operator) sync(ctx context.Context, key string) error {
// sync available updates. It only modifies cluster version.
func (optr *Operator) availableUpdatesSync(ctx context.Context, key string) error {
startTime := time.Now()
klog.V(4).Infof("Started syncing available updates %q (%v)", key, startTime)
klog.V(2).Infof("Started syncing available updates %q (%v)", key, startTime)
defer func() {
klog.V(4).Infof("Finished syncing available updates %q (%v)", key, time.Since(startTime))
klog.V(2).Infof("Finished syncing available updates %q (%v)", key, time.Since(startTime))
}()

config, err := optr.cvLister.Get(optr.name)
@@ -599,9 +599,9 @@ func (optr *Operator) availableUpdatesSync(ctx context.Context, key string) erro
// sync upgradeableCondition. It only modifies cluster version.
func (optr *Operator) upgradeableSync(ctx context.Context, key string) error {
startTime := time.Now()
klog.V(4).Infof("Started syncing upgradeable %q (%v)", key, startTime)
klog.V(2).Infof("Started syncing upgradeable %q (%v)", key, startTime)
defer func() {
klog.V(4).Infof("Finished syncing upgradeable %q (%v)", key, time.Since(startTime))
klog.V(2).Infof("Finished syncing upgradeable %q (%v)", key, time.Since(startTime))
}()

config, err := optr.cvLister.Get(optr.name)
2 changes: 1 addition & 1 deletion pkg/cvo/cvo_test.go
@@ -4011,7 +4011,7 @@ func fakeClientsetWithUpdates(obj *configv1.ClusterVersion) *fake.Clientset {
obj.Status = update.Status
rv, _ := strconv.Atoi(update.ResourceVersion)
obj.ResourceVersion = strconv.Itoa(rv + 1)
klog.V(5).Infof("updated object to %#v", obj)
klog.V(2).Infof("updated object to %#v", obj)
return true, obj.DeepCopy(), nil
}
return false, nil, fmt.Errorf("unrecognized")
2 changes: 1 addition & 1 deletion pkg/cvo/internal/generic.go
@@ -106,7 +106,7 @@ func applyUnstructured(ctx context.Context, client dynamic.ResourceInterface, re
existing.SetOwnerReferences(required.GetOwnerReferences())

if reconciling {
klog.V(4).Infof("Updating %s %s/%s due to diff: %v", required.GetKind(), required.GetNamespace(), required.GetName(), objDiff)
klog.V(2).Infof("Updating %s %s/%s due to diff: %v", required.GetKind(), required.GetNamespace(), required.GetName(), objDiff)
}

actual, err := client.Update(ctx, existing, metav1.UpdateOptions{})
6 changes: 3 additions & 3 deletions pkg/cvo/metrics.go
@@ -323,7 +323,7 @@ func (m *operatorMetrics) Collect(ch chan<- prometheus.Metric) {

for _, condition := range cv.Status.Conditions {
if condition.Status != configv1.ConditionFalse && condition.Status != configv1.ConditionTrue {
klog.V(4).Infof("skipping metrics for ClusterVersion condition %s=%s (neither True nor False)", condition.Type, condition.Status)
klog.V(2).Infof("skipping metrics for ClusterVersion condition %s=%s (neither True nor False)", condition.Type, condition.Status)
continue
}
g := m.clusterOperatorConditions.WithLabelValues("version", string(condition.Type), string(condition.Reason))
@@ -355,7 +355,7 @@ func (m *operatorMetrics) Collect(ch chan<- prometheus.Metric) {
}
}
if version == "" {
klog.V(4).Infof("ClusterOperator %s is not setting the 'operator' version", op.Name)
klog.V(2).Infof("ClusterOperator %s is not setting the 'operator' version", op.Name)
}
g := m.clusterOperatorUp.WithLabelValues(op.Name, version)
if resourcemerge.IsOperatorStatusConditionTrue(op.Status.Conditions, configv1.OperatorAvailable) {
@@ -366,7 +366,7 @@
ch <- g
for _, condition := range op.Status.Conditions {
if condition.Status != configv1.ConditionFalse && condition.Status != configv1.ConditionTrue {
klog.V(4).Infof("skipping metrics for %s ClusterOperator condition %s=%s (neither True nor False)", op.Name, condition.Type, condition.Status)
klog.V(2).Infof("skipping metrics for %s ClusterOperator condition %s=%s (neither True nor False)", op.Name, condition.Type, condition.Status)
continue
}
g := m.clusterOperatorConditions.WithLabelValues(op.Name, string(condition.Type), string(condition.Reason))
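Both loops above skip conditions whose status is neither True nor False, since a 0/1 gauge cannot represent Unknown honestly. The decision reduces to a small mapping like this (illustrative only; configv1 is github.com/openshift/api/config/v1):

import configv1 "github.com/openshift/api/config/v1"

// Illustrative sketch: map a condition status to a gauge value, returning
// ok=false for Unknown so the caller can skip emitting the metric.
func conditionValue(status configv1.ConditionStatus) (value float64, ok bool) {
	switch status {
	case configv1.ConditionTrue:
		return 1, true
	case configv1.ConditionFalse:
		return 0, true
	default: // ConditionUnknown or anything unexpected: emit nothing
		return 0, false
	}
}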