Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Bump client-go and other deps to v1.18.3 #851

Merged
merged 4 commits into from
Jul 22, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Jump to
The table of contents is too big for display.
Diff view
Diff view
  •  
  •  
  •  
33 changes: 16 additions & 17 deletions go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -4,34 +4,33 @@ go 1.13

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should we do go 1.14 here above?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't think we have any features in CMO which need 1.14, and the codebase can be compiled with 1.13, so the current setting seems good to me.

require (
github.com/Jeffail/gabs v1.1.1
github.com/coreos/prometheus-operator v0.38.1
github.com/coreos/prometheus-operator v0.40.0
github.com/ghodss/yaml v1.0.0
github.com/gogo/protobuf v1.3.1
github.com/imdario/mergo v0.3.6 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mitchellh/hashstructure v1.0.0 // indirect
github.com/openshift/api v0.0.0-20200116145750-0e2ff1e215dd
github.com/openshift/client-go v0.0.0-20200116152001-92a2713fa240
github.com/openshift/api v0.0.0-20200623075207-eb651a5bb0ad
github.com/openshift/client-go v0.0.0-20200623090625-83993cebb5ae
github.com/openshift/library-go v0.0.0-20200120084036-bb27e57e2f2b
github.com/pkg/errors v0.8.1
github.com/prometheus/client_golang v1.2.1
github.com/prometheus/prometheus v2.9.2+incompatible
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
k8s.io/api v0.17.3
k8s.io/apiextensions-apiserver v0.17.3
k8s.io/apimachinery v0.17.3
k8s.io/apiserver v0.17.3
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.6.0
github.com/prometheus/prometheus v1.8.2-0.20200609102542-5d7e3e970602 // v1.8.2 is misleading as Prometheus does not have v2 module. This is pointing to v2.19.0, the same as in prometheus-operator v0.40.0
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a
k8s.io/api v0.18.4
k8s.io/apiextensions-apiserver v0.18.3
k8s.io/apimachinery v0.18.4
k8s.io/apiserver v0.18.3
k8s.io/client-go v12.0.0+incompatible
k8s.io/klog v1.0.0
k8s.io/kube-aggregator v0.17.1
k8s.io/metrics v0.0.0-20191014074242-8b0351268f72
k8s.io/kube-aggregator v0.18.3
k8s.io/metrics v0.18.4
)

replace (
github.com/prometheus/prometheus => github.com/prometheus/prometheus v1.8.2-0.20190819201610-48b2c9c8eae2 // v1.8.2 is misleading as Prometheus does not have v2 module. This is pointing to one commit after 2.12.0.
k8s.io/api => k8s.io/api v0.17.1
k8s.io/apimachinery => k8s.io/apimachinery v0.17.1
k8s.io/client-go => k8s.io/client-go v0.17.1
k8s.io/api => k8s.io/api v0.18.3
k8s.io/apimachinery => k8s.io/apimachinery v0.18.3
k8s.io/client-go => k8s.io/client-go v0.18.3
Comment on lines +31 to +33
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Do we really need these replacements?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Those are ensuring we are pinning to v0.18.3; without them, go mod tidy rewrites it to v0.18.4, and in the case of client-go it panics.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

👍

k8s.io/code-generator => k8s.io/code-generator v0.0.0-20190912054826-cd179ad6a269
k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30
)
630 changes: 500 additions & 130 deletions go.sum

Large diffs are not rendered by default.

254 changes: 114 additions & 140 deletions pkg/client/client.go

Large diffs are not rendered by default.

21 changes: 11 additions & 10 deletions pkg/client/status_reporter.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
package client

import (
"context"
"fmt"

"github.com/openshift/cluster-monitoring-operator/pkg/strings"
Expand Down Expand Up @@ -54,10 +55,10 @@ func newRelatedObjects(namespace string) []v1.ObjectReference {
}

func (r *StatusReporter) SetDone() error {
co, err := r.client.Get(r.clusterOperatorName, metav1.GetOptions{})
co, err := r.client.Get(context.TODO(), r.clusterOperatorName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
co = r.newClusterOperator()
co, err = r.client.Create(co)
co, err = r.client.Create(context.TODO(), co, metav1.CreateOptions{})
}
if err != nil && !apierrors.IsNotFound(err) {
return err
Expand Down Expand Up @@ -86,7 +87,7 @@ func (r *StatusReporter) SetDone() error {
co.Status.Versions = nil
}

_, err = r.client.UpdateStatus(co)
_, err = r.client.UpdateStatus(context.TODO(), co, metav1.UpdateOptions{})
return err
}

Expand All @@ -98,10 +99,10 @@ func (r *StatusReporter) SetDone() error {
// Once controller operator versions are available, an additional check will be introduced that toggles
// the OperatorProgressing state in case of version upgrades.
func (r *StatusReporter) SetInProgress() error {
co, err := r.client.Get(r.clusterOperatorName, metav1.GetOptions{})
co, err := r.client.Get(context.TODO(), r.clusterOperatorName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
co = r.newClusterOperator()
co, err = r.client.Create(co)
co, err = r.client.Create(context.TODO(), co, metav1.CreateOptions{})
}
if err != nil && !apierrors.IsNotFound(err) {
return err
Expand All @@ -119,19 +120,19 @@ func (r *StatusReporter) SetInProgress() error {
co.Status.Conditions = conditions.entries()
co.Status.RelatedObjects = newRelatedObjects(r.namespace)

_, err = r.client.UpdateStatus(co)
_, err = r.client.UpdateStatus(context.TODO(), co, metav1.UpdateOptions{})
return err
}

func (r *StatusReporter) Get() (*v1.ClusterOperator, error) {
return r.client.Get(r.clusterOperatorName, metav1.GetOptions{})
return r.client.Get(context.TODO(), r.clusterOperatorName, metav1.GetOptions{})
}

func (r *StatusReporter) SetFailed(statusErr error, reason string) error {
co, err := r.client.Get(r.clusterOperatorName, metav1.GetOptions{})
co, err := r.client.Get(context.TODO(), r.clusterOperatorName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
co = r.newClusterOperator()
co, err = r.client.Create(co)
co, err = r.client.Create(context.TODO(), co, metav1.CreateOptions{})
}
if err != nil && !apierrors.IsNotFound(err) {
return err
Expand All @@ -152,7 +153,7 @@ func (r *StatusReporter) SetFailed(statusErr error, reason string) error {
)
co.Status.Conditions = conditions.entries()

_, err = r.client.UpdateStatus(co)
_, err = r.client.UpdateStatus(context.TODO(), co, metav1.UpdateOptions{})
return err
}

Expand Down
21 changes: 11 additions & 10 deletions pkg/client/status_reporter_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
package client

import (
"context"
"errors"
"fmt"
"reflect"
Expand Down Expand Up @@ -394,41 +395,41 @@ type clusterOperatorMock struct {
// ensure the mock satisfies the ClusterOperatorInterface interface.
var _ clientv1.ClusterOperatorInterface = (*clusterOperatorMock)(nil)

func (com *clusterOperatorMock) Create(co *v1.ClusterOperator) (*v1.ClusterOperator, error) {
func (com *clusterOperatorMock) Create(ctx context.Context, co *v1.ClusterOperator, opts metav1.CreateOptions) (*v1.ClusterOperator, error) {
com.created = co
return com.createFunc(co)
}

func (com *clusterOperatorMock) Update(co *v1.ClusterOperator) (*v1.ClusterOperator, error) {
func (com *clusterOperatorMock) Update(ctx context.Context, co *v1.ClusterOperator, opts metav1.UpdateOptions) (*v1.ClusterOperator, error) {
com.updated = co
return com.updateFunc(co)
}

func (com *clusterOperatorMock) UpdateStatus(co *v1.ClusterOperator) (*v1.ClusterOperator, error) {
func (com *clusterOperatorMock) UpdateStatus(ctx context.Context, co *v1.ClusterOperator, opts metav1.UpdateOptions) (*v1.ClusterOperator, error) {
com.statusUpdated = co
return com.updateStatusFunc(co)
}

func (com *clusterOperatorMock) Delete(name string, options *metav1.DeleteOptions) error {
func (com *clusterOperatorMock) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
return nil
}

func (com *clusterOperatorMock) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
func (com *clusterOperatorMock) DeleteCollection(ctx context.Context, options metav1.DeleteOptions, listOptions metav1.ListOptions) error {
return nil
}

func (com *clusterOperatorMock) Get(name string, options metav1.GetOptions) (*v1.ClusterOperator, error) {
return com.getFunc(name, options)
func (com *clusterOperatorMock) Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterOperator, error) {
return com.getFunc(name, opts)
}

func (com *clusterOperatorMock) List(opts metav1.ListOptions) (*v1.ClusterOperatorList, error) {
func (com *clusterOperatorMock) List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterOperatorList, error) {
return nil, nil
}

func (com *clusterOperatorMock) Watch(opts metav1.ListOptions) (watch.Interface, error) {
func (com *clusterOperatorMock) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
return nil, nil
}

func (com *clusterOperatorMock) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterOperator, err error) {
func (com *clusterOperatorMock) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterOperator, err error) {
return nil, nil
}
36 changes: 18 additions & 18 deletions pkg/manifests/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -86,30 +86,30 @@ type PrometheusOperatorConfig struct {
}

type PrometheusK8sConfig struct {
LogLevel string `json:"logLevel"`
Retention string `json:"retention"`
NodeSelector map[string]string `json:"nodeSelector"`
Tolerations []v1.Toleration `json:"tolerations"`
Resources *v1.ResourceRequirements `json:"resources"`
ExternalLabels map[string]string `json:"externalLabels"`
VolumeClaimTemplate *v1.PersistentVolumeClaim `json:"volumeClaimTemplate"`
RemoteWrite []monv1.RemoteWriteSpec `json:"remoteWrite"`
TelemetryMatches []string `json:"-"`
LogLevel string `json:"logLevel"`
Retention string `json:"retention"`
NodeSelector map[string]string `json:"nodeSelector"`
Tolerations []v1.Toleration `json:"tolerations"`
Resources *v1.ResourceRequirements `json:"resources"`
ExternalLabels map[string]string `json:"externalLabels"`
VolumeClaimTemplate *monv1.EmbeddedPersistentVolumeClaim `json:"volumeClaimTemplate"`
RemoteWrite []monv1.RemoteWriteSpec `json:"remoteWrite"`
TelemetryMatches []string `json:"-"`
}

type AlertmanagerMainConfig struct {
NodeSelector map[string]string `json:"nodeSelector"`
Tolerations []v1.Toleration `json:"tolerations"`
Resources *v1.ResourceRequirements `json:"resources"`
VolumeClaimTemplate *v1.PersistentVolumeClaim `json:"volumeClaimTemplate"`
NodeSelector map[string]string `json:"nodeSelector"`
Tolerations []v1.Toleration `json:"tolerations"`
Resources *v1.ResourceRequirements `json:"resources"`
VolumeClaimTemplate *monv1.EmbeddedPersistentVolumeClaim `json:"volumeClaimTemplate"`
}

type ThanosRulerConfig struct {
LogLevel string `json:"logLevel"`
NodeSelector map[string]string `json:"nodeSelector"`
Tolerations []v1.Toleration `json:"tolerations"`
Resources *v1.ResourceRequirements `json:"resources"`
VolumeClaimTemplate *v1.PersistentVolumeClaim `json:"volumeClaimTemplate"`
LogLevel string `json:"logLevel"`
NodeSelector map[string]string `json:"nodeSelector"`
Tolerations []v1.Toleration `json:"tolerations"`
Resources *v1.ResourceRequirements `json:"resources"`
VolumeClaimTemplate *monv1.EmbeddedPersistentVolumeClaim `json:"volumeClaimTemplate"`
}

type ThanosQuerierConfig struct {
Expand Down
3 changes: 2 additions & 1 deletion pkg/operator/operator.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
package operator

import (
"context"
"strings"
"time"

Expand Down Expand Up @@ -440,7 +441,7 @@ func (o *Operator) Config(key string) (*manifests.Config, error) {
}

err = c.LoadToken(func() (*v1.Secret, error) {
return o.client.KubernetesInterface().CoreV1().Secrets("openshift-config").Get("pull-secret", metav1.GetOptions{})
return o.client.KubernetesInterface().CoreV1().Secrets("openshift-config").Get(context.TODO(), "pull-secret", metav1.GetOptions{})
})

if err != nil {
Expand Down
2 changes: 1 addition & 1 deletion pkg/promqlgen/promqlgen.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ import (
monv1 "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/pkg/errors"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/promql"
promql "github.com/prometheus/prometheus/promql/parser"
)

func LabelSelectorsToRelabelConfig(matches []string) (*monv1.RelabelConfig, error) {
Expand Down
6 changes: 4 additions & 2 deletions pkg/tasks/prometheusadapter.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
package tasks

import (
"context"

"github.com/openshift/cluster-monitoring-operator/pkg/client"
"github.com/openshift/cluster-monitoring-operator/pkg/manifests"
"github.com/pkg/errors"
Expand Down Expand Up @@ -195,7 +197,7 @@ func (t *PrometheusAdapterTask) Run() error {
}

func (t *PrometheusAdapterTask) deleteOldPrometheusAdapterSecrets(newHash string) error {
secrets, err := t.client.KubernetesInterface().CoreV1().Secrets(t.namespace).List(metav1.ListOptions{
secrets, err := t.client.KubernetesInterface().CoreV1().Secrets(t.namespace).List(context.TODO(), metav1.ListOptions{
LabelSelector: "monitoring.openshift.io/name=prometheus-adapter,monitoring.openshift.io/hash!=" + newHash,
})

Expand All @@ -204,7 +206,7 @@ func (t *PrometheusAdapterTask) deleteOldPrometheusAdapterSecrets(newHash string
}

for i := range secrets.Items {
err := t.client.KubernetesInterface().CoreV1().Secrets(t.namespace).Delete(secrets.Items[i].Name, &metav1.DeleteOptions{})
err := t.client.KubernetesInterface().CoreV1().Secrets(t.namespace).Delete(context.TODO(), secrets.Items[i].Name, metav1.DeleteOptions{})
if err != nil {
return errors.Wrapf(err, "error deleting secret: %s", secrets.Items[i].Name)
}
Expand Down
4 changes: 2 additions & 2 deletions pkg/tasks/prometheusoperator.go
Original file line number Diff line number Diff line change
Expand Up @@ -98,9 +98,9 @@ func (t *PrometheusOperatorTask) Run() error {
return errors.Wrap(err, "reconciling Prometheus Operator Deployment failed")
}

err = t.client.WaitForPrometheusOperatorCRDsReady()
err = t.client.AssurePrometheusOperatorCRsExist()
if err != nil {
return errors.Wrap(err, "waiting for Prometheus CRDs to become available failed")
return errors.Wrap(err, "waiting for Prometheus Operator CRs to become available failed")
}

w, err := t.factory.PrometheusRuleValidatingWebhook()
Expand Down
6 changes: 3 additions & 3 deletions pkg/tasks/prometheusoperator_user_workload.go
Original file line number Diff line number Diff line change
Expand Up @@ -99,11 +99,11 @@ func (t *PrometheusOperatorUserWorkloadTask) create() error {
return errors.Wrap(err, "reconciling UserWorkload Prometheus Operator Deployment failed")
}

// The CRDs will be registered by the cluster monitoring prometheus operator,
// The CRs will be created externally,
// but we still have to wait for them here.
err = t.client.WaitForPrometheusOperatorCRDsReady()
err = t.client.AssurePrometheusOperatorCRsExist()
if err != nil {
return errors.Wrap(err, "waiting for Prometheus CRDs to become available failed")
return errors.Wrap(err, "waiting for Prometheus Operator CRs to become available failed")
}

smpo, err := t.factory.PrometheusOperatorUserWorkloadServiceMonitor()
Expand Down
13 changes: 7 additions & 6 deletions test/e2e/alertmanager_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
package e2e

import (
"context"
"fmt"
"io/ioutil"
"net/http"
Expand Down Expand Up @@ -66,7 +67,7 @@ func TestAlertmanagerVolumeClaim(t *testing.T) {
var lastErr error
// Wait for persistent volume claim
err = wait.Poll(time.Second, 5*time.Minute, func() (bool, error) {
_, err := f.KubeClient.CoreV1().PersistentVolumeClaims(f.Ns).Get("alertmanager-main-db-alertmanager-main-0", metav1.GetOptions{})
_, err := f.KubeClient.CoreV1().PersistentVolumeClaims(f.Ns).Get(context.TODO(), "alertmanager-main-db-alertmanager-main-0", metav1.GetOptions{})
lastErr = errors.Wrap(err, "getting alertmanager persistent volume claim failed")
if err != nil {
return false, nil
Expand Down Expand Up @@ -100,7 +101,7 @@ func TestAlertmanagerTrustedCA(t *testing.T) {

// Wait for the new ConfigMap to be created
err := wait.Poll(time.Second, 5*time.Minute, func() (bool, error) {
cm, err := f.KubeClient.CoreV1().ConfigMaps(f.Ns).Get("alertmanager-trusted-ca-bundle", metav1.GetOptions{})
cm, err := f.KubeClient.CoreV1().ConfigMaps(f.Ns).Get(context.TODO(), "alertmanager-trusted-ca-bundle", metav1.GetOptions{})
lastErr = errors.Wrap(err, "getting new trusted CA ConfigMap failed")
if err != nil {
return false, nil
Expand All @@ -123,7 +124,7 @@ func TestAlertmanagerTrustedCA(t *testing.T) {

// Wait for the new hashed trusted CA bundle ConfigMap to be created
err = wait.Poll(time.Second, 5*time.Minute, func() (bool, error) {
_, err := f.KubeClient.CoreV1().ConfigMaps(f.Ns).Get(newCM.Name, metav1.GetOptions{})
_, err := f.KubeClient.CoreV1().ConfigMaps(f.Ns).Get(context.TODO(), newCM.Name, metav1.GetOptions{})
lastErr = errors.Wrap(err, "getting new CA ConfigMap failed")
if err != nil {
return false, nil
Expand All @@ -139,7 +140,7 @@ func TestAlertmanagerTrustedCA(t *testing.T) {

// Get Alertmanager StatefulSet and make sure it has a volume mounted.
err = wait.Poll(time.Second, 5*time.Minute, func() (bool, error) {
ss, err := f.KubeClient.AppsV1().StatefulSets(f.Ns).Get("alertmanager-main", metav1.GetOptions{})
ss, err := f.KubeClient.AppsV1().StatefulSets(f.Ns).Get(context.TODO(), "alertmanager-main", metav1.GetOptions{})
lastErr = errors.Wrap(err, "getting Alertmanager StatefulSet failed")
if err != nil {
return false, nil
Expand Down Expand Up @@ -184,12 +185,12 @@ func TestAlertmanagerKubeRbacProxy(t *testing.T) {
Name: testNs,
},
}
ns, err = f.KubeClient.CoreV1().Namespaces().Create(ns)
ns, err = f.KubeClient.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
defer func() {
err := f.KubeClient.CoreV1().Namespaces().Delete(testNs, &metav1.DeleteOptions{})
err := f.KubeClient.CoreV1().Namespaces().Delete(context.TODO(), testNs, metav1.DeleteOptions{})
t.Logf("deleting namespace %s: %v", testNs, err)
}()

Expand Down