Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

bump library-go to get context fixes #357

Merged
merged 2 commits into from
Jun 24, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
2 changes: 1 addition & 1 deletion go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ require (
github.com/openshift/api v0.0.0-20210521075222-e273a339932a
github.com/openshift/build-machinery-go v0.0.0-20210423112049-9415d7ebd33e
github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142
github.com/openshift/library-go v0.0.0-20210611094144-35c8a075e255
github.com/openshift/library-go v0.0.0-20210624101008-7aab941b4a2b
github.com/prometheus/client_golang v1.7.1
github.com/prometheus/common v0.10.0
github.com/spf13/cobra v1.1.1
Expand Down
4 changes: 2 additions & 2 deletions go.sum
Original file line number Diff line number Diff line change
Expand Up @@ -405,8 +405,8 @@ github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142 h1:ZHRIMCFIJN1
github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142/go.mod h1:fjS8r9mqDVsPb5td3NehsNOAWa4uiFkYEfVZioQ2gH0=
github.com/openshift/kubernetes-apiserver v0.0.0-20210419140141-620426e63a99 h1:KrCYRAJcgZYzMCB1PjJHJMYPu/d+dEkelq5eYyi0fDw=
github.com/openshift/kubernetes-apiserver v0.0.0-20210419140141-620426e63a99/go.mod h1:w2YSn4/WIwYuxG5zJmcqtRdtqgW/J2JRgFAqps3bBpg=
github.com/openshift/library-go v0.0.0-20210611094144-35c8a075e255 h1:4lXXCXSNmAD56T+lL0CRQfm4aImnb1I6Va9QVtN/d+Q=
github.com/openshift/library-go v0.0.0-20210611094144-35c8a075e255/go.mod h1:C5DDOSPucn3EVA0T05fODKtAweTObMBrTYm/G3uUBI8=
github.com/openshift/library-go v0.0.0-20210624101008-7aab941b4a2b h1:WcyGXyaknh8xzHpjsSgSDKwzvm3U5IgNIxzt3HpM9b0=
github.com/openshift/library-go v0.0.0-20210624101008-7aab941b4a2b/go.mod h1:C5DDOSPucn3EVA0T05fODKtAweTObMBrTYm/G3uUBI8=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
Expand Down
40 changes: 24 additions & 16 deletions pkg/operator/targetconfigcontroller/targetconfigcontroller.go
Original file line number Diff line number Diff line change
Expand Up @@ -105,7 +105,7 @@ func NewTargetConfigController(
return c
}

func (c TargetConfigController) sync() error {
func (c TargetConfigController) sync(ctx context.Context) error {
operatorSpec, _, _, err := c.operatorClient.GetStaticPodOperatorState()
if err != nil {
return err
Expand All @@ -123,7 +123,7 @@ func (c TargetConfigController) sync() error {
c.eventRecorder.Warningf("ManagementStateUnknown", "Unrecognized operator management state %q", operatorSpec.ManagementState)
return nil
}
requeue, err := createTargetConfigController_v311_00_to_latest(c, c.eventRecorder, operatorSpec)
requeue, err := createTargetConfigController_v311_00_to_latest(ctx, c, c.eventRecorder, operatorSpec)
if err != nil {
return err
}
Expand All @@ -147,25 +147,33 @@ func (c *TargetConfigController) Run(workers int, stopCh <-chan struct{}) {
return
}

// TODO: Fix this by refactoring this controller to factory
workerCtx, cancel := context.WithCancel(context.Background())
go func() {
<-stopCh
cancel()
}()
// doesn't matter what workers say, only start one.
go wait.Until(c.runWorker, time.Second, stopCh)
go wait.Until(func() {
c.runWorker(workerCtx)
}, time.Second, stopCh)

<-stopCh
}

func (c *TargetConfigController) runWorker() {
for c.processNextWorkItem() {
// runWorker drains the controller's work queue, invoking processNextWorkItem
// repeatedly until it reports that the queue has been shut down.
func (c *TargetConfigController) runWorker(ctx context.Context) {
	for {
		if !c.processNextWorkItem(ctx) {
			return
		}
	}
}

func (c *TargetConfigController) processNextWorkItem() bool {
func (c *TargetConfigController) processNextWorkItem(ctx context.Context) bool {
dsKey, quit := c.queue.Get()
if quit {
return false
}
defer c.queue.Done(dsKey)

err := c.sync()
err := c.sync(ctx)
if err == nil {
c.queue.Forget(dsKey)
return true
Expand Down Expand Up @@ -232,14 +240,14 @@ func (c *TargetConfigController) namespaceEventHandler() cache.ResourceEventHand

// createTargetConfigController_v311_00_to_latest takes care of synchronizing (not upgrading) the thing we're managing.
// most of the time the sync method will be good for a large span of minor versions
func createTargetConfigController_v311_00_to_latest(c TargetConfigController, recorder events.Recorder, operatorSpec *operatorv1.StaticPodOperatorSpec) (bool, error) {
func createTargetConfigController_v311_00_to_latest(ctx context.Context, c TargetConfigController, recorder events.Recorder, operatorSpec *operatorv1.StaticPodOperatorSpec) (bool, error) {
errors := []error{}

_, _, err := manageKubeSchedulerConfigMap_v311_00_to_latest(c.kubeClient.CoreV1(), recorder, c.configSchedulerLister)
_, _, err := manageKubeSchedulerConfigMap_v311_00_to_latest(ctx, c.kubeClient.CoreV1(), recorder, c.configSchedulerLister)
if err != nil {
errors = append(errors, fmt.Errorf("%q: %v", "configmap", err))
}
_, _, err = manageServiceAccountCABundle(c.configMapLister, c.kubeClient.CoreV1(), recorder)
_, _, err = manageServiceAccountCABundle(ctx, c.configMapLister, c.kubeClient.CoreV1(), recorder)
if err != nil {
errors = append(errors, fmt.Errorf("%q: %v", "configmap/serviceaccount-ca", err))
}
Expand Down Expand Up @@ -280,7 +288,7 @@ func createTargetConfigController_v311_00_to_latest(c TargetConfigController, re
return false, nil
}

func manageKubeSchedulerConfigMap_v311_00_to_latest(client corev1client.ConfigMapsGetter, recorder events.Recorder, configSchedulerLister configlistersv1.SchedulerLister) (*corev1.ConfigMap, bool, error) {
func manageKubeSchedulerConfigMap_v311_00_to_latest(ctx context.Context, client corev1client.ConfigMapsGetter, recorder events.Recorder, configSchedulerLister configlistersv1.SchedulerLister) (*corev1.ConfigMap, bool, error) {
configMap := resourceread.ReadConfigMapV1OrDie(v410_00_assets.MustAsset("v4.1.0/kube-scheduler/cm.yaml"))

var kubeSchedulerConfiguration []byte
Expand Down Expand Up @@ -310,7 +318,7 @@ func manageKubeSchedulerConfigMap_v311_00_to_latest(client corev1client.ConfigMa
if err != nil {
return nil, false, err
}
return resourceapply.ApplyConfigMap(client, recorder, requiredConfigMap)
return resourceapply.ApplyConfigMap(ctx, client, recorder, requiredConfigMap)
}

func managePod_v311_00_to_latest(ctx context.Context, configMapsGetter corev1client.ConfigMapsGetter, secretsGetter corev1client.SecretsGetter, recorder events.Recorder, operatorSpec *operatorv1.StaticPodOperatorSpec, imagePullSpec, operatorImagePullSpec string, featureGateLister configlistersv1.FeatureGateLister, configSchedulerLister configlistersv1.SchedulerLister) (*corev1.ConfigMap, bool, error) {
Expand Down Expand Up @@ -414,7 +422,7 @@ func managePod_v311_00_to_latest(ctx context.Context, configMapsGetter corev1cli
configMap.Data["pod.yaml"] = resourceread.WritePodV1OrDie(required)
configMap.Data["forceRedeploymentReason"] = operatorSpec.ForceRedeploymentReason
configMap.Data["version"] = version.Get().String()
appliedConfigMap, changed, err := resourceapply.ApplyConfigMap(configMapsGetter, recorder, configMap)
appliedConfigMap, changed, err := resourceapply.ApplyConfigMap(ctx, configMapsGetter, recorder, configMap)
if changed && len(config.Spec.Policy.Name) > 0 {
klog.Warning("Setting .spec.policy is deprecated and will be removed eventually. Please use .spec.profile instead.")
}
Expand Down Expand Up @@ -475,7 +483,7 @@ func generateFeatureGates(enabledFeatureGates, disabledFeatureGates []string, fe
return featureGates
}

func manageServiceAccountCABundle(lister corev1listers.ConfigMapLister, client corev1client.ConfigMapsGetter, recorder events.Recorder) (*corev1.ConfigMap, bool, error) {
func manageServiceAccountCABundle(ctx context.Context, lister corev1listers.ConfigMapLister, client corev1client.ConfigMapsGetter, recorder events.Recorder) (*corev1.ConfigMap, bool, error) {
requiredConfigMap, err := resourcesynccontroller.CombineCABundleConfigMaps(
resourcesynccontroller.ResourceLocation{Namespace: operatorclient.TargetNamespace, Name: "serviceaccount-ca"},
lister,
Expand All @@ -488,7 +496,7 @@ func manageServiceAccountCABundle(lister corev1listers.ConfigMapLister, client c
if err != nil {
return nil, false, err
}
return resourceapply.ApplyConfigMap(client, recorder, requiredConfigMap)
return resourceapply.ApplyConfigMap(ctx, client, recorder, requiredConfigMap)
}

func ensureLocalhostRecoverySAToken(ctx context.Context, client corev1client.CoreV1Interface, recorder events.Recorder) error {
Expand Down Expand Up @@ -559,7 +567,7 @@ func manageSchedulerKubeconfig(ctx context.Context, client corev1client.CoreV1In
}

requiredCM := resourceread.ReadConfigMapV1OrDie([]byte(cmString))
return resourceapply.ApplyConfigMap(client, recorder, requiredCM)
return resourceapply.ApplyConfigMap(ctx, client, recorder, requiredCM)
}

// getUnsupportedFlagsFromConfig reads and parses flags stored in the "arguments" filed in the unsupported config
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -167,7 +167,7 @@ func Test_manageKubeSchedulerConfigMap_v311_00_to_latest(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// need a client for each test
got, got1, err := manageKubeSchedulerConfigMap_v311_00_to_latest(fake.NewSimpleClientset().CoreV1(), tt.args.recorder, tt.args.configSchedulerLister)
got, got1, err := manageKubeSchedulerConfigMap_v311_00_to_latest(context.TODO(), fake.NewSimpleClientset().CoreV1(), tt.args.recorder, tt.args.configSchedulerLister)
if (err != nil) != tt.wantErr {
t.Errorf("manageKubeSchedulerConfigMap_v311_00_to_latest() error = %v, wantErr %v", err, tt.wantErr)
return
Expand Down

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.