Skip to content

Commit

Permalink
Use sharedInformer for watch
Browse files Browse the repository at this point in the history
Signed-off-by: Heba Elayoty <hebaelayoty@gmail.com>
  • Loading branch information
helayoty committed Apr 10, 2023
1 parent 4594fb5 commit 1d6594f
Show file tree
Hide file tree
Showing 2 changed files with 47 additions and 54 deletions.
13 changes: 0 additions & 13 deletions pkg/exporter/configmap.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/klog/v2"
)

Expand Down Expand Up @@ -74,18 +73,6 @@ func (e *Exporter) CreateConfigMapFromProperties(ctx context.Context, configMapN
return nil
}

// GetGonfigMapWatch returns a watch.Interface that streams events for the
// single ConfigMap named configMapName in the client.Namespace namespace.
// NOTE(review): klog.Fatalf below terminates the whole process when the watch
// cannot be established — confirm that is intended rather than returning the
// error to the caller.
func (e *Exporter) GetGonfigMapWatch(ctx context.Context, configMapName string) watch.Interface {
	// Name the result "w" so it does not shadow the imported
	// "k8s.io/apimachinery/pkg/watch" package for the rest of the function.
	w, err := e.clusterClient.CoreV1().
		ConfigMaps(client.Namespace).
		Watch(ctx, v1.ListOptions{
			// Field selector narrows the watch to exactly one ConfigMap.
			FieldSelector: "metadata.name=" + configMapName,
		})
	if err != nil {
		klog.Fatalf("unable to watch configMap %s, err: %v", configMapName, err)
	}
	return w
}

func (e *Exporter) DeleteConfigMap(ctx context.Context, configMapName string) error {
currentConfigMap, err := e.GetConfigMap(ctx, configMapName)
if err != nil {
Expand Down
88 changes: 47 additions & 41 deletions pkg/exporter/exporter.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,16 +5,19 @@ Copyright (c) Microsoft Corporation.
package exporter

import (
"fmt"
"time"

"github.com/Azure/kubernetes-carbon-intensity-exporter/pkg/sdk/client"
"github.com/antihax/optional"
"golang.org/x/net/context"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
Expand Down Expand Up @@ -43,58 +46,66 @@ func New(clusterClient clientset.Interface, apiClient *client.APIClient, recorde
}

func (e *Exporter) Run(ctx context.Context, configMapName, region string, patrolInterval time.Duration, stopChan <-chan struct{}) {
//delete old configmap if any
err := e.DeleteConfigMap(ctx, configMapName)
if err != nil && !apierrors.IsNotFound(err) {
return
}
// create configMap first time
err := e.RefreshData(ctx, configMapName, region, stopChan)
err = e.RefreshData(ctx, configMapName, region)
if err != nil {
return
}
configMapWatch := e.GetGonfigMapWatch(ctx, configMapName)

// configMapWatch is reassigned when the config map is deleted,
// so we want to make sure to stop the last instance.
defer func() {
configMapWatch.Stop()
}()
informerFactory := informers.NewSharedInformerFactory(e.clusterClient, time.Hour*1)
configMapInformer := informerFactory.Core().V1().ConfigMaps().Informer()

// Create a channel to receive events from the informer
eventChan := make(chan interface{})
defer close(eventChan)

configMapInformer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
DeleteFunc: func(obj interface{}) {
eventChan <- obj
},
})

go configMapInformer.Run(stopChan)
// Wait for the informer to sync
if !cache.WaitForCacheSync(stopChan, configMapInformer.HasSynced) {
runtime.HandleError(fmt.Errorf("timed out waiting for cache to sync"))
return
}

refreshPatrol := time.NewTicker(patrolInterval)
defer refreshPatrol.Stop()

for {
select {
// if the configMap got deleted by user
case event := <-configMapWatch.ResultChan():
if event.Type == watch.Deleted {
err := e.RefreshData(ctx, configMapName, region, stopChan)
if err != nil {
return
}
// refresh watch after deletion
configMapWatch.Stop()
configMapWatch = e.GetGonfigMapWatch(ctx, configMapName)

e.recorder.Eventf(&corev1.ObjectReference{
Kind: "Pod",
Namespace: client.Namespace,
Name: client.PodName,
}, corev1.EventTypeWarning, "Configmap Deleted", "Configmap got deleted")
}

// if refresh time elapsed
case <-refreshPatrol.C:
err := e.RefreshData(ctx, configMapName, region, stopChan)
case <-eventChan:
err := e.RefreshData(ctx, configMapName, region)
if err != nil {
return
}

// refresh watch after deletion
configMapWatch.Stop()
configMapWatch = e.GetGonfigMapWatch(ctx, configMapName)
e.recorder.Eventf(&corev1.ObjectReference{
Kind: "Pod",
Namespace: client.Namespace,
Name: client.PodName,
}, corev1.EventTypeWarning, "Configmap Deleted", "Configmap got deleted")

// if refresh time elapsed
case <-refreshPatrol.C:
err := e.DeleteConfigMap(ctx, configMapName)
if err != nil && !apierrors.IsNotFound(err) {
break
}
e.recorder.Eventf(&corev1.ObjectReference{
Kind: "Pod",
Namespace: client.Namespace,
Name: client.PodName,
}, corev1.EventTypeNormal, "Configmap updated", "Configmap gets updated")
}, corev1.EventTypeNormal, "Configmap updated", "Configmap got updated")

// context got canceled or done
case <-ctx.Done():
Expand All @@ -104,23 +115,18 @@ func (e *Exporter) Run(ctx context.Context, configMapName, region string, patrol
}
}

func (e *Exporter) RefreshData(ctx context.Context, configMapName string, region string, stopChan <-chan struct{}) error {
func (e *Exporter) RefreshData(ctx context.Context, configMapName string, region string) error {
// get current object (if any) in case we could not update the data.
currentConfigMap, err := e.GetConfigMap(ctx, configMapName)
if err != nil {
return err
}

err = e.DeleteConfigMap(ctx, configMapName)
if err != nil && !apierrors.IsNotFound(err) { // if configMap is not found,
return err
}

var forecast []client.EmissionsForecastDto
err = retry.OnError(constantBackoff, func(err error) bool {
return true
}, func() error {
forecast, err = e.getCurrentForecastData(ctx, region, stopChan)
forecast, err = e.getCurrentForecastData(ctx, region)
return err
})
if err != nil {
Expand Down Expand Up @@ -179,7 +185,7 @@ func (e *Exporter) UseCurrentConfigMap(ctx context.Context, message string, curr
currentConfigMap.Data, currentConfigMap.BinaryData[BinaryData])
}

func (e *Exporter) getCurrentForecastData(ctx context.Context, region string, stopChan <-chan struct{}) ([]client.EmissionsForecastDto, error) {
func (e *Exporter) getCurrentForecastData(ctx context.Context, region string) ([]client.EmissionsForecastDto, error) {
opt := &client.CarbonAwareApiGetCurrentForecastDataOpts{
DataStartAt: optional.EmptyTime(),
DataEndAt: optional.EmptyTime(),
Expand Down

0 comments on commit 1d6594f

Please sign in to comment.