remove pull secret copy, defer to image registry #249

Merged
2 changes: 1 addition & 1 deletion Makefile
@@ -15,7 +15,7 @@ build-image:
 test: test-unit test-e2e
 
 test-unit:
-	go test -parallel 1 ./cmd/... ./pkg/...
+	go test ./cmd/... ./pkg/...
 
 test-e2e:
 	KUBERNETES_CONFIG=${KUBECONFIG} go test -parallel 1 -timeout 30m -v ./test/e2e/...
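The `-parallel` flag only throttles tests that opt in via `t.Parallel()`, so dropping `-parallel 1` restores Go's default of up to GOMAXPROCS such tests running concurrently. A minimal illustration (a hypothetical test, not one from this repo):

```go
package example

import "testing"

// TestIndependent opts in to parallel execution. Under "go test -parallel 1"
// even opted-in tests run one at a time; without the flag they may overlap
// with other tests that also call t.Parallel().
func TestIndependent(t *testing.T) {
	t.Parallel()
	if got := 2 + 2; got != 4 {
		t.Fatalf("expected 4, got %d", got)
	}
}
```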
4 changes: 1 addition & 3 deletions pkg/client/listers.go
@@ -1,16 +1,14 @@
 package client
 
 import (
-	corelisters "k8s.io/client-go/listers/core/v1"
-
 	imagelisters "github.com/openshift/client-go/image/listers/image/v1"
 	templatelisters "github.com/openshift/client-go/template/listers/template/v1"
+	corelisters "k8s.io/client-go/listers/core/v1"
 
 	sampoplisters "github.com/openshift/client-go/samples/listers/samples/v1"
 )
 
 type Listers struct {
-	OpenShiftNamespaceSecrets corelisters.SecretNamespaceLister
 	ConfigNamespaceSecrets corelisters.SecretNamespaceLister
 	ImageStreams           imagelisters.ImageStreamNamespaceLister
 	Templates              templatelisters.TemplateNamespaceLister
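After this change the `openshift` namespace secret lister is gone and `ConfigNamespaceSecrets` is the only secret lister left. A sketch of how such a namespace-scoped lister gets wired, mirroring the factory calls visible later in this diff (the function name and resync parameter here are illustrative):

```go
package main

import (
	"time"

	kubeinformers "k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	corelisters "k8s.io/client-go/listers/core/v1"
)

// newConfigNamespaceSecretLister builds a secret lister restricted to the
// openshift-config namespace, the same shape the controller assigns to
// Listers.ConfigNamespaceSecrets. Per the TODO in pkg/operator/controller.go,
// newer client-go versions would use NewSharedInformerFactoryWithOptions
// with a WithNamespace option instead.
func newConfigNamespaceSecretLister(client kubernetes.Interface, resync time.Duration) corelisters.SecretNamespaceLister {
	factory := kubeinformers.NewFilteredSharedInformerFactory(client, resync, "openshift-config", nil)
	return factory.Core().V1().Secrets().Lister().Secrets("openshift-config")
}
```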
4 changes: 2 additions & 2 deletions pkg/metrics/metrics.go
@@ -149,7 +149,7 @@ func (sc *samplesCollector) Collect(ch chan<- prometheus.Metric) {
 		addCountGauge(ch, invalidSecretDesc, missingTBRCredential, float64(0))
 		return
 	}
-	secret, err := sc.secrets.Get(configv1.SamplesRegistryCredentials)
+	secret, err := sc.secrets.Get("pull-secret")
 	if err != nil {
 		logrus.Infof("metrics pull secret retrieval failed with: %s", err.Error())
 		addCountGauge(ch, invalidSecretDesc, missingTBRCredential, float64(1))
@@ -189,7 +189,7 @@ func init() {
 }
 
 func InitializeMetricsCollector(listers *client.Listers) {
-	sc.secrets = listers.OpenShiftNamespaceSecrets
+	sc.secrets = listers.ConfigNamespaceSecrets
 	sc.config = listers.Config
 
 	if !registered {
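The collector now reads the cluster-wide pull secret (`pull-secret` in `openshift-config`) instead of the operator's former copy in the `openshift` namespace. The diff only shows the `Get` call succeeding or failing; below is a hedged sketch of what an explicit terms-based-registry (TBR) credential check against that secret could look like — the JSON walk is illustrative, not the operator's actual parsing:

```go
package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	corelisters "k8s.io/client-go/listers/core/v1"
)

// hasTBRCredential reports whether the cluster pull secret carries an auth
// entry for registry.redhat.io, the terms based registry (TBR) behind the
// missingTBRCredential gauge reason. The real collector only counts whether
// the Get succeeds; this sketch goes one step further for illustration.
func hasTBRCredential(secrets corelisters.SecretNamespaceLister) (bool, error) {
	secret, err := secrets.Get("pull-secret")
	if err != nil {
		return false, err
	}
	var cfg struct {
		Auths map[string]json.RawMessage `json:"auths"`
	}
	if err := json.Unmarshal(secret.Data[corev1.DockerConfigJsonKey], &cfg); err != nil {
		return false, fmt.Errorf("malformed pull secret: %w", err)
	}
	_, ok := cfg.Auths["registry.redhat.io"]
	return ok, nil
}
```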
89 changes: 15 additions & 74 deletions pkg/operator/controller.go
@@ -7,7 +7,6 @@ import (
 
 	"github.com/sirupsen/logrus"
 
-	corev1 "k8s.io/api/core/v1"
 	kerrors "k8s.io/apimachinery/pkg/api/errors"
 	metaapi "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -27,10 +26,10 @@ import (
 	templateinformers "github.com/openshift/client-go/template/informers/externalversions"
 
 	sampopapi "github.com/openshift/api/samples/v1"
-	sampcache "github.com/openshift/cluster-samples-operator/pkg/cache"
-	sampopclient "github.com/openshift/cluster-samples-operator/pkg/client"
 	sampleclientv1 "github.com/openshift/client-go/samples/clientset/versioned"
 	sampopinformers "github.com/openshift/client-go/samples/informers/externalversions"
+	sampcache "github.com/openshift/cluster-samples-operator/pkg/cache"
+	sampopclient "github.com/openshift/cluster-samples-operator/pkg/client"
 
 	operatorstatus "github.com/openshift/cluster-samples-operator/pkg/operatorstatus"
 	"github.com/openshift/cluster-samples-operator/pkg/stub"
@@ -46,19 +45,14 @@ type Controller struct {
 	restconfig *restclient.Config
 	cvowrapper *operatorstatus.ClusterOperatorHandler
 
-	crWorkqueue    workqueue.RateLimitingInterface
-	osSecWorkqueue workqueue.RateLimitingInterface
-	ocSecWorkqueue workqueue.RateLimitingInterface
-	isWorkqueue    workqueue.RateLimitingInterface
-	tWorkqueue     workqueue.RateLimitingInterface
+	crWorkqueue workqueue.RateLimitingInterface
+	isWorkqueue workqueue.RateLimitingInterface
+	tWorkqueue  workqueue.RateLimitingInterface
 
-	crInformer    cache.SharedIndexInformer
-	osSecInformer cache.SharedIndexInformer
-	ocSecInformer cache.SharedIndexInformer
-	isInformer    cache.SharedIndexInformer
-	tInformer     cache.SharedIndexInformer
+	crInformer cache.SharedIndexInformer
+	isInformer cache.SharedIndexInformer
+	tInformer  cache.SharedIndexInformer
 
-	kubeOSNSInformerFactory kubeinformers.SharedInformerFactory
 	kubeOCNSInformerFactory kubeinformers.SharedInformerFactory
 	imageInformerFactory    imageinformers.SharedInformerFactory
 	templateInformerFactory templateinformers.SharedInformerFactory
@@ -81,14 +75,12 @@ func NewController() (*Controller, error) {
 
 	listers := &sampopclient.Listers{}
 	c := &Controller{
-		restconfig:     kubeconfig,
-		cvowrapper:     operatorstatus.NewClusterOperatorHandler(operatorClient),
-		crWorkqueue:    workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "samplesconfig-changes"),
-		osSecWorkqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "openshift-secret-changes"),
-		ocSecWorkqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "openshift-config-namespace-secret-changes"),
-		isWorkqueue:    workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "imagestream-changes"),
-		tWorkqueue:     workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "template-changes"),
-		listers:        listers,
+		restconfig:  kubeconfig,
+		cvowrapper:  operatorstatus.NewClusterOperatorHandler(operatorClient),
+		crWorkqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "samplesconfig-changes"),
+		isWorkqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "imagestream-changes"),
+		tWorkqueue:  workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "template-changes"),
+		listers:     listers,
 	}
 
 	// Initial event to bootstrap CR if it doesn't exist.
@@ -114,7 +106,6 @@ func NewController() (*Controller, error) {
 		return nil, err
 	}
 
-	c.kubeOSNSInformerFactory = kubeinformers.NewFilteredSharedInformerFactory(kubeClient, defaultResyncDuration, "openshift", nil)
 	c.kubeOCNSInformerFactory = kubeinformers.NewFilteredSharedInformerFactory(kubeClient, defaultResyncDuration, "openshift-config", nil)
 	//TODO - eventually a k8s go-client deps bump will lead to the form below, similar to the image registry operator's kubeinformer initialization,
 	// and similar to what is available with the openshift go-client for imagestreams and templates
@@ -123,12 +114,6 @@ func NewController() (*Controller, error) {
 	c.templateInformerFactory = templateinformers.NewSharedInformerFactoryWithOptions(templateClient, defaultResyncDuration, templateinformers.WithNamespace("openshift"))
 	c.sampopInformerFactory = sampopinformers.NewSharedInformerFactory(sampopClient, defaultResyncDuration)
 
-	c.osSecInformer = c.kubeOSNSInformerFactory.Core().V1().Secrets().Informer()
-	c.osSecInformer.AddEventHandler(c.osSecretInformerEventHandler())
-	c.listers.OpenShiftNamespaceSecrets = c.kubeOSNSInformerFactory.Core().V1().Secrets().Lister().Secrets("openshift")
-
-	c.ocSecInformer = c.kubeOCNSInformerFactory.Core().V1().Secrets().Informer()
-	c.ocSecInformer.AddEventHandler(c.ocSecretInformerEventHandler())
 	c.listers.ConfigNamespaceSecrets = c.kubeOCNSInformerFactory.Core().V1().Secrets().Lister().Secrets("openshift-config")
 
 	c.isInformer = c.imageInformerFactory.Image().V1().ImageStreams().Informer()
@@ -154,20 +139,15 @@ func NewController() (*Controller, error) {
 
 func (c *Controller) Run(stopCh <-chan struct{}) error {
 	defer c.crWorkqueue.ShutDown()
-	defer c.osSecWorkqueue.ShutDown()
-	defer c.ocSecWorkqueue.ShutDown()
 	defer c.isWorkqueue.ShutDown()
 	defer c.tWorkqueue.ShutDown()
 
-	c.kubeOSNSInformerFactory.Start(stopCh)
 	c.kubeOCNSInformerFactory.Start(stopCh)
 	c.imageInformerFactory.Start(stopCh)
 	c.templateInformerFactory.Start(stopCh)
 	c.sampopInformerFactory.Start(stopCh)
 
 	logrus.Println("waiting for informer caches to sync")
-	if !cache.WaitForCacheSync(stopCh, c.osSecInformer.HasSynced, c.ocSecInformer.HasSynced,
-		c.isInformer.HasSynced, c.tInformer.HasSynced, c.crInformer.HasSynced) {
+	if !cache.WaitForCacheSync(stopCh, c.isInformer.HasSynced, c.tInformer.HasSynced, c.crInformer.HasSynced) {
 		return fmt.Errorf("failed to wait for caches to sync")
 	}

@@ -177,18 +157,6 @@ func (c *Controller) Run(stopCh <-chan struct{}) error {
 		getter: &crGetter{},
 	}
 	go wait.Until(crQueueWorker.workqueueProcessor, time.Second, stopCh)
-	osSecQueueWorker := queueWorker{
-		c:         c,
-		workQueue: c.osSecWorkqueue,
-		getter:    &osSecretGetter{},
-	}
-	go wait.Until(osSecQueueWorker.workqueueProcessor, time.Second, stopCh)
-	ocSecQueueWorker := queueWorker{
-		c:         c,
-		workQueue: c.ocSecWorkqueue,
-		getter:    &ocSecretGetter{},
-	}
-	go wait.Until(ocSecQueueWorker.workqueueProcessor, time.Second, stopCh)
 	isQueueWorker := queueWorker{
 		c:         c,
 		workQueue: c.isWorkqueue,
@@ -225,18 +193,6 @@ func (g *crGetter) Get(c *Controller, key string) (runtime.Object, error) {
 	return c.listers.Config.Get(sampopapi.ConfigName)
 }
 
-type osSecretGetter struct{}
-
-func (g *osSecretGetter) Get(c *Controller, key string) (runtime.Object, error) {
-	return c.listers.OpenShiftNamespaceSecrets.Get(key)
-}
-
-type ocSecretGetter struct{}
-
-func (g *ocSecretGetter) Get(c *Controller, key string) (runtime.Object, error) {
-	return c.listers.ConfigNamespaceSecrets.Get(key)
-}
-
 type isGetter struct{}
 
 func (g *isGetter) Get(c *Controller, key string) (runtime.Object, error) {
@@ -299,13 +255,6 @@ func (c *crQueueKeyGen) Key(o interface{}) string {
 	return cr.Name
 }
 
-type secretQueueKeyGen struct{}
-
-func (c *secretQueueKeyGen) Key(o interface{}) string {
-	secret := o.(*corev1.Secret)
-	return secret.Name
-}
-
 type imagestreamQueueKeyGen struct{}
 
 func (c *imagestreamQueueKeyGen) Key(o interface{}) string {
@@ -416,14 +365,6 @@ func (c *Controller) crInformerEventHandler() cache.ResourceEventHandlerFuncs {
 	return c.commonInformerEventHandler(&crQueueKeyGen{}, c.crWorkqueue)
 }
 
-func (c *Controller) osSecretInformerEventHandler() cache.ResourceEventHandlerFuncs {
-	return c.commonInformerEventHandler(&secretQueueKeyGen{}, c.osSecWorkqueue)
-}
-
-func (c *Controller) ocSecretInformerEventHandler() cache.ResourceEventHandlerFuncs {
-	return c.commonInformerEventHandler(&secretQueueKeyGen{}, c.ocSecWorkqueue)
-}
-
 func (c *Controller) imagestreamInformerEventHandler() cache.ResourceEventHandlerFuncs {
 	return c.commonInformerEventHandler(&imagestreamQueueKeyGen{}, c.isWorkqueue)
 }
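What survives these deletions is the controller's uniform pattern: one rate-limited workqueue per resource kind, drained by a worker that resolves each key back to the live object through a small getter. The `queueWorker` type lives elsewhere in the package; the sketch below infers its shape from the struct literals in `NewController` and `Run`, so the real implementation may differ in detail:

```go
package main

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/util/workqueue"
)

// Assumed shapes, inferred from the literals in this file; the repo's real
// Controller, getter, and queueWorker may differ.
type Controller struct{}

type getter interface {
	Get(c *Controller, key string) (runtime.Object, error)
}

type queueWorker struct {
	c         *Controller
	workQueue workqueue.RateLimitingInterface
	getter    getter
}

// workqueueProcessor drains the queue: resolve each key via the getter,
// forget the key on success, requeue with rate-limited backoff on error.
// (The real worker then hands the resolved object to the stub handler.)
func (w *queueWorker) workqueueProcessor() {
	for {
		key, shutdown := w.workQueue.Get()
		if shutdown {
			return
		}
		func() {
			defer w.workQueue.Done(key)
			if _, err := w.getter.Get(w.c, key.(string)); err != nil {
				w.workQueue.AddRateLimited(key)
				return
			}
			w.workQueue.Forget(key)
		}()
	}
}
```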
3 changes: 0 additions & 3 deletions pkg/stub/config.go
@@ -313,9 +313,6 @@ func (h *Handler) ProcessManagementField(cfg *v1.Config) (bool, bool, error) {
 
 	if cfg.Spec.ManagementState != cfg.Status.ManagementState {
 		logrus.Println("management state set to managed")
-		if util.ConditionFalse(cfg, v1.ImportCredentialsExist) {
-			h.copyDefaultClusterPullSecret(nil)
-		}
 	}
 	// will set status state to managed at top level caller
 	// to deal with config change processing
72 changes: 9 additions & 63 deletions pkg/stub/handler.go
@@ -77,7 +77,6 @@ func NewSamplesOperatorHandler(kubeconfig *restclient.Config,
 	h.crdlister = listers.Config
 	h.streamlister = listers.ImageStreams
 	h.tplstore = listers.Templates
-	h.opshiftsecretlister = listers.OpenShiftNamespaceSecrets
 	h.cfgsecretlister = listers.ConfigNamespaceSecrets
 
 	h.Fileimagegetter = &DefaultImageStreamFromFileGetter{}
@@ -86,7 +85,6 @@
 
 	h.imageclientwrapper = &defaultImageStreamClientWrapper{h: h, lister: listers.ImageStreams}
 	h.templateclientwrapper = &defaultTemplateClientWrapper{h: h, lister: listers.Templates}
-	h.secretclientwrapper = &defaultSecretClientWrapper{coreclient: h.coreclient, opnshftlister: listers.OpenShiftNamespaceSecrets, cfglister: listers.ConfigNamespaceSecrets}
 	h.cvowrapper = operatorstatus.NewClusterOperatorHandler(h.configclient)
 
 	h.skippedImagestreams = make(map[string]bool)
@@ -121,12 +119,10 @@ type Handler struct {
 
 	imageclientwrapper    ImageStreamClientWrapper
 	templateclientwrapper TemplateClientWrapper
-	secretclientwrapper   SecretClientWrapper
 
 	crdlister        configv1lister.ConfigLister
 	streamlister     imagev1lister.ImageStreamNamespaceLister
 	tplstore         templatev1lister.TemplateNamespaceLister
-	opshiftsecretlister corev1lister.SecretNamespaceLister
 	cfgsecretlister  corev1lister.SecretNamespaceLister
 	opersecretlister corev1lister.SecretNamespaceLister
 
@@ -446,12 +442,6 @@ func (h *Handler) CreateDefaultResourceIfNeeded(cfg *v1.Config) (*v1.Config, err
 		cfg.Spec.ManagementState = operatorsv1api.Managed
 	}
 	h.AddFinalizer(cfg)
-	// we should get a watch event for the default pull secret, but just in case
-	// we miss the watch event, as well as reducing churn with not starting the
-	// imagestream creates until we get the event, we'll do a one time copy attempt
-	// here ... we don't track errors cause if it doesn't work with this one time,
-	// we'll then fall back on the watch events, sync intervals, etc.
-	h.copyDefaultClusterPullSecret(nil)
 	logrus.Println("creating default Config")
 	err = h.crdwrapper.Create(cfg)
 	if err != nil {
Expand Down Expand Up @@ -487,7 +477,15 @@ func (h *Handler) CreateDefaultResourceIfNeeded(cfg *v1.Config) (*v1.Config, err
func (h *Handler) initConditions(cfg *v1.Config) *v1.Config {
now := kapis.Now()
util.Condition(cfg, v1.SamplesExist)
util.Condition(cfg, v1.ImportCredentialsExist)
creds := util.Condition(cfg, v1.ImportCredentialsExist)
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

not deleting this condition in case customers are coding to it

could entertain marking deprecated in openshift/api

// image registry operator now handles making TBR creds available
// for imagestreams
if creds.Status != corev1.ConditionTrue {
creds.Status = corev1.ConditionTrue
creds.LastTransitionTime = now
creds.LastUpdateTime = now
util.ConditionUpdate(cfg, creds)
}
valid := util.Condition(cfg, v1.ConfigurationValid)
// our default config is valid; since Condition sets new conditions to false
// if we get false here this is the first pass through; invalid configs
@@ -582,26 +580,6 @@ func (h *Handler) Handle(event util.Event) error {
 		err := h.processTemplateWatchEvent(t, event.Deleted)
 		return err
 
-	case *corev1.Secret:
-		dockercfgSecret, _ := event.Object.(*corev1.Secret)
-		if !secretsWeCareAbout(dockercfgSecret) {
-			return nil
-		}
-
-		// if we miss a delete event in the openshift namespace (since we cannot
-		// add a finalizer in our namespace secret), our watch
-		// on the openshift-config pull secret should still repopulate;
-		// if that gets deleted, the whole cluster is hosed; plus, there is talk
-		// of moving data like that to a special config namespace that is somehow
-		// protected
-
-		cfg, _ := h.crdwrapper.Get(v1.ConfigName)
-		if cfg != nil {
-			return h.processSecretEvent(cfg, dockercfgSecret, event)
-		} else {
-			return fmt.Errorf("Received secret %s but do not have the Config yet, requeuing", dockercfgSecret.Name)
-		}
-
 	case *v1.Config:
 		cfg, _ := event.Object.(*v1.Config)
 
@@ -677,19 +655,6 @@ func (h *Handler) Handle(event util.Event) error {
 			return nil
 		}
 
-		cfg = h.refetchCfgMinimizeConflicts(cfg)
-		if util.ConditionUnknown(cfg, v1.ImportCredentialsExist) {
-			// retry the default cred copy if it failed previously
-			err := h.copyDefaultClusterPullSecret(nil)
-			if err == nil {
-				cfg = h.refetchCfgMinimizeConflicts(cfg)
-				h.GoodConditionUpdate(cfg, corev1.ConditionTrue, v1.ImportCredentialsExist)
-				dbg := "cleared import cred unknown"
-				logrus.Printf("CRDUPDATE %s", dbg)
-				return h.crdwrapper.UpdateStatus(cfg, dbg)
-			}
-		}
-
 		// Every time we see a change to the Config object, update the ClusterOperator status
 		// based on the current conditions of the Config.
 		cfg = h.refetchCfgMinimizeConflicts(cfg)
@@ -795,25 +760,6 @@ func (h *Handler) Handle(event util.Event) error {
 			util.ConditionUpdate(cfg, condition)
 		}
 
-		// if trying to do rhel to the default registry.redhat.io registry requires the secret
-		// be in place since registry.redhat.io requires auth to pull; if it is not ready
-		// error state will be logged by WaitingForCredential
-		cfg = h.refetchCfgMinimizeConflicts(cfg)
-		stillWaitingForSecret, callSDKToUpdate := h.WaitingForCredential(cfg)
-		if callSDKToUpdate {
-			// flush status update ... the only error generated by WaitingForCredential, not
-			// by api obj access
-			dbg := "Config update ignored since need the RHEL credential"
-			logrus.Printf("CRDUPDATE %s", dbg)
-			// if update to set import cred condition to false fails, return that error
-			// to requeue
-			return h.crdwrapper.UpdateStatus(cfg, dbg)
-		}
-		if stillWaitingForSecret {
-			// means we previously updated cfg but nothing has changed wrt the secret's presence
-			return nil
-		}
-
 		cfg = h.refetchCfgMinimizeConflicts(cfg)
 		if util.ConditionFalse(cfg, v1.MigrationInProgress) &&
 			len(cfg.Status.Version) > 0 &&
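With the secret watch and credential copy gone, `initConditions` pins `ImportCredentialsExist` to true rather than deleting the condition, since (per the review comment above) customers may be coding against it. A sketch of the consumer-side read that keeps working, assuming the `Config` condition types from github.com/openshift/api/samples/v1:

```go
package main

import (
	v1 "github.com/openshift/api/samples/v1"
	corev1 "k8s.io/api/core/v1"
)

// importCredsReported reports the ImportCredentialsExist condition on a
// samples Config. After this PR the operator always forces it to true;
// the credential itself is managed by the image registry operator.
func importCredsReported(cfg *v1.Config) bool {
	for _, cond := range cfg.Status.Conditions {
		if cond.Type == v1.ImportCredentialsExist {
			return cond.Status == corev1.ConditionTrue
		}
	}
	return false
}
```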