diff --git a/CHANGELOG.md b/CHANGELOG.md index 16614f6e7..4f4182049 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ - (Feature) Allow to configure action timeouts - (Feature) (AT) Add ArangoTask API - (Bugfix) Fix NPE in State fetcher +- (Refactor) Configurable throttle inspector ## [1.2.8](https://github.com/arangodb/kube-arangodb/tree/1.2.8) (2022-02-24) - Do not check License V2 on Community images diff --git a/cmd/admin.go b/cmd/admin.go index add423f62..26310c364 100644 --- a/cmd/admin.go +++ b/cmd/admin.go @@ -46,7 +46,7 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/constants" "github.com/arangodb/kube-arangodb/pkg/util/errors" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret" + secretv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret/v1" "github.com/arangodb/kube-arangodb/pkg/util/kclient" ) @@ -291,7 +291,7 @@ func createClient(endpoints []string, certCA *x509.CertPool, auth connection.Aut } // getJWTTokenFromSecrets returns token from the secret. -func getJWTTokenFromSecrets(ctx context.Context, secrets secret.ReadInterface, name string) (connection.Authentication, error) { +func getJWTTokenFromSecrets(ctx context.Context, secrets secretv1.ReadInterface, name string) (connection.Authentication, error) { ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) defer cancel() @@ -309,7 +309,7 @@ func getJWTTokenFromSecrets(ctx context.Context, secrets secret.ReadInterface, n } // getCACertificate returns CA certificate from the secret. 
-func getCACertificate(ctx context.Context, secrets secret.ReadInterface, name string) (*x509.CertPool, error) { +func getCACertificate(ctx context.Context, secrets secretv1.ReadInterface, name string) (*x509.CertPool, error) { ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) defer cancel() diff --git a/cmd/main.go b/cmd/main.go index a9e9b8708..4639e92de 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -165,7 +165,7 @@ func init() { f.BoolVar(&operatorOptions.enableStorage, "operator.storage", false, "Enable to run the ArangoLocalStorage operator") f.BoolVar(&operatorOptions.enableBackup, "operator.backup", false, "Enable to run the ArangoBackup operator") f.BoolVar(&operatorOptions.enableApps, "operator.apps", false, "Enable to run the ArangoApps operator") - f.BoolVar(&operatorOptions.enableK2KClusterSync, "operator.k2k-cluster-sync", false, "Enable to run the ArangoClusterSynchronizations operator") + f.BoolVar(&operatorOptions.enableK2KClusterSync, "operator.k2k-cluster-sync", false, "Enable to run the ArangoClusterSynchronizations operator") f.BoolVar(&operatorOptions.versionOnly, "operator.version", false, "Enable only version endpoint in Operator") f.StringVar(&operatorOptions.alpineImage, "operator.alpine-image", UBIImageEnv.GetOrDefault(defaultAlpineImage), "Docker image used for alpine containers") f.MarkDeprecated("operator.alpine-image", "Value is not used anymore") @@ -226,10 +226,12 @@ func executeMain(cmd *cobra.Command, args []string) { // Prepare log service var err error - logService, err = logging.NewService(defaultLogLevel, logLevels) - if err != nil { + if err := logging.InitGlobalLogger(defaultLogLevel, logLevels); err != nil { cliLog.Fatal().Err(err).Msg("Failed to initialize log service") } + + logService = logging.GlobalLogger() + logService.ConfigureRootLogger(func(log zerolog.Logger) zerolog.Logger { podNameParts := strings.Split(name, "-") operatorID := podNameParts[len(podNameParts)-1] diff --git a/cmd/storage.go
b/cmd/storage.go index b3c633094..007ab10fe 100644 --- a/cmd/storage.go +++ b/cmd/storage.go @@ -62,13 +62,13 @@ func init() { // Run the provisioner func cmdStorageProvisionerRun(cmd *cobra.Command, args []string) { - //goflag.CommandLine.Parse([]string{"-logtostderr"}) var err error - logService, err = logging.NewService(defaultLogLevel, logLevels) - if err != nil { + if err := logging.InitGlobalLogger(defaultLogLevel, logLevels); err != nil { cliLog.Fatal().Err(err).Msg("Failed to initialize log service") } + logService = logging.GlobalLogger() + // Log version cliLog.Info().Msgf("Starting arangodb local storage provisioner (%s), version %s build %s", version.GetVersionV1().Edition.Title(), version.GetVersionV1().Version, version.GetVersionV1().Build) diff --git a/go.mod b/go.mod index e50443547..4955b9d4d 100644 --- a/go.mod +++ b/go.mod @@ -28,6 +28,7 @@ require ( github.com/arangodb/go-driver v1.2.1 github.com/arangodb/go-driver/v2 v2.0.0-20211021031401-d92dcd5a4c83 github.com/arangodb/go-upgrade-rules v0.0.0-20180809110947-031b4774ff21 + github.com/arangodb/rebalancer v0.1.1 github.com/cenkalti/backoff v2.2.1+incompatible github.com/dchest/uniuri v0.0.0-20160212164326-8902c56451e9 github.com/ghodss/yaml v1.0.0 diff --git a/pkg/apis/deployment/definitions.go b/pkg/apis/deployment/definitions.go index 8fb65f605..60113b4d2 100644 --- a/pkg/apis/deployment/definitions.go +++ b/pkg/apis/deployment/definitions.go @@ -33,6 +33,10 @@ const ( ArangoClusterSynchronizationResourceKind = "ArangoClusterSynchronization" ArangoClusterSynchronizationResourcePlural = "arangoclustersynchronizations" + ArangoTaskCRDName = ArangoTaskResourcePlural + "." 
+ ArangoDeploymentGroupName + ArangoTaskResourceKind = "ArangoTask" + ArangoTaskResourcePlural = "arangotasks" + ArangoDeploymentGroupName = "database.arangodb.com" ) diff --git a/pkg/deployment/access_package.go b/pkg/deployment/access_package.go index 8fd85c652..dbfb29c1b 100644 --- a/pkg/deployment/access_package.go +++ b/pkg/deployment/access_package.go @@ -49,7 +49,6 @@ const ( func (d *Deployment) createAccessPackages(ctx context.Context) error { log := d.deps.Log spec := d.apiObject.Spec - secrets := d.deps.Client.Kubernetes().CoreV1().Secrets(d.GetNamespace()) if !spec.Sync.IsEnabled() { // We're only relevant when sync is enabled @@ -66,21 +65,15 @@ func (d *Deployment) createAccessPackages(ctx context.Context) error { } // Remove all access packages that we did build, but are no longer needed - ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) - defer cancel() - secretList, err := secrets.List(ctxChild, metav1.ListOptions{}) - if err != nil { - log.Debug().Err(err).Msg("Failed to list secrets") - return errors.WithStack(err) - } - for _, secret := range secretList.Items { - if d.isOwnerOf(&secret) { + secretList := d.currentState.Secret().V1().ListSimple() + for _, secret := range secretList { + if d.isOwnerOf(secret) { if _, found := secret.Data[constants.SecretAccessPackageYaml]; found { // Secret is an access package if _, wanted := apNameMap[secret.GetName()]; !wanted { // We found an obsolete access package secret. Remove it. 
- err = globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { - return secrets.Delete(ctxChild, secret.GetName(), metav1.DeleteOptions{ + err := globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { + return d.SecretsModInterface().Delete(ctxChild, secret.GetName(), metav1.DeleteOptions{ Preconditions: &metav1.Preconditions{UID: &secret.UID}, }) }) @@ -105,14 +98,9 @@ func (d *Deployment) createAccessPackages(ctx context.Context) error { // it is does not already exist. func (d *Deployment) ensureAccessPackage(ctx context.Context, apSecretName string) error { log := d.deps.Log - ns := d.GetNamespace() - secrets := d.deps.Client.Kubernetes().CoreV1().Secrets(ns) spec := d.apiObject.Spec - err := globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { - _, err := secrets.Get(ctxChild, apSecretName, metav1.GetOptions{}) - return err - }) + _, err := d.currentState.Secret().V1().Read().Get(ctx, apSecretName, metav1.GetOptions{}) if err == nil { // Secret already exists return nil @@ -123,9 +111,7 @@ func (d *Deployment) ensureAccessPackage(ctx context.Context, apSecretName strin // Fetch client authentication CA clientAuthSecretName := spec.Sync.Authentication.GetClientCASecretName() - ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) - defer cancel() - clientAuthCert, clientAuthKey, _, err := k8sutil.GetCASecret(ctxChild, secrets, clientAuthSecretName, nil) + clientAuthCert, clientAuthKey, _, err := k8sutil.GetCASecret(ctx, d.currentState.Secret().V1().Read(), clientAuthSecretName, nil) if err != nil { log.Debug().Err(err).Msg("Failed to get client-auth CA secret") return errors.WithStack(err) @@ -133,7 +119,7 @@ func (d *Deployment) ensureAccessPackage(ctx context.Context, apSecretName strin // Fetch TLS CA public key tlsCASecretName := spec.Sync.TLS.GetCASecretName() - tlsCACert, err := 
k8sutil.GetCACertficateSecret(ctx, secrets, tlsCASecretName) + tlsCACert, err := k8sutil.GetCACertficateSecret(ctx, d.currentState.Secret().V1().Read(), tlsCASecretName) if err != nil { log.Debug().Err(err).Msg("Failed to get TLS CA secret") return errors.WithStack(err) @@ -220,7 +206,7 @@ func (d *Deployment) ensureAccessPackage(ctx context.Context, apSecretName strin // Attach secret to owner secret.SetOwnerReferences(append(secret.GetOwnerReferences(), d.apiObject.AsOwner())) err = globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { - _, err := secrets.Create(ctxChild, secret, metav1.CreateOptions{}) + _, err := d.SecretsModInterface().Create(ctxChild, secret, metav1.CreateOptions{}) return err }) if err != nil { diff --git a/pkg/deployment/cleanup.go b/pkg/deployment/cleanup.go index 325e14ad2..1f90f5e22 100644 --- a/pkg/deployment/cleanup.go +++ b/pkg/deployment/cleanup.go @@ -28,11 +28,12 @@ import ( core "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/arangodb/kube-arangodb/pkg/deployment/resources/inspector" "github.com/arangodb/kube-arangodb/pkg/util" "github.com/arangodb/kube-arangodb/pkg/util/constants" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" inspectorInterface "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector" + pvcv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/persistentvolumeclaim/v1" + podv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod/v1" ) // removePodFinalizers removes all finalizers from all pods owned by us. 
@@ -41,7 +42,7 @@ func (d *Deployment) removePodFinalizers(ctx context.Context, cachedStatus inspe found := false - if err := cachedStatus.IteratePods(func(pod *core.Pod) error { + if err := cachedStatus.Pod().V1().Iterate(func(pod *core.Pod) error { log.Info().Str("pod", pod.GetName()).Msgf("Removing Pod Finalizer") if count, err := k8sutil.RemovePodFinalizers(ctx, cachedStatus, log, d.PodsModInterface(), pod, constants.ManagedFinalizers(), true); err != nil { log.Warn().Err(err).Msg("Failed to remove pod finalizers") @@ -62,7 +63,7 @@ func (d *Deployment) removePodFinalizers(ctx context.Context, cachedStatus inspe } } return nil - }, inspector.FilterPodsByLabels(k8sutil.LabelsForDeployment(d.GetName(), ""))); err != nil { + }, podv1.FilterPodsByLabels(k8sutil.LabelsForDeployment(d.GetName(), ""))); err != nil { return false, err } @@ -75,7 +76,7 @@ func (d *Deployment) removePVCFinalizers(ctx context.Context, cachedStatus inspe found := false - if err := cachedStatus.IteratePersistentVolumeClaims(func(pvc *core.PersistentVolumeClaim) error { + if err := cachedStatus.PersistentVolumeClaim().V1().Iterate(func(pvc *core.PersistentVolumeClaim) error { log.Info().Str("pvc", pvc.GetName()).Msgf("Removing PVC Finalizer") if count, err := k8sutil.RemovePVCFinalizers(ctx, cachedStatus, log, d.PersistentVolumeClaimsModInterface(), pvc, constants.ManagedFinalizers(), true); err != nil { log.Warn().Err(err).Msg("Failed to remove PVC finalizers") @@ -84,7 +85,7 @@ func (d *Deployment) removePVCFinalizers(ctx context.Context, cachedStatus inspe found = true } return nil - }, inspector.FilterPersistentVolumeClaimsByLabels(k8sutil.LabelsForDeployment(d.GetName(), ""))); err != nil { + }, pvcv1.FilterPersistentVolumeClaimsByLabels(k8sutil.LabelsForDeployment(d.GetName(), ""))); err != nil { return false, err } diff --git a/pkg/deployment/context_impl.go b/pkg/deployment/context_impl.go index 9049d8a3e..6436b047b 100644 --- a/pkg/deployment/context_impl.go +++ 
b/pkg/deployment/context_impl.go @@ -33,22 +33,10 @@ import ( "github.com/arangodb/kube-arangodb/pkg/deployment/patch" "k8s.io/apimachinery/pkg/types" - "github.com/arangodb/kube-arangodb/pkg/deployment/resources/inspector" - - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/arangomember" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/persistentvolumeclaim" - podMod "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/poddisruptionbudget" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/service" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/serviceaccount" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/servicemonitor" - "github.com/arangodb/kube-arangodb/pkg/deployment/reconcile" - inspectorInterface "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret" - "github.com/arangodb/kube-arangodb/pkg/util/errors" + inspectorInterface "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector" "github.com/arangodb/kube-arangodb/pkg/util/arangod/conn" @@ -72,6 +60,13 @@ import ( "github.com/arangodb/kube-arangodb/pkg/deployment/reconciler" "github.com/arangodb/kube-arangodb/pkg/deployment/resources" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" + persistentvolumeclaimv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/persistentvolumeclaim/v1" + podv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod/v1" + poddisruptionbudgetv1beta1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/poddisruptionbudget/v1beta1" + secretv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret/v1" + servicev1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/service/v1" + serviceaccountv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/serviceaccount/v1" + 
servicemonitorv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/servicemonitor/v1" "github.com/arangodb/kube-arangodb/pkg/util/kclient" "github.com/rs/zerolog/log" core "k8s.io/api/core/v1" @@ -294,7 +289,7 @@ func (d *Deployment) getAuth() (driver.Authentication, error) { func (d *Deployment) getJWTFolderToken() (string, bool) { if i := d.apiObject.Status.CurrentImage; i == nil || features.JWTRotation().Supported(i.ArangoDBVersion, i.Enterprise) { - s, err := d.GetCachedStatus().SecretReadInterface().Get(context.Background(), pod.JWTSecretFolder(d.GetName()), meta.GetOptions{}) + s, err := d.GetCachedStatus().Secret().V1().Read().Get(context.Background(), pod.JWTSecretFolder(d.GetName()), meta.GetOptions{}) if err != nil { d.deps.Log.Error().Err(err).Msgf("Unable to get secret") return "", false @@ -317,7 +312,7 @@ func (d *Deployment) getJWTFolderToken() (string, bool) { } func (d *Deployment) getJWTToken() (string, bool) { - s, err := d.GetCachedStatus().SecretReadInterface().Get(context.Background(), d.apiObject.Spec.Authentication.GetJWTSecretName(), meta.GetOptions{}) + s, err := d.GetCachedStatus().Secret().V1().Read().Get(context.Background(), d.apiObject.Spec.Authentication.GetJWTSecretName(), meta.GetOptions{}) if err != nil { return "", false } @@ -335,7 +330,7 @@ func (d *Deployment) GetSyncServerClient(ctx context.Context, group api.ServerGr // Fetch monitoring token log := d.deps.Log secretName := d.apiObject.Spec.Sync.Monitoring.GetTokenSecretName() - monitoringToken, err := k8sutil.GetTokenSecret(ctx, d.GetCachedStatus().SecretReadInterface(), secretName) + monitoringToken, err := k8sutil.GetTokenSecret(ctx, d.GetCachedStatus().Secret().V1().Read(), secretName) if err != nil { log.Debug().Err(err).Str("secret-name", secretName).Msg("Failed to get sync monitoring secret") return nil, errors.WithStack(err) @@ -390,7 +385,7 @@ func (d *Deployment) CreateMember(ctx context.Context, group api.ServerGroup, id // GetPod returns pod. 
func (d *Deployment) GetPod(ctx context.Context, podName string) (*core.Pod, error) { - return d.GetCachedStatus().PodReadInterface().Get(ctx, podName, meta.GetOptions{}) + return d.GetCachedStatus().Pod().V1().Read().Get(ctx, podName, meta.GetOptions{}) } // DeletePod deletes a pod with given name in the namespace @@ -431,7 +426,7 @@ func (d *Deployment) RemovePodFinalizers(ctx context.Context, podName string) er ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) defer cancel() - p, err := d.GetCachedStatus().PodReadInterface().Get(ctxChild, podName, meta.GetOptions{}) + p, err := d.GetCachedStatus().Pod().V1().Read().Get(ctxChild, podName, meta.GetOptions{}) if err != nil { if k8sutil.IsNotFound(err) { return nil @@ -481,7 +476,7 @@ func (d *Deployment) UpdatePvc(ctx context.Context, pvc *core.PersistentVolumeCl // GetOwnedPVCs returns a list of all PVCs owned by the deployment. func (d *Deployment) GetOwnedPVCs() ([]core.PersistentVolumeClaim, error) { // Get all current PVCs - pvcs := d.GetCachedStatus().PersistentVolumeClaims() + pvcs := d.GetCachedStatus().PersistentVolumeClaim().V1().ListSimple() myPVCs := make([]core.PersistentVolumeClaim, 0, len(pvcs)) for _, p := range pvcs { if d.isOwnerOf(p) { @@ -496,7 +491,7 @@ func (d *Deployment) GetPvc(ctx context.Context, pvcName string) (*core.Persiste ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) defer cancel() - pvc, err := d.GetCachedStatus().PersistentVolumeClaimReadInterface().Get(ctxChild, pvcName, meta.GetOptions{}) + pvc, err := d.GetCachedStatus().PersistentVolumeClaim().V1().Read().Get(ctxChild, pvcName, meta.GetOptions{}) if err != nil { log.Debug().Err(err).Str("pvc-name", pvcName).Msg("Failed to get PVC") return nil, errors.WithStack(err) @@ -508,7 +503,7 @@ func (d *Deployment) GetPvc(ctx context.Context, pvcName string) (*core.Persiste // the given member. 
func (d *Deployment) GetTLSKeyfile(group api.ServerGroup, member api.MemberStatus) (string, error) { secretName := k8sutil.CreateTLSKeyfileSecretName(d.GetName(), group.AsRole(), member.ID) - result, err := k8sutil.GetTLSKeyfileSecret(d.GetCachedStatus().SecretReadInterface(), secretName) + result, err := k8sutil.GetTLSKeyfileSecret(d.GetCachedStatus().Secret().V1().Read(), secretName) if err != nil { return "", errors.WithStack(err) } @@ -612,44 +607,47 @@ func (d *Deployment) WithStatusUpdate(ctx context.Context, action reconciler.Dep }, force...) } -func (d *Deployment) SecretsModInterface() secret.ModInterface { +func (d *Deployment) SecretsModInterface() secretv1.ModInterface { + d.currentState.GetThrottles().Secret().Invalidate() return kclient.NewModInterface(d.deps.Client, d.namespace).Secrets() } -func (d *Deployment) PodsModInterface() podMod.ModInterface { +func (d *Deployment) PodsModInterface() podv1.ModInterface { + d.currentState.GetThrottles().Pod().Invalidate() return kclient.NewModInterface(d.deps.Client, d.namespace).Pods() } -func (d *Deployment) ServiceAccountsModInterface() serviceaccount.ModInterface { +func (d *Deployment) ServiceAccountsModInterface() serviceaccountv1.ModInterface { + d.currentState.GetThrottles().ServiceAccount().Invalidate() return kclient.NewModInterface(d.deps.Client, d.namespace).ServiceAccounts() } -func (d *Deployment) ServicesModInterface() service.ModInterface { +func (d *Deployment) ServicesModInterface() servicev1.ModInterface { + d.currentState.GetThrottles().Service().Invalidate() return kclient.NewModInterface(d.deps.Client, d.namespace).Services() } -func (d *Deployment) PersistentVolumeClaimsModInterface() persistentvolumeclaim.ModInterface { +func (d *Deployment) PersistentVolumeClaimsModInterface() persistentvolumeclaimv1.ModInterface { + d.currentState.GetThrottles().PersistentVolumeClaim().Invalidate() return kclient.NewModInterface(d.deps.Client, d.namespace).PersistentVolumeClaims() } -func (d 
*Deployment) PodDisruptionBudgetsModInterface() poddisruptionbudget.ModInterface { +func (d *Deployment) PodDisruptionBudgetsModInterface() poddisruptionbudgetv1beta1.ModInterface { + d.currentState.GetThrottles().PodDisruptionBudget().Invalidate() return kclient.NewModInterface(d.deps.Client, d.namespace).PodDisruptionBudgets() } -func (d *Deployment) ServiceMonitorsModInterface() servicemonitor.ModInterface { +func (d *Deployment) ServiceMonitorsModInterface() servicemonitorv1.ModInterface { + d.currentState.GetThrottles().ServiceMonitor().Invalidate() return kclient.NewModInterface(d.deps.Client, d.namespace).ServiceMonitors() } -func (d *Deployment) ArangoMembersModInterface() arangomember.ModInterface { - return kclient.NewModInterface(d.deps.Client, d.namespace).ArangoMembers() -} - func (d *Deployment) GetName() string { return d.name } func (d *Deployment) GetOwnedPods(ctx context.Context) ([]core.Pod, error) { - pods := d.GetCachedStatus().Pods() + pods := d.GetCachedStatus().Pod().V1().ListSimple() podList := make([]core.Pod, 0, len(pods)) @@ -665,48 +663,7 @@ func (d *Deployment) GetOwnedPods(ctx context.Context) ([]core.Pod, error) { } func (d *Deployment) GetCachedStatus() inspectorInterface.Inspector { - if c := d.currentState; c != nil { - return c - } - - return inspector.NewEmptyInspector() -} - -func (d *Deployment) SetCachedStatus(i inspectorInterface.Inspector) { - d.currentState = i -} - -func (d *Deployment) WithArangoMemberUpdate(ctx context.Context, namespace, name string, action reconciler.ArangoMemberUpdateFunc) error { - o, err := d.deps.Client.Arango().DatabaseV1().ArangoMembers(namespace).Get(ctx, name, meta.GetOptions{}) - if err != nil { - return err - } - - if action(o) { - if _, err := d.deps.Client.Arango().DatabaseV1().ArangoMembers(namespace).Update(ctx, o, meta.UpdateOptions{}); err != nil { - return err - } - } - - return nil -} - -func (d *Deployment) WithArangoMemberStatusUpdate(ctx context.Context, namespace, name string, 
action reconciler.ArangoMemberStatusUpdateFunc) error { - o, err := d.deps.Client.Arango().DatabaseV1().ArangoMembers(namespace).Get(ctx, name, meta.GetOptions{}) - if err != nil { - return err - } - - status := o.Status.DeepCopy() - - if action(o, status) { - o.Status = *status - if _, err := d.deps.Client.Arango().DatabaseV1().ArangoMembers(namespace).UpdateStatus(ctx, o, meta.UpdateOptions{}); err != nil { - return err - } - } - - return nil + return d.currentState } func (d *Deployment) ApplyPatchOnPod(ctx context.Context, pod *core.Pod, p ...patch.Item) error { @@ -717,11 +674,9 @@ func (d *Deployment) ApplyPatchOnPod(ctx context.Context, pod *core.Pod, p ...pa return err } - c := d.deps.Client.Kubernetes().CoreV1().Pods(pod.GetNamespace()) - ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) defer cancel() - _, err = c.Patch(ctxChild, pod.GetName(), types.JSONPatchType, data, meta.PatchOptions{}) + _, err = d.PodsModInterface().Patch(ctxChild, pod.GetName(), types.JSONPatchType, data, meta.PatchOptions{}) if err != nil { return err } diff --git a/pkg/deployment/deployment.go b/pkg/deployment/deployment.go index 3be35a747..b8fb25a48 100644 --- a/pkg/deployment/deployment.go +++ b/pkg/deployment/deployment.go @@ -56,10 +56,12 @@ import ( "github.com/arangodb/kube-arangodb/pkg/deployment/chaos" memberState "github.com/arangodb/kube-arangodb/pkg/deployment/member" "github.com/arangodb/kube-arangodb/pkg/deployment/reconcile" + "github.com/arangodb/kube-arangodb/pkg/deployment/reconciler" "github.com/arangodb/kube-arangodb/pkg/deployment/resilience" "github.com/arangodb/kube-arangodb/pkg/deployment/resources" "github.com/arangodb/kube-arangodb/pkg/util" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/throttle" "github.com/arangodb/kube-arangodb/pkg/util/kclient" "github.com/arangodb/kube-arangodb/pkg/util/trigger" ) @@ -141,6 +143,14 @@ type Deployment struct { 
memberState memberState.StateInspector } +func (d *Deployment) WithArangoMember(cache inspectorInterface.Inspector, timeout time.Duration, name string) reconciler.ArangoMemberModContext { + return reconciler.NewArangoMemberModContext(cache, timeout, name) +} + +func (d *Deployment) WithCurrentArangoMember(name string) reconciler.ArangoMemberModContext { + return d.WithArangoMember(d.currentState, globals.GetGlobals().Timeouts().Kubernetes().Get(), name) +} + func (d *Deployment) GetMembersState() memberState.StateInspector { return d.memberState } @@ -203,6 +213,21 @@ func (d *Deployment) SetAgencyMaintenanceMode(ctx context.Context, enabled bool) return nil } +func newDeploymentThrottle() throttle.Components { + return throttle.NewThrottleComponents( + 30*time.Second, // ArangoDeploymentSynchronization + 30*time.Second, // ArangoMember + 30*time.Second, // ArangoTask + 30*time.Second, // Node + 15*time.Second, // PVC + time.Second, // Pod + 30*time.Second, // PDB + 10*time.Second, // Secret + 10*time.Second, // Service + 30*time.Second, // SA + 30*time.Second) // ServiceMonitor +} + // New creates a new Deployment from the given API object. 
func New(config Config, deps Dependencies, apiObject *api.ArangoDeployment) (*Deployment, error) { if err := apiObject.Spec.Validate(); err != nil { @@ -210,14 +235,15 @@ func New(config Config, deps Dependencies, apiObject *api.ArangoDeployment) (*De } d := &Deployment{ - apiObject: apiObject, - name: apiObject.GetName(), - namespace: apiObject.GetNamespace(), - config: config, - deps: deps, - eventCh: make(chan *deploymentEvent, deploymentEventQueueSize), - stopCh: make(chan struct{}), - agencyCache: agency.NewCache(apiObject.Spec.Mode), + apiObject: apiObject, + name: apiObject.GetName(), + namespace: apiObject.GetNamespace(), + config: config, + deps: deps, + eventCh: make(chan *deploymentEvent, deploymentEventQueueSize), + stopCh: make(chan struct{}), + agencyCache: agency.NewCache(apiObject.Spec.Mode), + currentState: inspector.NewInspector(newDeploymentThrottle(), deps.Client, apiObject.GetNamespace()), } d.memberState = memberState.NewStateInspector(d) @@ -329,16 +355,16 @@ func (d *Deployment) run() { for { select { case <-d.stopCh: - cachedStatus, err := inspector.NewInspector(context.Background(), d.deps.Client, d.GetNamespace()) + err := d.currentState.Refresh(context.Background()) if err != nil { log.Error().Err(err).Msg("Unable to get resources") } // Remove finalizers from created resources log.Info().Msg("Deployment removed, removing finalizers to prevent orphaned resources") - if _, err := d.removePodFinalizers(context.TODO(), cachedStatus); err != nil { + if _, err := d.removePodFinalizers(context.TODO(), d.GetCachedStatus()); err != nil { log.Warn().Err(err).Msg("Failed to remove Pod finalizers") } - if _, err := d.removePVCFinalizers(context.TODO(), cachedStatus); err != nil { + if _, err := d.removePVCFinalizers(context.TODO(), d.GetCachedStatus()); err != nil { log.Warn().Err(err).Msg("Failed to remove PVC finalizers") } // We're being stopped. 
@@ -577,7 +603,7 @@ func (d *Deployment) isOwnerOf(obj meta.Object) bool { func (d *Deployment) lookForServiceMonitorCRD() { var err error if d.GetScope().IsNamespaced() { - _, err = d.deps.Client.Monitoring().MonitoringV1().ServiceMonitors(d.GetNamespace()).List(context.Background(), meta.ListOptions{}) + _, err = d.currentState.ServiceMonitor().V1() } else { _, err = d.deps.Client.KubernetesExtensions().ApiextensionsV1().CustomResourceDefinitions().Get(context.Background(), "servicemonitors.monitoring.coreos.com", meta.GetOptions{}) } diff --git a/pkg/deployment/deployment_inspector.go b/pkg/deployment/deployment_inspector.go index e0550cd6e..58c3fdb72 100644 --- a/pkg/deployment/deployment_inspector.go +++ b/pkg/deployment/deployment_inspector.go @@ -36,8 +36,6 @@ import ( operatorErrors "github.com/arangodb/kube-arangodb/pkg/util/errors" - "github.com/arangodb/kube-arangodb/pkg/deployment/resources/inspector" - "github.com/arangodb/kube-arangodb/pkg/apis/deployment" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" @@ -75,7 +73,7 @@ func (d *Deployment) inspectDeployment(lastInterval util.Interval) util.Interval deploymentName := d.GetName() defer metrics.SetDuration(inspectDeploymentDurationGauges.WithLabelValues(deploymentName), start) - cachedStatus, err := inspector.NewInspector(context.Background(), d.deps.Client, d.GetNamespace()) + err := d.currentState.Refresh(ctxReconciliation) if err != nil { log.Error().Err(err).Msg("Unable to get resources") return minInspectionInterval // Retry ASAP @@ -95,7 +93,7 @@ func (d *Deployment) inspectDeployment(lastInterval util.Interval) util.Interval return nextInterval } else if updated != nil && updated.GetDeletionTimestamp() != nil { // Deployment is marked for deletion - if err := d.runDeploymentFinalizers(ctxReconciliation, cachedStatus); err != nil { + if err := d.runDeploymentFinalizers(ctxReconciliation, d.GetCachedStatus()); err != nil { hasError = true 
d.CreateEvent(k8sutil.NewErrorEvent("ArangoDeployment finalizer inspection failed", err, d.apiObject)) } @@ -119,7 +117,7 @@ func (d *Deployment) inspectDeployment(lastInterval util.Interval) util.Interval d.GetMembersState().RefreshState(ctxReconciliation, updated.Status.Members.AsList()) d.GetMembersState().Log(d.deps.Log) - inspectNextInterval, err := d.inspectDeploymentWithError(ctxReconciliation, nextInterval, cachedStatus) + inspectNextInterval, err := d.inspectDeploymentWithError(ctxReconciliation, nextInterval) if err != nil { if !operatorErrors.IsReconcile(err) { nextInterval = inspectNextInterval @@ -144,12 +142,9 @@ func (d *Deployment) inspectDeployment(lastInterval util.Interval) util.Interval return nextInterval.ReduceTo(maxInspectionInterval) } -func (d *Deployment) inspectDeploymentWithError(ctx context.Context, lastInterval util.Interval, - cachedStatus inspectorInterface.Inspector) (nextInterval util.Interval, inspectError error) { +func (d *Deployment) inspectDeploymentWithError(ctx context.Context, lastInterval util.Interval) (nextInterval util.Interval, inspectError error) { t := time.Now() - d.SetCachedStatus(cachedStatus) - defer func() { d.deps.Log.Info().Msgf("Reconciliation loop took %s", time.Since(t)) }() @@ -175,36 +170,36 @@ func (d *Deployment) inspectDeploymentWithError(ctx context.Context, lastInterva } } - if err := acs.Inspect(ctx, d.apiObject, d.deps.Client, cachedStatus); err != nil { + if err := acs.Inspect(ctx, d.apiObject, d.deps.Client, d.GetCachedStatus()); err != nil { d.deps.Log.Warn().Err(err).Msgf("Unable to handle ACS objects") } // Cleanup terminated pods on the beginning of loop - if x, err := d.resources.CleanupTerminatedPods(ctx, cachedStatus); err != nil { + if x, err := d.resources.CleanupTerminatedPods(ctx, d.GetCachedStatus()); err != nil { return minInspectionInterval, errors.Wrapf(err, "Pod cleanup failed") } else { nextInterval = nextInterval.ReduceTo(x) } - if err := d.resources.EnsureArangoMembers(ctx, 
cachedStatus); err != nil { + if err := d.resources.EnsureArangoMembers(ctx, d.GetCachedStatus()); err != nil { return minInspectionInterval, errors.Wrapf(err, "ArangoMember creation failed") } - if err := d.resources.EnsureServices(ctx, cachedStatus); err != nil { + if err := d.resources.EnsureServices(ctx, d.GetCachedStatus()); err != nil { return minInspectionInterval, errors.Wrapf(err, "Service creation failed") } - if err := d.resources.EnsureSecrets(ctx, d.deps.Log, cachedStatus); err != nil { + if err := d.resources.EnsureSecrets(ctx, d.deps.Log, d.GetCachedStatus()); err != nil { return minInspectionInterval, errors.Wrapf(err, "Secret creation failed") } // Inspect secret hashes - if err := d.resources.ValidateSecretHashes(ctx, cachedStatus); err != nil { + if err := d.resources.ValidateSecretHashes(ctx, d.GetCachedStatus()); err != nil { return minInspectionInterval, errors.Wrapf(err, "Secret hash validation failed") } // Check for LicenseKeySecret - if err := d.resources.ValidateLicenseKeySecret(cachedStatus); err != nil { + if err := d.resources.ValidateLicenseKeySecret(d.GetCachedStatus()); err != nil { return minInspectionInterval, errors.Wrapf(err, "License Key Secret invalid") } @@ -214,20 +209,20 @@ func (d *Deployment) inspectDeploymentWithError(ctx context.Context, lastInterva } // Ensure we have image info - if retrySoon, exists, err := d.ensureImages(ctx, d.apiObject, cachedStatus); err != nil { + if retrySoon, exists, err := d.ensureImages(ctx, d.apiObject, d.GetCachedStatus()); err != nil { return minInspectionInterval, errors.Wrapf(err, "Image detection failed") } else if retrySoon || !exists { return minInspectionInterval, nil } // Inspection of generated resources needed - if x, err := d.resources.InspectPods(ctx, cachedStatus); err != nil { + if x, err := d.resources.InspectPods(ctx, d.GetCachedStatus()); err != nil { return minInspectionInterval, errors.Wrapf(err, "Pod inspection failed") } else { nextInterval = nextInterval.ReduceTo(x) } 
- if x, err := d.resources.InspectPVCs(ctx, cachedStatus); err != nil { + if x, err := d.resources.InspectPVCs(ctx, d.GetCachedStatus()); err != nil { return minInspectionInterval, errors.Wrapf(err, "PVC inspection failed") } else { nextInterval = nextInterval.ReduceTo(x) @@ -243,7 +238,7 @@ func (d *Deployment) inspectDeploymentWithError(ctx context.Context, lastInterva return minInspectionInterval, errors.Wrapf(err, "Reconciler immediate actions failed") } - if interval, err := d.ensureResources(ctx, nextInterval, cachedStatus); err != nil { + if interval, err := d.ensureResources(ctx, nextInterval, d.GetCachedStatus()); err != nil { return minInspectionInterval, errors.Wrapf(err, "Reconciler resource recreation failed") } else { nextInterval = interval @@ -272,7 +267,7 @@ func (d *Deployment) inspectDeploymentWithError(ctx context.Context, lastInterva }, true); err != nil { return minInspectionInterval, errors.Wrapf(err, "Unable clean plan") } - } else if err, updated := d.reconciler.CreatePlan(ctx, cachedStatus); err != nil { + } else if err, updated := d.reconciler.CreatePlan(ctx, d.GetCachedStatus()); err != nil { return minInspectionInterval, errors.Wrapf(err, "Plan creation failed") } else if updated { d.deps.Log.Info().Msgf("Plan generated, reconciling") @@ -325,7 +320,7 @@ func (d *Deployment) inspectDeploymentWithError(ctx context.Context, lastInterva } // Execute current step of scale/update plan - retrySoon, err := d.reconciler.ExecutePlan(ctx, cachedStatus) + retrySoon, err := d.reconciler.ExecutePlan(ctx, d.GetCachedStatus()) if err != nil { return minInspectionInterval, errors.Wrapf(err, "Plan execution failed") } @@ -344,7 +339,7 @@ func (d *Deployment) inspectDeploymentWithError(ctx context.Context, lastInterva } // At the end of the inspect, we cleanup terminated pods. 
- if x, err := d.resources.CleanupTerminatedPods(ctx, cachedStatus); err != nil { + if x, err := d.resources.CleanupTerminatedPods(ctx, d.GetCachedStatus()); err != nil { return minInspectionInterval, errors.Wrapf(err, "Pod cleanup failed") } else { nextInterval = nextInterval.ReduceTo(x) diff --git a/pkg/deployment/deployment_pod_sync_test.go b/pkg/deployment/deployment_pod_sync_test.go index c37a782f7..eafaaee4f 100644 --- a/pkg/deployment/deployment_pod_sync_test.go +++ b/pkg/deployment/deployment_pod_sync_test.go @@ -226,7 +226,7 @@ func TestEnsurePod_Sync_Master(t *testing.T) { testCase.createTestPodData(deployment, api.ServerGroupSyncMasters, firstSyncMaster) name := testCase.ArangoDeployment.Spec.Sync.Monitoring.GetTokenSecretName() - auth, err := k8sutil.GetTokenSecret(context.Background(), deployment.GetCachedStatus().SecretReadInterface(), name) + auth, err := k8sutil.GetTokenSecret(context.Background(), deployment.GetCachedStatus().Secret().V1().Read(), name) require.NoError(t, err) testCase.ExpectedPod.Spec.Containers[0].LivenessProbe = createTestLivenessProbe( @@ -316,7 +316,7 @@ func TestEnsurePod_Sync_Master(t *testing.T) { testCase.createTestPodData(deployment, api.ServerGroupSyncMasters, firstSyncMaster) name := testCase.ArangoDeployment.Spec.Sync.Monitoring.GetTokenSecretName() - auth, err := k8sutil.GetTokenSecret(context.Background(), deployment.GetCachedStatus().SecretReadInterface(), name) + auth, err := k8sutil.GetTokenSecret(context.Background(), deployment.GetCachedStatus().Secret().V1().Read(), name) require.NoError(t, err) testCase.ExpectedPod.Spec.Containers[0].LivenessProbe = createTestLivenessProbe( @@ -419,7 +419,7 @@ func TestEnsurePod_Sync_Worker(t *testing.T) { testCase.createTestPodData(deployment, api.ServerGroupSyncWorkers, firstSyncWorker) name := testCase.ArangoDeployment.Spec.Sync.Monitoring.GetTokenSecretName() - auth, err := k8sutil.GetTokenSecret(context.Background(), deployment.GetCachedStatus().SecretReadInterface(), 
name) + auth, err := k8sutil.GetTokenSecret(context.Background(), deployment.GetCachedStatus().Secret().V1().Read(), name) require.NoError(t, err) testCase.ExpectedPod.Spec.Containers[0].LivenessProbe = createTestLivenessProbe( diff --git a/pkg/deployment/deployment_pod_tls_sni_test.go b/pkg/deployment/deployment_pod_tls_sni_test.go index 55622cd92..d589cb391 100644 --- a/pkg/deployment/deployment_pod_tls_sni_test.go +++ b/pkg/deployment/deployment_pod_tls_sni_test.go @@ -25,18 +25,17 @@ import ( "fmt" "testing" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret" - api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/util" "github.com/arangodb/kube-arangodb/pkg/util/constants" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" + secretv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret/v1" "github.com/stretchr/testify/require" core "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func createTLSSNISecret(t *testing.T, client secret.ModInterface, name, namespace string) { +func createTLSSNISecret(t *testing.T, client secretv1.ModInterface, name, namespace string) { secret := core.Secret{ ObjectMeta: meta.ObjectMeta{ Name: name, diff --git a/pkg/deployment/deployment_run_test.go b/pkg/deployment/deployment_run_test.go index 36c269e21..4c16b8ab0 100644 --- a/pkg/deployment/deployment_run_test.go +++ b/pkg/deployment/deployment_run_test.go @@ -33,8 +33,6 @@ import ( "github.com/rs/zerolog/log" - "github.com/arangodb/kube-arangodb/pkg/deployment/resources/inspector" - "github.com/arangodb/kube-arangodb/pkg/util" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" core "k8s.io/api/core/v1" @@ -157,7 +155,9 @@ func runTestCase(t *testing.T, testCase testCaseStruct) { }, } - if _, err := d.ArangoMembersModInterface().Create(context.Background(), &member, metav1.CreateOptions{}); err != nil { + c := d.WithCurrentArangoMember(m.ArangoMemberName(d.GetName(), group)) + 
+ if err := c.Create(context.Background(), &member); err != nil { return err } @@ -172,15 +172,14 @@ func runTestCase(t *testing.T, testCase testCaseStruct) { return err } - cache, err := inspector.NewInspector(context.Background(), d.deps.Client, d.GetNamespace()) - require.NoError(t, err) + require.NoError(t, d.currentState.Refresh(context.Background())) groupSpec := d.apiObject.Spec.GetServerGroupSpec(group) image, ok := d.resources.SelectImage(d.apiObject.Spec, d.status.last) require.True(t, ok) - template, err := d.resources.RenderPodTemplateForMember(context.Background(), cache, d.apiObject.Spec, d.status.last, m.ID, image) + template, err := d.resources.RenderPodTemplateForMember(context.Background(), d.GetCachedStatus(), d.apiObject.Spec, d.status.last, m.ID, image) if err != nil { return err } @@ -194,11 +193,17 @@ func runTestCase(t *testing.T, testCase testCaseStruct) { member.Status.Template = podTemplate member.Spec.Template = podTemplate - if _, err := d.ArangoMembersModInterface().Update(context.Background(), &member, metav1.UpdateOptions{}); err != nil { + if err := c.Update(context.Background(), func(obj *api.ArangoMember) bool { + obj.Spec.Template = podTemplate + return true + }); err != nil { return err } - if _, err := d.ArangoMembersModInterface().UpdateStatus(context.Background(), &member, metav1.UpdateOptions{}); err != nil { + if err := c.UpdateStatus(context.Background(), func(obj *api.ArangoMember, s *api.ArangoMemberStatus) bool { + s.Template = podTemplate + return true + }); err != nil { return err } } diff --git a/pkg/deployment/deployment_suite_test.go b/pkg/deployment/deployment_suite_test.go index ff261e43a..4e77dd6b5 100644 --- a/pkg/deployment/deployment_suite_test.go +++ b/pkg/deployment/deployment_suite_test.go @@ -35,7 +35,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/deployment/client" monitoringFakeClient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/fake" "github.com/rs/zerolog" - 
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" core "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -49,6 +48,7 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/arangod/conn" "github.com/arangodb/kube-arangodb/pkg/util/constants" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/throttle" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/probes" "github.com/arangodb/kube-arangodb/pkg/util/kclient" extfake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake" @@ -108,7 +108,7 @@ func createTestLifecycle(group api.ServerGroup) *core.Lifecycle { func createTestToken(deployment *Deployment, testCase *testCaseStruct, paths []string) (string, error) { name := testCase.ArangoDeployment.Spec.Authentication.GetJWTSecretName() - s, err := k8sutil.GetTokenSecret(context.Background(), deployment.GetCachedStatus().SecretReadInterface(), name) + s, err := k8sutil.GetTokenSecret(context.Background(), deployment.GetCachedStatus().Secret().V1().Read(), name) if err != nil { return "", err } @@ -479,20 +479,18 @@ func createTestDeployment(t *testing.T, config Config, arangoDeployment *api.Ara } d := &Deployment{ - apiObject: arangoDeployment, - name: arangoDeployment.GetName(), - namespace: arangoDeployment.GetNamespace(), - config: config, - deps: deps, - eventCh: make(chan *deploymentEvent, deploymentEventQueueSize), - stopCh: make(chan struct{}), + apiObject: arangoDeployment, + name: arangoDeployment.GetName(), + namespace: arangoDeployment.GetNamespace(), + config: config, + deps: deps, + eventCh: make(chan *deploymentEvent, deploymentEventQueueSize), + stopCh: make(chan struct{}), + currentState: inspector.NewInspector(throttle.NewAlwaysThrottleComponents(), deps.Client, arangoDeployment.GetNamespace()), } d.clientCache = client.NewClientCache(d, conn.NewFactory(d.getAuth, d.getConnConfig)) - cachedStatus, err := 
inspector.NewInspector(context.Background(), deps.Client, d.GetNamespace()) - require.NoError(t, err) - assert.NotEmpty(t, cachedStatus.GetVersionInfo(), "API server should not have returned empty version") - d.SetCachedStatus(cachedStatus) + require.NoError(t, d.currentState.Refresh(context.Background())) arangoDeployment.Spec.SetDefaults(arangoDeployment.GetName()) d.resources = resources.NewResources(deps.Log, d) diff --git a/pkg/deployment/images.go b/pkg/deployment/images.go index 5c14e2623..71bb6dcfb 100644 --- a/pkg/deployment/images.go +++ b/pkg/deployment/images.go @@ -139,7 +139,7 @@ func (ib *imagesBuilder) fetchArangoDBImageIDAndVersion(ctx context.Context, cac // Check if pod exists ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) defer cancel() - pod, err := ib.Context.GetCachedStatus().PodReadInterface().Get(ctxChild, podName, metav1.GetOptions{}) + pod, err := ib.Context.GetCachedStatus().Pod().V1().Read().Get(ctxChild, podName, metav1.GetOptions{}) if err == nil { // Pod found if k8sutil.IsPodFailed(pod, utils.StringList{k8sutil.ServerContainerName}) { diff --git a/pkg/deployment/images_test.go b/pkg/deployment/images_test.go index c0fe85b38..507bf03b0 100644 --- a/pkg/deployment/images_test.go +++ b/pkg/deployment/images_test.go @@ -29,8 +29,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/constants" - "github.com/arangodb/kube-arangodb/pkg/deployment/resources/inspector" - "github.com/arangodb/kube-arangodb/pkg/deployment/resources" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" @@ -185,7 +183,7 @@ func TestEnsureImages(t *testing.T) { require.NoError(t, err) }, After: func(t *testing.T, deployment *Deployment) { - pods := deployment.GetCachedStatus().Pods() + pods := deployment.GetCachedStatus().Pod().V1().ListSimple() require.Len(t, pods, 1) }, }, @@ -209,7 +207,7 @@ func TestEnsureImages(t *testing.T) { require.NoError(t, err) }, After: func(t *testing.T, deployment *Deployment) { - pods := 
deployment.GetCachedStatus().Pods() + pods := deployment.GetCachedStatus().Pod().V1().ListSimple() require.Len(t, pods, 0) }, }, @@ -238,7 +236,7 @@ func TestEnsureImages(t *testing.T) { require.NoError(t, err) }, After: func(t *testing.T, deployment *Deployment) { - pods := deployment.GetCachedStatus().Pods() + pods := deployment.GetCachedStatus().Pod().V1().ListSimple() require.Len(t, pods, 1) }, }, @@ -268,7 +266,7 @@ func TestEnsureImages(t *testing.T) { require.NoError(t, err) }, After: func(t *testing.T, deployment *Deployment) { - pods := deployment.GetCachedStatus().Pods() + pods := deployment.GetCachedStatus().Pod().V1().ListSimple() require.Len(t, pods, 1) }, }, @@ -301,7 +299,7 @@ func TestEnsureImages(t *testing.T) { require.NoError(t, err) }, After: func(t *testing.T, deployment *Deployment) { - pods := deployment.GetCachedStatus().Pods() + pods := deployment.GetCachedStatus().Pod().V1().ListSimple() require.Len(t, pods, 1) }, }, @@ -326,8 +324,10 @@ func TestEnsureImages(t *testing.T) { _, err := d.deps.Client.Arango().DatabaseV1().ArangoDeployments(testNamespace).Create(context.Background(), d.apiObject, metav1.CreateOptions{}) require.NoError(t, err) + require.NoError(t, d.currentState.Refresh(context.Background())) + // Act - retrySoon, _, err := d.ensureImages(context.Background(), d.apiObject, inspector.NewEmptyInspector()) + retrySoon, _, err := d.ensureImages(context.Background(), d.apiObject, d.GetCachedStatus()) // Assert assert.EqualValues(t, testCase.RetrySoon, retrySoon) diff --git a/pkg/deployment/informers.go b/pkg/deployment/informers.go index be8101cbd..58ede4ed1 100644 --- a/pkg/deployment/informers.go +++ b/pkg/deployment/informers.go @@ -51,16 +51,19 @@ func (d *Deployment) listenForPodEvents(stopCh <-chan struct{}) { &v1.Pod{}, cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { + d.currentState.GetThrottles().Pod().Invalidate() if p, ok := getPod(obj); ok && d.isOwnerOf(p) { d.triggerInspection() } }, UpdateFunc: 
func(oldObj, newObj interface{}) { + d.currentState.GetThrottles().Pod().Invalidate() if p, ok := getPod(newObj); ok && d.isOwnerOf(p) { d.triggerInspection() } }, DeleteFunc: func(obj interface{}) { + d.currentState.GetThrottles().Pod().Invalidate() if p, ok := getPod(obj); ok && d.isOwnerOf(p) { d.triggerInspection() } @@ -93,16 +96,19 @@ func (d *Deployment) listenForPVCEvents(stopCh <-chan struct{}) { &v1.PersistentVolumeClaim{}, cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { + d.currentState.GetThrottles().PersistentVolumeClaim().Invalidate() if p, ok := getPVC(obj); ok && d.isOwnerOf(p) { d.triggerInspection() } }, UpdateFunc: func(oldObj, newObj interface{}) { + d.currentState.GetThrottles().PersistentVolumeClaim().Invalidate() if p, ok := getPVC(newObj); ok && d.isOwnerOf(p) { d.triggerInspection() } }, DeleteFunc: func(obj interface{}) { + d.currentState.GetThrottles().PersistentVolumeClaim().Invalidate() if p, ok := getPVC(obj); ok && d.isOwnerOf(p) { d.triggerInspection() } @@ -136,16 +142,19 @@ func (d *Deployment) listenForSecretEvents(stopCh <-chan struct{}) { cache.ResourceEventHandlerFuncs{ // Note: For secrets we look at all of them because they do not have to be owned by this deployment. 
AddFunc: func(obj interface{}) { + d.currentState.GetThrottles().Secret().Invalidate() if getSecret(obj) { d.triggerInspection() } }, UpdateFunc: func(oldObj, newObj interface{}) { + d.currentState.GetThrottles().Secret().Invalidate() if getSecret(newObj) { d.triggerInspection() } }, DeleteFunc: func(obj interface{}) { + d.currentState.GetThrottles().Secret().Invalidate() if getSecret(obj) { d.triggerInspection() } @@ -178,16 +187,19 @@ func (d *Deployment) listenForServiceEvents(stopCh <-chan struct{}) { &v1.Service{}, cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { + d.currentState.GetThrottles().Service().Invalidate() if s, ok := getService(obj); ok && d.isOwnerOf(s) { d.triggerInspection() } }, UpdateFunc: func(oldObj, newObj interface{}) { + d.currentState.GetThrottles().Service().Invalidate() if s, ok := getService(newObj); ok && d.isOwnerOf(s) { d.triggerInspection() } }, DeleteFunc: func(obj interface{}) { + d.currentState.GetThrottles().Service().Invalidate() if s, ok := getService(obj); ok && d.isOwnerOf(s) { d.triggerInspection() } diff --git a/pkg/deployment/metrics.go b/pkg/deployment/metrics.go index 47a837bb3..38870acb8 100644 --- a/pkg/deployment/metrics.go +++ b/pkg/deployment/metrics.go @@ -24,6 +24,7 @@ import ( "sync" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/throttle" "github.com/arangodb/kube-arangodb/pkg/util/metrics" "github.com/prometheus/client_golang/prometheus" ) @@ -41,6 +42,8 @@ func init() { deploymentAgencyStateMetric: metrics.NewDescription("arango_operator_deployment_agency_state", "Reachability of agency", []string{"namespace", "deployment"}, nil), deploymentShardLeadersMetric: metrics.NewDescription("arango_operator_deployment_shard_leaders", "Deployment leader shards distribution", []string{"namespace", "deployment", "database", "collection", "shard", "server"}, nil), deploymentShardsMetric: 
metrics.NewDescription("arango_operator_deployment_shards", "Deployment shards distribution", []string{"namespace", "deployment", "database", "collection", "shard", "server"}, nil), + + operatorStateRefreshMetric: metrics.NewDescription("arango_operator_deployment_state_refresh_count", "Number of refreshes in deployment", []string{"namespace", "deployment", "type"}, nil), } prometheus.MustRegister(&localInventory) @@ -55,13 +58,15 @@ type inventory struct { deployments map[string]map[string]*Deployment deploymentsMetric, deploymentMetricsMembersMetric, deploymentAgencyStateMetric, deploymentShardsMetric, deploymentShardLeadersMetric metrics.Description + + operatorStateRefreshMetric metrics.Description } func (i *inventory) Describe(descs chan<- *prometheus.Desc) { i.lock.Lock() defer i.lock.Unlock() - metrics.NewPushDescription(descs).Push(i.deploymentsMetric, i.deploymentMetricsMembersMetric, i.deploymentAgencyStateMetric, i.deploymentShardLeadersMetric, i.deploymentShardsMetric) + metrics.NewPushDescription(descs).Push(i.deploymentsMetric, i.deploymentMetricsMembersMetric, i.deploymentAgencyStateMetric, i.deploymentShardLeadersMetric, i.deploymentShardsMetric, i.operatorStateRefreshMetric) } func (i *inventory) Collect(m chan<- prometheus.Metric) { @@ -73,6 +78,14 @@ func (i *inventory) Collect(m chan<- prometheus.Metric) { for _, deployment := range deployments { p.Push(i.deploymentsMetric.Gauge(1, deployment.GetNamespace(), deployment.GetName())) + if state := deployment.currentState; state != nil { + t := state.GetThrottles() + + for _, c := range throttle.AllComponents() { + p.Push(i.operatorStateRefreshMetric.Gauge(float64(t.Get(c).Count()), deployment.GetNamespace(), deployment.GetName(), string(c))) + } + } + spec := deployment.GetSpec() status, _ := deployment.GetStatus() diff --git a/pkg/deployment/pod/encryption.go b/pkg/deployment/pod/encryption.go index b576d1374..1271c4a53 100644 --- a/pkg/deployment/pod/encryption.go +++ 
b/pkg/deployment/pod/encryption.go @@ -28,8 +28,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/globals" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/interfaces" "github.com/arangodb/kube-arangodb/pkg/util/errors" @@ -40,6 +38,7 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/constants" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" + secretv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret/v1" core "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -69,7 +68,7 @@ func GroupEncryptionSupported(mode api.DeploymentMode, group api.ServerGroup) bo } } -func GetEncryptionKey(ctx context.Context, secrets secret.ReadInterface, name string) (string, []byte, bool, error) { +func GetEncryptionKey(ctx context.Context, secrets secretv1.ReadInterface, name string) (string, []byte, bool, error) { ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) defer cancel() @@ -170,7 +169,8 @@ func (e encryption) Verify(i Input, cachedStatus interfaces.Inspector) error { } if !MultiFileMode(i) { - secret, exists := cachedStatus.Secret(i.Deployment.RocksDB.Encryption.GetKeySecretName()) + + secret, exists := cachedStatus.Secret().V1().GetSimple(i.Deployment.RocksDB.Encryption.GetKeySecretName()) if !exists { return errors.Newf("Encryption key secret does not exist %s", i.Deployment.RocksDB.Encryption.GetKeySecretName()) } diff --git a/pkg/deployment/pod/jwt.go b/pkg/deployment/pod/jwt.go index 25f5b45ed..379505601 100644 --- a/pkg/deployment/pod/jwt.go +++ b/pkg/deployment/pod/jwt.go @@ -101,7 +101,7 @@ func (e jwt) Verify(i Input, cachedStatus interfaces.Inspector) error { } if !VersionHasJWTSecretKeyfolder(i.Version, i.Enterprise) { - secret, exists := cachedStatus.Secret(i.Deployment.Authentication.GetJWTSecretName()) + secret, exists := cachedStatus.Secret().V1().GetSimple(i.Deployment.Authentication.GetJWTSecretName()) 
if !exists { return errors.Newf("Secret for JWT token is missing %s", i.Deployment.Authentication.GetJWTSecretName()) } diff --git a/pkg/deployment/pod/sni.go b/pkg/deployment/pod/sni.go index 3f8c78479..726ecadcf 100644 --- a/pkg/deployment/pod/sni.go +++ b/pkg/deployment/pod/sni.go @@ -82,7 +82,7 @@ func (s sni) Verify(i Input, cachedStatus interfaces.Inspector) error { } for _, secret := range util.SortKeys(i.Deployment.TLS.GetSNI().Mapping) { - kubeSecret, exists := cachedStatus.Secret(secret) + kubeSecret, exists := cachedStatus.Secret().V1().GetSimple(secret) if !exists { return errors.Newf("SNI Secret not found %s", secret) } diff --git a/pkg/deployment/pod/utils.go b/pkg/deployment/pod/utils.go index 29ba0bec3..abc19b44f 100644 --- a/pkg/deployment/pod/utils.go +++ b/pkg/deployment/pod/utils.go @@ -31,7 +31,7 @@ import ( func GenerateMemberEndpoint(services service.Inspector, apiObject meta.Object, spec api.DeploymentSpec, group api.ServerGroup, member api.MemberStatus) (string, error) { memberName := member.ArangoMemberName(apiObject.GetName(), group) - svc, ok := services.Service(memberName) + svc, ok := services.Service().V1().GetSimple(memberName) if !ok { return "", errors.Newf("Service %s not found", memberName) } diff --git a/pkg/deployment/reconcile/action.go b/pkg/deployment/reconcile/action.go index 9c9aa9ce5..cf1a22f53 100644 --- a/pkg/deployment/reconcile/action.go +++ b/pkg/deployment/reconcile/action.go @@ -27,6 +27,7 @@ import ( "time" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/throttle" "github.com/rs/zerolog" ) @@ -79,15 +80,15 @@ func getActionPost(a Action, ctx context.Context) error { type ActionReloadCachedStatus interface { Action - // ReloadCachedStatus keeps information about CachedStatus reloading (executed after action has been executed) - ReloadCachedStatus() bool + // ReloadComponents return cache components to be reloaded + ReloadComponents() 
[]throttle.Component } -func getActionReloadCachedStatus(a Action) bool { +func getActionReloadCachedStatus(a Action) []throttle.Component { if c, ok := a.(ActionReloadCachedStatus); !ok { - return false + return nil } else { - return c.ReloadCachedStatus() + return c.ReloadComponents() } } diff --git a/pkg/deployment/reconcile/action_arango_member_update_pod_spec.go b/pkg/deployment/reconcile/action_arango_member_update_pod_spec.go index 526fb259c..41ac4ca6e 100644 --- a/pkg/deployment/reconcile/action_arango_member_update_pod_spec.go +++ b/pkg/deployment/reconcile/action_arango_member_update_pod_spec.go @@ -46,8 +46,6 @@ func newArangoMemberUpdatePodSpecAction(log zerolog.Logger, action api.Action, a return a } -var _ ActionReloadCachedStatus = &actionArangoMemberUpdatePodSpec{} - // actionArangoMemberUpdatePodSpec implements an ArangoMemberUpdatePodSpec. type actionArangoMemberUpdatePodSpec struct { // actionImpl implement timeout and member id functions @@ -70,9 +68,7 @@ func (a *actionArangoMemberUpdatePodSpec) Start(ctx context.Context) (bool, erro return true, nil } - cache := a.actionCtx.GetCachedStatus() - - member, ok := cache.ArangoMember(m.ArangoMemberName(a.actionCtx.GetName(), a.action.Group)) + member, ok := a.actionCtx.GetCachedStatus().ArangoMember().V1().GetSimple(m.ArangoMemberName(a.actionCtx.GetName(), a.action.Group)) if !ok { err := errors.Newf("ArangoMember not found") log.Error().Err(err).Msg("ArangoMember not found") @@ -129,7 +125,9 @@ func (a *actionArangoMemberUpdatePodSpec) Start(ctx context.Context) (bool, erro template.Endpoint = &q } - if err := a.actionCtx.WithArangoMemberUpdate(context.Background(), member.GetNamespace(), member.GetName(), func(member *api.ArangoMember) bool { + c := a.actionCtx.WithCurrentArangoMember(member.GetName()) + + if err := c.Update(ctx, func(member *api.ArangoMember) bool { if !member.Spec.Template.Equals(template) { member.Spec.Template = template.DeepCopy() return true @@ -141,7 +139,7 @@ func (a 
*actionArangoMemberUpdatePodSpec) Start(ctx context.Context) (bool, erro return false, err } - if err := a.actionCtx.WithArangoMemberStatusUpdate(context.Background(), member.GetNamespace(), member.GetName(), func(member *api.ArangoMember, status *api.ArangoMemberStatus) bool { + if err := c.UpdateStatus(ctx, func(member *api.ArangoMember, status *api.ArangoMemberStatus) bool { if (status.Template == nil || status.Template.PodSpec == nil) && (m.PodSpecVersion == "" || m.PodSpecVersion == template.PodSpecChecksum) { status.Template = template.DeepCopy() } @@ -154,7 +152,3 @@ func (a *actionArangoMemberUpdatePodSpec) Start(ctx context.Context) (bool, erro return true, nil } - -func (a *actionArangoMemberUpdatePodSpec) ReloadCachedStatus() bool { - return true -} diff --git a/pkg/deployment/reconcile/action_arango_member_update_pod_status.go b/pkg/deployment/reconcile/action_arango_member_update_pod_status.go index 91f446dd7..79dfc4e58 100644 --- a/pkg/deployment/reconcile/action_arango_member_update_pod_status.go +++ b/pkg/deployment/reconcile/action_arango_member_update_pod_status.go @@ -67,9 +67,7 @@ func (a *actionArangoMemberUpdatePodStatus) Start(ctx context.Context) (bool, er return true, nil } - cache := a.actionCtx.GetCachedStatus() - - member, ok := cache.ArangoMember(m.ArangoMemberName(a.actionCtx.GetName(), a.action.Group)) + member, ok := a.actionCtx.GetCachedStatus().ArangoMember().V1().GetSimple(m.ArangoMemberName(a.actionCtx.GetName(), a.action.Group)) if !ok { err := errors.Newf("ArangoMember not found") log.Error().Err(err).Msg("ArangoMember not found") @@ -88,7 +86,7 @@ func (a *actionArangoMemberUpdatePodStatus) Start(ctx context.Context) (bool, er } if member.Status.Template == nil || !member.Status.Template.Equals(member.Spec.Template) { - if err := a.actionCtx.WithArangoMemberStatusUpdate(context.Background(), member.GetNamespace(), member.GetName(), func(obj *api.ArangoMember, status *api.ArangoMemberStatus) bool { + if err := 
a.actionCtx.WithCurrentArangoMember(member.GetName()).UpdateStatus(ctx, func(obj *api.ArangoMember, status *api.ArangoMemberStatus) bool { if status.Template == nil || !status.Template.Equals(member.Spec.Template) { status.Template = member.Spec.Template.DeepCopy() return true diff --git a/pkg/deployment/reconcile/action_bootstrap_set_password.go b/pkg/deployment/reconcile/action_bootstrap_set_password.go index 2631fc185..62e471269 100644 --- a/pkg/deployment/reconcile/action_bootstrap_set_password.go +++ b/pkg/deployment/reconcile/action_bootstrap_set_password.go @@ -138,9 +138,7 @@ func (a actionBootstrapSetPassword) setUserPassword(ctx context.Context, user, s } func (a actionBootstrapSetPassword) ensureUserPasswordSecret(ctx context.Context, user, secret string) (string, error) { - cache := a.actionCtx.GetCachedStatus() - - if auth, ok := cache.Secret(secret); !ok { + if auth, ok := a.actionCtx.GetCachedStatus().Secret().V1().GetSimple(secret); !ok { // Create new one tokenData := make([]byte, 32) if _, err := rand.Read(tokenData); err != nil { diff --git a/pkg/deployment/reconcile/action_context.go b/pkg/deployment/reconcile/action_context.go index e0c0193b6..fa5763c08 100644 --- a/pkg/deployment/reconcile/action_context.go +++ b/pkg/deployment/reconcile/action_context.go @@ -31,6 +31,8 @@ import ( "github.com/arangodb/arangosync-client/client" "github.com/arangodb/go-driver/agency" + "time" + "github.com/arangodb/go-driver" backupApi "github.com/arangodb/kube-arangodb/pkg/apis/backup/v1" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" @@ -40,14 +42,13 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/errors" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" inspectorInterface "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/arangomember" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/persistentvolumeclaim" - 
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/poddisruptionbudget" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/service" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/serviceaccount" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/servicemonitor" + persistentvolumeclaimv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/persistentvolumeclaim/v1" + podv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod/v1" + poddisruptionbudgetv1beta1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/poddisruptionbudget/v1beta1" + secretv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret/v1" + servicev1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/service/v1" + serviceaccountv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/serviceaccount/v1" + servicemonitorv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/servicemonitor/v1" ) // ActionContext provides methods to the Action implementations @@ -144,6 +145,14 @@ type actionContext struct { cachedStatus inspectorInterface.Inspector } +func (ac *actionContext) WithArangoMember(cache inspectorInterface.Inspector, timeout time.Duration, name string) reconciler.ArangoMemberModContext { + return ac.context.WithArangoMember(cache, timeout, name) +} + +func (ac *actionContext) WithCurrentArangoMember(name string) reconciler.ArangoMemberModContext { + return ac.context.WithCurrentArangoMember(name) +} + func (ac *actionContext) GetMembersState() member.StateInspector { return ac.context.GetMembersState() } @@ -188,14 +197,6 @@ func (ac *actionContext) SetAgencyMaintenanceMode(ctx context.Context, enabled b return ac.context.SetAgencyMaintenanceMode(ctx, enabled) } -func (ac *actionContext) WithArangoMemberUpdate(ctx 
context.Context, namespace, name string, action reconciler.ArangoMemberUpdateFunc) error { - return ac.context.WithArangoMemberUpdate(ctx, namespace, name, action) -} - -func (ac *actionContext) WithArangoMemberStatusUpdate(ctx context.Context, namespace, name string, action reconciler.ArangoMemberStatusUpdateFunc) error { - return ac.context.WithArangoMemberStatusUpdate(ctx, namespace, name, action) -} - func (ac *actionContext) RenderPodForMember(ctx context.Context, cachedStatus inspectorInterface.Inspector, spec api.DeploymentSpec, status api.DeploymentStatus, memberID string, imageInfo api.ImageInfo) (*core.Pod, error) { return ac.context.RenderPodForMember(ctx, cachedStatus, spec, status, memberID, imageInfo) } @@ -228,38 +229,34 @@ func (ac *actionContext) WithStatusUpdate(ctx context.Context, action reconciler return ac.context.WithStatusUpdate(ctx, action, force...) } -func (ac *actionContext) SecretsModInterface() secret.ModInterface { +func (ac *actionContext) SecretsModInterface() secretv1.ModInterface { return ac.context.SecretsModInterface() } -func (ac *actionContext) PodsModInterface() pod.ModInterface { +func (ac *actionContext) PodsModInterface() podv1.ModInterface { return ac.context.PodsModInterface() } -func (ac *actionContext) ServiceAccountsModInterface() serviceaccount.ModInterface { +func (ac *actionContext) ServiceAccountsModInterface() serviceaccountv1.ModInterface { return ac.context.ServiceAccountsModInterface() } -func (ac *actionContext) ServicesModInterface() service.ModInterface { +func (ac *actionContext) ServicesModInterface() servicev1.ModInterface { return ac.context.ServicesModInterface() } -func (ac *actionContext) PersistentVolumeClaimsModInterface() persistentvolumeclaim.ModInterface { +func (ac *actionContext) PersistentVolumeClaimsModInterface() persistentvolumeclaimv1.ModInterface { return ac.context.PersistentVolumeClaimsModInterface() } -func (ac *actionContext) PodDisruptionBudgetsModInterface() 
poddisruptionbudget.ModInterface { +func (ac *actionContext) PodDisruptionBudgetsModInterface() poddisruptionbudgetv1beta1.ModInterface { return ac.context.PodDisruptionBudgetsModInterface() } -func (ac *actionContext) ServiceMonitorsModInterface() servicemonitor.ModInterface { +func (ac *actionContext) ServiceMonitorsModInterface() servicemonitorv1.ModInterface { return ac.context.ServiceMonitorsModInterface() } -func (ac *actionContext) ArangoMembersModInterface() arangomember.ModInterface { - return ac.context.ArangoMembersModInterface() -} - func (ac *actionContext) UpdateClusterCondition(ctx context.Context, conditionType api.ConditionType, status bool, reason, message string) error { return ac.context.WithStatusUpdate(ctx, func(s *api.DeploymentStatus) bool { return s.Conditions.Update(conditionType, status, reason, message) diff --git a/pkg/deployment/reconcile/action_encryption_add.go b/pkg/deployment/reconcile/action_encryption_add.go index 3d015cd61..24c6b36d2 100644 --- a/pkg/deployment/reconcile/action_encryption_add.go +++ b/pkg/deployment/reconcile/action_encryption_add.go @@ -85,7 +85,7 @@ func (a *encryptionKeyAddAction) Start(ctx context.Context) (bool, error) { secret = s } - sha, d, exists, err := pod.GetEncryptionKey(ctx, a.actionCtx.GetCachedStatus().SecretReadInterface(), secret) + sha, d, exists, err := pod.GetEncryptionKey(ctx, a.actionCtx.GetCachedStatus().Secret().V1().Read(), secret) if err != nil { a.log.Error().Err(err).Msgf("Unable to fetch current encryption key") return true, nil diff --git a/pkg/deployment/reconcile/action_encryption_refresh.go b/pkg/deployment/reconcile/action_encryption_refresh.go index a4f36f2a6..3b185ceee 100644 --- a/pkg/deployment/reconcile/action_encryption_refresh.go +++ b/pkg/deployment/reconcile/action_encryption_refresh.go @@ -57,7 +57,7 @@ func (a *encryptionKeyRefreshAction) Start(ctx context.Context) (bool, error) { func (a *encryptionKeyRefreshAction) CheckProgress(ctx context.Context) (bool, bool, 
error) { ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) defer cancel() - keyfolder, err := a.actionCtx.GetCachedStatus().SecretReadInterface().Get(ctxChild, pod.GetEncryptionFolderSecretName(a.actionCtx.GetName()), meta.GetOptions{}) + keyfolder, err := a.actionCtx.GetCachedStatus().Secret().V1().Read().Get(ctxChild, pod.GetEncryptionFolderSecretName(a.actionCtx.GetName()), meta.GetOptions{}) if err != nil { a.log.Err(err).Msgf("Unable to fetch encryption folder") return true, false, nil diff --git a/pkg/deployment/reconcile/action_encryption_status_update.go b/pkg/deployment/reconcile/action_encryption_status_update.go index b2d28aa52..ad440c21a 100644 --- a/pkg/deployment/reconcile/action_encryption_status_update.go +++ b/pkg/deployment/reconcile/action_encryption_status_update.go @@ -60,7 +60,7 @@ func (a *encryptionKeyStatusUpdateAction) Start(ctx context.Context) (bool, erro ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) defer cancel() - f, err := a.actionCtx.GetCachedStatus().SecretReadInterface().Get(ctxChild, pod.GetEncryptionFolderSecretName(a.actionCtx.GetAPIObject().GetName()), meta.GetOptions{}) + f, err := a.actionCtx.GetCachedStatus().Secret().V1().Read().Get(ctxChild, pod.GetEncryptionFolderSecretName(a.actionCtx.GetAPIObject().GetName()), meta.GetOptions{}) if err != nil { a.log.Error().Err(err).Msgf("Unable to get folder info") return true, nil diff --git a/pkg/deployment/reconcile/action_jwt_add.go b/pkg/deployment/reconcile/action_jwt_add.go index bd7070e12..38a2c0fc4 100644 --- a/pkg/deployment/reconcile/action_jwt_add.go +++ b/pkg/deployment/reconcile/action_jwt_add.go @@ -77,7 +77,7 @@ func (a *jwtAddAction) Start(ctx context.Context) (bool, error) { return true, nil } - s, ok := a.actionCtx.GetCachedStatus().Secret(a.actionCtx.GetSpec().Authentication.GetJWTSecretName()) + s, ok := 
a.actionCtx.GetCachedStatus().Secret().V1().GetSimple(a.actionCtx.GetSpec().Authentication.GetJWTSecretName()) if !ok { a.log.Error().Msgf("JWT Secret is missing, no rotation will take place") return true, nil @@ -96,7 +96,7 @@ func (a *jwtAddAction) Start(ctx context.Context) (bool, error) { return true, nil } - f, ok := a.actionCtx.GetCachedStatus().Secret(pod.JWTSecretFolder(a.actionCtx.GetName())) + f, ok := a.actionCtx.GetCachedStatus().Secret().V1().GetSimple(pod.JWTSecretFolder(a.actionCtx.GetName())) if !ok { a.log.Error().Msgf("Unable to get JWT folder info") return true, nil diff --git a/pkg/deployment/reconcile/action_jwt_clean.go b/pkg/deployment/reconcile/action_jwt_clean.go index 69dbee25f..66406f11b 100644 --- a/pkg/deployment/reconcile/action_jwt_clean.go +++ b/pkg/deployment/reconcile/action_jwt_clean.go @@ -79,7 +79,7 @@ func (a *jwtCleanAction) Start(ctx context.Context) (bool, error) { return true, nil } - f, ok := a.actionCtx.GetCachedStatus().Secret(pod.JWTSecretFolder(a.actionCtx.GetName())) + f, ok := a.actionCtx.GetCachedStatus().Secret().V1().GetSimple(pod.JWTSecretFolder(a.actionCtx.GetName())) if !ok { a.log.Error().Msgf("Unable to get JWT folder info") return true, nil diff --git a/pkg/deployment/reconcile/action_jwt_refresh.go b/pkg/deployment/reconcile/action_jwt_refresh.go index 2772d1a29..a24ab0f69 100644 --- a/pkg/deployment/reconcile/action_jwt_refresh.go +++ b/pkg/deployment/reconcile/action_jwt_refresh.go @@ -52,7 +52,7 @@ func (a *jwtRefreshAction) CheckProgress(ctx context.Context) (bool, bool, error return true, false, nil } - folder, ok := a.actionCtx.GetCachedStatus().Secret(pod.JWTSecretFolder(a.actionCtx.GetAPIObject().GetName())) + folder, ok := a.actionCtx.GetCachedStatus().Secret().V1().GetSimple(pod.JWTSecretFolder(a.actionCtx.GetAPIObject().GetName())) if !ok { a.log.Error().Msgf("Unable to get JWT folder info") return true, false, nil diff --git a/pkg/deployment/reconcile/action_jwt_set_active.go 
b/pkg/deployment/reconcile/action_jwt_set_active.go index f89978783..f705e36d2 100644 --- a/pkg/deployment/reconcile/action_jwt_set_active.go +++ b/pkg/deployment/reconcile/action_jwt_set_active.go @@ -77,7 +77,7 @@ func (a *jwtSetActiveAction) Start(ctx context.Context) (bool, error) { return true, nil } - f, ok := a.actionCtx.GetCachedStatus().Secret(pod.JWTSecretFolder(a.actionCtx.GetName())) + f, ok := a.actionCtx.GetCachedStatus().Secret().V1().GetSimple(pod.JWTSecretFolder(a.actionCtx.GetName())) if !ok { a.log.Error().Msgf("Unable to get JWT folder info") return true, nil diff --git a/pkg/deployment/reconcile/action_jwt_status_update.go b/pkg/deployment/reconcile/action_jwt_status_update.go index 402b35efd..749e62326 100644 --- a/pkg/deployment/reconcile/action_jwt_status_update.go +++ b/pkg/deployment/reconcile/action_jwt_status_update.go @@ -90,7 +90,7 @@ func (a *jwtStatusUpdateAction) Start(ctx context.Context) (bool, error) { } if !folder { - f, ok := a.actionCtx.GetCachedStatus().Secret(a.actionCtx.GetSpec().Authentication.GetJWTSecretName()) + f, ok := a.actionCtx.GetCachedStatus().Secret().V1().GetSimple(a.actionCtx.GetSpec().Authentication.GetJWTSecretName()) if !ok { a.log.Error().Msgf("Unable to get JWT secret info") return true, nil @@ -123,7 +123,7 @@ func (a *jwtStatusUpdateAction) Start(ctx context.Context) (bool, error) { return true, nil } - f, ok := a.actionCtx.GetCachedStatus().Secret(pod.JWTSecretFolder(a.actionCtx.GetName())) + f, ok := a.actionCtx.GetCachedStatus().Secret().V1().GetSimple(pod.JWTSecretFolder(a.actionCtx.GetName())) if !ok { a.log.Error().Msgf("Unable to get JWT folder info") return true, nil diff --git a/pkg/deployment/reconcile/action_kill_member_pod.go b/pkg/deployment/reconcile/action_kill_member_pod.go index 5e0ccf045..9fe08823b 100644 --- a/pkg/deployment/reconcile/action_kill_member_pod.go +++ b/pkg/deployment/reconcile/action_kill_member_pod.go @@ -86,7 +86,6 @@ func (a *actionKillMemberPod) CheckProgress(ctx 
context.Context) (bool, bool, er if !features.GracefulShutdown().Enabled() { return true, false, nil } - log := a.log m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID) if !ok { @@ -94,7 +93,7 @@ func (a *actionKillMemberPod) CheckProgress(ctx context.Context) (bool, bool, er return true, false, nil } - p, ok := a.actionCtx.GetCachedStatus().Pod(m.PodName) + p, ok := a.actionCtx.GetCachedStatus().Pod().V1().GetSimple(m.PodName) if !ok { log.Error().Msg("No such member") return true, false, nil diff --git a/pkg/deployment/reconcile/action_runtime_container_args_udpate.go b/pkg/deployment/reconcile/action_runtime_container_args_udpate.go index 01db64f31..9bbc3676a 100644 --- a/pkg/deployment/reconcile/action_runtime_container_args_udpate.go +++ b/pkg/deployment/reconcile/action_runtime_container_args_udpate.go @@ -34,6 +34,7 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/deployment/rotation" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/throttle" ) func init() { @@ -48,7 +49,6 @@ func runtimeContainerArgsUpdate(log zerolog.Logger, action api.Action, actionCtx return a } -var _ ActionReloadCachedStatus = &actionRuntimeContainerArgsUpdate{} var _ ActionPost = &actionRuntimeContainerArgsUpdate{} type actionRuntimeContainerArgsUpdate struct { @@ -58,7 +58,6 @@ type actionRuntimeContainerArgsUpdate struct { // Post updates arguments for the specific Arango member. 
func (a actionRuntimeContainerArgsUpdate) Post(ctx context.Context) error { - m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID) if !ok { a.log.Info().Msg("member is gone already") @@ -66,7 +65,7 @@ func (a actionRuntimeContainerArgsUpdate) Post(ctx context.Context) error { } memberName := m.ArangoMemberName(a.actionCtx.GetName(), a.action.Group) - member, ok := a.actionCtx.GetCachedStatus().ArangoMember(memberName) + member, ok := a.actionCtx.GetCachedStatus().ArangoMember().V1().GetSimple(memberName) if !ok { return errors.Errorf("ArangoMember %s not found", memberName) } @@ -108,7 +107,7 @@ func (a actionRuntimeContainerArgsUpdate) Post(ctx context.Context) error { return false } - err := a.actionCtx.WithArangoMemberStatusUpdate(ctx, member.GetNamespace(), member.GetName(), updateMemberStatusArgs) + err := a.actionCtx.WithCurrentArangoMember(member.GetName()).UpdateStatus(ctx, updateMemberStatusArgs) if err != nil { return errors.WithMessage(err, "Error while updating member status") } @@ -116,14 +115,14 @@ func (a actionRuntimeContainerArgsUpdate) Post(ctx context.Context) error { return nil } -// ReloadCachedStatus reloads the inspector cache when the action is done. -func (a actionRuntimeContainerArgsUpdate) ReloadCachedStatus() bool { - return true +func (a *actionRuntimeContainerArgsUpdate) ReloadComponents() []throttle.Component { + return []throttle.Component{ + throttle.Pod, + } } // Start starts the action for changing conditions on the provided member. 
func (a actionRuntimeContainerArgsUpdate) Start(ctx context.Context) (bool, error) { - m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID) if !ok { a.log.Info().Msg("member is gone already") @@ -141,12 +140,12 @@ func (a actionRuntimeContainerArgsUpdate) Start(ctx context.Context) (bool, erro } memberName := m.ArangoMemberName(a.actionCtx.GetName(), a.action.Group) - member, ok := a.actionCtx.GetCachedStatus().ArangoMember(memberName) + member, ok := a.actionCtx.GetCachedStatus().ArangoMember().V1().GetSimple(memberName) if !ok { return false, errors.Errorf("ArangoMember %s not found", memberName) } - pod, ok := a.actionCtx.GetCachedStatus().Pod(m.PodName) + pod, ok := a.actionCtx.GetCachedStatus().Pod().V1().GetSimple(m.PodName) if !ok { a.log.Info().Str("podName", m.PodName).Msg("pod is not present") return true, nil diff --git a/pkg/deployment/reconcile/action_runtime_container_image_update.go b/pkg/deployment/reconcile/action_runtime_container_image_update.go index 8ec8d55ca..4740259c5 100644 --- a/pkg/deployment/reconcile/action_runtime_container_image_update.go +++ b/pkg/deployment/reconcile/action_runtime_container_image_update.go @@ -44,7 +44,6 @@ func runtimeContainerImageUpdate(log zerolog.Logger, action api.Action, actionCt return a } -var _ ActionReloadCachedStatus = &actionRuntimeContainerImageUpdate{} var _ ActionPost = &actionRuntimeContainerImageUpdate{} type actionRuntimeContainerImageUpdate struct { @@ -66,14 +65,14 @@ func (a actionRuntimeContainerImageUpdate) Post(ctx context.Context) error { return nil } - member, ok := a.actionCtx.GetCachedStatus().ArangoMember(m.ArangoMemberName(a.actionCtx.GetName(), a.action.Group)) + member, ok := a.actionCtx.GetCachedStatus().ArangoMember().V1().GetSimple(m.ArangoMemberName(a.actionCtx.GetName(), a.action.Group)) if !ok { err := errors.Newf("ArangoMember not found") a.log.Error().Err(err).Msg("ArangoMember not found") return err } - return a.actionCtx.WithArangoMemberStatusUpdate(ctx, 
member.GetNamespace(), member.GetName(), func(obj *api.ArangoMember, s *api.ArangoMemberStatus) bool { + return a.actionCtx.WithCurrentArangoMember(member.GetName()).UpdateStatus(ctx, func(obj *api.ArangoMember, s *api.ArangoMemberStatus) bool { if obj.Spec.Template == nil || s.Template == nil || obj.Spec.Template.PodSpec == nil || s.Template.PodSpec == nil { a.log.Info().Msgf("Nil Member definition") @@ -108,10 +107,6 @@ func (a actionRuntimeContainerImageUpdate) Post(ctx context.Context) error { }) } -func (a actionRuntimeContainerImageUpdate) ReloadCachedStatus() bool { - return true -} - func (a actionRuntimeContainerImageUpdate) getContainerDetails() (string, string, bool) { container, ok := a.action.GetParam(rotation.ContainerName) if !ok { @@ -145,14 +140,14 @@ func (a actionRuntimeContainerImageUpdate) Start(ctx context.Context) (bool, err return true, nil } - member, ok := a.actionCtx.GetCachedStatus().ArangoMember(m.ArangoMemberName(a.actionCtx.GetName(), a.action.Group)) + member, ok := a.actionCtx.GetCachedStatus().ArangoMember().V1().GetSimple(m.ArangoMemberName(a.actionCtx.GetName(), a.action.Group)) if !ok { err := errors.Newf("ArangoMember not found") a.log.Error().Err(err).Msg("ArangoMember not found") return false, err } - pod, ok := a.actionCtx.GetCachedStatus().Pod(m.PodName) + pod, ok := a.actionCtx.GetCachedStatus().Pod().V1().GetSimple(m.PodName) if !ok { a.log.Info().Msg("pod is not present") return true, nil @@ -207,7 +202,6 @@ func (a actionRuntimeContainerImageUpdate) Start(ctx context.Context) (bool, err } func (a actionRuntimeContainerImageUpdate) CheckProgress(ctx context.Context) (bool, bool, error) { - a.log.Info().Msgf("Update Progress") m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID) if !ok { @@ -215,7 +209,7 @@ func (a actionRuntimeContainerImageUpdate) CheckProgress(ctx context.Context) (b return true, false, nil } - pod, ok := a.actionCtx.GetCachedStatus().Pod(m.PodName) + pod, ok := 
a.actionCtx.GetCachedStatus().Pod().V1().GetSimple(m.PodName) if !ok { a.log.Info().Msg("pod is not present") return true, false, nil diff --git a/pkg/deployment/reconcile/action_tls_ca_append.go b/pkg/deployment/reconcile/action_tls_ca_append.go index e046579d6..7cbbbf154 100644 --- a/pkg/deployment/reconcile/action_tls_ca_append.go +++ b/pkg/deployment/reconcile/action_tls_ca_append.go @@ -70,13 +70,13 @@ func (a *appendTLSCACertificateAction) Start(ctx context.Context) (bool, error) return true, nil } - caSecret, exists := a.actionCtx.GetCachedStatus().Secret(a.actionCtx.GetSpec().TLS.GetCASecretName()) + caSecret, exists := a.actionCtx.GetCachedStatus().Secret().V1().GetSimple(a.actionCtx.GetSpec().TLS.GetCASecretName()) if !exists { a.log.Warn().Msgf("Secret %s is missing", a.actionCtx.GetSpec().TLS.GetCASecretName()) return true, nil } - caFolder, exists := a.actionCtx.GetCachedStatus().Secret(resources.GetCASecretName(a.actionCtx.GetAPIObject())) + caFolder, exists := a.actionCtx.GetCachedStatus().Secret().V1().GetSimple(resources.GetCASecretName(a.actionCtx.GetAPIObject())) if !exists { a.log.Warn().Msgf("Secret %s is missing", resources.GetCASecretName(a.actionCtx.GetAPIObject())) return true, nil diff --git a/pkg/deployment/reconcile/action_tls_ca_clean.go b/pkg/deployment/reconcile/action_tls_ca_clean.go index 3d63e1a7a..a07b7ea78 100644 --- a/pkg/deployment/reconcile/action_tls_ca_clean.go +++ b/pkg/deployment/reconcile/action_tls_ca_clean.go @@ -71,13 +71,13 @@ func (a *cleanTLSCACertificateAction) Start(ctx context.Context) (bool, error) { return true, nil } - caSecret, exists := a.actionCtx.GetCachedStatus().Secret(a.actionCtx.GetSpec().TLS.GetCASecretName()) + caSecret, exists := a.actionCtx.GetCachedStatus().Secret().V1().GetSimple(a.actionCtx.GetSpec().TLS.GetCASecretName()) if !exists { a.log.Warn().Msgf("Secret %s is missing", a.actionCtx.GetSpec().TLS.GetCASecretName()) return true, nil } - caFolder, exists := 
a.actionCtx.GetCachedStatus().Secret(resources.GetCASecretName(a.actionCtx.GetAPIObject())) + caFolder, exists := a.actionCtx.GetCachedStatus().Secret().V1().GetSimple(resources.GetCASecretName(a.actionCtx.GetAPIObject())) if !exists { a.log.Warn().Msgf("Secret %s is missing", resources.GetCASecretName(a.actionCtx.GetAPIObject())) return true, nil diff --git a/pkg/deployment/reconcile/action_tls_keyfile_refresh.go b/pkg/deployment/reconcile/action_tls_keyfile_refresh.go index b99d82a01..98dcf745b 100644 --- a/pkg/deployment/reconcile/action_tls_keyfile_refresh.go +++ b/pkg/deployment/reconcile/action_tls_keyfile_refresh.go @@ -60,7 +60,7 @@ func (a *refreshTLSKeyfileCertificateAction) CheckProgress(ctx context.Context) return true, false, nil } - s, exists := a.actionCtx.GetCachedStatus().Secret(k8sutil.CreateTLSKeyfileSecretName(a.actionCtx.GetAPIObject().GetName(), a.action.Group.AsRole(), a.action.MemberID)) + s, exists := a.actionCtx.GetCachedStatus().Secret().V1().GetSimple(k8sutil.CreateTLSKeyfileSecretName(a.actionCtx.GetAPIObject().GetName(), a.action.Group.AsRole(), a.action.MemberID)) if !exists { a.log.Warn().Msg("Keyfile secret is missing") return true, false, nil diff --git a/pkg/deployment/reconcile/action_tls_status_update.go b/pkg/deployment/reconcile/action_tls_status_update.go index a9141902b..57a6609a8 100644 --- a/pkg/deployment/reconcile/action_tls_status_update.go +++ b/pkg/deployment/reconcile/action_tls_status_update.go @@ -58,7 +58,7 @@ func (a *tlsKeyStatusUpdateAction) Start(ctx context.Context) (bool, error) { ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) defer cancel() - f, err := a.actionCtx.GetCachedStatus().SecretReadInterface().Get(ctxChild, resources.GetCASecretName(a.actionCtx.GetAPIObject()), meta.GetOptions{}) + f, err := a.actionCtx.GetCachedStatus().Secret().V1().Read().Get(ctxChild, resources.GetCASecretName(a.actionCtx.GetAPIObject()), meta.GetOptions{}) if err != nil { 
a.log.Error().Err(err).Msgf("Unable to get folder info") return true, nil diff --git a/pkg/deployment/reconcile/condition_member_recreation.go b/pkg/deployment/reconcile/condition_member_recreation.go index 5de224f20..1832aac3c 100644 --- a/pkg/deployment/reconcile/condition_member_recreation.go +++ b/pkg/deployment/reconcile/condition_member_recreation.go @@ -117,7 +117,7 @@ func isStorageClassChanged(_ context.Context, log zerolog.Logger, apiObject k8su } // Check if a storage class changed. - if pvc, ok := cachedStatus.PersistentVolumeClaim(member.PersistentVolumeClaimName); !ok { + if pvc, ok := cachedStatus.PersistentVolumeClaim().V1().GetSimple(member.PersistentVolumeClaimName); !ok { log.Warn().Str("role", group.AsRole()).Str("id", member.ID).Msg("Failed to get PVC") return false, "", fmt.Errorf("failed to get PVC %s", member.PersistentVolumeClaimName) } else { @@ -141,7 +141,7 @@ func isStorageClassChanged(_ context.Context, log zerolog.Logger, apiObject k8su // From here on it is known that the member requires replacement, so `true` must be returned. // If pod does not exist then it will try next time. - if pod, ok := cachedStatus.Pod(member.PodName); ok { + if pod, ok := cachedStatus.Pod().V1().GetSimple(member.PodName); ok { if _, ok := pod.GetAnnotations()[deployment.ArangoDeploymentPodReplaceAnnotation]; !ok { log.Warn(). Str("pod-name", member.PodName). @@ -175,7 +175,7 @@ func isVolumeSizeChanged(_ context.Context, log zerolog.Logger, _ k8sutil.APIObj return false, "", nil } - pvc, ok := cachedStatus.PersistentVolumeClaim(member.PersistentVolumeClaimName) + pvc, ok := cachedStatus.PersistentVolumeClaim().V1().GetSimple(member.PersistentVolumeClaimName) if !ok { log.Warn(). Str("role", group.AsRole()). @@ -202,7 +202,7 @@ func isVolumeSizeChanged(_ context.Context, log zerolog.Logger, _ k8sutil.APIObj // From here on it is known that the member requires replacement, so `true` must be returned. // If pod does not exist then it will try next time. 
- if pod, ok := cachedStatus.Pod(member.PodName); ok { + if pod, ok := cachedStatus.Pod().V1().GetSimple(member.PodName); ok { if _, ok := pod.GetAnnotations()[deployment.ArangoDeploymentPodReplaceAnnotation]; !ok { log.Warn().Str("pod-name", member.PodName). Msgf("try shrinking volume size, but %s", getRequiredReplaceMessage(member.PodName)) diff --git a/pkg/deployment/reconcile/helper_shutdown.go b/pkg/deployment/reconcile/helper_shutdown.go index 49f1971cc..1a77704ee 100644 --- a/pkg/deployment/reconcile/helper_shutdown.go +++ b/pkg/deployment/reconcile/helper_shutdown.go @@ -53,7 +53,7 @@ func getShutdownHelper(a *api.Action, actionCtx ActionContext, log zerolog.Logge return NewActionSuccess(), m, true } - pod, ok := actionCtx.GetCachedStatus().Pod(m.PodName) + pod, ok := actionCtx.GetCachedStatus().Pod().V1().GetSimple(m.PodName) if !ok { log.Warn().Str("pod-name", m.PodName).Msg("pod is already gone") // Pod does not exist, so create success action to finish it immediately. diff --git a/pkg/deployment/reconcile/helper_tls_sni.go b/pkg/deployment/reconcile/helper_tls_sni.go index 081d1896a..62b23cfe8 100644 --- a/pkg/deployment/reconcile/helper_tls_sni.go +++ b/pkg/deployment/reconcile/helper_tls_sni.go @@ -44,7 +44,7 @@ func mapTLSSNIConfig(sni api.TLSSNISpec, cachedStatus inspectorInterface.Inspect } for name, servers := range mapping { - secret, exists := cachedStatus.Secret(name) + secret, exists := cachedStatus.Secret().V1().GetSimple(name) if !exists { return nil, errors.Newf("Secret %s does not exist", name) } diff --git a/pkg/deployment/reconcile/plan_builder_encryption.go b/pkg/deployment/reconcile/plan_builder_encryption.go index 082392146..15d7bb839 100644 --- a/pkg/deployment/reconcile/plan_builder_encryption.go +++ b/pkg/deployment/reconcile/plan_builder_encryption.go @@ -98,7 +98,7 @@ func createEncryptionKey(ctx context.Context, return nil } - secret, exists := cachedStatus.Secret(spec.RocksDB.Encryption.GetKeySecretName()) + secret, exists := 
cachedStatus.Secret().V1().GetSimple(spec.RocksDB.Encryption.GetKeySecretName()) if !exists { return nil } @@ -113,7 +113,7 @@ func createEncryptionKey(ctx context.Context, return nil } - keyfolder, exists := cachedStatus.Secret(pod.GetEncryptionFolderSecretName(context.GetName())) + keyfolder, exists := cachedStatus.Secret().V1().GetSimple(pod.GetEncryptionFolderSecretName(context.GetName())) if !exists { log.Error().Msgf("Encryption key folder does not exist") return nil @@ -166,7 +166,7 @@ func createEncryptionKeyStatusUpdateRequired(log zerolog.Logger, spec api.Deploy return false } - keyfolder, exists := cachedStatus.Secret(pod.GetEncryptionFolderSecretName(context.GetName())) + keyfolder, exists := cachedStatus.Secret().V1().GetSimple(pod.GetEncryptionFolderSecretName(context.GetName())) if !exists { log.Error().Msgf("Encryption key folder does not exist") return false @@ -185,7 +185,7 @@ func createEncryptionKeyCleanPlan(ctx context.Context, return nil } - keyfolder, exists := cachedStatus.Secret(pod.GetEncryptionFolderSecretName(context.GetName())) + keyfolder, exists := cachedStatus.Secret().V1().GetSimple(pod.GetEncryptionFolderSecretName(context.GetName())) if !exists { log.Error().Msgf("Encryption key folder does not exist") return nil @@ -201,7 +201,7 @@ func createEncryptionKeyCleanPlan(ctx context.Context, return nil } - secret, exists := cachedStatus.Secret(spec.RocksDB.Encryption.GetKeySecretName()) + secret, exists := cachedStatus.Secret().V1().GetSimple(spec.RocksDB.Encryption.GetKeySecretName()) if !exists { return nil } diff --git a/pkg/deployment/reconcile/plan_builder_high.go b/pkg/deployment/reconcile/plan_builder_high.go index af3884096..b858026e7 100644 --- a/pkg/deployment/reconcile/plan_builder_high.go +++ b/pkg/deployment/reconcile/plan_builder_high.go @@ -167,7 +167,7 @@ func updateMemberRotationConditionsPlan(ctx context.Context, if err := status.Members.ForeachServerGroup(func(group api.ServerGroup, list api.MemberStatusList) error { 
for _, m := range list { - p, ok := cachedStatus.Pod(m.PodName) + p, ok := cachedStatus.Pod().V1().GetSimple(m.PodName) if !ok { p = nil } @@ -193,7 +193,7 @@ func updateMemberRotationConditions(log zerolog.Logger, apiObject k8sutil.APIObj return nil, nil } - arangoMember, ok := cachedStatus.ArangoMember(member.ArangoMemberName(apiObject.GetName(), group)) + arangoMember, ok := cachedStatus.ArangoMember().V1().GetSimple(member.ArangoMemberName(apiObject.GetName(), group)) if !ok { return nil, nil } diff --git a/pkg/deployment/reconcile/plan_builder_jwt.go b/pkg/deployment/reconcile/plan_builder_jwt.go index 41846f0c9..d755a8c00 100644 --- a/pkg/deployment/reconcile/plan_builder_jwt.go +++ b/pkg/deployment/reconcile/plan_builder_jwt.go @@ -53,13 +53,13 @@ func createJWTKeyUpdate(ctx context.Context, return nil } - folder, ok := cachedStatus.Secret(pod.JWTSecretFolder(apiObject.GetName())) + folder, ok := cachedStatus.Secret().V1().GetSimple(pod.JWTSecretFolder(apiObject.GetName())) if !ok { log.Error().Msgf("Unable to get JWT folder info") return nil } - s, ok := cachedStatus.Secret(spec.Authentication.GetJWTSecretName()) + s, ok := cachedStatus.Secret().V1().GetSimple(spec.Authentication.GetJWTSecretName()) if !ok { log.Info().Msgf("JWT Secret is missing, no rotation will take place") return nil @@ -144,7 +144,7 @@ func createJWTStatusUpdateRequired(log zerolog.Logger, apiObject k8sutil.APIObje return true } - f, ok := cachedStatus.Secret(spec.Authentication.GetJWTSecretName()) + f, ok := cachedStatus.Secret().V1().GetSimple(spec.Authentication.GetJWTSecretName()) if !ok { log.Error().Msgf("Unable to get JWT secret info") return false @@ -166,7 +166,7 @@ func createJWTStatusUpdateRequired(log zerolog.Logger, apiObject k8sutil.APIObje return false } - f, ok := cachedStatus.Secret(pod.JWTSecretFolder(apiObject.GetName())) + f, ok := cachedStatus.Secret().V1().GetSimple(pod.JWTSecretFolder(apiObject.GetName())) if !ok { log.Error().Msgf("Unable to get JWT folder 
info") return false diff --git a/pkg/deployment/reconcile/plan_builder_restore.go b/pkg/deployment/reconcile/plan_builder_restore.go index fa43f49c4..e10f5fb6b 100644 --- a/pkg/deployment/reconcile/plan_builder_restore.go +++ b/pkg/deployment/reconcile/plan_builder_restore.go @@ -93,6 +93,7 @@ func restorePlan(spec api.DeploymentSpec) api.Plan { } func createRestorePlanEncryption(ctx context.Context, log zerolog.Logger, spec api.DeploymentSpec, status api.DeploymentStatus, builderCtx PlanBuilderContext) (bool, api.Plan) { + if spec.RestoreEncryptionSecret != nil { if !spec.RocksDB.IsEncrypted() { return true, nil @@ -109,7 +110,7 @@ func createRestorePlanEncryption(ctx context.Context, log zerolog.Logger, spec a secret := *spec.RestoreEncryptionSecret // Additional logic to do restore with encryption key - name, _, exists, err := pod.GetEncryptionKey(ctx, builderCtx.GetCachedStatus().SecretReadInterface(), secret) + name, _, exists, err := pod.GetEncryptionKey(ctx, builderCtx.GetCachedStatus().Secret().V1().Read(), secret) if err != nil { log.Err(err).Msgf("Unable to fetch encryption key") return false, nil diff --git a/pkg/deployment/reconcile/plan_builder_rotate_upgrade.go b/pkg/deployment/reconcile/plan_builder_rotate_upgrade.go index 0e9d4671e..1ac79e4e5 100644 --- a/pkg/deployment/reconcile/plan_builder_rotate_upgrade.go +++ b/pkg/deployment/reconcile/plan_builder_rotate_upgrade.go @@ -98,7 +98,7 @@ func createMarkToRemovePlan(ctx context.Context, continue } - pod, found := cachedStatus.Pod(m.PodName) + pod, found := cachedStatus.Pod().V1().GetSimple(m.PodName) if !found { continue } @@ -202,12 +202,12 @@ func createRotateOrUpgradePlanInternal(log zerolog.Logger, apiObject k8sutil.API if m.Member.Conditions.IsTrue(api.ConditionTypeRestart) { return createRotateMemberPlan(log, m.Member, m.Group, spec, "Restart flag present"), false } - arangoMember, ok := cachedStatus.ArangoMember(m.Member.ArangoMemberName(apiObject.GetName(), m.Group)) + arangoMember, ok := 
cachedStatus.ArangoMember().V1().GetSimple(m.Member.ArangoMemberName(apiObject.GetName(), m.Group)) if !ok { continue } - p, ok := cachedStatus.Pod(m.Member.PodName) + p, ok := cachedStatus.Pod().V1().GetSimple(m.Member.PodName) if !ok { p = nil } @@ -343,7 +343,7 @@ func getPodDetails(ctx context.Context, log zerolog.Logger, apiObject k8sutil.AP return "", nil, nil, false } - member, ok := cachedStatus.ArangoMember(m.ArangoMemberName(apiObject.GetName(), group)) + member, ok := cachedStatus.ArangoMember().V1().GetSimple(m.ArangoMemberName(apiObject.GetName(), group)) if !ok { return "", nil, nil, false } diff --git a/pkg/deployment/reconcile/plan_builder_storage.go b/pkg/deployment/reconcile/plan_builder_storage.go index 240db550d..07400e07a 100644 --- a/pkg/deployment/reconcile/plan_builder_storage.go +++ b/pkg/deployment/reconcile/plan_builder_storage.go @@ -56,7 +56,7 @@ func createRotateServerStorageResizePlan(ctx context.Context, } // Load PVC - pvc, exists := cachedStatus.PersistentVolumeClaim(m.PersistentVolumeClaimName) + pvc, exists := cachedStatus.PersistentVolumeClaim().V1().GetSimple(m.PersistentVolumeClaimName) if !exists { log.Warn(). Str("role", group.AsRole()). 
@@ -96,7 +96,7 @@ func createRotateServerStoragePVCPendingResizeConditionPlan(ctx context.Context, continue } - pvc, exists := cachedStatus.PersistentVolumeClaim(i.Member.PersistentVolumeClaimName) + pvc, exists := cachedStatus.PersistentVolumeClaim().V1().GetSimple(i.Member.PersistentVolumeClaimName) if !exists { continue } diff --git a/pkg/deployment/reconcile/plan_builder_test.go b/pkg/deployment/reconcile/plan_builder_test.go index ce45a161c..b743b2980 100644 --- a/pkg/deployment/reconcile/plan_builder_test.go +++ b/pkg/deployment/reconcile/plan_builder_test.go @@ -26,13 +26,11 @@ import ( "io/ioutil" "testing" - monitoring "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" monitoringClient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" core "k8s.io/api/core/v1" - policy "k8s.io/api/policy/v1beta1" apiErrors "k8s.io/apimachinery/pkg/api/errors" meta "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -41,6 +39,8 @@ import ( "github.com/arangodb/arangosync-client/client" "github.com/arangodb/go-driver/agency" + "time" + "github.com/arangodb/go-driver" backupApi "github.com/arangodb/kube-arangodb/pkg/apis/backup/v1" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" @@ -51,21 +51,22 @@ import ( pod2 "github.com/arangodb/kube-arangodb/pkg/deployment/pod" "github.com/arangodb/kube-arangodb/pkg/deployment/reconciler" "github.com/arangodb/kube-arangodb/pkg/deployment/resources" - "github.com/arangodb/kube-arangodb/pkg/deployment/resources/inspector" "github.com/arangodb/kube-arangodb/pkg/generated/clientset/versioned" "github.com/arangodb/kube-arangodb/pkg/util" "github.com/arangodb/kube-arangodb/pkg/util/arangod/conn" "github.com/arangodb/kube-arangodb/pkg/util/errors" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" inspectorInterface 
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/arangomember" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/persistentvolumeclaim" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/poddisruptionbudget" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/service" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/serviceaccount" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/servicemonitor" + arangomemberv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/arangomember/v1" + persistentvolumeclaimv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/persistentvolumeclaim/v1" + podv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod/v1" + poddisruptionbudgetv1beta1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/poddisruptionbudget/v1beta1" + secretv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret/v1" + servicev1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/service/v1" + serviceaccountv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/serviceaccount/v1" + servicemonitorv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/servicemonitor/v1" + "github.com/arangodb/kube-arangodb/pkg/util/kclient" + "github.com/arangodb/kube-arangodb/pkg/util/tests" ) const pvcName = "pvc_test" @@ -84,6 +85,14 @@ type testContext struct { Inspector inspectorInterface.Inspector } +func (c *testContext) WithArangoMember(cache inspectorInterface.Inspector, timeout time.Duration, name string) reconciler.ArangoMemberModContext { + return reconciler.NewArangoMemberModContext(cache, timeout, name) +} + +func (c *testContext) WithCurrentArangoMember(name string) 
reconciler.ArangoMemberModContext { + return c.WithArangoMember(c.Inspector, 0, name) +} + func (c *testContext) GetMembersState() member.StateInspector { //TODO implement me panic("implement me") @@ -130,35 +139,35 @@ func (c *testContext) GetAgencyCache() (agencyCache.State, bool) { return agencyCache.State{}, true } -func (c *testContext) SecretsModInterface() secret.ModInterface { +func (c *testContext) SecretsModInterface() secretv1.ModInterface { panic("implement me") } -func (c *testContext) PodsModInterface() pod.ModInterface { +func (c *testContext) PodsModInterface() podv1.ModInterface { panic("implement me") } -func (c *testContext) ServiceAccountsModInterface() serviceaccount.ModInterface { +func (c *testContext) ServiceAccountsModInterface() serviceaccountv1.ModInterface { panic("implement me") } -func (c *testContext) ServicesModInterface() service.ModInterface { +func (c *testContext) ServicesModInterface() servicev1.ModInterface { panic("implement me") } -func (c *testContext) PersistentVolumeClaimsModInterface() persistentvolumeclaim.ModInterface { +func (c *testContext) PersistentVolumeClaimsModInterface() persistentvolumeclaimv1.ModInterface { panic("implement me") } -func (c *testContext) PodDisruptionBudgetsModInterface() poddisruptionbudget.ModInterface { +func (c *testContext) PodDisruptionBudgetsModInterface() poddisruptionbudgetv1beta1.ModInterface { panic("implement me") } -func (c *testContext) ServiceMonitorsModInterface() servicemonitor.ModInterface { +func (c *testContext) ServiceMonitorsModInterface() servicemonitorv1.ModInterface { panic("implement me") } -func (c *testContext) ArangoMembersModInterface() arangomember.ModInterface { +func (c *testContext) ArangoMembersModInterface() arangomemberv1.ModInterface { panic("implement me") } @@ -252,7 +261,7 @@ func (c *testContext) GetBackup(_ context.Context, backup string) (*backupApi.Ar panic("implement me") } -func (c *testContext) SecretsInterface() secret.Interface { +func (c 
*testContext) SecretsInterface() secretv1.Interface { panic("implement me") } @@ -437,7 +446,7 @@ func TestCreatePlanSingleScale(t *testing.T) { status.Hashes.TLS.Propagated = true status.Hashes.Encryption.Propagated = true - newPlan, _, changed := createNormalPlan(ctx, log, depl, nil, spec, status, inspector.NewEmptyInspector(), c) + newPlan, _, changed := createNormalPlan(ctx, log, depl, nil, spec, status, tests.NewEmptyInspector(t), c) assert.True(t, changed) assert.Len(t, newPlan, 1) @@ -448,12 +457,12 @@ func TestCreatePlanSingleScale(t *testing.T) { PodName: "something", }, } - newPlan, _, changed = createNormalPlan(ctx, log, depl, nil, spec, status, inspector.NewEmptyInspector(), c) + newPlan, _, changed = createNormalPlan(ctx, log, depl, nil, spec, status, tests.NewEmptyInspector(t), c) assert.True(t, changed) assert.Len(t, newPlan, 0) // Single mode does not scale spec.Single.Count = util.NewInt(2) - newPlan, _, changed = createNormalPlan(ctx, log, depl, nil, spec, status, inspector.NewEmptyInspector(), c) + newPlan, _, changed = createNormalPlan(ctx, log, depl, nil, spec, status, tests.NewEmptyInspector(t), c) assert.True(t, changed) assert.Len(t, newPlan, 0) // Single mode does not scale @@ -469,7 +478,7 @@ func TestCreatePlanSingleScale(t *testing.T) { PodName: "something1", }, } - newPlan, _, changed = createNormalPlan(ctx, log, depl, nil, spec, status, inspector.NewEmptyInspector(), c) + newPlan, _, changed = createNormalPlan(ctx, log, depl, nil, spec, status, tests.NewEmptyInspector(t), c) assert.True(t, changed) assert.Len(t, newPlan, 0) // Single mode does not scale down } @@ -498,7 +507,7 @@ func TestCreatePlanActiveFailoverScale(t *testing.T) { var status api.DeploymentStatus addAgentsToStatus(t, &status, 3) - newPlan, _, changed := createNormalPlan(ctx, log, depl, nil, spec, status, inspector.NewEmptyInspector(), c) + newPlan, _, changed := createNormalPlan(ctx, log, depl, nil, spec, status, tests.NewEmptyInspector(t), c) assert.True(t, changed) 
require.Len(t, newPlan, 2) assert.Equal(t, api.ActionTypeAddMember, newPlan[0].Type) @@ -511,7 +520,7 @@ func TestCreatePlanActiveFailoverScale(t *testing.T) { PodName: "something", }, } - newPlan, _, changed = createNormalPlan(ctx, log, depl, nil, spec, status, inspector.NewEmptyInspector(), c) + newPlan, _, changed = createNormalPlan(ctx, log, depl, nil, spec, status, tests.NewEmptyInspector(t), c) assert.True(t, changed) require.Len(t, newPlan, 1) assert.Equal(t, api.ActionTypeAddMember, newPlan[0].Type) @@ -536,7 +545,7 @@ func TestCreatePlanActiveFailoverScale(t *testing.T) { PodName: "something4", }, } - newPlan, _, changed = createNormalPlan(ctx, log, depl, nil, spec, status, inspector.NewEmptyInspector(), c) + newPlan, _, changed = createNormalPlan(ctx, log, depl, nil, spec, status, tests.NewEmptyInspector(t), c) assert.True(t, changed) require.Len(t, newPlan, 3) // Note: Downscaling is only down 1 at a time assert.Equal(t, api.ActionTypeKillMemberPod, newPlan[0].Type) @@ -570,7 +579,7 @@ func TestCreatePlanClusterScale(t *testing.T) { var status api.DeploymentStatus addAgentsToStatus(t, &status, 3) - newPlan, _, changed := createNormalPlan(ctx, log, depl, nil, spec, status, inspector.NewEmptyInspector(), c) + newPlan, _, changed := createNormalPlan(ctx, log, depl, nil, spec, status, tests.NewEmptyInspector(t), c) assert.True(t, changed) require.Len(t, newPlan, 6) // Adding 3 dbservers & 3 coordinators (note: agents do not scale now) assert.Equal(t, api.ActionTypeAddMember, newPlan[0].Type) @@ -603,7 +612,7 @@ func TestCreatePlanClusterScale(t *testing.T) { PodName: "coordinator1", }, } - newPlan, _, changed = createNormalPlan(ctx, log, depl, nil, spec, status, inspector.NewEmptyInspector(), c) + newPlan, _, changed = createNormalPlan(ctx, log, depl, nil, spec, status, tests.NewEmptyInspector(t), c) assert.True(t, changed) require.Len(t, newPlan, 3) assert.Equal(t, api.ActionTypeAddMember, newPlan[0].Type) @@ -640,7 +649,7 @@ func 
TestCreatePlanClusterScale(t *testing.T) { } spec.DBServers.Count = util.NewInt(1) spec.Coordinators.Count = util.NewInt(1) - newPlan, _, changed = createNormalPlan(ctx, log, depl, nil, spec, status, inspector.NewEmptyInspector(), c) + newPlan, _, changed = createNormalPlan(ctx, log, depl, nil, spec, status, tests.NewEmptyInspector(t), c) assert.True(t, changed) require.Len(t, newPlan, 7) // Note: Downscaling is done 1 at a time assert.Equal(t, api.ActionTypeCleanOutMember, newPlan[0].Type) @@ -679,25 +688,14 @@ type testCase struct { ExpectedLog string ExpectedEvent *k8sutil.Event - Pods map[string]*core.Pod - Secrets map[string]*core.Secret - Services map[string]*core.Service - PVCS map[string]*core.PersistentVolumeClaim - ServiceAccounts map[string]*core.ServiceAccount - PDBS map[string]*policy.PodDisruptionBudget - ServiceMonitors map[string]*monitoring.ServiceMonitor - ArangoMembers map[string]*api.ArangoMember - Nodes map[string]*core.Node - ACS map[string]*api.ArangoClusterSynchronization - AT map[string]*api.ArangoTask - VersionInfo driver.Version + kclient.FakeDataInput Extender func(t *testing.T, r *Reconciler, c *testCase) } -func (t testCase) Inspector() inspectorInterface.Inspector { - return inspector.NewInspectorFromData(t.Pods, t.Secrets, t.PVCS, t.Services, t.ServiceAccounts, t.PDBS, - t.ServiceMonitors, t.ArangoMembers, t.Nodes, t.ACS, t.AT, t.VersionInfo) +func (t testCase) Inspector(test *testing.T) inspectorInterface.Inspector { + t.FakeDataInput.Namespace = tests.FakeNamespace + return tests.NewInspector(test, t.FakeDataInput.Client()) } func TestCreatePlan(t *testing.T) { @@ -734,7 +732,7 @@ func TestCreatePlan(t *testing.T) { deploymentTemplate := &api.ArangoDeployment{ ObjectMeta: meta.ObjectMeta{ Name: "test_depl", - Namespace: "test", + Namespace: tests.FakeNamespace, }, Spec: api.DeploymentSpec{ Mode: api.NewMode(api.DeploymentModeCluster), @@ -787,19 +785,21 @@ func TestCreatePlan(t *testing.T) { }, { Name: "Change Storage for 
DBServers", - PVCS: map[string]*core.PersistentVolumeClaim{ - pvcName: { - Spec: core.PersistentVolumeClaimSpec{ - StorageClassName: util.NewString("oldStorage"), + FakeDataInput: kclient.FakeDataInput{ + PVCS: map[string]*core.PersistentVolumeClaim{ + pvcName: { + Spec: core.PersistentVolumeClaimSpec{ + StorageClassName: util.NewString("oldStorage"), + }, }, }, + Pods: map[string]*core.Pod{ + "dbserver1": {}, + }, }, context: &testContext{ ArangoDeployment: deploymentTemplate.DeepCopy(), }, - Pods: map[string]*core.Pod{ - "dbserver1": {}, - }, Helper: func(ad *api.ArangoDeployment) { ad.Spec.DBServers = api.ServerGroupSpec{ Count: util.NewInt(3), @@ -832,10 +832,12 @@ func TestCreatePlan(t *testing.T) { }, { Name: "Wait for changing Storage for DBServers", - PVCS: map[string]*core.PersistentVolumeClaim{ - pvcName: { - Spec: core.PersistentVolumeClaimSpec{ - StorageClassName: util.NewString("oldStorage"), + FakeDataInput: kclient.FakeDataInput{ + PVCS: map[string]*core.PersistentVolumeClaim{ + pvcName: { + Spec: core.PersistentVolumeClaimSpec{ + StorageClassName: util.NewString("oldStorage"), + }, }, }, }, @@ -862,10 +864,12 @@ func TestCreatePlan(t *testing.T) { }, { Name: "Change Storage for Agents with deprecated storage class name", - PVCS: map[string]*core.PersistentVolumeClaim{ - pvcName: { - Spec: core.PersistentVolumeClaimSpec{ - StorageClassName: util.NewString(""), + FakeDataInput: kclient.FakeDataInput{ + PVCS: map[string]*core.PersistentVolumeClaim{ + pvcName: { + Spec: core.PersistentVolumeClaimSpec{ + StorageClassName: util.NewString(""), + }, }, }, }, @@ -893,10 +897,12 @@ func TestCreatePlan(t *testing.T) { }, { Name: "Storage for Coordinators is not possible", - PVCS: map[string]*core.PersistentVolumeClaim{ - pvcName: { - Spec: core.PersistentVolumeClaimSpec{ - StorageClassName: util.NewString("oldStorage"), + FakeDataInput: kclient.FakeDataInput{ + PVCS: map[string]*core.PersistentVolumeClaim{ + pvcName: { + Spec: core.PersistentVolumeClaimSpec{ + 
StorageClassName: util.NewString("oldStorage"), + }, }, }, }, @@ -932,16 +938,18 @@ func TestCreatePlan(t *testing.T) { }, { Name: "Create rotation plan", - PVCS: map[string]*core.PersistentVolumeClaim{ - "pvc_test": { - Spec: core.PersistentVolumeClaimSpec{ - StorageClassName: util.NewString("oldStorage"), - }, - Status: core.PersistentVolumeClaimStatus{ - Conditions: []core.PersistentVolumeClaimCondition{ - { - Type: core.PersistentVolumeClaimFileSystemResizePending, - Status: core.ConditionTrue, + FakeDataInput: kclient.FakeDataInput{ + PVCS: map[string]*core.PersistentVolumeClaim{ + "pvc_test": { + Spec: core.PersistentVolumeClaimSpec{ + StorageClassName: util.NewString("oldStorage"), + }, + Status: core.PersistentVolumeClaimStatus{ + Conditions: []core.PersistentVolumeClaimCondition{ + { + Type: core.PersistentVolumeClaimFileSystemResizePending, + Status: core.ConditionTrue, + }, }, }, }, @@ -951,7 +959,7 @@ func TestCreatePlan(t *testing.T) { // Add ArangoMember builderCtx := newPlanBuilderContext(r.context) - template, err := builderCtx.RenderPodTemplateForMemberFromCurrent(context.Background(), c.Inspector(), c.context.ArangoDeployment.Status.Members.Agents[0].ID) + template, err := builderCtx.RenderPodTemplateForMemberFromCurrent(context.Background(), c.Inspector(t), c.context.ArangoDeployment.Status.Members.Agents[0].ID) require.NoError(t, err) checksum, err := resources.ChecksumArangoPod(c.context.ArangoDeployment.Spec.Agents, resources.CreatePodFromTemplate(template)) @@ -1134,7 +1142,7 @@ func TestCreatePlan(t *testing.T) { testCase.Helper(testCase.context.ArangoDeployment) } - err, _ := r.CreatePlan(ctx, testCase.Inspector()) + err, _ := r.CreatePlan(ctx, testCase.Inspector(t)) // Assert if testCase.ExpectedEvent != nil { diff --git a/pkg/deployment/reconcile/plan_builder_tls.go b/pkg/deployment/reconcile/plan_builder_tls.go index c76fa8293..5959cddea 100644 --- a/pkg/deployment/reconcile/plan_builder_tls.go +++ 
b/pkg/deployment/reconcile/plan_builder_tls.go @@ -122,7 +122,7 @@ func createTLSStatusUpdateRequired(log zerolog.Logger, apiObject k8sutil.APIObje return false } - trusted, exists := cachedStatus.Secret(resources.GetCASecretName(apiObject)) + trusted, exists := cachedStatus.Secret().V1().GetSimple(resources.GetCASecretName(apiObject)) if !exists { log.Warn().Str("secret", resources.GetCASecretName(apiObject)).Msg("Folder with secrets does not exist") return false @@ -160,7 +160,7 @@ func createCAAppendPlan(ctx context.Context, return nil } - caSecret, exists := cachedStatus.Secret(spec.TLS.GetCASecretName()) + caSecret, exists := cachedStatus.Secret().V1().GetSimple(spec.TLS.GetCASecretName()) if !exists { log.Warn().Str("secret", spec.TLS.GetCASecretName()).Msg("CA Secret does not exists") return nil @@ -177,7 +177,7 @@ func createCAAppendPlan(ctx context.Context, return nil } - trusted, exists := cachedStatus.Secret(resources.GetCASecretName(apiObject)) + trusted, exists := cachedStatus.Secret().V1().GetSimple(resources.GetCASecretName(apiObject)) if !exists { log.Warn().Str("secret", resources.GetCASecretName(apiObject)).Msg("Folder with secrets does not exist") return nil @@ -208,7 +208,7 @@ func createCARenewalPlan(ctx context.Context, return nil } - caSecret, exists := cachedStatus.Secret(spec.TLS.GetCASecretName()) + caSecret, exists := cachedStatus.Secret().V1().GetSimple(spec.TLS.GetCASecretName()) if !exists { log.Warn().Str("secret", spec.TLS.GetCASecretName()).Msg("CA Secret does not exists") return nil @@ -244,7 +244,7 @@ func createCACleanPlan(ctx context.Context, return nil } - caSecret, exists := cachedStatus.Secret(spec.TLS.GetCASecretName()) + caSecret, exists := cachedStatus.Secret().V1().GetSimple(spec.TLS.GetCASecretName()) if !exists { log.Warn().Str("secret", spec.TLS.GetCASecretName()).Msg("CA Secret does not exists") return nil @@ -261,7 +261,7 @@ func createCACleanPlan(ctx context.Context, return nil } - trusted, exists := 
cachedStatus.Secret(resources.GetCASecretName(apiObject)) + trusted, exists := cachedStatus.Secret().V1().GetSimple(resources.GetCASecretName(apiObject)) if !exists { log.Warn().Str("secret", resources.GetCASecretName(apiObject)).Msg("Folder with secrets does not exist") return nil @@ -452,13 +452,13 @@ func keyfileRenewalRequired(ctx context.Context, memberName := member.ArangoMemberName(apiObject.GetName(), group) - service, ok := cachedStatus.Service(memberName) + service, ok := cachedStatus.Service().V1().GetSimple(memberName) if !ok { log.Warn().Str("service", memberName).Msg("Service does not exists") return false, false } - caSecret, exists := cachedStatus.Secret(spec.TLS.GetCASecretName()) + caSecret, exists := cachedStatus.Secret().V1().GetSimple(spec.TLS.GetCASecretName()) if !exists { log.Warn().Str("secret", spec.TLS.GetCASecretName()).Msg("CA Secret does not exists") return false, false @@ -531,7 +531,7 @@ func keyfileRenewalRequired(ctx context.Context, return false, false } - s, exists := cachedStatus.Secret(k8sutil.CreateTLSKeyfileSecretName(apiObject.GetName(), group.AsRole(), member.ID)) + s, exists := cachedStatus.Secret().V1().GetSimple(k8sutil.CreateTLSKeyfileSecretName(apiObject.GetName(), group.AsRole(), member.ID)) if !exists { log.Warn().Msg("Keyfile secret is missing") return false, false diff --git a/pkg/deployment/reconcile/plan_executor.go b/pkg/deployment/reconcile/plan_executor.go index 9ddc4e5d3..eb32d507a 100644 --- a/pkg/deployment/reconcile/plan_executor.go +++ b/pkg/deployment/reconcile/plan_executor.go @@ -217,7 +217,9 @@ func (d *Reconciler) executePlan(ctx context.Context, cachedStatus inspectorInte plan = nil } - if getActionReloadCachedStatus(action) { + if components := getActionReloadCachedStatus(action); len(components) > 0 { + cachedStatus.GetThrottles().Invalidate(components...) 
+ log.Info().Msgf("Reloading cached status") if err := cachedStatus.Refresh(ctx); err != nil { log.Warn().Err(err).Msgf("Unable to reload cached status") diff --git a/pkg/deployment/reconcile/utils.go b/pkg/deployment/reconcile/utils.go index b248f92d0..9fb8f3122 100644 --- a/pkg/deployment/reconcile/utils.go +++ b/pkg/deployment/reconcile/utils.go @@ -83,7 +83,7 @@ func ifPodUIDMismatch(m api.MemberStatus, a api.Action, i pod.Inspector) bool { return false } - p, ok := i.Pod(m.PodName) + p, ok := i.Pod().V1().GetSimple(m.PodName) if !ok { return true } diff --git a/pkg/deployment/reconciler/context.go b/pkg/deployment/reconciler/context.go index 455083a9f..a7a230f04 100644 --- a/pkg/deployment/reconciler/context.go +++ b/pkg/deployment/reconciler/context.go @@ -31,14 +31,13 @@ import ( "github.com/arangodb/kube-arangodb/pkg/deployment/patch" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" inspectorInterface "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/arangomember" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/persistentvolumeclaim" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/poddisruptionbudget" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/service" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/serviceaccount" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/servicemonitor" + persistentvolumeclaimv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/persistentvolumeclaim/v1" + podv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod/v1" + poddisruptionbudgetv1beta1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/poddisruptionbudget/v1beta1" + secretv1 
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret/v1" + servicev1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/service/v1" + serviceaccountv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/serviceaccount/v1" + servicemonitorv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/servicemonitor/v1" core "k8s.io/api/core/v1" ) @@ -100,23 +99,20 @@ type DeploymentImageManager interface { type DeploymentModInterfaces interface { // SecretsModInterface define secret modification interface - SecretsModInterface() secret.ModInterface + SecretsModInterface() secretv1.ModInterface // PodsModInterface define pod modification interface - PodsModInterface() pod.ModInterface + PodsModInterface() podv1.ModInterface // ServiceAccountsModInterface define serviceaccounts modification interface - ServiceAccountsModInterface() serviceaccount.ModInterface + ServiceAccountsModInterface() serviceaccountv1.ModInterface // ServicesModInterface define services modification interface - ServicesModInterface() service.ModInterface + ServicesModInterface() servicev1.ModInterface // PersistentVolumeClaimsModInterface define persistentvolumeclaims modification interface - PersistentVolumeClaimsModInterface() persistentvolumeclaim.ModInterface + PersistentVolumeClaimsModInterface() persistentvolumeclaimv1.ModInterface // PodDisruptionBudgetsModInterface define poddisruptionbudgets modification interface - PodDisruptionBudgetsModInterface() poddisruptionbudget.ModInterface + PodDisruptionBudgetsModInterface() poddisruptionbudgetv1beta1.ModInterface // ServiceMonitorsModInterface define servicemonitor modification interface - ServiceMonitorsModInterface() servicemonitor.ModInterface - - // ArangoMembersModInterface define arangomembers modification interface - ArangoMembersModInterface() arangomember.ModInterface + ServiceMonitorsModInterface() servicemonitorv1.ModInterface } type DeploymentCachedStatus interface { @@ -124,16 +120,6 @@ 
type DeploymentCachedStatus interface { GetCachedStatus() inspectorInterface.Inspector } -type ArangoMemberUpdateFunc func(obj *api.ArangoMember) bool -type ArangoMemberStatusUpdateFunc func(obj *api.ArangoMember, s *api.ArangoMemberStatus) bool - -type ArangoMemberContext interface { - // WithArangoMemberUpdate run action with update of ArangoMember - WithArangoMemberUpdate(ctx context.Context, namespace, name string, action ArangoMemberUpdateFunc) error - // WithArangoMemberStatusUpdate run action with update of ArangoMember Status - WithArangoMemberStatusUpdate(ctx context.Context, namespace, name string, action ArangoMemberStatusUpdateFunc) error -} - type ArangoAgencyGet interface { GetAgencyCache() (agencyCache.State, bool) } diff --git a/pkg/deployment/reconciler/context_arangomember.go b/pkg/deployment/reconciler/context_arangomember.go new file mode 100644 index 000000000..78ee561ee --- /dev/null +++ b/pkg/deployment/reconciler/context_arangomember.go @@ -0,0 +1,172 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package reconciler + +import ( + "context" + "time" + + api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" + "github.com/arangodb/kube-arangodb/pkg/util/errors" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type ArangoMemberCreateFunc func(obj *api.ArangoMember) +type ArangoMemberUpdateFunc func(obj *api.ArangoMember) bool +type ArangoMemberStatusUpdateFunc func(obj *api.ArangoMember, s *api.ArangoMemberStatus) bool + +type ArangoMemberContext interface { + // WithArangoMember start ArangoMember scope. Used in ACS + WithArangoMember(cache inspector.Inspector, timeout time.Duration, name string) ArangoMemberModContext + + // WithCurrentArangoMember start ArangoMember scope within current deployment scope + WithCurrentArangoMember(name string) ArangoMemberModContext +} + +func NewArangoMemberModContext(cache inspector.Inspector, timeout time.Duration, name string) ArangoMemberModContext { + return arangoMemberModContext{ + cache: cache, + name: name, + timeout: timeout, + } +} + +type ArangoMemberModContext interface { + // Exists returns true if object exists + Exists(ctx context.Context) bool + // Create creates ArangoMember + Create(ctx context.Context, obj *api.ArangoMember) error + // Update run action with update of ArangoMember + Update(ctx context.Context, action ArangoMemberUpdateFunc) error + // UpdateStatus run action with update of ArangoMember Status + UpdateStatus(ctx context.Context, action ArangoMemberStatusUpdateFunc) error + // Delete deletes object + Delete(ctx context.Context) error +} + +type arangoMemberModContext struct { + cache inspector.Inspector + name string + timeout time.Duration +} + +func (a arangoMemberModContext) withTimeout(ctx context.Context) (context.Context, func()) { + if a.timeout != 0 { + return context.WithTimeout(ctx, a.timeout) + } + + return ctx, func() {} +} + +func (a 
arangoMemberModContext) Delete(ctx context.Context) error { + ctx, c := a.withTimeout(ctx) + defer c() + + if err := a.cache.Client().Arango().DatabaseV1().ArangoMembers(a.cache.Namespace()).Delete(ctx, a.name, meta.DeleteOptions{}); err != nil { + if api.IsNotFound(err) { + return nil + } + + return err + } + + return nil +} + +func (a arangoMemberModContext) Exists(ctx context.Context) bool { + _, ok := a.cache.ArangoMember().V1().GetSimple(a.name) + return ok +} + +func (a arangoMemberModContext) Create(ctx context.Context, obj *api.ArangoMember) error { + ctx, c := a.withTimeout(ctx) + defer c() + + if obj.GetName() == "" { + obj.Name = a.name + } else if obj.GetName() != a.name { + return errors.Newf("Name is invalid") + } + + if obj.GetNamespace() == "" { + obj.Namespace = a.cache.Namespace() + } else if obj.GetNamespace() != a.cache.Namespace() { + return errors.Newf("Namespace is invalid") + } + + if _, err := a.cache.Client().Arango().DatabaseV1().ArangoMembers(obj.GetNamespace()).Create(ctx, obj, meta.CreateOptions{}); err != nil { + return err + } + + if err := a.cache.ArangoMember().Refresh(ctx); err != nil { + return err + } + + return nil +} + +func (a arangoMemberModContext) Update(ctx context.Context, action ArangoMemberUpdateFunc) error { + ctx, c := a.withTimeout(ctx) + defer c() + + o, err := a.cache.ArangoMember().V1().Read().Get(ctx, a.name, meta.GetOptions{}) + if err != nil { + return err + } + + if action(o) { + if _, err := a.cache.Client().Arango().DatabaseV1().ArangoMembers(a.cache.Namespace()).Update(ctx, o, meta.UpdateOptions{}); err != nil { + return err + } + + if err := a.cache.ArangoMember().Refresh(ctx); err != nil { + return err + } + } + + return nil +} + +func (a arangoMemberModContext) UpdateStatus(ctx context.Context, action ArangoMemberStatusUpdateFunc) error { + ctx, c := a.withTimeout(ctx) + defer c() + + o, err := a.cache.ArangoMember().V1().Read().Get(ctx, a.name, meta.GetOptions{}) + if err != nil { + return err + } + + 
status := o.Status.DeepCopy() + + if action(o, status) { + o.Status = *status + if _, err := a.cache.Client().Arango().DatabaseV1().ArangoMembers(a.cache.Namespace()).UpdateStatus(ctx, o, meta.UpdateOptions{}); err != nil { + return err + } + + if err := a.cache.ArangoMember().Refresh(ctx); err != nil { + return err + } + } + + return nil +} diff --git a/pkg/deployment/resources/annotations.go b/pkg/deployment/resources/annotations.go index 4bda563f9..4fd92dce0 100644 --- a/pkg/deployment/resources/annotations.go +++ b/pkg/deployment/resources/annotations.go @@ -168,7 +168,7 @@ func (r *Resources) EnsureAnnotations(ctx context.Context, cachedStatus inspecto } func ensureSecretsAnnotations(patch PatchFunc, cachedStatus inspectorInterface.Inspector, kind, name, namespace string, spec api.DeploymentSpec) error { - if err := cachedStatus.IterateSecrets(func(secret *core.Secret) error { + if err := cachedStatus.Secret().V1().Iterate(func(secret *core.Secret) error { ensureAnnotationsMap(secret.Kind, secret, spec, patch) return nil }, func(secret *core.Secret) bool { @@ -181,7 +181,7 @@ func ensureSecretsAnnotations(patch PatchFunc, cachedStatus inspectorInterface.I } func ensureServiceAccountsAnnotations(patch PatchFunc, cachedStatus inspectorInterface.Inspector, kind, name, namespace string, spec api.DeploymentSpec) error { - if err := cachedStatus.IterateServiceAccounts(func(serviceAccount *core.ServiceAccount) error { + if err := cachedStatus.ServiceAccount().V1().Iterate(func(serviceAccount *core.ServiceAccount) error { ensureAnnotationsMap(serviceAccount.Kind, serviceAccount, spec, patch) return nil }, func(serviceAccount *core.ServiceAccount) bool { @@ -194,7 +194,7 @@ func ensureServiceAccountsAnnotations(patch PatchFunc, cachedStatus inspectorInt } func ensureServicesAnnotations(patch PatchFunc, cachedStatus inspectorInterface.Inspector, kind, name, namespace string, spec api.DeploymentSpec) error { - if err := cachedStatus.IterateServices(func(service 
*core.Service) error { + if err := cachedStatus.Service().V1().Iterate(func(service *core.Service) error { ensureAnnotationsMap(service.Kind, service, spec, patch) return nil }, func(service *core.Service) bool { @@ -207,7 +207,11 @@ func ensureServicesAnnotations(patch PatchFunc, cachedStatus inspectorInterface. } func ensurePdbsAnnotations(patch PatchFunc, cachedStatus inspectorInterface.Inspector, kind, name, namespace string, spec api.DeploymentSpec) error { - if err := cachedStatus.IteratePodDisruptionBudgets(func(podDisruptionBudget *policy.PodDisruptionBudget) error { + i, err := cachedStatus.PodDisruptionBudget().V1Beta1() + if err != nil { + return err + } + if err := i.Iterate(func(podDisruptionBudget *policy.PodDisruptionBudget) error { ensureAnnotationsMap(podDisruptionBudget.Kind, podDisruptionBudget, spec, patch) return nil }, func(podDisruptionBudget *policy.PodDisruptionBudget) bool { @@ -220,7 +224,7 @@ func ensurePdbsAnnotations(patch PatchFunc, cachedStatus inspectorInterface.Insp } func ensurePvcsAnnotations(patch PatchFunc, cachedStatus inspectorInterface.Inspector, kind, name, namespace string, spec api.DeploymentSpec) error { - if err := cachedStatus.IteratePersistentVolumeClaims(func(persistentVolumeClaim *core.PersistentVolumeClaim) error { + if err := cachedStatus.PersistentVolumeClaim().V1().Iterate(func(persistentVolumeClaim *core.PersistentVolumeClaim) error { ensureGroupAnnotationsMap(persistentVolumeClaim.Kind, persistentVolumeClaim, spec, patch) return nil }, func(persistentVolumeClaim *core.PersistentVolumeClaim) bool { @@ -233,7 +237,11 @@ func ensurePvcsAnnotations(patch PatchFunc, cachedStatus inspectorInterface.Insp } func ensureServiceMonitorsAnnotations(patch PatchFunc, cachedStatus inspectorInterface.Inspector, kind, name, namespace string, spec api.DeploymentSpec) error { - if err := cachedStatus.IterateServiceMonitors(func(serviceMonitor *monitoring.ServiceMonitor) error { + i, err := cachedStatus.ServiceMonitor().V1() + if 
err != nil { + return err + } + if err := i.Iterate(func(serviceMonitor *monitoring.ServiceMonitor) error { ensureAnnotationsMap(serviceMonitor.Kind, serviceMonitor, spec, patch) return nil }, func(serviceMonitor *monitoring.ServiceMonitor) bool { @@ -260,7 +268,8 @@ func getObjectGroup(obj meta.Object) api.ServerGroup { } func ensurePodsAnnotations(patch PatchFunc, cachedStatus inspectorInterface.Inspector, kind, name, namespace string, spec api.DeploymentSpec) error { - if err := cachedStatus.IteratePods(func(pod *core.Pod) error { + + if err := cachedStatus.Pod().V1().Iterate(func(pod *core.Pod) error { ensureGroupAnnotationsMap(pod.Kind, pod, spec, patch) return nil }, func(pod *core.Pod) bool { diff --git a/pkg/deployment/resources/certicicates.go b/pkg/deployment/resources/certicicates.go index e78ee2c87..dcffeb141 100644 --- a/pkg/deployment/resources/certicicates.go +++ b/pkg/deployment/resources/certicicates.go @@ -117,7 +117,7 @@ func GetCertsFromSecret(log zerolog.Logger, secret *core.Secret) Certificates { } func GetKeyCertFromCache(log zerolog.Logger, cachedStatus inspectorInterface.Inspector, spec api.DeploymentSpec, certName, keyName string) (Certificates, interface{}, error) { - caSecret, exists := cachedStatus.Secret(spec.TLS.GetCASecretName()) + caSecret, exists := cachedStatus.Secret().V1().GetSimple(spec.TLS.GetCASecretName()) if !exists { return nil, nil, errors.Newf("CA Secret does not exists") } diff --git a/pkg/deployment/resources/certificates_client_auth.go b/pkg/deployment/resources/certificates_client_auth.go index c747239c4..852879dc3 100644 --- a/pkg/deployment/resources/certificates_client_auth.go +++ b/pkg/deployment/resources/certificates_client_auth.go @@ -25,8 +25,6 @@ import ( "fmt" "time" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret" - "github.com/rs/zerolog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -34,6 +32,7 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" 
"github.com/arangodb/kube-arangodb/pkg/util/errors" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" + secretv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret/v1" ) const ( @@ -42,7 +41,7 @@ const ( // createClientAuthCACertificate creates a client authentication CA certificate and stores it in a secret with name // specified in the given spec. -func createClientAuthCACertificate(ctx context.Context, log zerolog.Logger, secrets secret.ModInterface, spec api.SyncAuthenticationSpec, deploymentName string, ownerRef *metav1.OwnerReference) error { +func createClientAuthCACertificate(ctx context.Context, log zerolog.Logger, secrets secretv1.ModInterface, spec api.SyncAuthenticationSpec, deploymentName string, ownerRef *metav1.OwnerReference) error { log = log.With().Str("secret", spec.GetClientCASecretName()).Logger() options := certificates.CreateCertificateOptions{ CommonName: fmt.Sprintf("%s Client Authentication Root Certificate", deploymentName), diff --git a/pkg/deployment/resources/certificates_tls.go b/pkg/deployment/resources/certificates_tls.go index be11cdde7..c7f7cf6f6 100644 --- a/pkg/deployment/resources/certificates_tls.go +++ b/pkg/deployment/resources/certificates_tls.go @@ -28,8 +28,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/globals" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret" - inspectorInterface "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/tls" @@ -38,6 +36,7 @@ import ( certificates "github.com/arangodb-helper/go-certificates" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" + secretv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret/v1" "github.com/rs/zerolog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -49,7 +48,7 @@ const ( // createTLSCACertificate creates a CA certificate and stores it in a secret with name // 
specified in the given spec. -func createTLSCACertificate(ctx context.Context, log zerolog.Logger, secrets secret.ModInterface, spec api.TLSSpec, +func createTLSCACertificate(ctx context.Context, log zerolog.Logger, secrets secretv1.ModInterface, spec api.TLSSpec, deploymentName string, ownerRef *metav1.OwnerReference) error { log = log.With().Str("secret", spec.GetCASecretName()).Logger() @@ -79,14 +78,13 @@ func createTLSCACertificate(ctx context.Context, log zerolog.Logger, secrets sec // createTLSServerCertificate creates a TLS certificate for a specific server and stores // it in a secret with the given name. -func createTLSServerCertificate(ctx context.Context, log zerolog.Logger, cachedStatus inspectorInterface.Inspector, secrets secret.ModInterface, names tls.KeyfileInput, spec api.TLSSpec, +func createTLSServerCertificate(ctx context.Context, log zerolog.Logger, cachedStatus inspectorInterface.Inspector, secrets secretv1.ModInterface, names tls.KeyfileInput, spec api.TLSSpec, secretName string, ownerRef *metav1.OwnerReference) (bool, error) { - log = log.With().Str("secret", secretName).Logger() // Load CA certificate ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) defer cancel() - caCert, caKey, _, err := k8sutil.GetCASecret(ctxChild, cachedStatus.SecretReadInterface(), spec.GetCASecretName(), nil) + caCert, caKey, _, err := k8sutil.GetCASecret(ctxChild, cachedStatus.Secret().V1().Read(), spec.GetCASecretName(), nil) if err != nil { log.Debug().Err(err).Msg("Failed to load CA certificate") return false, errors.WithStack(err) diff --git a/pkg/deployment/resources/context.go b/pkg/deployment/resources/context.go index e7990aa37..ba209957b 100644 --- a/pkg/deployment/resources/context.go +++ b/pkg/deployment/resources/context.go @@ -32,7 +32,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/deployment/reconciler" "github.com/arangodb/kube-arangodb/pkg/operator/scope" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - 
inspectorInterface "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector" ) // Context provides all functions needed by the Resources service @@ -79,6 +78,4 @@ type Context interface { // GetBackup receives information about a backup resource GetBackup(ctx context.Context, backup string) (*backupApi.ArangoBackup, error) GetScope() scope.Scope - - SetCachedStatus(i inspectorInterface.Inspector) } diff --git a/pkg/deployment/resources/inspector/acs.go b/pkg/deployment/resources/inspector/acs.go index 15e6f829e..77cfec359 100644 --- a/pkg/deployment/resources/inspector/acs.go +++ b/pkg/deployment/resources/inspector/acs.go @@ -22,170 +22,168 @@ package inspector import ( "context" + "time" - "github.com/arangodb/kube-arangodb/pkg/apis/deployment" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/arangodb/kube-arangodb/pkg/generated/clientset/versioned" "github.com/arangodb/kube-arangodb/pkg/util/errors" "github.com/arangodb/kube-arangodb/pkg/util/globals" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/arangoclustersynchronization" - apiErrors "k8s.io/apimachinery/pkg/api/errors" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/throttle" meta "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" ) -func (i *inspector) GetArangoClusterSynchronizations() (arangoclustersynchronization.Inspector, bool) { - i.lock.Lock() - defer i.lock.Unlock() +func init() { + requireRegisterInspectorLoader(arangoClusterSynchronizationsInspectorLoaderObj) +} - if i.acs == nil { - return nil, false - } +var arangoClusterSynchronizationsInspectorLoaderObj = arangoClusterSynchronizationsInspectorLoader{} - return i.acs, i.acs.accessible +type arangoClusterSynchronizationsInspectorLoader struct { } -type arangoClusterSynchronizationLoader struct { - accessible bool +func (p arangoClusterSynchronizationsInspectorLoader) Component() throttle.Component { + return throttle.ArangoClusterSynchronization +} 
- acs map[string]*api.ArangoClusterSynchronization +func (p arangoClusterSynchronizationsInspectorLoader) Load(ctx context.Context, i *inspectorState) { + var q arangoClusterSynchronizationsInspector + p.loadV1(ctx, i, &q) + i.arangoClusterSynchronizations = &q + q.state = i + q.last = time.Now() } -func (a *arangoClusterSynchronizationLoader) FilterArangoClusterSynchronizations(filters ...arangoclustersynchronization.Filter) []*api.ArangoClusterSynchronization { - q := make([]*api.ArangoClusterSynchronization, 0, len(a.acs)) +func (p arangoClusterSynchronizationsInspectorLoader) loadV1(ctx context.Context, i *inspectorState, q *arangoClusterSynchronizationsInspector) { + var z arangoClusterSynchronizationsInspectorV1 - for _, obj := range a.acs { - if a.filterArangoClusterSynchronizations(obj, filters...) { - q = append(q, obj) - } - } + z.arangoClusterSynchronizationInspector = q + + z.arangoClusterSynchronizations, z.err = p.getV1ArangoClusterSynchronizations(ctx, i) - return q + q.v1 = &z } -func (a *arangoClusterSynchronizationLoader) filterArangoClusterSynchronizations(obj *api.ArangoClusterSynchronization, filters ...arangoclustersynchronization.Filter) bool { - for _, f := range filters { - if !f(obj) { - return false - } +func (p arangoClusterSynchronizationsInspectorLoader) getV1ArangoClusterSynchronizations(ctx context.Context, i *inspectorState) (map[string]*api.ArangoClusterSynchronization, error) { + objs, err := p.getV1ArangoClusterSynchronizationsList(ctx, i) + if err != nil { + return nil, err } - return true -} + r := make(map[string]*api.ArangoClusterSynchronization, len(objs)) -func (a *arangoClusterSynchronizationLoader) ArangoClusterSynchronizations() []*api.ArangoClusterSynchronization { - var r []*api.ArangoClusterSynchronization - for _, acs := range a.acs { - r = append(r, acs) + for id := range objs { + r[objs[id].GetName()] = objs[id] } - return r + return r, nil } -func (a *arangoClusterSynchronizationLoader) 
ArangoClusterSynchronization(name string) (*api.ArangoClusterSynchronization, bool) { - acs, ok := a.acs[name] - if !ok { - return nil, false +func (p arangoClusterSynchronizationsInspectorLoader) getV1ArangoClusterSynchronizationsList(ctx context.Context, i *inspectorState) ([]*api.ArangoClusterSynchronization, error) { + ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) + defer cancel() + obj, err := i.client.Arango().DatabaseV1().ArangoClusterSynchronizations(i.namespace).List(ctxChild, meta.ListOptions{ + Limit: globals.GetGlobals().Kubernetes().RequestBatchSize().Get(), + }) + + if err != nil { + return nil, err + } + + items := obj.Items + cont := obj.Continue + var s = int64(len(items)) + + if z := obj.RemainingItemCount; z != nil { + s += *z } - return acs, true -} + ptrs := make([]*api.ArangoClusterSynchronization, 0, s) -func (a *arangoClusterSynchronizationLoader) IterateArangoClusterSynchronizations(action arangoclustersynchronization.Action, filters ...arangoclustersynchronization.Filter) error { - for _, node := range a.ArangoClusterSynchronizations() { - if err := a.iterateArangoClusterSynchronization(node, action, filters...); err != nil { - return err + for { + for id := range items { + ptrs = append(ptrs, &items[id]) } - } - return nil -} -func (a *arangoClusterSynchronizationLoader) iterateArangoClusterSynchronization(acs *api.ArangoClusterSynchronization, action arangoclustersynchronization.Action, filters ...arangoclustersynchronization.Filter) error { - for _, filter := range filters { - if !filter(acs) { - return nil + if cont == "" { + break + } + + items, cont, err = p.getV1ArangoClusterSynchronizationsListRequest(ctx, i, cont) + + if err != nil { + return nil, err } } - return action(acs) + return ptrs, nil } -func (a *arangoClusterSynchronizationLoader) ArangoClusterSynchronizationReadInterface() arangoclustersynchronization.ReadInterface { - return &arangoClusterSynchronizationReadInterface{i: a} +func (p 
arangoClusterSynchronizationsInspectorLoader) getV1ArangoClusterSynchronizationsListRequest(ctx context.Context, i *inspectorState, cont string) ([]api.ArangoClusterSynchronization, string, error) { + ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) + defer cancel() + obj, err := i.client.Arango().DatabaseV1().ArangoClusterSynchronizations(i.namespace).List(ctxChild, meta.ListOptions{ + Limit: globals.GetGlobals().Kubernetes().RequestBatchSize().Get(), + Continue: cont, + }) + + if err != nil { + return nil, "", err + } + + return obj.Items, obj.Continue, err } -type arangoClusterSynchronizationReadInterface struct { - i *arangoClusterSynchronizationLoader +func (p arangoClusterSynchronizationsInspectorLoader) Verify(i *inspectorState) error { + return nil } -func (a *arangoClusterSynchronizationReadInterface) Get(ctx context.Context, name string, opts meta.GetOptions) (*api.ArangoClusterSynchronization, error) { - if s, ok := a.i.ArangoClusterSynchronization(name); !ok { - return nil, apiErrors.NewNotFound(schema.GroupResource{ - Group: deployment.ArangoDeploymentGroupName, - Resource: "arangoclustersynchronizations", - }, name) - } else { - return s, nil +func (p arangoClusterSynchronizationsInspectorLoader) Copy(from, to *inspectorState, override bool) { + if to.arangoClusterSynchronizations != nil { + if !override { + return + } } + + to.arangoClusterSynchronizations = from.arangoClusterSynchronizations + to.arangoClusterSynchronizations.state = to } -func arangoClusterSynchronizationPointer(acs api.ArangoClusterSynchronization) *api.ArangoClusterSynchronization { - return &acs +func (p arangoClusterSynchronizationsInspectorLoader) Name() string { + return "arangoClusterSynchronizations" } -func arangoClusterSynchronizationsToMap(ctx context.Context, inspector *inspector, k versioned.Interface, namespace string) func() error { - return func() error { - acss, err := getArangoClusterSynchronizations(ctx, k, namespace, "") - if err != 
nil { - if apiErrors.IsUnauthorized(err) || apiErrors.IsNotFound(err) { - inspector.acs = &arangoClusterSynchronizationLoader{ - accessible: false, - } - return nil - } - return err - } +type arangoClusterSynchronizationsInspector struct { + state *inspectorState - acssMap := map[string]*api.ArangoClusterSynchronization{} + last time.Time - for _, acs := range acss { - _, exists := acssMap[acs.GetName()] - if exists { - return errors.Newf("ArangoMember %s already exists in map, error received", acs.GetName()) - } + v1 *arangoClusterSynchronizationsInspectorV1 +} - acssMap[acs.GetName()] = arangoClusterSynchronizationPointer(acs) - } +func (p *arangoClusterSynchronizationsInspector) LastRefresh() time.Time { + return p.last +} - inspector.acs = &arangoClusterSynchronizationLoader{ - accessible: true, - acs: acssMap, - } +func (p *arangoClusterSynchronizationsInspector) IsStatic() bool { + return p.state.IsStatic() +} - return nil - } +func (p *arangoClusterSynchronizationsInspector) Refresh(ctx context.Context) error { + p.Throttle(p.state.throttles).Invalidate() + return p.state.refresh(ctx, arangoClusterSynchronizationsInspectorLoaderObj) } -func getArangoClusterSynchronizations(ctx context.Context, k versioned.Interface, namespace, cont string) ([]api.ArangoClusterSynchronization, error) { - ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) - defer cancel() - acss, err := k.DatabaseV1().ArangoClusterSynchronizations(namespace).List(ctxChild, meta.ListOptions{ - Limit: globals.GetGlobals().Kubernetes().RequestBatchSize().Get(), - Continue: cont, - }) +func (p arangoClusterSynchronizationsInspector) Throttle(c throttle.Components) throttle.Throttle { + return c.ArangoClusterSynchronization() +} - if err != nil { - return nil, err +func (p *arangoClusterSynchronizationsInspector) validate() error { + if p == nil { + return errors.Newf("ArangoClusterSynchronizationInspector is nil") } - if acss.Continue != "" { - newACSLoader, err := 
getArangoClusterSynchronizations(ctx, k, namespace, acss.Continue) - if err != nil { - return nil, err - } - - return append(acss.Items, newACSLoader...), nil + if p.state == nil { + return errors.Newf("Parent is nil") } - return acss.Items, nil + return p.v1.validate() } diff --git a/pkg/deployment/resources/inspector/acs_v1.go b/pkg/deployment/resources/inspector/acs_v1.go new file mode 100644 index 000000000..bff16d68c --- /dev/null +++ b/pkg/deployment/resources/inspector/acs_v1.go @@ -0,0 +1,141 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package inspector + +import ( + "context" + + "github.com/arangodb/kube-arangodb/pkg/apis/deployment" + api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" + "github.com/arangodb/kube-arangodb/pkg/util/errors" + ins "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/arangoclustersynchronization/v1" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func (p *arangoClusterSynchronizationsInspector) V1() (ins.Inspector, error) { + if p.v1.err != nil { + return nil, p.v1.err + } + + return p.v1, nil +} + +type arangoClusterSynchronizationsInspectorV1 struct { + arangoClusterSynchronizationInspector *arangoClusterSynchronizationsInspector + + arangoClusterSynchronizations map[string]*api.ArangoClusterSynchronization + err error +} + +func (p *arangoClusterSynchronizationsInspectorV1) Filter(filters ...ins.Filter) []*api.ArangoClusterSynchronization { + z := p.ListSimple() + + r := make([]*api.ArangoClusterSynchronization, 0, len(z)) + + for _, o := range z { + if !ins.FilterObject(o, filters...) 
{ + continue + } + + r = append(r, o) + } + + return r +} + +func (p *arangoClusterSynchronizationsInspectorV1) validate() error { + if p == nil { + return errors.Newf("ArangoClusterSynchronizationsV1Inspector is nil") + } + + if p.arangoClusterSynchronizationInspector == nil { + return errors.Newf("Parent is nil") + } + + if p.arangoClusterSynchronizations == nil && p.err == nil { + return errors.Newf("ArangoClusterSynchronizations or err should be not nil") + } + + if p.arangoClusterSynchronizations != nil && p.err != nil { + return errors.Newf("ArangoClusterSynchronizations or err cannot be not nil together") + } + + return nil +} + +func (p *arangoClusterSynchronizationsInspectorV1) ListSimple() []*api.ArangoClusterSynchronization { + var r []*api.ArangoClusterSynchronization + for _, arangoClusterSynchronization := range p.arangoClusterSynchronizations { + r = append(r, arangoClusterSynchronization) + } + + return r +} + +func (p *arangoClusterSynchronizationsInspectorV1) GetSimple(name string) (*api.ArangoClusterSynchronization, bool) { + arangoClusterSynchronization, ok := p.arangoClusterSynchronizations[name] + if !ok { + return nil, false + } + + return arangoClusterSynchronization, true +} + +func (p *arangoClusterSynchronizationsInspectorV1) Iterate(action ins.Action, filters ...ins.Filter) error { + for _, arangoClusterSynchronization := range p.arangoClusterSynchronizations { + if err := p.iterateArangoClusterSynchronization(arangoClusterSynchronization, action, filters...); err != nil { + return err + } + } + + return nil +} + +func (p *arangoClusterSynchronizationsInspectorV1) iterateArangoClusterSynchronization(arangoClusterSynchronization *api.ArangoClusterSynchronization, action ins.Action, filters ...ins.Filter) error { + for _, f := range filters { + if f == nil { + continue + } + + if !f(arangoClusterSynchronization) { + return nil + } + } + + return action(arangoClusterSynchronization) +} + +func (p *arangoClusterSynchronizationsInspectorV1) Read() ins.ReadInterface { + return p
+} + +func (p *arangoClusterSynchronizationsInspectorV1) Get(ctx context.Context, name string, opts metav1.GetOptions) (*api.ArangoClusterSynchronization, error) { + if s, ok := p.GetSimple(name); !ok { + return nil, apiErrors.NewNotFound(schema.GroupResource{ + Group: deployment.ArangoDeploymentGroupName, + Resource: deployment.ArangoClusterSynchronizationResourcePlural, + }, name) + } else { + return s, nil + } +} diff --git a/pkg/deployment/resources/inspector/am.go b/pkg/deployment/resources/inspector/am.go new file mode 100644 index 000000000..db4f2e6e7 --- /dev/null +++ b/pkg/deployment/resources/inspector/am.go @@ -0,0 +1,193 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package inspector + +import ( + "context" + "time" + + api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" + "github.com/arangodb/kube-arangodb/pkg/util/errors" + "github.com/arangodb/kube-arangodb/pkg/util/globals" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/throttle" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func init() { + requireRegisterInspectorLoader(arangoMembersInspectorLoaderObj) +} + +var arangoMembersInspectorLoaderObj = arangoMembersInspectorLoader{} + +type arangoMembersInspectorLoader struct { +} + +func (p arangoMembersInspectorLoader) Component() throttle.Component { + return throttle.ArangoMember +} + +func (p arangoMembersInspectorLoader) Load(ctx context.Context, i *inspectorState) { + var q arangoMembersInspector + p.loadV1(ctx, i, &q) + i.arangoMembers = &q + q.state = i + q.last = time.Now() +} + +func (p arangoMembersInspectorLoader) loadV1(ctx context.Context, i *inspectorState, q *arangoMembersInspector) { + var z arangoMembersInspectorV1 + + z.arangoMemberInspector = q + + z.arangoMembers, z.err = p.getV1ArangoMembers(ctx, i) + + q.v1 = &z +} + +func (p arangoMembersInspectorLoader) getV1ArangoMembers(ctx context.Context, i *inspectorState) (map[string]*api.ArangoMember, error) { + objs, err := p.getV1ArangoMembersList(ctx, i) + if err != nil { + return nil, err + } + + r := make(map[string]*api.ArangoMember, len(objs)) + + for id := range objs { + r[objs[id].GetName()] = objs[id] + } + + return r, nil +} + +func (p arangoMembersInspectorLoader) getV1ArangoMembersList(ctx context.Context, i *inspectorState) ([]*api.ArangoMember, error) { + ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) + defer cancel() + obj, err := i.client.Arango().DatabaseV1().ArangoMembers(i.namespace).List(ctxChild, meta.ListOptions{ + Limit: globals.GetGlobals().Kubernetes().RequestBatchSize().Get(), + }) + + if err != nil { + 
return nil, err + } + + items := obj.Items + cont := obj.Continue + var s = int64(len(items)) + + if z := obj.RemainingItemCount; z != nil { + s += *z + } + + ptrs := make([]*api.ArangoMember, 0, s) + + for { + for id := range items { + ptrs = append(ptrs, &items[id]) + } + + if cont == "" { + break + } + + items, cont, err = p.getV1ArangoMembersListRequest(ctx, i, cont) + + if err != nil { + return nil, err + } + } + + return ptrs, nil +} + +func (p arangoMembersInspectorLoader) getV1ArangoMembersListRequest(ctx context.Context, i *inspectorState, cont string) ([]api.ArangoMember, string, error) { + ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) + defer cancel() + obj, err := i.client.Arango().DatabaseV1().ArangoMembers(i.namespace).List(ctxChild, meta.ListOptions{ + Limit: globals.GetGlobals().Kubernetes().RequestBatchSize().Get(), + Continue: cont, + }) + + if err != nil { + return nil, "", err + } + + return obj.Items, obj.Continue, err +} + +func (p arangoMembersInspectorLoader) Verify(i *inspectorState) error { + if err := i.arangoMembers.v1.err; err != nil { + return err + } + + return nil +} + +func (p arangoMembersInspectorLoader) Copy(from, to *inspectorState, override bool) { + if to.arangoMembers != nil { + if !override { + return + } + } + + to.arangoMembers = from.arangoMembers + to.arangoMembers.state = to +} + +func (p arangoMembersInspectorLoader) Name() string { + return "arangoMembers" +} + +type arangoMembersInspector struct { + state *inspectorState + + last time.Time + + v1 *arangoMembersInspectorV1 +} + +func (p *arangoMembersInspector) LastRefresh() time.Time { + return p.last +} + +func (p *arangoMembersInspector) IsStatic() bool { + return p.state.IsStatic() +} + +func (p *arangoMembersInspector) Refresh(ctx context.Context) error { + p.Throttle(p.state.throttles).Invalidate() + return p.state.refresh(ctx, arangoMembersInspectorLoaderObj) +} + +func (p arangoMembersInspector) Throttle(c throttle.Components) 
throttle.Throttle { + return c.ArangoMember() +} + +func (p *arangoMembersInspector) validate() error { + if p == nil { + return errors.Newf("ArangoMemberInspector is nil") + } + + if p.state == nil { + return errors.Newf("Parent is nil") + } + + return p.v1.validate() +} diff --git a/pkg/deployment/resources/inspector/am_v1.go b/pkg/deployment/resources/inspector/am_v1.go new file mode 100644 index 000000000..b4e5a83dd --- /dev/null +++ b/pkg/deployment/resources/inspector/am_v1.go @@ -0,0 +1,121 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package inspector + +import ( + "context" + + "github.com/arangodb/kube-arangodb/pkg/apis/deployment" + api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" + "github.com/arangodb/kube-arangodb/pkg/util/errors" + ins "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/arangomember/v1" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func (p *arangoMembersInspector) V1() ins.Inspector { + return p.v1 +} + +type arangoMembersInspectorV1 struct { + arangoMemberInspector *arangoMembersInspector + + arangoMembers map[string]*api.ArangoMember + err error +} + +func (p *arangoMembersInspectorV1) validate() error { + if p == nil { + return errors.Newf("ArangoMembersV1Inspector is nil") + } + + if p.arangoMemberInspector == nil { + return errors.Newf("Parent is nil") + } + + if p.arangoMembers == nil { + return errors.Newf("ArangoMembers or err should be not nil") + } + + if p.err != nil { + return errors.Newf("ArangoMembers or err cannot be not nil together") + } + + return nil +} + +func (p *arangoMembersInspectorV1) ArangoMembers() []*api.ArangoMember { + var r []*api.ArangoMember + for _, arangoMember := range p.arangoMembers { + r = append(r, arangoMember) + } + + return r +} + +func (p *arangoMembersInspectorV1) GetSimple(name string) (*api.ArangoMember, bool) { + arangoMember, ok := p.arangoMembers[name] + if !ok { + return nil, false + } + + return arangoMember, true +} + +func (p *arangoMembersInspectorV1) Iterate(action ins.Action, filters ...ins.Filter) error { + for _, arangoMember := range p.arangoMembers { + if err := p.iterateArangoMember(arangoMember, action, filters...); err != nil { + return err + } + } + + return nil +} + +func (p *arangoMembersInspectorV1) iterateArangoMember(arangoMember *api.ArangoMember, action ins.Action, filters ...ins.Filter) error { + for _, f := 
range filters { + if f == nil { + continue + } + + if !f(arangoMember) { + return nil + } + } + + return action(arangoMember) +} + +func (p *arangoMembersInspectorV1) Read() ins.ReadInterface { + return p +} + +func (p *arangoMembersInspectorV1) Get(ctx context.Context, name string, opts metav1.GetOptions) (*api.ArangoMember, error) { + if s, ok := p.GetSimple(name); !ok { + return nil, apiErrors.NewNotFound(schema.GroupResource{ + Group: deployment.ArangoDeploymentGroupName, + Resource: deployment.ArangoMemberResourcePlural, + }, name) + } else { + return s, nil + } +} diff --git a/pkg/deployment/resources/inspector/at.go b/pkg/deployment/resources/inspector/at.go index 1c9fada00..c658e61ba 100644 --- a/pkg/deployment/resources/inspector/at.go +++ b/pkg/deployment/resources/inspector/at.go @@ -22,170 +22,168 @@ package inspector import ( "context" + "time" - "github.com/arangodb/kube-arangodb/pkg/apis/deployment" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/arangodb/kube-arangodb/pkg/generated/clientset/versioned" "github.com/arangodb/kube-arangodb/pkg/util/errors" "github.com/arangodb/kube-arangodb/pkg/util/globals" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/arangotask" - apiErrors "k8s.io/apimachinery/pkg/api/errors" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/throttle" meta "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" ) -func (i *inspector) GetArangoTasks() (arangotask.Inspector, bool) { - i.lock.Lock() - defer i.lock.Unlock() +func init() { + requireRegisterInspectorLoader(arangoTasksInspectorLoaderObj) +} - if i.at == nil { - return nil, false - } +var arangoTasksInspectorLoaderObj = arangoTasksInspectorLoader{} - return i.at, i.at.accessible +type arangoTasksInspectorLoader struct { } -type arangoTaskLoader struct { - accessible bool +func (p arangoTasksInspectorLoader) Component() throttle.Component { + return throttle.ArangoTask +} - at 
map[string]*api.ArangoTask +func (p arangoTasksInspectorLoader) Load(ctx context.Context, i *inspectorState) { + var q arangoTasksInspector + p.loadV1(ctx, i, &q) + i.arangoTasks = &q + q.state = i + q.last = time.Now() } -func (a *arangoTaskLoader) FilterArangoTasks(filters ...arangotask.Filter) []*api.ArangoTask { - q := make([]*api.ArangoTask, 0, len(a.at)) +func (p arangoTasksInspectorLoader) loadV1(ctx context.Context, i *inspectorState, q *arangoTasksInspector) { + var z arangoTasksInspectorV1 - for _, obj := range a.at { - if a.filterArangoTasks(obj, filters...) { - q = append(q, obj) - } - } + z.arangoTaskInspector = q + + z.arangoTasks, z.err = p.getV1ArangoTasks(ctx, i) - return q + q.v1 = &z } -func (a *arangoTaskLoader) filterArangoTasks(obj *api.ArangoTask, filters ...arangotask.Filter) bool { - for _, f := range filters { - if !f(obj) { - return false - } +func (p arangoTasksInspectorLoader) getV1ArangoTasks(ctx context.Context, i *inspectorState) (map[string]*api.ArangoTask, error) { + objs, err := p.getV1ArangoTasksList(ctx, i) + if err != nil { + return nil, err } - return true -} + r := make(map[string]*api.ArangoTask, len(objs)) -func (a *arangoTaskLoader) ArangoTasks() []*api.ArangoTask { - var r []*api.ArangoTask - for _, at := range a.at { - r = append(r, at) + for id := range objs { + r[objs[id].GetName()] = objs[id] } - return r + return r, nil } -func (a *arangoTaskLoader) ArangoTask(name string) (*api.ArangoTask, bool) { - at, ok := a.at[name] - if !ok { - return nil, false +func (p arangoTasksInspectorLoader) getV1ArangoTasksList(ctx context.Context, i *inspectorState) ([]*api.ArangoTask, error) { + ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) + defer cancel() + obj, err := i.client.Arango().DatabaseV1().ArangoTasks(i.namespace).List(ctxChild, meta.ListOptions{ + Limit: globals.GetGlobals().Kubernetes().RequestBatchSize().Get(), + }) + + if err != nil { + return nil, err + } + + items := obj.Items + cont 
:= obj.Continue + var s = int64(len(items)) + + if z := obj.RemainingItemCount; z != nil { + s += *z } - return at, true -} + ptrs := make([]*api.ArangoTask, 0, s) -func (a *arangoTaskLoader) IterateArangoTasks(action arangotask.Action, filters ...arangotask.Filter) error { - for _, node := range a.ArangoTasks() { - if err := a.iterateArangoTask(node, action, filters...); err != nil { - return err + for { + for id := range items { + ptrs = append(ptrs, &items[id]) } - } - return nil -} -func (a *arangoTaskLoader) iterateArangoTask(at *api.ArangoTask, action arangotask.Action, filters ...arangotask.Filter) error { - for _, filter := range filters { - if !filter(at) { - return nil + if cont == "" { + break + } + + items, cont, err = p.getV1ArangoTasksListRequest(ctx, i, cont) + + if err != nil { + return nil, err } } - return action(at) + return ptrs, nil } -func (a *arangoTaskLoader) ArangoTaskReadInterface() arangotask.ReadInterface { - return &arangoTaskReadInterface{i: a} +func (p arangoTasksInspectorLoader) getV1ArangoTasksListRequest(ctx context.Context, i *inspectorState, cont string) ([]api.ArangoTask, string, error) { + ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) + defer cancel() + obj, err := i.client.Arango().DatabaseV1().ArangoTasks(i.namespace).List(ctxChild, meta.ListOptions{ + Limit: globals.GetGlobals().Kubernetes().RequestBatchSize().Get(), + Continue: cont, + }) + + if err != nil { + return nil, "", err + } + + return obj.Items, obj.Continue, err } -type arangoTaskReadInterface struct { - i *arangoTaskLoader +func (p arangoTasksInspectorLoader) Verify(i *inspectorState) error { + return nil } -func (a *arangoTaskReadInterface) Get(ctx context.Context, name string, opts meta.GetOptions) (*api.ArangoTask, error) { - if s, ok := a.i.ArangoTask(name); !ok { - return nil, apiErrors.NewNotFound(schema.GroupResource{ - Group: deployment.ArangoDeploymentGroupName, - Resource: "arangotasks", - }, name) - } else { - return s, 
nil +func (p arangoTasksInspectorLoader) Copy(from, to *inspectorState, override bool) { + if to.arangoTasks != nil { + if !override { + return + } } + + to.arangoTasks = from.arangoTasks + to.arangoTasks.state = to } -func arangoTaskPointer(at api.ArangoTask) *api.ArangoTask { - return &at +func (p arangoTasksInspectorLoader) Name() string { + return "arangoTasks" } -func arangoTasksToMap(ctx context.Context, inspector *inspector, k versioned.Interface, namespace string) func() error { - return func() error { - ats, err := getArangoTasks(ctx, k, namespace, "") - if err != nil { - if apiErrors.IsUnauthorized(err) || apiErrors.IsNotFound(err) { - inspector.at = &arangoTaskLoader{ - accessible: false, - } - return nil - } - return err - } +type arangoTasksInspector struct { + state *inspectorState - atsMap := map[string]*api.ArangoTask{} + last time.Time - for _, at := range ats { - _, exists := atsMap[at.GetName()] - if exists { - return errors.Newf("ArangoMember %s already exists in map, error received", at.GetName()) - } + v1 *arangoTasksInspectorV1 +} - atsMap[at.GetName()] = arangoTaskPointer(at) - } +func (p *arangoTasksInspector) LastRefresh() time.Time { + return p.last +} - inspector.at = &arangoTaskLoader{ - accessible: true, - at: atsMap, - } +func (p *arangoTasksInspector) IsStatic() bool { + return p.state.IsStatic() +} - return nil - } +func (p *arangoTasksInspector) Refresh(ctx context.Context) error { + p.Throttle(p.state.throttles).Invalidate() + return p.state.refresh(ctx, arangoTasksInspectorLoaderObj) } -func getArangoTasks(ctx context.Context, k versioned.Interface, namespace, cont string) ([]api.ArangoTask, error) { - ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) - defer cancel() - ats, err := k.DatabaseV1().ArangoTasks(namespace).List(ctxChild, meta.ListOptions{ - Limit: globals.GetGlobals().Kubernetes().RequestBatchSize().Get(), - Continue: cont, - }) +func (p arangoTasksInspector) Throttle(c 
throttle.Components) throttle.Throttle { + return c.ArangoTask() +} - if err != nil { - return nil, err +func (p *arangoTasksInspector) validate() error { + if p == nil { + return errors.Newf("ArangoTaskInspector is nil") } - if ats.Continue != "" { - newATLoader, err := getArangoTasks(ctx, k, namespace, ats.Continue) - if err != nil { - return nil, err - } - - return append(ats.Items, newATLoader...), nil + if p.state == nil { + return errors.Newf("Parent is nil") } - return ats.Items, nil + return p.v1.validate() } diff --git a/pkg/deployment/resources/inspector/at_v1.go b/pkg/deployment/resources/inspector/at_v1.go new file mode 100644 index 000000000..6ef4a8f4d --- /dev/null +++ b/pkg/deployment/resources/inspector/at_v1.go @@ -0,0 +1,141 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package inspector + +import ( + "context" + + "github.com/arangodb/kube-arangodb/pkg/apis/deployment" + api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" + "github.com/arangodb/kube-arangodb/pkg/util/errors" + ins "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/arangotask/v1" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func (p *arangoTasksInspector) V1() (ins.Inspector, error) { + if p.v1.err != nil { + return nil, p.v1.err + } + + return p.v1, nil +} + +type arangoTasksInspectorV1 struct { + arangoTaskInspector *arangoTasksInspector + + arangoTasks map[string]*api.ArangoTask + err error +} + +func (p *arangoTasksInspectorV1) Filter(filters ...ins.Filter) []*api.ArangoTask { + z := p.ListSimple() + + r := make([]*api.ArangoTask, 0, len(z)) + + for _, o := range z { + if !ins.FilterObject(o, filters...) 
{ + continue + } + + r = append(r, o) + } + + return r +} + +func (p *arangoTasksInspectorV1) validate() error { + if p == nil { + return errors.Newf("ArangoTasksV1Inspector is nil") + } + + if p.arangoTaskInspector == nil { + return errors.Newf("Parent is nil") + } + + if p.arangoTasks == nil && p.err == nil { + return errors.Newf("ArangoTasks or err should be not nil") + } + + if p.arangoTasks != nil && p.err != nil { + return errors.Newf("ArangoTasks or err cannot be not nil together") + } + + return nil +} + +func (p *arangoTasksInspectorV1) ListSimple() []*api.ArangoTask { + var r []*api.ArangoTask + for _, arangoTask := range p.arangoTasks { + r = append(r, arangoTask) + } + + return r +} + +func (p *arangoTasksInspectorV1) GetSimple(name string) (*api.ArangoTask, bool) { + arangoTask, ok := p.arangoTasks[name] + if !ok { + return nil, false + } + + return arangoTask, true +} + +func (p *arangoTasksInspectorV1) Iterate(action ins.Action, filters ...ins.Filter) error { + for _, arangoTask := range p.arangoTasks { + if err := p.iterateArangoTask(arangoTask, action, filters...); err != nil { + return err + } + } + + return nil +} + +func (p *arangoTasksInspectorV1) iterateArangoTask(arangoTask *api.ArangoTask, action ins.Action, filters ...ins.Filter) error { + for _, f := range filters { + if f == nil { + continue + } + + if !f(arangoTask) { + return nil + } + } + + return action(arangoTask) +} + +func (p *arangoTasksInspectorV1) Read() ins.ReadInterface { + return p +} + +func (p *arangoTasksInspectorV1) Get(ctx context.Context, name string, opts metav1.GetOptions) (*api.ArangoTask, error) { + if s, ok := p.GetSimple(name); !ok { + return nil, apiErrors.NewNotFound(schema.GroupResource{ + Group: deployment.ArangoDeploymentGroupName, + Resource: deployment.ArangoTaskResourcePlural, + }, name) + } else { + return s, nil + } +} diff --git a/pkg/deployment/resources/inspector/inspector.go b/pkg/deployment/resources/inspector/inspector.go index ea699aa16..00a23a30a 
100644 --- a/pkg/deployment/resources/inspector/inspector.go +++ b/pkg/deployment/resources/inspector/inspector.go @@ -22,180 +22,349 @@ package inspector import ( "context" + "strings" "sync" + "time" "github.com/arangodb/go-driver" - api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" + "github.com/arangodb/kube-arangodb/pkg/logging" "github.com/arangodb/kube-arangodb/pkg/util" - "github.com/arangodb/kube-arangodb/pkg/util/errors" - inspectorInterface "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/arangoclustersynchronization" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/arangomember" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/arangotask" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/node" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/persistentvolumeclaim" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/poddisruptionbudget" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/service" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/serviceaccount" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/servicemonitor" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/throttle" "github.com/arangodb/kube-arangodb/pkg/util/kclient" - monitoring "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" - core "k8s.io/api/core/v1" - policy "k8s.io/api/policy/v1beta1" - meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/rs/zerolog" ) -// SecretReadInterface has methods to work with Secret resources with ReadOnly mode. 
-type SecretReadInterface interface { - Get(ctx context.Context, name string, opts meta.GetOptions) (*core.Secret, error) -} - -func NewInspector(ctx context.Context, client kclient.Client, namespace string) (inspectorInterface.Inspector, error) { - i := &inspector{ - namespace: namespace, - client: client, - } +var ( + inspectorLoadersList inspectorLoaders + inspectorLoadersLock sync.Mutex +) - if err := i.Refresh(ctx); err != nil { - return nil, err +func requireRegisterInspectorLoader(i inspectorLoader) { + if !registerInspectorLoader(i) { + panic("Unable to register inspector loader") } - - return i, nil } -func newInspector(ctx context.Context, client kclient.Client, namespace string) (*inspector, error) { - var i inspector +func registerInspectorLoader(i inspectorLoader) bool { + inspectorLoadersLock.Lock() + defer inspectorLoadersLock.Unlock() - i.namespace = namespace - i.client = client + n := i.Name() - if err := util.RunParallel(15, - getVersionInfo(ctx, &i, client.Kubernetes(), namespace), - podsToMap(ctx, &i, client.Kubernetes(), namespace), - secretsToMap(ctx, &i, client.Kubernetes(), namespace), - pvcsToMap(ctx, &i, client.Kubernetes(), namespace), - servicesToMap(ctx, &i, client.Kubernetes(), namespace), - serviceAccountsToMap(ctx, &i, client.Kubernetes(), namespace), - podDisruptionBudgetsToMap(ctx, &i, client.Kubernetes(), namespace), - serviceMonitorsToMap(ctx, &i, client.Monitoring(), namespace), - arangoMembersToMap(ctx, &i, client.Arango(), namespace), - nodesToMap(ctx, &i, client.Kubernetes()), - arangoClusterSynchronizationsToMap(ctx, &i, client.Arango(), namespace), - arangoTasksToMap(ctx, &i, client.Arango(), namespace), - ); err != nil { - return nil, err + if inspectorLoadersList.Get(n) != -1 { + return false } - return &i, nil -} + inspectorLoadersList = append(inspectorLoadersList, i) -func NewEmptyInspector() inspectorInterface.Inspector { - return NewInspectorFromData(nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, "") + 
return true } -func NewInspectorFromData(pods map[string]*core.Pod, - secrets map[string]*core.Secret, - pvcs map[string]*core.PersistentVolumeClaim, - services map[string]*core.Service, - serviceAccounts map[string]*core.ServiceAccount, - podDisruptionBudgets map[string]*policy.PodDisruptionBudget, - serviceMonitors map[string]*monitoring.ServiceMonitor, - arangoMembers map[string]*api.ArangoMember, - nodes map[string]*core.Node, - acs map[string]*api.ArangoClusterSynchronization, - at map[string]*api.ArangoTask, - version driver.Version) inspectorInterface.Inspector { - i := &inspector{ - pods: pods, - secrets: secrets, - pvcs: pvcs, - services: services, - serviceAccounts: serviceAccounts, - podDisruptionBudgets: podDisruptionBudgets, - serviceMonitors: serviceMonitors, - arangoMembers: arangoMembers, - versionInfo: version, - } +type inspectorLoaders []inspectorLoader - if nodes == nil { - i.nodes = &nodeLoader{ - accessible: false, - nodes: nil, - } - } else { - i.nodes = &nodeLoader{ - accessible: true, - nodes: nodes, +func (i inspectorLoaders) Get(name string) int { + for id, k := range i { + if k.Name() == name { + return id } } - if acs == nil { - i.acs = &arangoClusterSynchronizationLoader{ - accessible: false, - acs: nil, - } - } else { - i.acs = &arangoClusterSynchronizationLoader{ - accessible: true, - acs: acs, - } + return -1 +} + +type inspectorLoader interface { + Name() string + + Component() throttle.Component + + Load(context context.Context, i *inspectorState) + + Verify(i *inspectorState) error + + Copy(from, to *inspectorState, override bool) +} + +var _ inspector.Inspector = &inspectorState{} + +func NewInspector(throttles throttle.Components, client kclient.Client, namespace string) inspector.Inspector { + if throttles == nil { + throttles = throttle.NewAlwaysThrottleComponents() } - if at == nil { - i.at = &arangoTaskLoader{ - accessible: false, - at: nil, - } - } else { - i.at = &arangoTaskLoader{ - accessible: true, - at: at, - } + i := 
&inspectorState{ + namespace: namespace, + client: client, + throttles: throttles, + logger: logging.GlobalLogger().MustGetLogger(logging.LoggerNameInspector), } return i } -type inspector struct { +type inspectorState struct { lock sync.Mutex namespace string + client kclient.Client + + last time.Time + + logger zerolog.Logger - client kclient.Client + pods *podsInspector + secrets *secretsInspector + persistentVolumeClaims *persistentVolumeClaimsInspector + services *servicesInspector + serviceAccounts *serviceAccountsInspector + nodes *nodesInspector + podDisruptionBudgets *podDisruptionBudgetsInspector + serviceMonitors *serviceMonitorsInspector + arangoMembers *arangoMembersInspector + arangoTasks *arangoTasksInspector + arangoClusterSynchronizations *arangoClusterSynchronizationsInspector + + throttles throttle.Components + + versionInfo driver.Version + + static bool +} - pods map[string]*core.Pod - secrets map[string]*core.Secret - pvcs map[string]*core.PersistentVolumeClaim - services map[string]*core.Service - serviceAccounts map[string]*core.ServiceAccount - podDisruptionBudgets map[string]*policy.PodDisruptionBudget - serviceMonitors map[string]*monitoring.ServiceMonitor - arangoMembers map[string]*api.ArangoMember - nodes *nodeLoader - acs *arangoClusterSynchronizationLoader - at *arangoTaskLoader - versionInfo driver.Version +func (i *inspectorState) Client() kclient.Client { + return i.client } -func (i *inspector) IsStatic() bool { - return i.namespace == "" +func (i *inspectorState) Namespace() string { + return i.namespace } -func (i *inspector) Refresh(ctx context.Context) error { +func (i *inspectorState) LastRefresh() time.Time { + return i.last +} + +func (i *inspectorState) Secret() secret.Definition { + return i.secrets +} + +func (i *inspectorState) PersistentVolumeClaim() persistentvolumeclaim.Definition { + return i.persistentVolumeClaims +} + +func (i *inspectorState) Service() service.Definition { + return i.services +} + +func (i 
*inspectorState) PodDisruptionBudget() poddisruptionbudget.Definition { + return i.podDisruptionBudgets +} + +func (i *inspectorState) ServiceMonitor() servicemonitor.Definition { + return i.serviceMonitors +} + +func (i *inspectorState) ServiceAccount() serviceaccount.Definition { + return i.serviceAccounts +} + +func (i *inspectorState) ArangoMember() arangomember.Definition { + return i.arangoMembers +} + +func (i *inspectorState) GetVersionInfo() driver.Version { + return i.versionInfo +} + +func (i *inspectorState) Node() node.Definition { + return i.nodes +} + +func (i *inspectorState) ArangoClusterSynchronization() arangoclustersynchronization.Definition { + return i.arangoClusterSynchronizations +} + +func (i *inspectorState) ArangoTask() arangotask.Definition { + return i.arangoTasks +} + +func (i *inspectorState) Refresh(ctx context.Context) error { + return i.refresh(ctx, inspectorLoadersList...) +} + +func (i *inspectorState) GetThrottles() throttle.Components { + return i.throttles +} + +func (i *inspectorState) Pod() pod.Definition { + return i.pods +} + +func (i *inspectorState) IsStatic() bool { + return i.static +} + +func (i *inspectorState) refresh(ctx context.Context, loaders ...inspectorLoader) error { + if i.IsStatic() { + return nil + } + + return i.refreshInThreads(ctx, 15, loaders...) 
+} + +func (i *inspectorState) refreshInThreads(ctx context.Context, threads int, loaders ...inspectorLoader) error { i.lock.Lock() defer i.lock.Unlock() - if i.namespace == "" { - return errors.New("Inspector created from static data") + var m sync.WaitGroup + + p, close := util.ParallelThread(threads) + defer close() + + m.Add(len(loaders)) + + n := i.copyCore() + + if v, err := n.client.Kubernetes().Discovery().ServerVersion(); err != nil { + n.versionInfo = "" + } else { + n.versionInfo = driver.Version(strings.TrimPrefix(v.GitVersion, "v")) + } + + start := time.Now() + + i.logger.Debug().Msg("Inspector refresh start") + + for id := range loaders { + go func(id int) { + defer m.Done() + + c := loaders[id].Component() + + t := n.throttles.Get(c) + + if !t.Throttle() { + i.logger.Debug().Str("component", string(c)).Msg("Inspector refresh skipped") + return + } + + i.logger.Debug().Str("component", string(c)).Msg("Inspector refresh") + + defer func() { + i.logger.Debug().Str("component", string(c)).Str("duration", time.Since(start).String()).Msg("Inspector done") + t.Delay() + }() + + <-p + defer func() { + p <- struct{}{} + }() + + loaders[id].Load(ctx, n) + }(id) + } + + m.Wait() + + i.logger.Debug().Str("duration", time.Since(start).String()).Msg("Inspector refresh done") + + for id := range loaders { + if err := loaders[id].Verify(n); err != nil { + return err + } + } + + if err := n.validate(); err != nil { + return err + } + + for id := range loaders { + loaders[id].Copy(n, i, true) + } + + i.throttles = n.throttles + + i.last = time.Now() + + return nil +} + +func (i *inspectorState) validate() error { + if err := i.pods.validate(); err != nil { + return err } - new, err := newInspector(ctx, i.client, i.namespace) - if err != nil { + if err := i.secrets.validate(); err != nil { return err } - i.pods = new.pods - i.secrets = new.secrets - i.pvcs = new.pvcs - i.services = new.services - i.serviceAccounts = new.serviceAccounts - i.podDisruptionBudgets = 
new.podDisruptionBudgets - i.serviceMonitors = new.serviceMonitors - i.arangoMembers = new.arangoMembers - i.nodes = new.nodes - i.acs = new.acs - i.versionInfo = new.versionInfo + if err := i.serviceAccounts.validate(); err != nil { + return err + } + + if err := i.persistentVolumeClaims.validate(); err != nil { + return err + } + + if err := i.services.validate(); err != nil { + return err + } + + if err := i.nodes.validate(); err != nil { + return err + } + + if err := i.podDisruptionBudgets.validate(); err != nil { + return err + } + + if err := i.serviceMonitors.validate(); err != nil { + return err + } + + if err := i.arangoMembers.validate(); err != nil { + return err + } + + if err := i.arangoTasks.validate(); err != nil { + return err + } + + if err := i.arangoClusterSynchronizations.validate(); err != nil { + return err + } return nil } + +func (i *inspectorState) copyCore() *inspectorState { + return &inspectorState{ + namespace: i.namespace, + client: i.client, + pods: i.pods, + secrets: i.secrets, + persistentVolumeClaims: i.persistentVolumeClaims, + services: i.services, + serviceAccounts: i.serviceAccounts, + nodes: i.nodes, + podDisruptionBudgets: i.podDisruptionBudgets, + serviceMonitors: i.serviceMonitors, + arangoMembers: i.arangoMembers, + arangoTasks: i.arangoTasks, + arangoClusterSynchronizations: i.arangoClusterSynchronizations, + throttles: i.throttles.Copy(), + versionInfo: i.versionInfo, + static: i.static, + logger: i.logger, + } +} diff --git a/pkg/deployment/resources/inspector/inspector_test.go b/pkg/deployment/resources/inspector/inspector_test.go new file mode 100644 index 000000000..33facebee --- /dev/null +++ b/pkg/deployment/resources/inspector/inspector_test.go @@ -0,0 +1,370 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package inspector + +import ( + "context" + "testing" + "time" + + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/refresh" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/throttle" + "github.com/arangodb/kube-arangodb/pkg/util/kclient" + "github.com/stretchr/testify/require" +) + +type loaderTestDefinition struct { + tg func(t throttle.Components) throttle.Throttle + get func(i inspector.Inspector) refresh.Inspector +} + +var loaderTestDefinitions = map[string]loaderTestDefinition{ + "Secret": { + tg: func(t throttle.Components) throttle.Throttle { + return t.Secret() + }, + get: func(i inspector.Inspector) refresh.Inspector { + return i.Secret() + }, + }, + "Service": { + tg: func(t throttle.Components) throttle.Throttle { + return t.Service() + }, + get: func(i inspector.Inspector) refresh.Inspector { + return i.Service() + }, + }, + "ServiceAccount": { + tg: func(t throttle.Components) throttle.Throttle { + return t.ServiceAccount() + }, + get: func(i inspector.Inspector) refresh.Inspector { + return i.ServiceAccount() + }, + }, + "Node": { + tg: func(t throttle.Components) throttle.Throttle { + return t.Node() + }, + get: func(i inspector.Inspector) refresh.Inspector { + return i.Node() + }, + }, + "Pod": { + tg: func(t throttle.Components) throttle.Throttle { + return t.Pod() + }, + get: func(i inspector.Inspector) refresh.Inspector { + return i.Pod() + }, + }, + 
"PodDisruptionBudget": { + tg: func(t throttle.Components) throttle.Throttle { + return t.PodDisruptionBudget() + }, + get: func(i inspector.Inspector) refresh.Inspector { + return i.PodDisruptionBudget() + }, + }, + "ServiceMonitor": { + tg: func(t throttle.Components) throttle.Throttle { + return t.ServiceMonitor() + }, + get: func(i inspector.Inspector) refresh.Inspector { + return i.ServiceMonitor() + }, + }, + "ArangoMember": { + tg: func(t throttle.Components) throttle.Throttle { + return t.ArangoMember() + }, + get: func(i inspector.Inspector) refresh.Inspector { + return i.ArangoMember() + }, + }, + "ArangoTask": { + tg: func(t throttle.Components) throttle.Throttle { + return t.ArangoTask() + }, + get: func(i inspector.Inspector) refresh.Inspector { + return i.ArangoTask() + }, + }, + "ArangoClusterSynchronizations": { + tg: func(t throttle.Components) throttle.Throttle { + return t.ArangoClusterSynchronization() + }, + get: func(i inspector.Inspector) refresh.Inspector { + return i.ArangoClusterSynchronization() + }, + }, +} + +func getAllTypes() []string { + r := make([]string, 0, len(loaderTestDefinitions)) + + for k := range loaderTestDefinitions { + r = append(r, k) + } + + return r +} + +func Test_Inspector_RefreshMatrix(t *testing.T) { + c := kclient.NewFakeClient() + + tc := throttle.NewThrottleComponents(time.Hour, time.Hour, time.Hour, time.Hour, time.Hour, time.Hour, time.Hour, time.Hour, time.Hour, time.Hour, time.Hour) + + i := NewInspector(tc, c, "test") + + require.NoError(t, i.Refresh(context.Background())) + + combineAllTypes(func(changed, not []string) { + { + times := getTimes(i) + time.Sleep(time.Millisecond) + + for _, k := range changed { + require.NoError(t, loaderTestDefinitions[k].get(i).Refresh(context.Background())) + } + + ntimes := getTimes(i) + + for _, k := range changed { + require.NotEqual(t, times[k], ntimes[k]) + } + + for _, k := range not { + require.Equal(t, times[k], ntimes[k]) + } + + for _, k := range changed { + 
loaderTestDefinitions[k].tg(i.GetThrottles()).Invalidate() + } + + for _, k := range changed { + require.NoError(t, loaderTestDefinitions[k].get(i).Refresh(context.Background())) + } + + ntimes = getTimes(i) + + for _, k := range changed { + require.NotEqual(t, times[k], ntimes[k]) + } + + for _, k := range not { + require.Equal(t, times[k], ntimes[k]) + } + } + + { + times := getTimes(i) + time.Sleep(time.Millisecond) + + require.NoError(t, i.Refresh(context.Background())) + + ntimes := getTimes(i) + for k, v := range times { + require.Equal(t, v, ntimes[k]) + } + + for _, k := range changed { + loaderTestDefinitions[k].tg(i.GetThrottles()).Invalidate() + } + + require.NoError(t, i.Refresh(context.Background())) + + ntimes = getTimes(i) + + for _, k := range changed { + require.NotEqual(t, times[k], ntimes[k]) + } + + for _, k := range not { + require.Equal(t, times[k], ntimes[k]) + } + } + }) +} + +func combineAllTypes(f func(changed, not []string)) { + t := getAllTypes() + cmb := make([]bool, len(t)) + + cmbc := make([]bool, len(t)) + + getAllCombinations(cmb, func() { + copy(cmbc, cmb) + z := 0 + for i := 0; i < len(cmb); i++ { + if !cmbc[i] { + for j := len(cmb) - 1; j > i; j-- { + if cmbc[j] { + t[i], t[j] = t[j], t[i] + cmbc[i], cmbc[j] = cmbc[j], cmbc[i] + break + } + } + } + } + for i := 0; i < len(cmb); i++ { + if cmb[i] { + z++ + } + } + f(t[0:z], t[z:]) + copy(cmbc, cmb) + for i := 0; i < len(cmb); i++ { + if !cmbc[i] { + for j := len(cmb) - 1; j > i; j-- { + if cmbc[j] { + t[i], t[j] = t[j], t[i] + cmbc[i], cmbc[j] = cmbc[j], cmbc[i] + break + } + } + } + } + }) +} + +func getAllCombinations(cmb []bool, f func()) { + for { + f() + + if !bumpCombination(cmb, 0) { + return + } + } +} + +func bumpCombination(cmd []bool, index int) bool { + if index >= len(cmd) { + return false + } + + if cmd[index] { + cmd[index] = false + return bumpCombination(cmd, index+1) + } + + cmd[index] = true + return true +} + +func getTimes(i inspector.Inspector) 
map[string]time.Time { + r := map[string]time.Time{} + + for k, v := range loaderTestDefinitions { + r[k] = v.get(i).LastRefresh() + } + + return r +} + +func Test_Inspector_Load(t *testing.T) { + c := kclient.NewFakeClient() + + i := NewInspector(throttle.NewAlwaysThrottleComponents(), c, "test") + + require.NoError(t, i.Refresh(context.Background())) +} + +func Test_Inspector_Invalidate(t *testing.T) { + c := kclient.NewFakeClient() + + tc := throttle.NewThrottleComponents(time.Hour, time.Hour, time.Hour, time.Hour, time.Hour, time.Hour, time.Hour, time.Hour, time.Hour, time.Hour, time.Hour) + + i := NewInspector(tc, c, "test") + + require.NoError(t, i.Refresh(context.Background())) + + for n, q := range loaderTestDefinitions { + t.Run(n, func(t *testing.T) { + t.Run("Specific", func(t *testing.T) { + times := getTimes(i) + time.Sleep(20 * time.Millisecond) + + t.Run("Refresh", func(t *testing.T) { + require.NoError(t, q.get(i).Refresh(context.Background())) + }) + + t.Run("Ensure time changed", func(t *testing.T) { + ntimes := getTimes(i) + for k, v := range times { + if k == n { + require.NotEqual(t, v, ntimes[k]) + } else { + require.Equal(t, v, ntimes[k]) + } + } + }) + + t.Run("Ensure time changed", func(t *testing.T) { + ntimes := getTimes(i) + for k, v := range times { + if k == n { + require.NotEqual(t, v, ntimes[k]) + } else { + require.Equal(t, v, ntimes[k]) + } + } + }) + }) + t.Run("All", func(t *testing.T) { + times := getTimes(i) + time.Sleep(20 * time.Millisecond) + + t.Run("Refresh", func(t *testing.T) { + require.NoError(t, i.Refresh(context.Background())) + }) + + t.Run("Ensure time did not change", func(t *testing.T) { + ntimes := getTimes(i) + for k, v := range times { + require.Equal(t, v, ntimes[k]) + } + }) + + t.Run("Invalidate", func(t *testing.T) { + q.tg(i.GetThrottles()).Invalidate() + }) + + t.Run("Refresh after invalidate", func(t *testing.T) { + require.NoError(t, i.Refresh(context.Background())) + }) + + t.Run("Ensure time 
changed", func(t *testing.T) { + ntimes := getTimes(i) + for k, v := range times { + if k == n { + require.NotEqual(t, v, ntimes[k]) + } else { + require.Equal(t, v, ntimes[k]) + } + } + }) + }) + }) + } +} diff --git a/pkg/deployment/resources/inspector/members.go b/pkg/deployment/resources/inspector/members.go deleted file mode 100644 index 7eb9e5321..000000000 --- a/pkg/deployment/resources/inspector/members.go +++ /dev/null @@ -1,151 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// - -package inspector - -import ( - "context" - - "github.com/arangodb/kube-arangodb/pkg/util/globals" - - "github.com/arangodb/kube-arangodb/pkg/apis/deployment" - apiErrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime/schema" - - api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/arangodb/kube-arangodb/pkg/generated/clientset/versioned" - "github.com/arangodb/kube-arangodb/pkg/util/errors" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/arangomember" - meta "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func (i *inspector) IterateArangoMembers(action arangomember.Action, filters ...arangomember.Filter) error { - for _, arangoMember := range i.ArangoMembers() { - if err := i.iterateArangoMembers(arangoMember, action, filters...); err != nil { - return err - } - } - return nil -} - -func (i *inspector) iterateArangoMembers(arangoMember *api.ArangoMember, action arangomember.Action, filters ...arangomember.Filter) error { - for _, filter := range filters { - if !filter(arangoMember) { - return nil - } - } - - return action(arangoMember) -} - -func (i *inspector) ArangoMembers() []*api.ArangoMember { - i.lock.Lock() - defer i.lock.Unlock() - - var r []*api.ArangoMember - for _, arangoMember := range i.arangoMembers { - r = append(r, arangoMember) - } - - return r -} - -func (i *inspector) ArangoMember(name string) (*api.ArangoMember, bool) { - i.lock.Lock() - defer i.lock.Unlock() - - arangoMember, ok := i.arangoMembers[name] - if !ok { - return nil, false - } - - return arangoMember, true -} - -func (i *inspector) ArangoMemberReadInterface() arangomember.ReadInterface { - return &arangoMemberReadInterface{i: i} -} - -type arangoMemberReadInterface struct { - i *inspector -} - -func (s arangoMemberReadInterface) Get(ctx context.Context, name string, opts meta.GetOptions) (*api.ArangoMember, error) { - if s, ok := s.i.ArangoMember(name); 
!ok { - return nil, apiErrors.NewNotFound(schema.GroupResource{ - Group: deployment.ArangoDeploymentGroupName, - Resource: "arangomembers", - }, name) - } else { - return s, nil - } -} - -func arangoMembersToMap(ctx context.Context, inspector *inspector, k versioned.Interface, namespace string) func() error { - return func() error { - arangoMembers, err := getArangoMembers(ctx, k, namespace, "") - if err != nil { - return err - } - - arangoMemberMap := map[string]*api.ArangoMember{} - - for _, arangoMember := range arangoMembers { - _, exists := arangoMemberMap[arangoMember.GetName()] - if exists { - return errors.Newf("ArangoMember %s already exists in map, error received", arangoMember.GetName()) - } - - arangoMemberMap[arangoMember.GetName()] = arangoMemberPointer(arangoMember) - } - - inspector.arangoMembers = arangoMemberMap - - return nil - } -} - -func arangoMemberPointer(pod api.ArangoMember) *api.ArangoMember { - return &pod -} - -func getArangoMembers(ctx context.Context, k versioned.Interface, namespace, cont string) ([]api.ArangoMember, error) { - ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) - defer cancel() - arangoMembers, err := k.DatabaseV1().ArangoMembers(namespace).List(ctxChild, meta.ListOptions{ - Limit: globals.GetGlobals().Kubernetes().RequestBatchSize().Get(), - Continue: cont, - }) - - if err != nil { - return nil, err - } - - if arangoMembers.Continue != "" { - nextArangoMembersLayer, err := getArangoMembers(ctx, k, namespace, arangoMembers.Continue) - if err != nil { - return nil, err - } - - return append(arangoMembers.Items, nextArangoMembersLayer...), nil - } - - return arangoMembers.Items, nil -} diff --git a/pkg/deployment/resources/inspector/nodes.go b/pkg/deployment/resources/inspector/nodes.go index f9baa6e55..3595e2b0d 100644 --- a/pkg/deployment/resources/inspector/nodes.go +++ b/pkg/deployment/resources/inspector/nodes.go @@ -22,149 +22,168 @@ package inspector import ( "context" - - 
"github.com/arangodb/kube-arangodb/pkg/util/globals" + "time" "github.com/arangodb/kube-arangodb/pkg/util/errors" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/node" + "github.com/arangodb/kube-arangodb/pkg/util/globals" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/throttle" core "k8s.io/api/core/v1" - policy "k8s.io/api/policy/v1beta1" - apiErrors "k8s.io/apimachinery/pkg/api/errors" meta "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/kubernetes" ) -func (i *inspector) GetNodes() (node.Inspector, bool) { - i.lock.Lock() - defer i.lock.Unlock() +func init() { + requireRegisterInspectorLoader(nodesInspectorLoaderObj) +} - if i.nodes == nil { - return nil, false - } +var nodesInspectorLoaderObj = nodesInspectorLoader{} - return i.nodes, i.nodes.accessible +type nodesInspectorLoader struct { } -type nodeLoader struct { - accessible bool +func (p nodesInspectorLoader) Component() throttle.Component { + return throttle.Node +} - nodes map[string]*core.Node +func (p nodesInspectorLoader) Load(ctx context.Context, i *inspectorState) { + var q nodesInspector + p.loadV1(ctx, i, &q) + i.nodes = &q + q.state = i + q.last = time.Now() } -func (n *nodeLoader) Node(name string) (*core.Node, bool) { - node, ok := n.nodes[name] - if !ok { - return nil, false - } +func (p nodesInspectorLoader) loadV1(ctx context.Context, i *inspectorState, q *nodesInspector) { + var z nodesInspectorV1 - return node, true -} + z.nodeInspector = q -func (n *nodeLoader) Nodes() []*core.Node { - var r []*core.Node - for _, node := range n.nodes { - r = append(r, node) - } + z.nodes, z.err = p.getV1Nodes(ctx, i) - return r + q.v1 = &z } -func (n *nodeLoader) IterateNodes(action node.Action, filters ...node.Filter) error { - for _, node := range n.Nodes() { - if err := n.iterateNode(node, action, filters...); err != nil { - return err - } +func (p nodesInspectorLoader) getV1Nodes(ctx context.Context, i 
*inspectorState) (map[string]*core.Node, error) { + objs, err := p.getV1NodesList(ctx, i) + if err != nil { + return nil, err } - return nil -} -func (n *nodeLoader) iterateNode(node *core.Node, action node.Action, filters ...node.Filter) error { - for _, filter := range filters { - if !filter(node) { - return nil - } - } + r := make(map[string]*core.Node, len(objs)) - return action(node) -} + for id := range objs { + r[objs[id].GetName()] = objs[id] + } -func (n *nodeLoader) NodeReadInterface() node.ReadInterface { - return &nodeReadInterface{i: n} + return r, nil } -type nodeReadInterface struct { - i *nodeLoader -} +func (p nodesInspectorLoader) getV1NodesList(ctx context.Context, i *inspectorState) ([]*core.Node, error) { + ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) + defer cancel() + obj, err := i.client.Kubernetes().CoreV1().Nodes().List(ctxChild, meta.ListOptions{ + Limit: globals.GetGlobals().Kubernetes().RequestBatchSize().Get(), + }) -func (s nodeReadInterface) Get(ctx context.Context, name string, opts meta.GetOptions) (*core.Node, error) { - if s, ok := s.i.Node(name); !ok { - return nil, apiErrors.NewNotFound(schema.GroupResource{ - Group: policy.GroupName, - Resource: "nodes", - }, name) - } else { - return s, nil + if err != nil { + return nil, err } -} -func nodePointer(node core.Node) *core.Node { - return &node -} + items := obj.Items + cont := obj.Continue + var s = int64(len(items)) -func nodesToMap(ctx context.Context, inspector *inspector, k kubernetes.Interface) func() error { - return func() error { - nodes, err := getNodes(ctx, k, "") - if err != nil { - if apiErrors.IsUnauthorized(err) { - inspector.nodes = &nodeLoader{ - accessible: false, - } - return nil - } - return err - } - - nodesMap := map[string]*core.Node{} + if z := obj.RemainingItemCount; z != nil { + s += *z + } - for _, node := range nodes { - _, exists := nodesMap[node.GetName()] - if exists { - return errors.Newf("ArangoMember %s already 
exists in map, error received", node.GetName()) - } + ptrs := make([]*core.Node, 0, s) - nodesMap[node.GetName()] = nodePointer(node) + for { + for id := range items { + ptrs = append(ptrs, &items[id]) } - inspector.nodes = &nodeLoader{ - accessible: true, - nodes: nodesMap, + if cont == "" { + break } - return nil + items, cont, err = p.getV1NodesListRequest(ctx, i, cont) + + if err != nil { + return nil, err + } } + + return ptrs, nil } -func getNodes(ctx context.Context, k kubernetes.Interface, cont string) ([]core.Node, error) { +func (p nodesInspectorLoader) getV1NodesListRequest(ctx context.Context, i *inspectorState, cont string) ([]core.Node, string, error) { ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) defer cancel() - nodes, err := k.CoreV1().Nodes().List(ctxChild, meta.ListOptions{ + obj, err := i.client.Kubernetes().CoreV1().Nodes().List(ctxChild, meta.ListOptions{ Limit: globals.GetGlobals().Kubernetes().RequestBatchSize().Get(), Continue: cont, }) if err != nil { - return nil, err + return nil, "", err } - if nodes.Continue != "" { - nextNodeLayer, err := getNodes(ctx, k, nodes.Continue) - if err != nil { - return nil, err + return obj.Items, obj.Continue, err +} + +func (p nodesInspectorLoader) Verify(i *inspectorState) error { + return nil +} + +func (p nodesInspectorLoader) Copy(from, to *inspectorState, override bool) { + if to.nodes != nil { + if !override { + return } + } + + to.nodes = from.nodes + to.nodes.state = to +} + +func (p nodesInspectorLoader) Name() string { + return "nodes" +} + +type nodesInspector struct { + state *inspectorState + + last time.Time + + v1 *nodesInspectorV1 +} + +func (p *nodesInspector) LastRefresh() time.Time { + return p.last +} + +func (p *nodesInspector) IsStatic() bool { + return p.state.IsStatic() +} + +func (p *nodesInspector) Refresh(ctx context.Context) error { + p.Throttle(p.state.throttles).Invalidate() + return p.state.refresh(ctx, nodesInspectorLoaderObj) +} + +func (p 
nodesInspector) Throttle(c throttle.Components) throttle.Throttle { + return c.Node() +} + +func (p *nodesInspector) validate() error { + if p == nil { + return errors.Newf("NodeInspector is nil") + } - return append(nodes.Items, nextNodeLayer...), nil + if p.state == nil { + return errors.Newf("Parent is nil") } - return nodes.Items, nil + return p.v1.validate() } diff --git a/pkg/deployment/resources/inspector/nodes_v1.go b/pkg/deployment/resources/inspector/nodes_v1.go new file mode 100644 index 000000000..d17facd5a --- /dev/null +++ b/pkg/deployment/resources/inspector/nodes_v1.go @@ -0,0 +1,124 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package inspector + +import ( + "context" + + "github.com/arangodb/kube-arangodb/pkg/util/errors" + ins "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/node/v1" + core "k8s.io/api/core/v1" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func (p *nodesInspector) V1() (ins.Inspector, error) { + if p.v1.err != nil { + return nil, p.v1.err + } + + return p.v1, nil +} + +type nodesInspectorV1 struct { + nodeInspector *nodesInspector + + nodes map[string]*core.Node + err error +} + +func (p *nodesInspectorV1) validate() error { + if p == nil { + return errors.Newf("NodesV1Inspector is nil") + } + + if p.nodeInspector == nil { + return errors.Newf("Parent is nil") + } + + if p.nodes == nil && p.err == nil { + return errors.Newf("Nodes or err should be not nil") + } + + if p.nodes != nil && p.err != nil { + return errors.Newf("Nodes or err cannot be not nil together") + } + + return nil +} + +func (p *nodesInspectorV1) ListSimple() []*core.Node { + var r []*core.Node + for _, node := range p.nodes { + r = append(r, node) + } + + return r +} + +func (p *nodesInspectorV1) GetSimple(name string) (*core.Node, bool) { + node, ok := p.nodes[name] + if !ok { + return nil, false + } + + return node, true +} + +func (p *nodesInspectorV1) Iterate(action ins.Action, filters ...ins.Filter) error { + for _, node := range p.nodes { + if err := p.iterateNode(node, action, filters...); err != nil { + return err + } + } + + return nil +} + +func (p *nodesInspectorV1) iterateNode(node *core.Node, action ins.Action, filters ...ins.Filter) error { + for _, f := range filters { + if f == nil { + continue + } + + if !f(node) { + return nil + } + } + + return action(node) +} + +func (p *nodesInspectorV1) Read() ins.ReadInterface { + return p +} + +func (p *nodesInspectorV1) Get(ctx context.Context, name string, opts 
metav1.GetOptions) (*core.Node, error) { + if s, ok := p.GetSimple(name); !ok { + return nil, apiErrors.NewNotFound(schema.GroupResource{ + Group: core.GroupName, + Resource: "nodes", + }, name) + } else { + return s, nil + } +} diff --git a/pkg/deployment/resources/inspector/pdbs.go b/pkg/deployment/resources/inspector/pdbs.go index ceecb4559..7179dd935 100644 --- a/pkg/deployment/resources/inspector/pdbs.go +++ b/pkg/deployment/resources/inspector/pdbs.go @@ -22,146 +22,287 @@ package inspector import ( "context" + "time" + "github.com/arangodb/kube-arangodb/pkg/util/errors" "github.com/arangodb/kube-arangodb/pkg/util/globals" - + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/throttle" + policyv1 "k8s.io/api/policy/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" apiErrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime/schema" - - "github.com/arangodb/kube-arangodb/pkg/util/errors" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/poddisruptionbudget" - policy "k8s.io/api/policy/v1beta1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" + "k8s.io/apimachinery/pkg/runtime/schema" ) -func (i *inspector) IteratePodDisruptionBudgets(action poddisruptionbudget.Action, filters ...poddisruptionbudget.Filter) error { - for _, podDisruptionBudget := range i.PodDisruptionBudgets() { - if err := i.iteratePodDisruptionBudget(podDisruptionBudget, action, filters...); err != nil { - return err - } - } - return nil +func init() { + requireRegisterInspectorLoader(podDisruptionBudgetsInspectorLoaderObj) } -func (i *inspector) iteratePodDisruptionBudget(podDisruptionBudget *policy.PodDisruptionBudget, action poddisruptionbudget.Action, filters ...poddisruptionbudget.Filter) error { - for _, filter := range filters { - if !filter(podDisruptionBudget) { - return nil - } - } +var podDisruptionBudgetsInspectorLoaderObj = podDisruptionBudgetsInspectorLoader{} - return action(podDisruptionBudget) +type 
podDisruptionBudgetsInspectorLoader struct { } -func (i *inspector) PodDisruptionBudgets() []*policy.PodDisruptionBudget { - i.lock.Lock() - defer i.lock.Unlock() +func (p podDisruptionBudgetsInspectorLoader) Component() throttle.Component { + return throttle.PodDisruptionBudget +} - var r []*policy.PodDisruptionBudget - for _, podDisruptionBudget := range i.podDisruptionBudgets { - r = append(r, podDisruptionBudget) +func (p podDisruptionBudgetsInspectorLoader) Load(ctx context.Context, i *inspectorState) { + var q podDisruptionBudgetsInspector + + if i.versionInfo.Major() == 128 && i.versionInfo.Minor() >= 21 { // Above 1.21, disable temporally + p.loadV1(ctx, i, &q) + + q.v1beta1 = &podDisruptionBudgetsInspectorV1Beta1{ + podDisruptionBudgetInspector: &q, + err: apiErrors.NewNotFound(schema.GroupResource{ + Group: policyv1beta1.GroupName, + Resource: "podDisruptionBudgets", + }, ""), + } + } else { + p.loadV1Beta1(ctx, i, &q) + + q.v1 = &podDisruptionBudgetsInspectorV1{ + podDisruptionBudgetInspector: &q, + err: apiErrors.NewNotFound(schema.GroupResource{ + Group: policyv1.GroupName, + Resource: "podDisruptionBudgets", + }, ""), + } } + i.podDisruptionBudgets = &q + q.state = i + q.last = time.Now() +} + +func (p podDisruptionBudgetsInspectorLoader) loadV1Beta1(ctx context.Context, i *inspectorState, q *podDisruptionBudgetsInspector) { + var z podDisruptionBudgetsInspectorV1Beta1 - return r + z.podDisruptionBudgetInspector = q + + z.podDisruptionBudgets, z.err = p.getV1Beta1PodDisruptionBudgets(ctx, i) + + q.v1beta1 = &z } -func (i *inspector) PodDisruptionBudget(name string) (*policy.PodDisruptionBudget, bool) { - i.lock.Lock() - defer i.lock.Unlock() +func (p podDisruptionBudgetsInspectorLoader) getV1Beta1PodDisruptionBudgets(ctx context.Context, i *inspectorState) (map[string]*policyv1beta1.PodDisruptionBudget, error) { + objs, err := p.getV1Beta1PodDisruptionBudgetsList(ctx, i) + if err != nil { + return nil, err + } + + r := 
make(map[string]*policyv1beta1.PodDisruptionBudget, len(objs)) - podDisruptionBudget, ok := i.podDisruptionBudgets[name] - if !ok { - return nil, false + for id := range objs { + r[objs[id].GetName()] = objs[id] } - return podDisruptionBudget, true + return r, nil } -func (i *inspector) PodDisruptionBudgetReadInterface() poddisruptionbudget.ReadInterface { - return &podDisruptionBudgetReadInterface{i: i} -} +func (p podDisruptionBudgetsInspectorLoader) getV1Beta1PodDisruptionBudgetsList(ctx context.Context, i *inspectorState) ([]*policyv1beta1.PodDisruptionBudget, error) { + ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) + defer cancel() + obj, err := i.client.Kubernetes().PolicyV1beta1().PodDisruptionBudgets(i.namespace).List(ctxChild, meta.ListOptions{ + Limit: globals.GetGlobals().Kubernetes().RequestBatchSize().Get(), + }) -type podDisruptionBudgetReadInterface struct { - i *inspector -} + if err != nil { + return nil, err + } -func (s podDisruptionBudgetReadInterface) Get(ctx context.Context, name string, opts meta.GetOptions) (*policy.PodDisruptionBudget, error) { - if s, ok := s.i.PodDisruptionBudget(name); !ok { - return nil, apiErrors.NewNotFound(schema.GroupResource{ - Group: policy.GroupName, - Resource: "poddisruptionbudgets", - }, name) - } else { - return s, nil + items := obj.Items + cont := obj.Continue + var s = int64(len(items)) + + if z := obj.RemainingItemCount; z != nil { + s += *z } -} -func podDisruptionBudgetsToMap(ctx context.Context, inspector *inspector, k kubernetes.Interface, namespace string) func() error { - return func() error { - podDisruptionBudgets, err := getPodDisruptionBudgets(ctx, k, namespace, "") - if err != nil { - return err + ptrs := make([]*policyv1beta1.PodDisruptionBudget, 0, s) + + for { + for id := range items { + ptrs = append(ptrs, &items[id]) } - podDisruptionBudgetMap := map[string]*policy.PodDisruptionBudget{} + if cont == "" { + break + } - for _, podDisruptionBudget := range 
podDisruptionBudgets { - _, exists := podDisruptionBudgetMap[podDisruptionBudget.GetName()] - if exists { - return errors.Newf("PodDisruptionBudget %s already exists in map, error received", podDisruptionBudget.GetName()) - } + items, cont, err = p.getV1Beta1PodDisruptionBudgetsListRequest(ctx, i, cont) - podDisruptionBudgetMap[podDisruptionBudget.GetName()] = podDisruptionBudgetPointer(podDisruptionBudget) + if err != nil { + return nil, err } + } + + return ptrs, nil +} - inspector.podDisruptionBudgets = podDisruptionBudgetMap +func (p podDisruptionBudgetsInspectorLoader) getV1Beta1PodDisruptionBudgetsListRequest(ctx context.Context, i *inspectorState, cont string) ([]policyv1beta1.PodDisruptionBudget, string, error) { + ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) + defer cancel() + obj, err := i.client.Kubernetes().PolicyV1beta1().PodDisruptionBudgets(i.namespace).List(ctxChild, meta.ListOptions{ + Limit: globals.GetGlobals().Kubernetes().RequestBatchSize().Get(), + Continue: cont, + }) - return nil + if err != nil { + return nil, "", err } + + return obj.Items, obj.Continue, err } -func podDisruptionBudgetPointer(podDisruptionBudget policy.PodDisruptionBudget) *policy.PodDisruptionBudget { - return &podDisruptionBudget +func (p podDisruptionBudgetsInspectorLoader) loadV1(ctx context.Context, i *inspectorState, q *podDisruptionBudgetsInspector) { + var z podDisruptionBudgetsInspectorV1 + + z.podDisruptionBudgetInspector = q + + z.podDisruptionBudgets, z.err = p.getV1PodDisruptionBudgets(ctx, i) + + q.v1 = &z } -func getPodDisruptionBudgets(ctx context.Context, k kubernetes.Interface, namespace, cont string) ([]policy.PodDisruptionBudget, error) { +func (p podDisruptionBudgetsInspectorLoader) getV1PodDisruptionBudgets(ctx context.Context, i *inspectorState) (map[string]*policyv1.PodDisruptionBudget, error) { + objs, err := p.getV1PodDisruptionBudgetsList(ctx, i) + if err != nil { + return nil, err + } + + r := 
make(map[string]*policyv1.PodDisruptionBudget, len(objs)) + + for id := range objs { + r[objs[id].GetName()] = objs[id] + } + + return r, nil +} + +func (p podDisruptionBudgetsInspectorLoader) getV1PodDisruptionBudgetsList(ctx context.Context, i *inspectorState) ([]*policyv1.PodDisruptionBudget, error) { ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) defer cancel() - podDisruptionBudgets, err := k.PolicyV1beta1().PodDisruptionBudgets(namespace).List(ctxChild, meta.ListOptions{ - Limit: globals.GetGlobals().Kubernetes().RequestBatchSize().Get(), - Continue: cont, + obj, err := i.client.Kubernetes().PolicyV1().PodDisruptionBudgets(i.namespace).List(ctxChild, meta.ListOptions{ + Limit: globals.GetGlobals().Kubernetes().RequestBatchSize().Get(), }) if err != nil { return nil, err } - if podDisruptionBudgets.Continue != "" { - nextPodDisruptionBudgetsLayer, err := getPodDisruptionBudgets(ctx, k, namespace, podDisruptionBudgets.Continue) + items := obj.Items + cont := obj.Continue + var s = int64(len(items)) + + if z := obj.RemainingItemCount; z != nil { + s += *z + } + + ptrs := make([]*policyv1.PodDisruptionBudget, 0, s) + + for { + for id := range items { + ptrs = append(ptrs, &items[id]) + } + + if cont == "" { + break + } + + items, cont, err = p.getV1PodDisruptionBudgetsListRequest(ctx, i, cont) + if err != nil { return nil, err } + } + + return ptrs, nil +} + +func (p podDisruptionBudgetsInspectorLoader) getV1PodDisruptionBudgetsListRequest(ctx context.Context, i *inspectorState, cont string) ([]policyv1.PodDisruptionBudget, string, error) { + ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) + defer cancel() + obj, err := i.client.Kubernetes().PolicyV1().PodDisruptionBudgets(i.namespace).List(ctxChild, meta.ListOptions{ + Limit: globals.GetGlobals().Kubernetes().RequestBatchSize().Get(), + Continue: cont, + }) - return append(podDisruptionBudgets.Items, nextPodDisruptionBudgetsLayer...), nil + if err != 
nil { + return nil, "", err } - return podDisruptionBudgets.Items, nil + return obj.Items, obj.Continue, err } -func FilterPodDisruptionBudgetsByLabels(labels map[string]string) poddisruptionbudget.Filter { - return func(podDisruptionBudget *policy.PodDisruptionBudget) bool { - for key, value := range labels { - v, ok := podDisruptionBudget.Labels[key] - if !ok { - return false - } +func (p podDisruptionBudgetsInspectorLoader) Verify(i *inspectorState) error { + if errv1, errv1beta1 := i.podDisruptionBudgets.v1.err, i.podDisruptionBudgets.v1beta1.err; errv1 != nil && errv1beta1 != nil { + return errors.Wrap(errv1, "Both requests failed") + } else if errv1 == nil && errv1beta1 == nil { + return errors.Newf("V1 and V1beta1 are not nil - only one should be picked") + } + + return nil +} - if v != value { - return false - } +func (p podDisruptionBudgetsInspectorLoader) Copy(from, to *inspectorState, override bool) { + if to.podDisruptionBudgets != nil { + if !override { + return } + } + + to.podDisruptionBudgets = from.podDisruptionBudgets + to.podDisruptionBudgets.state = to +} + +func (p podDisruptionBudgetsInspectorLoader) Name() string { + return "podDisruptionBudgets" +} + +type podDisruptionBudgetsInspector struct { + state *inspectorState + + last time.Time + + v1 *podDisruptionBudgetsInspectorV1 + v1beta1 *podDisruptionBudgetsInspectorV1Beta1 +} + +func (p *podDisruptionBudgetsInspector) LastRefresh() time.Time { + return p.last +} - return true +func (p *podDisruptionBudgetsInspector) IsStatic() bool { + return p.state.IsStatic() +} + +func (p *podDisruptionBudgetsInspector) Refresh(ctx context.Context) error { + p.Throttle(p.state.throttles).Invalidate() + return p.state.refresh(ctx, podDisruptionBudgetsInspectorLoaderObj) +} + +func (p podDisruptionBudgetsInspector) Throttle(c throttle.Components) throttle.Throttle { + return c.PodDisruptionBudget() +} + +func (p *podDisruptionBudgetsInspector) validate() error { + if p == nil { + return 
errors.Newf("PodDisruptionBudgetInspector is nil") + } + + if p.state == nil { + return errors.Newf("Parent is nil") + } + + if err := p.v1.validate(); err != nil { + return err } + + if err := p.v1beta1.validate(); err != nil { + return err + } + + return nil } diff --git a/pkg/deployment/resources/inspector/pdbs_v1.go b/pkg/deployment/resources/inspector/pdbs_v1.go new file mode 100644 index 000000000..60a2653d6 --- /dev/null +++ b/pkg/deployment/resources/inspector/pdbs_v1.go @@ -0,0 +1,124 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package inspector + +import ( + "context" + + "github.com/arangodb/kube-arangodb/pkg/util/errors" + ins "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/poddisruptionbudget/v1" + policy "k8s.io/api/policy/v1" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func (p *podDisruptionBudgetsInspector) V1() (ins.Inspector, error) { + if p.v1.err != nil { + return nil, p.v1.err + } + + return p.v1, nil +} + +type podDisruptionBudgetsInspectorV1 struct { + podDisruptionBudgetInspector *podDisruptionBudgetsInspector + + podDisruptionBudgets map[string]*policy.PodDisruptionBudget + err error +} + +func (p *podDisruptionBudgetsInspectorV1) validate() error { + if p == nil { + return errors.Newf("PodDisruptionBudgetsV1Inspector is nil") + } + + if p.podDisruptionBudgetInspector == nil { + return errors.Newf("Parent is nil") + } + + if p.podDisruptionBudgets == nil && p.err == nil { + return errors.Newf("PodDisruptionBudgets or err should be not nil") + } + + if p.podDisruptionBudgets != nil && p.err != nil { + return errors.Newf("PodDisruptionBudgets or err cannot be not nil together") + } + + return nil +} + +func (p *podDisruptionBudgetsInspectorV1) PodDisruptionBudgets() []*policy.PodDisruptionBudget { + var r []*policy.PodDisruptionBudget + for _, podDisruptionBudget := range p.podDisruptionBudgets { + r = append(r, podDisruptionBudget) + } + + return r +} + +func (p *podDisruptionBudgetsInspectorV1) GetSimple(name string) (*policy.PodDisruptionBudget, bool) { + podDisruptionBudget, ok := p.podDisruptionBudgets[name] + if !ok { + return nil, false + } + + return podDisruptionBudget, true +} + +func (p *podDisruptionBudgetsInspectorV1) Iterate(action ins.Action, filters ...ins.Filter) error { + for _, podDisruptionBudget := range p.podDisruptionBudgets { + if err := 
p.iteratePodDisruptionBudget(podDisruptionBudget, action, filters...); err != nil { + return err + } + } + + return nil +} + +func (p *podDisruptionBudgetsInspectorV1) iteratePodDisruptionBudget(podDisruptionBudget *policy.PodDisruptionBudget, action ins.Action, filters ...ins.Filter) error { + for _, f := range filters { + if f == nil { + continue + } + + if !f(podDisruptionBudget) { + return nil + } + } + + return action(podDisruptionBudget) +} + +func (p *podDisruptionBudgetsInspectorV1) Read() ins.ReadInterface { + return p +} + +func (p *podDisruptionBudgetsInspectorV1) Get(ctx context.Context, name string, opts metav1.GetOptions) (*policy.PodDisruptionBudget, error) { + if s, ok := p.GetSimple(name); !ok { + return nil, apiErrors.NewNotFound(schema.GroupResource{ + Group: policy.GroupName, + Resource: "podDisruptionBudgets", + }, name) + } else { + return s, nil + } +} diff --git a/pkg/deployment/resources/inspector/pdbs_v1beta1.go b/pkg/deployment/resources/inspector/pdbs_v1beta1.go new file mode 100644 index 000000000..bdca5ea10 --- /dev/null +++ b/pkg/deployment/resources/inspector/pdbs_v1beta1.go @@ -0,0 +1,124 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package inspector + +import ( + "context" + + "github.com/arangodb/kube-arangodb/pkg/util/errors" + ins "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/poddisruptionbudget/v1beta1" + policy "k8s.io/api/policy/v1beta1" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func (p *podDisruptionBudgetsInspector) V1Beta1() (ins.Inspector, error) { + if p.v1beta1.err != nil { + return nil, p.v1beta1.err + } + + return p.v1beta1, nil +} + +type podDisruptionBudgetsInspectorV1Beta1 struct { + podDisruptionBudgetInspector *podDisruptionBudgetsInspector + + podDisruptionBudgets map[string]*policy.PodDisruptionBudget + err error +} + +func (p *podDisruptionBudgetsInspectorV1Beta1) validate() error { + if p == nil { + return errors.Newf("PodDisruptionBudgetsV1Beta1Inspector is nil") + } + + if p.podDisruptionBudgetInspector == nil { + return errors.Newf("Parent is nil") + } + + if p.podDisruptionBudgets == nil && p.err == nil { + return errors.Newf("PodDisruptionBudgets or err should be not nil") + } + + if p.podDisruptionBudgets != nil && p.err != nil { + return errors.Newf("PodDisruptionBudgets or err cannot be not nil together") + } + + return nil +} + +func (p *podDisruptionBudgetsInspectorV1Beta1) PodDisruptionBudgets() []*policy.PodDisruptionBudget { + var r []*policy.PodDisruptionBudget + for _, podDisruptionBudget := range p.podDisruptionBudgets { + r = append(r, podDisruptionBudget) + } + + return r +} + +func (p *podDisruptionBudgetsInspectorV1Beta1) GetSimple(name string) (*policy.PodDisruptionBudget, bool) { + podDisruptionBudget, ok := p.podDisruptionBudgets[name] + if !ok { + return nil, false + } + + return podDisruptionBudget, true +} + +func (p *podDisruptionBudgetsInspectorV1Beta1) Iterate(action ins.Action, filters ...ins.Filter) error { + for _, podDisruptionBudget := range 
p.podDisruptionBudgets { + if err := p.iteratePodDisruptionBudget(podDisruptionBudget, action, filters...); err != nil { + return err + } + } + + return nil +} + +func (p *podDisruptionBudgetsInspectorV1Beta1) iteratePodDisruptionBudget(podDisruptionBudget *policy.PodDisruptionBudget, action ins.Action, filters ...ins.Filter) error { + for _, f := range filters { + if f == nil { + continue + } + + if !f(podDisruptionBudget) { + return nil + } + } + + return action(podDisruptionBudget) +} + +func (p *podDisruptionBudgetsInspectorV1Beta1) Read() ins.ReadInterface { + return p +} + +func (p *podDisruptionBudgetsInspectorV1Beta1) Get(ctx context.Context, name string, opts metav1.GetOptions) (*policy.PodDisruptionBudget, error) { + if s, ok := p.GetSimple(name); !ok { + return nil, apiErrors.NewNotFound(schema.GroupResource{ + Group: policy.GroupName, + Resource: "podDisruptionBudgets", + }, name) + } else { + return s, nil + } +} diff --git a/pkg/deployment/resources/inspector/pods.go b/pkg/deployment/resources/inspector/pods.go index 28004c5cc..82e6a9e2b 100644 --- a/pkg/deployment/resources/inspector/pods.go +++ b/pkg/deployment/resources/inspector/pods.go @@ -22,146 +22,172 @@ package inspector import ( "context" - - core "k8s.io/api/core/v1" - apiErrors "k8s.io/apimachinery/pkg/api/errors" - meta "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/kubernetes" + "time" "github.com/arangodb/kube-arangodb/pkg/util/errors" "github.com/arangodb/kube-arangodb/pkg/util/globals" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/throttle" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func (i *inspector) IteratePods(action pod.Action, filters ...pod.Filter) error { - for _, pod := range i.Pods() { - if err := i.iteratePod(pod, action, filters...); err != nil { - return err - } - } - return nil +func init() { + 
requireRegisterInspectorLoader(podsInspectorLoaderObj) } -func (i *inspector) iteratePod(pod *core.Pod, action pod.Action, filters ...pod.Filter) error { - for _, filter := range filters { - if !filter(pod) { - return nil - } - } +var podsInspectorLoaderObj = podsInspectorLoader{} - return action(pod) +type podsInspectorLoader struct { } -func (i *inspector) Pods() []*core.Pod { - i.lock.Lock() - defer i.lock.Unlock() - - var r []*core.Pod - for _, pod := range i.pods { - r = append(r, pod) - } +func (p podsInspectorLoader) Component() throttle.Component { + return throttle.Pod +} - return r +func (p podsInspectorLoader) Load(ctx context.Context, i *inspectorState) { + var q podsInspector + p.loadV1(ctx, i, &q) + i.pods = &q + q.state = i + q.last = time.Now() } -func (i *inspector) Pod(name string) (*core.Pod, bool) { - i.lock.Lock() - defer i.lock.Unlock() +func (p podsInspectorLoader) loadV1(ctx context.Context, i *inspectorState, q *podsInspector) { + var z podsInspectorV1 - pod, ok := i.pods[name] - if !ok { - return nil, false - } + z.podInspector = q - return pod, true -} + z.pods, z.err = p.getV1Pods(ctx, i) -func (i *inspector) PodReadInterface() pod.ReadInterface { - return &podReadInterface{i: i} + q.v1 = &z } -type podReadInterface struct { - i *inspector -} +func (p podsInspectorLoader) getV1Pods(ctx context.Context, i *inspectorState) (map[string]*core.Pod, error) { + objs, err := p.getV1PodsList(ctx, i) + if err != nil { + return nil, err + } -func (s podReadInterface) Get(ctx context.Context, name string, opts meta.GetOptions) (*core.Pod, error) { - if s, ok := s.i.Pod(name); !ok { - return nil, apiErrors.NewNotFound(schema.GroupResource{ - Group: core.GroupName, - Resource: "pods", - }, name) - } else { - return s, nil + r := make(map[string]*core.Pod, len(objs)) + + for id := range objs { + r[objs[id].GetName()] = objs[id] } + + return r, nil } -func podsToMap(ctx context.Context, inspector *inspector, k kubernetes.Interface, namespace string) 
func() error { - return func() error { - pods, err := getPods(ctx, k, namespace, "") - if err != nil { - return err - } +func (p podsInspectorLoader) getV1PodsList(ctx context.Context, i *inspectorState) ([]*core.Pod, error) { + ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) + defer cancel() + obj, err := i.client.Kubernetes().CoreV1().Pods(i.namespace).List(ctxChild, meta.ListOptions{ + Limit: globals.GetGlobals().Kubernetes().RequestBatchSize().Get(), + }) + + if err != nil { + return nil, err + } + + items := obj.Items + cont := obj.Continue + var s = int64(len(items)) - podMap := map[string]*core.Pod{} + if z := obj.RemainingItemCount; z != nil { + s += *z + } - for _, pod := range pods { - _, exists := podMap[pod.GetName()] - if exists { - return errors.Newf("Pod %s already exists in map, error received", pod.GetName()) - } + ptrs := make([]*core.Pod, 0, s) - podMap[pod.GetName()] = podPointer(pod) + for { + for id := range items { + ptrs = append(ptrs, &items[id]) } - inspector.pods = podMap + if cont == "" { + break + } - return nil + items, cont, err = p.getV1PodsListRequest(ctx, i, cont) + + if err != nil { + return nil, err + } } -} -func podPointer(pod core.Pod) *core.Pod { - return &pod + return ptrs, nil } -func getPods(ctx context.Context, k kubernetes.Interface, namespace, cont string) ([]core.Pod, error) { +func (p podsInspectorLoader) getV1PodsListRequest(ctx context.Context, i *inspectorState, cont string) ([]core.Pod, string, error) { ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) defer cancel() - pods, err := k.CoreV1().Pods(namespace).List(ctxChild, meta.ListOptions{ + obj, err := i.client.Kubernetes().CoreV1().Pods(i.namespace).List(ctxChild, meta.ListOptions{ Limit: globals.GetGlobals().Kubernetes().RequestBatchSize().Get(), Continue: cont, }) if err != nil { - return nil, err + return nil, "", err } - if pods.Continue != "" { - // pass the original context - nextPodsLayer, err := 
getPods(ctx, k, namespace, pods.Continue) - if err != nil { - return nil, err - } + return obj.Items, obj.Continue, err +} - return append(pods.Items, nextPodsLayer...), nil +func (p podsInspectorLoader) Verify(i *inspectorState) error { + if err := i.pods.v1.err; err != nil { + return err } - return pods.Items, nil + return nil } -func FilterPodsByLabels(labels map[string]string) pod.Filter { - return func(pod *core.Pod) bool { - for key, value := range labels { - v, ok := pod.Labels[key] - if !ok { - return false - } - - if v != value { - return false - } +func (p podsInspectorLoader) Copy(from, to *inspectorState, override bool) { + if to.pods != nil { + if !override { + return } + } + + to.pods = from.pods + to.pods.state = to +} + +func (p podsInspectorLoader) Name() string { + return "pods" +} + +type podsInspector struct { + state *inspectorState + + last time.Time + + v1 *podsInspectorV1 +} + +func (p *podsInspector) LastRefresh() time.Time { + return p.last +} + +func (p *podsInspector) IsStatic() bool { + return p.state.IsStatic() +} + +func (p *podsInspector) Refresh(ctx context.Context) error { + p.Throttle(p.state.throttles).Invalidate() + return p.state.refresh(ctx, podsInspectorLoaderObj) +} + +func (p podsInspector) Throttle(c throttle.Components) throttle.Throttle { + return c.Pod() +} + +func (p *podsInspector) validate() error { + if p == nil { + return errors.Newf("PodInspector is nil") + } - return true + if p.state == nil { + return errors.Newf("Parent is nil") } + + return p.v1.validate() } diff --git a/pkg/deployment/resources/inspector/pods_v1.go b/pkg/deployment/resources/inspector/pods_v1.go new file mode 100644 index 000000000..2a5cedc29 --- /dev/null +++ b/pkg/deployment/resources/inspector/pods_v1.go @@ -0,0 +1,121 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package inspector + +import ( + "context" + + "github.com/arangodb/kube-arangodb/pkg/util/errors" + ins "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod/v1" + core "k8s.io/api/core/v1" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func (p *podsInspector) V1() ins.Inspector { + + return p.v1 +} + +type podsInspectorV1 struct { + podInspector *podsInspector + + pods map[string]*core.Pod + err error +} + +func (p *podsInspectorV1) validate() error { + if p == nil { + return errors.Newf("PodsV1Inspector is nil") + } + + if p.podInspector == nil { + return errors.Newf("Parent is nil") + } + + if p.pods == nil { + return errors.Newf("Pods or err should be not nil") + } + + if p.err != nil { + return errors.Newf("Pods or err cannot be not nil together") + } + + return nil +} + +func (p *podsInspectorV1) ListSimple() []*core.Pod { + var r []*core.Pod + for _, pod := range p.pods { + r = append(r, pod) + } + + return r +} + +func (p *podsInspectorV1) GetSimple(name string) (*core.Pod, bool) { + pod, ok := p.pods[name] + if !ok { + return nil, false + } + + return pod, true +} + +func (p *podsInspectorV1) Iterate(action ins.Action, filters ...ins.Filter) error { + for _, pod := range p.pods { + if err := p.iteratePod(pod, action, filters...); err != nil { + return err + } + } + + return nil +} + +func (p *podsInspectorV1) iteratePod(pod *core.Pod, action ins.Action, filters 
...ins.Filter) error { + for _, f := range filters { + if f == nil { + continue + } + + if !f(pod) { + return nil + } + } + + return action(pod) +} + +func (p *podsInspectorV1) Read() ins.ReadInterface { + return p +} + +func (p *podsInspectorV1) Get(ctx context.Context, name string, opts metav1.GetOptions) (*core.Pod, error) { + if s, ok := p.GetSimple(name); !ok { + return nil, apiErrors.NewNotFound(schema.GroupResource{ + Group: core.GroupName, + Resource: "pods", + }, name) + } else { + return s, nil + } +} diff --git a/pkg/deployment/resources/inspector/pvcs.go b/pkg/deployment/resources/inspector/pvcs.go index 1cfdc0ed5..3dc3b56ed 100644 --- a/pkg/deployment/resources/inspector/pvcs.go +++ b/pkg/deployment/resources/inspector/pvcs.go @@ -22,146 +22,172 @@ package inspector import ( "context" - - "github.com/arangodb/kube-arangodb/pkg/util/globals" - - apiErrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime/schema" + "time" "github.com/arangodb/kube-arangodb/pkg/util/errors" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/persistentvolumeclaim" + "github.com/arangodb/kube-arangodb/pkg/util/globals" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/throttle" core "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" ) -func (i *inspector) IteratePersistentVolumeClaims(action persistentvolumeclaim.Action, filters ...persistentvolumeclaim.Filter) error { - for _, pvc := range i.PersistentVolumeClaims() { - if err := i.iteratePersistentVolumeClaim(pvc, action, filters...); err != nil { - return err - } - } - return nil +func init() { + requireRegisterInspectorLoader(persistentVolumeClaimsInspectorLoaderObj) } -func (i *inspector) iteratePersistentVolumeClaim(pvc *core.PersistentVolumeClaim, action persistentvolumeclaim.Action, filters ...persistentvolumeclaim.Filter) error { - for _, filter := range filters { - if !filter(pvc) { - return nil - } - } +var 
persistentVolumeClaimsInspectorLoaderObj = persistentVolumeClaimsInspectorLoader{} - return action(pvc) +type persistentVolumeClaimsInspectorLoader struct { } -func (i *inspector) PersistentVolumeClaims() []*core.PersistentVolumeClaim { - i.lock.Lock() - defer i.lock.Unlock() - - var r []*core.PersistentVolumeClaim - for _, persistentVolumeClaim := range i.pvcs { - r = append(r, persistentVolumeClaim) - } +func (p persistentVolumeClaimsInspectorLoader) Component() throttle.Component { + return throttle.PersistentVolumeClaim +} - return r +func (p persistentVolumeClaimsInspectorLoader) Load(ctx context.Context, i *inspectorState) { + var q persistentVolumeClaimsInspector + p.loadV1(ctx, i, &q) + i.persistentVolumeClaims = &q + q.state = i + q.last = time.Now() } -func (i *inspector) PersistentVolumeClaim(name string) (*core.PersistentVolumeClaim, bool) { - i.lock.Lock() - defer i.lock.Unlock() +func (p persistentVolumeClaimsInspectorLoader) loadV1(ctx context.Context, i *inspectorState, q *persistentVolumeClaimsInspector) { + var z persistentVolumeClaimsInspectorV1 - pvc, ok := i.pvcs[name] - if !ok { - return nil, false - } + z.persistentVolumeClaimInspector = q - return pvc, true -} + z.persistentVolumeClaims, z.err = p.getV1PersistentVolumeClaims(ctx, i) -func (i *inspector) PersistentVolumeClaimReadInterface() persistentvolumeclaim.ReadInterface { - return &persistentVolumeClaimReadInterface{i: i} + q.v1 = &z } -type persistentVolumeClaimReadInterface struct { - i *inspector -} +func (p persistentVolumeClaimsInspectorLoader) getV1PersistentVolumeClaims(ctx context.Context, i *inspectorState) (map[string]*core.PersistentVolumeClaim, error) { + objs, err := p.getV1PersistentVolumeClaimsList(ctx, i) + if err != nil { + return nil, err + } -func (s persistentVolumeClaimReadInterface) Get(ctx context.Context, name string, opts meta.GetOptions) (*core.PersistentVolumeClaim, error) { - if s, ok := s.i.PersistentVolumeClaim(name); !ok { - return nil, 
apiErrors.NewNotFound(schema.GroupResource{ - Group: core.GroupName, - Resource: "persistentvolumeclaims", - }, name) - } else { - return s, nil + r := make(map[string]*core.PersistentVolumeClaim, len(objs)) + + for id := range objs { + r[objs[id].GetName()] = objs[id] } + + return r, nil } -func pvcsToMap(ctx context.Context, inspector *inspector, k kubernetes.Interface, namespace string) func() error { - return func() error { - pvcs, err := getPersistentVolumeClaims(ctx, k, namespace, "") - if err != nil { - return err - } +func (p persistentVolumeClaimsInspectorLoader) getV1PersistentVolumeClaimsList(ctx context.Context, i *inspectorState) ([]*core.PersistentVolumeClaim, error) { + ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) + defer cancel() + obj, err := i.client.Kubernetes().CoreV1().PersistentVolumeClaims(i.namespace).List(ctxChild, meta.ListOptions{ + Limit: globals.GetGlobals().Kubernetes().RequestBatchSize().Get(), + }) + + if err != nil { + return nil, err + } + + items := obj.Items + cont := obj.Continue + var s = int64(len(items)) + + if z := obj.RemainingItemCount; z != nil { + s += *z + } - pvcMap := map[string]*core.PersistentVolumeClaim{} + ptrs := make([]*core.PersistentVolumeClaim, 0, s) - for _, pvc := range pvcs { - _, exists := pvcMap[pvc.GetName()] - if exists { - return errors.Newf("PersistentVolumeClaim %s already exists in map, error received", pvc.GetName()) - } + for { + for id := range items { + ptrs = append(ptrs, &items[id]) + } - pvcMap[pvc.GetName()] = pvcPointer(pvc) + if cont == "" { + break } - inspector.pvcs = pvcMap + items, cont, err = p.getV1PersistentVolumeClaimsListRequest(ctx, i, cont) - return nil + if err != nil { + return nil, err + } } -} -func pvcPointer(pvc core.PersistentVolumeClaim) *core.PersistentVolumeClaim { - return &pvc + return ptrs, nil } -func getPersistentVolumeClaims(ctx context.Context, k kubernetes.Interface, namespace, cont string) ([]core.PersistentVolumeClaim, error) 
{ +func (p persistentVolumeClaimsInspectorLoader) getV1PersistentVolumeClaimsListRequest(ctx context.Context, i *inspectorState, cont string) ([]core.PersistentVolumeClaim, string, error) { ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) defer cancel() - pvcs, err := k.CoreV1().PersistentVolumeClaims(namespace).List(ctxChild, meta.ListOptions{ + obj, err := i.client.Kubernetes().CoreV1().PersistentVolumeClaims(i.namespace).List(ctxChild, meta.ListOptions{ Limit: globals.GetGlobals().Kubernetes().RequestBatchSize().Get(), Continue: cont, }) if err != nil { - return nil, err + return nil, "", err } - if pvcs.Continue != "" { - nextPersistentVolumeClaimsLayer, err := getPersistentVolumeClaims(ctx, k, namespace, pvcs.Continue) - if err != nil { - return nil, err - } + return obj.Items, obj.Continue, err +} - return append(pvcs.Items, nextPersistentVolumeClaimsLayer...), nil +func (p persistentVolumeClaimsInspectorLoader) Verify(i *inspectorState) error { + if err := i.persistentVolumeClaims.v1.err; err != nil { + return err } - return pvcs.Items, nil + return nil } -func FilterPersistentVolumeClaimsByLabels(labels map[string]string) persistentvolumeclaim.Filter { - return func(pvc *core.PersistentVolumeClaim) bool { - for key, value := range labels { - v, ok := pvc.Labels[key] - if !ok { - return false - } - - if v != value { - return false - } +func (p persistentVolumeClaimsInspectorLoader) Copy(from, to *inspectorState, override bool) { + if to.persistentVolumeClaims != nil { + if !override { + return } + } + + to.persistentVolumeClaims = from.persistentVolumeClaims + to.persistentVolumeClaims.state = to +} + +func (p persistentVolumeClaimsInspectorLoader) Name() string { + return "persistentVolumeClaims" +} + +type persistentVolumeClaimsInspector struct { + state *inspectorState + + last time.Time + + v1 *persistentVolumeClaimsInspectorV1 +} + +func (p *persistentVolumeClaimsInspector) LastRefresh() time.Time { + return p.last +} + 
+func (p *persistentVolumeClaimsInspector) IsStatic() bool { + return p.state.IsStatic() +} + +func (p *persistentVolumeClaimsInspector) Refresh(ctx context.Context) error { + p.Throttle(p.state.throttles).Invalidate() + return p.state.refresh(ctx, persistentVolumeClaimsInspectorLoaderObj) +} + +func (p persistentVolumeClaimsInspector) Throttle(c throttle.Components) throttle.Throttle { + return c.PersistentVolumeClaim() +} + +func (p *persistentVolumeClaimsInspector) validate() error { + if p == nil { + return errors.Newf("PersistentVolumeClaimInspector is nil") + } - return true + if p.state == nil { + return errors.Newf("Parent is nil") } + + return p.v1.validate() } diff --git a/pkg/deployment/resources/inspector/pvcs_v1.go b/pkg/deployment/resources/inspector/pvcs_v1.go new file mode 100644 index 000000000..815cc2f2d --- /dev/null +++ b/pkg/deployment/resources/inspector/pvcs_v1.go @@ -0,0 +1,120 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package inspector + +import ( + "context" + + "github.com/arangodb/kube-arangodb/pkg/util/errors" + ins "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/persistentvolumeclaim/v1" + core "k8s.io/api/core/v1" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func (p *persistentVolumeClaimsInspector) V1() ins.Inspector { + return p.v1 +} + +type persistentVolumeClaimsInspectorV1 struct { + persistentVolumeClaimInspector *persistentVolumeClaimsInspector + + persistentVolumeClaims map[string]*core.PersistentVolumeClaim + err error +} + +func (p *persistentVolumeClaimsInspectorV1) validate() error { + if p == nil { + return errors.Newf("PersistentVolumeClaimsV1Inspector is nil") + } + + if p.persistentVolumeClaimInspector == nil { + return errors.Newf("Parent is nil") + } + + if p.persistentVolumeClaims == nil { + return errors.Newf("PersistentVolumeClaims or err should be not nil") + } + + if p.err != nil { + return errors.Newf("PersistentVolumeClaims or err cannot be not nil together") + } + + return nil +} + +func (p *persistentVolumeClaimsInspectorV1) ListSimple() []*core.PersistentVolumeClaim { + var r []*core.PersistentVolumeClaim + for _, persistentVolumeClaim := range p.persistentVolumeClaims { + r = append(r, persistentVolumeClaim) + } + + return r +} + +func (p *persistentVolumeClaimsInspectorV1) GetSimple(name string) (*core.PersistentVolumeClaim, bool) { + persistentVolumeClaim, ok := p.persistentVolumeClaims[name] + if !ok { + return nil, false + } + + return persistentVolumeClaim, true +} + +func (p *persistentVolumeClaimsInspectorV1) Iterate(action ins.Action, filters ...ins.Filter) error { + for _, persistentVolumeClaim := range p.persistentVolumeClaims { + if err := p.iteratePersistentVolumeClaim(persistentVolumeClaim, action, filters...); err != nil { + return err + } + } + + 
return nil +} + +func (p *persistentVolumeClaimsInspectorV1) iteratePersistentVolumeClaim(persistentVolumeClaim *core.PersistentVolumeClaim, action ins.Action, filters ...ins.Filter) error { + for _, f := range filters { + if f == nil { + continue + } + + if !f(persistentVolumeClaim) { + return nil + } + } + + return action(persistentVolumeClaim) +} + +func (p *persistentVolumeClaimsInspectorV1) Read() ins.ReadInterface { + return p +} + +func (p *persistentVolumeClaimsInspectorV1) Get(ctx context.Context, name string, opts metav1.GetOptions) (*core.PersistentVolumeClaim, error) { + if s, ok := p.GetSimple(name); !ok { + return nil, apiErrors.NewNotFound(schema.GroupResource{ + Group: core.GroupName, + Resource: "persistentVolumeClaims", + }, name) + } else { + return s, nil + } +} diff --git a/pkg/deployment/resources/inspector/sa.go b/pkg/deployment/resources/inspector/sa.go index 055452bbe..14ef1ef21 100644 --- a/pkg/deployment/resources/inspector/sa.go +++ b/pkg/deployment/resources/inspector/sa.go @@ -22,146 +22,172 @@ package inspector import ( "context" - - "github.com/arangodb/kube-arangodb/pkg/util/globals" - - apiErrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime/schema" + "time" "github.com/arangodb/kube-arangodb/pkg/util/errors" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/serviceaccount" + "github.com/arangodb/kube-arangodb/pkg/util/globals" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/throttle" core "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" ) -func (i *inspector) IterateServiceAccounts(action serviceaccount.Action, filters ...serviceaccount.Filter) error { - for _, serviceAccount := range i.ServiceAccounts() { - if err := i.iterateServiceAccount(serviceAccount, action, filters...); err != nil { - return err - } - } - return nil +func init() { + requireRegisterInspectorLoader(serviceAccountsInspectorLoaderObj) } -func (i *inspector) 
iterateServiceAccount(serviceAccount *core.ServiceAccount, action serviceaccount.Action, filters ...serviceaccount.Filter) error { - for _, filter := range filters { - if !filter(serviceAccount) { - return nil - } - } +var serviceAccountsInspectorLoaderObj = serviceAccountsInspectorLoader{} - return action(serviceAccount) +type serviceAccountsInspectorLoader struct { } -func (i *inspector) ServiceAccounts() []*core.ServiceAccount { - i.lock.Lock() - defer i.lock.Unlock() - - var r []*core.ServiceAccount - for _, serviceAccount := range i.serviceAccounts { - r = append(r, serviceAccount) - } +func (p serviceAccountsInspectorLoader) Component() throttle.Component { + return throttle.ServiceAccount +} - return r +func (p serviceAccountsInspectorLoader) Load(ctx context.Context, i *inspectorState) { + var q serviceAccountsInspector + p.loadV1(ctx, i, &q) + i.serviceAccounts = &q + q.state = i + q.last = time.Now() } -func (i *inspector) ServiceAccount(name string) (*core.ServiceAccount, bool) { - i.lock.Lock() - defer i.lock.Unlock() +func (p serviceAccountsInspectorLoader) loadV1(ctx context.Context, i *inspectorState, q *serviceAccountsInspector) { + var z serviceAccountsInspectorV1 - serviceAccount, ok := i.serviceAccounts[name] - if !ok { - return nil, false - } + z.serviceAccountInspector = q - return serviceAccount, true -} + z.serviceAccounts, z.err = p.getV1ServiceAccounts(ctx, i) -func (i *inspector) ServiceAccountReadInterface() serviceaccount.ReadInterface { - return &serviceAccountReadInterface{i: i} + q.v1 = &z } -type serviceAccountReadInterface struct { - i *inspector -} +func (p serviceAccountsInspectorLoader) getV1ServiceAccounts(ctx context.Context, i *inspectorState) (map[string]*core.ServiceAccount, error) { + objs, err := p.getV1ServiceAccountsList(ctx, i) + if err != nil { + return nil, err + } -func (s serviceAccountReadInterface) Get(ctx context.Context, name string, opts meta.GetOptions) (*core.ServiceAccount, error) { - if s, ok := 
s.i.ServiceAccount(name); !ok { - return nil, apiErrors.NewNotFound(schema.GroupResource{ - Group: core.GroupName, - Resource: "serviceaccounts", - }, name) - } else { - return s, nil + r := make(map[string]*core.ServiceAccount, len(objs)) + + for id := range objs { + r[objs[id].GetName()] = objs[id] } + + return r, nil } -func serviceAccountsToMap(ctx context.Context, inspector *inspector, k kubernetes.Interface, namespace string) func() error { - return func() error { - serviceAccounts, err := getServiceAccounts(ctx, k, namespace, "") - if err != nil { - return err - } +func (p serviceAccountsInspectorLoader) getV1ServiceAccountsList(ctx context.Context, i *inspectorState) ([]*core.ServiceAccount, error) { + ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) + defer cancel() + obj, err := i.client.Kubernetes().CoreV1().ServiceAccounts(i.namespace).List(ctxChild, meta.ListOptions{ + Limit: globals.GetGlobals().Kubernetes().RequestBatchSize().Get(), + }) + + if err != nil { + return nil, err + } + + items := obj.Items + cont := obj.Continue + var s = int64(len(items)) + + if z := obj.RemainingItemCount; z != nil { + s += *z + } - serviceAccountMap := map[string]*core.ServiceAccount{} + ptrs := make([]*core.ServiceAccount, 0, s) - for _, serviceAccount := range serviceAccounts { - _, exists := serviceAccountMap[serviceAccount.GetName()] - if exists { - return errors.Newf("ServiceAccount %s already exists in map, error received", serviceAccount.GetName()) - } + for { + for id := range items { + ptrs = append(ptrs, &items[id]) + } - serviceAccountMap[serviceAccount.GetName()] = serviceAccountPointer(serviceAccount) + if cont == "" { + break } - inspector.serviceAccounts = serviceAccountMap + items, cont, err = p.getV1ServiceAccountsListRequest(ctx, i, cont) - return nil + if err != nil { + return nil, err + } } -} -func serviceAccountPointer(serviceAccount core.ServiceAccount) *core.ServiceAccount { - return &serviceAccount + return ptrs, 
nil } -func getServiceAccounts(ctx context.Context, k kubernetes.Interface, namespace, cont string) ([]core.ServiceAccount, error) { +func (p serviceAccountsInspectorLoader) getV1ServiceAccountsListRequest(ctx context.Context, i *inspectorState, cont string) ([]core.ServiceAccount, string, error) { ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) defer cancel() - serviceAccounts, err := k.CoreV1().ServiceAccounts(namespace).List(ctxChild, meta.ListOptions{ + obj, err := i.client.Kubernetes().CoreV1().ServiceAccounts(i.namespace).List(ctxChild, meta.ListOptions{ Limit: globals.GetGlobals().Kubernetes().RequestBatchSize().Get(), Continue: cont, }) if err != nil { - return nil, err + return nil, "", err } - if serviceAccounts.Continue != "" { - nextServiceAccountsLayer, err := getServiceAccounts(ctx, k, namespace, serviceAccounts.Continue) - if err != nil { - return nil, err - } + return obj.Items, obj.Continue, err +} - return append(serviceAccounts.Items, nextServiceAccountsLayer...), nil +func (p serviceAccountsInspectorLoader) Verify(i *inspectorState) error { + if err := i.serviceAccounts.v1.err; err != nil { + return err } - return serviceAccounts.Items, nil + return nil } -func FilterServiceAccountsByLabels(labels map[string]string) serviceaccount.Filter { - return func(serviceAccount *core.ServiceAccount) bool { - for key, value := range labels { - v, ok := serviceAccount.Labels[key] - if !ok { - return false - } - - if v != value { - return false - } +func (p serviceAccountsInspectorLoader) Copy(from, to *inspectorState, override bool) { + if to.serviceAccounts != nil { + if !override { + return } + } + + to.serviceAccounts = from.serviceAccounts + to.serviceAccounts.state = to +} + +func (p serviceAccountsInspectorLoader) Name() string { + return "serviceAccounts" +} + +type serviceAccountsInspector struct { + state *inspectorState + + last time.Time + + v1 *serviceAccountsInspectorV1 +} + +func (p *serviceAccountsInspector) 
LastRefresh() time.Time { + return p.last +} + +func (p *serviceAccountsInspector) IsStatic() bool { + return p.state.IsStatic() +} + +func (p *serviceAccountsInspector) Refresh(ctx context.Context) error { + p.Throttle(p.state.throttles).Invalidate() + return p.state.refresh(ctx, serviceAccountsInspectorLoaderObj) +} + +func (p serviceAccountsInspector) Throttle(c throttle.Components) throttle.Throttle { + return c.ServiceAccount() +} + +func (p *serviceAccountsInspector) validate() error { + if p == nil { + return errors.Newf("ServiceAccountInspector is nil") + } - return true + if p.state == nil { + return errors.Newf("Parent is nil") } + + return p.v1.validate() } diff --git a/pkg/deployment/resources/inspector/sa_v1.go b/pkg/deployment/resources/inspector/sa_v1.go new file mode 100644 index 000000000..2bf08ee2f --- /dev/null +++ b/pkg/deployment/resources/inspector/sa_v1.go @@ -0,0 +1,120 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package inspector + +import ( + "context" + + "github.com/arangodb/kube-arangodb/pkg/util/errors" + ins "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/serviceaccount/v1" + core "k8s.io/api/core/v1" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func (p *serviceAccountsInspector) V1() ins.Inspector { + return p.v1 +} + +type serviceAccountsInspectorV1 struct { + serviceAccountInspector *serviceAccountsInspector + + serviceAccounts map[string]*core.ServiceAccount + err error +} + +func (p *serviceAccountsInspectorV1) validate() error { + if p == nil { + return errors.Newf("ServiceAccountsV1Inspector is nil") + } + + if p.serviceAccountInspector == nil { + return errors.Newf("Parent is nil") + } + + if p.serviceAccounts == nil { + return errors.Newf("ServiceAccounts or err should be not nil") + } + + if p.err != nil { + return errors.Newf("ServiceAccounts or err cannot be not nil together") + } + + return nil +} + +func (p *serviceAccountsInspectorV1) ServiceAccounts() []*core.ServiceAccount { + var r []*core.ServiceAccount + for _, serviceAccount := range p.serviceAccounts { + r = append(r, serviceAccount) + } + + return r +} + +func (p *serviceAccountsInspectorV1) GetSimple(name string) (*core.ServiceAccount, bool) { + serviceAccount, ok := p.serviceAccounts[name] + if !ok { + return nil, false + } + + return serviceAccount, true +} + +func (p *serviceAccountsInspectorV1) Iterate(action ins.Action, filters ...ins.Filter) error { + for _, serviceAccount := range p.serviceAccounts { + if err := p.iterateServiceAccount(serviceAccount, action, filters...); err != nil { + return err + } + } + + return nil +} + +func (p *serviceAccountsInspectorV1) iterateServiceAccount(serviceAccount *core.ServiceAccount, action ins.Action, filters ...ins.Filter) error { + for _, f := range filters { + if f == 
nil { + continue + } + + if !f(serviceAccount) { + return nil + } + } + + return action(serviceAccount) +} + +func (p *serviceAccountsInspectorV1) Read() ins.ReadInterface { + return p +} + +func (p *serviceAccountsInspectorV1) Get(ctx context.Context, name string, opts metav1.GetOptions) (*core.ServiceAccount, error) { + if s, ok := p.GetSimple(name); !ok { + return nil, apiErrors.NewNotFound(schema.GroupResource{ + Group: core.GroupName, + Resource: "serviceAccounts", + }, name) + } else { + return s, nil + } +} diff --git a/pkg/deployment/resources/inspector/secrets.go b/pkg/deployment/resources/inspector/secrets.go index 8569a6ebd..3a9c7c77b 100644 --- a/pkg/deployment/resources/inspector/secrets.go +++ b/pkg/deployment/resources/inspector/secrets.go @@ -22,128 +22,172 @@ package inspector import ( "context" - - "github.com/arangodb/kube-arangodb/pkg/util/globals" + "time" "github.com/arangodb/kube-arangodb/pkg/util/errors" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret" + "github.com/arangodb/kube-arangodb/pkg/util/globals" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/throttle" core "k8s.io/api/core/v1" - apiErrors "k8s.io/apimachinery/pkg/api/errors" meta "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/kubernetes" ) -func (i *inspector) IterateSecrets(action secret.Action, filters ...secret.Filter) error { - for _, secret := range i.Secrets() { - if err := i.iterateSecrets(secret, action, filters...); err != nil { - return err - } - } - return nil +func init() { + requireRegisterInspectorLoader(secretsInspectorLoaderObj) } -func (i *inspector) iterateSecrets(secret *core.Secret, action secret.Action, filters ...secret.Filter) error { - for _, filter := range filters { - if !filter(secret) { - return nil - } - } +var secretsInspectorLoaderObj = secretsInspectorLoader{} - return action(secret) +type secretsInspectorLoader struct { } -func (i *inspector) Secrets() 
[]*core.Secret { - i.lock.Lock() - defer i.lock.Unlock() - - var r []*core.Secret - for _, secret := range i.secrets { - r = append(r, secret) - } +func (p secretsInspectorLoader) Component() throttle.Component { + return throttle.Secret +} - return r +func (p secretsInspectorLoader) Load(ctx context.Context, i *inspectorState) { + var q secretsInspector + p.loadV1(ctx, i, &q) + i.secrets = &q + q.state = i + q.last = time.Now() } -func (i *inspector) Secret(name string) (*core.Secret, bool) { - i.lock.Lock() - defer i.lock.Unlock() +func (p secretsInspectorLoader) loadV1(ctx context.Context, i *inspectorState, q *secretsInspector) { + var z secretsInspectorV1 - secret, ok := i.secrets[name] - if !ok { - return nil, false - } + z.secretInspector = q - return secret, true -} + z.secrets, z.err = p.getV1Secrets(ctx, i) -func (i *inspector) SecretReadInterface() secret.ReadInterface { - return &secretReadInterface{i: i} + q.v1 = &z } -type secretReadInterface struct { - i *inspector -} +func (p secretsInspectorLoader) getV1Secrets(ctx context.Context, i *inspectorState) (map[string]*core.Secret, error) { + objs, err := p.getV1SecretsList(ctx, i) + if err != nil { + return nil, err + } -func (s secretReadInterface) Get(ctx context.Context, name string, opts meta.GetOptions) (*core.Secret, error) { - if s, ok := s.i.Secret(name); !ok { - return nil, apiErrors.NewNotFound(schema.GroupResource{ - Group: core.GroupName, - Resource: "secrets", - }, name) - } else { - return s, nil + r := make(map[string]*core.Secret, len(objs)) + + for id := range objs { + r[objs[id].GetName()] = objs[id] } + + return r, nil } -func secretsToMap(ctx context.Context, inspector *inspector, k kubernetes.Interface, namespace string) func() error { - return func() error { - secrets, err := getSecrets(ctx, k, namespace, "") - if err != nil { - return err - } +func (p secretsInspectorLoader) getV1SecretsList(ctx context.Context, i *inspectorState) ([]*core.Secret, error) { + ctxChild, cancel := 
globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) + defer cancel() + obj, err := i.client.Kubernetes().CoreV1().Secrets(i.namespace).List(ctxChild, meta.ListOptions{ + Limit: globals.GetGlobals().Kubernetes().RequestBatchSize().Get(), + }) + + if err != nil { + return nil, err + } + + items := obj.Items + cont := obj.Continue + var s = int64(len(items)) - secretMap := map[string]*core.Secret{} + if z := obj.RemainingItemCount; z != nil { + s += *z + } + + ptrs := make([]*core.Secret, 0, s) - for _, secret := range secrets { - _, exists := secretMap[secret.GetName()] - if exists { - return errors.Newf("Secret %s already exists in map, error received", secret.GetName()) - } + for { + for id := range items { + ptrs = append(ptrs, &items[id]) + } - secretMap[secret.GetName()] = secretPointer(secret) + if cont == "" { + break } - inspector.secrets = secretMap + items, cont, err = p.getV1SecretsListRequest(ctx, i, cont) - return nil + if err != nil { + return nil, err + } } -} -func secretPointer(pod core.Secret) *core.Secret { - return &pod + return ptrs, nil } -func getSecrets(ctx context.Context, k kubernetes.Interface, namespace, cont string) ([]core.Secret, error) { +func (p secretsInspectorLoader) getV1SecretsListRequest(ctx context.Context, i *inspectorState, cont string) ([]core.Secret, string, error) { ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) defer cancel() - secrets, err := k.CoreV1().Secrets(namespace).List(ctxChild, meta.ListOptions{ + obj, err := i.client.Kubernetes().CoreV1().Secrets(i.namespace).List(ctxChild, meta.ListOptions{ Limit: globals.GetGlobals().Kubernetes().RequestBatchSize().Get(), Continue: cont, }) if err != nil { - return nil, err + return nil, "", err } - if secrets.Continue != "" { - nextSecretsLayer, err := getSecrets(ctx, k, namespace, secrets.Continue) - if err != nil { - return nil, err + return obj.Items, obj.Continue, err +} + +func (p secretsInspectorLoader) Verify(i *inspectorState) 
error { + if err := i.secrets.v1.err; err != nil { + return err + } + + return nil +} + +func (p secretsInspectorLoader) Copy(from, to *inspectorState, override bool) { + if to.secrets != nil { + if !override { + return } + } + + to.secrets = from.secrets + to.secrets.state = to +} + +func (p secretsInspectorLoader) Name() string { + return "secrets" +} + +type secretsInspector struct { + state *inspectorState + + last time.Time + + v1 *secretsInspectorV1 +} + +func (p *secretsInspector) LastRefresh() time.Time { + return p.last +} + +func (p *secretsInspector) IsStatic() bool { + return p.state.IsStatic() +} + +func (p *secretsInspector) Refresh(ctx context.Context) error { + p.Throttle(p.state.throttles).Invalidate() + return p.state.refresh(ctx, secretsInspectorLoaderObj) +} + +func (p secretsInspector) Throttle(c throttle.Components) throttle.Throttle { + return c.Secret() +} + +func (p *secretsInspector) validate() error { + if p == nil { + return errors.Newf("SecretInspector is nil") + } - return append(secrets.Items, nextSecretsLayer...), nil + if p.state == nil { + return errors.Newf("Parent is nil") } - return secrets.Items, nil + return p.v1.validate() } diff --git a/pkg/deployment/resources/inspector/secrets_v1.go b/pkg/deployment/resources/inspector/secrets_v1.go new file mode 100644 index 000000000..8fcf468c8 --- /dev/null +++ b/pkg/deployment/resources/inspector/secrets_v1.go @@ -0,0 +1,120 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package inspector + +import ( + "context" + + "github.com/arangodb/kube-arangodb/pkg/util/errors" + ins "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret/v1" + core "k8s.io/api/core/v1" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func (p *secretsInspector) V1() ins.Inspector { + return p.v1 +} + +type secretsInspectorV1 struct { + secretInspector *secretsInspector + + secrets map[string]*core.Secret + err error +} + +func (p *secretsInspectorV1) validate() error { + if p == nil { + return errors.Newf("SecretsV1Inspector is nil") + } + + if p.secretInspector == nil { + return errors.Newf("Parent is nil") + } + + if p.secrets == nil { + return errors.Newf("Secrets or err should be not nil") + } + + if p.err != nil { + return errors.Newf("Secrets or err cannot be not nil together") + } + + return nil +} + +func (p *secretsInspectorV1) ListSimple() []*core.Secret { + var r []*core.Secret + for _, secret := range p.secrets { + r = append(r, secret) + } + + return r +} + +func (p *secretsInspectorV1) GetSimple(name string) (*core.Secret, bool) { + secret, ok := p.secrets[name] + if !ok { + return nil, false + } + + return secret, true +} + +func (p *secretsInspectorV1) Iterate(action ins.Action, filters ...ins.Filter) error { + for _, secret := range p.secrets { + if err := p.iterateSecret(secret, action, filters...); err != nil { + return err + } + } + + return nil +} + +func (p *secretsInspectorV1) iterateSecret(secret *core.Secret, action ins.Action, filters ...ins.Filter) error { + for _, f := range filters { + if f == nil { + continue + } + + if !f(secret) { + return nil + } + } + + return action(secret) +} + +func (p *secretsInspectorV1) Read() ins.ReadInterface { + return p 
+} + +func (p *secretsInspectorV1) Get(ctx context.Context, name string, opts metav1.GetOptions) (*core.Secret, error) { + if s, ok := p.GetSimple(name); !ok { + return nil, apiErrors.NewNotFound(schema.GroupResource{ + Group: core.GroupName, + Resource: "secrets", + }, name) + } else { + return s, nil + } +} diff --git a/pkg/deployment/resources/inspector/server.go b/pkg/deployment/resources/inspector/server.go deleted file mode 100644 index 3d4391c16..000000000 --- a/pkg/deployment/resources/inspector/server.go +++ /dev/null @@ -1,51 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// - -package inspector - -import ( - "context" - - "strings" - - "github.com/arangodb/go-driver" - "k8s.io/client-go/kubernetes" -) - -// GetVersionInfo returns kubernetes server version information. 
-func (i *inspector) GetVersionInfo() driver.Version { - i.lock.Lock() - defer i.lock.Unlock() - - return i.versionInfo -} - -func getVersionInfo(_ context.Context, inspector *inspector, k kubernetes.Interface, _ string) func() error { - return func() error { - inspector.versionInfo = "" - if v, err := k.Discovery().ServerVersion(); err != nil { - return err - } else { - inspector.versionInfo = driver.Version(strings.TrimPrefix(v.GitVersion, "v")) - } - - return nil - } -} diff --git a/pkg/deployment/resources/inspector/services.go b/pkg/deployment/resources/inspector/services.go index d2b3cf8a6..42166c219 100644 --- a/pkg/deployment/resources/inspector/services.go +++ b/pkg/deployment/resources/inspector/services.go @@ -22,129 +22,172 @@ package inspector import ( "context" - - "github.com/arangodb/kube-arangodb/pkg/util/globals" - - apiErrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime/schema" + "time" "github.com/arangodb/kube-arangodb/pkg/util/errors" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/service" + "github.com/arangodb/kube-arangodb/pkg/util/globals" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/throttle" core "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" ) -func (i *inspector) IterateServices(action service.Action, filters ...service.Filter) error { - for _, service := range i.Services() { - if err := i.iterateServices(service, action, filters...); err != nil { - return err - } - } - return nil +func init() { + requireRegisterInspectorLoader(servicesInspectorLoaderObj) } -func (i *inspector) iterateServices(service *core.Service, action service.Action, filters ...service.Filter) error { - for _, filter := range filters { - if !filter(service) { - return nil - } - } +var servicesInspectorLoaderObj = servicesInspectorLoader{} - return action(service) +type servicesInspectorLoader struct { } -func (i *inspector) Services() []*core.Service { 
- i.lock.Lock() - defer i.lock.Unlock() - - var r []*core.Service - for _, service := range i.services { - r = append(r, service) - } - - return r +func (p servicesInspectorLoader) Component() throttle.Component { + return throttle.Service } -func (i *inspector) ServiceReadInterface() service.ReadInterface { - return &serviceReadInterface{i: i} +func (p servicesInspectorLoader) Load(ctx context.Context, i *inspectorState) { + var q servicesInspector + p.loadV1(ctx, i, &q) + i.services = &q + q.state = i + q.last = time.Now() } -type serviceReadInterface struct { - i *inspector +func (p servicesInspectorLoader) loadV1(ctx context.Context, i *inspectorState, q *servicesInspector) { + var z servicesInspectorV1 + + z.serviceInspector = q + + z.services, z.err = p.getV1Services(ctx, i) + + q.v1 = &z } -func (s serviceReadInterface) Get(ctx context.Context, name string, opts meta.GetOptions) (*core.Service, error) { - if s, ok := s.i.Service(name); !ok { - return nil, apiErrors.NewNotFound(schema.GroupResource{ - Group: core.GroupName, - Resource: "services", - }, name) - } else { - return s, nil +func (p servicesInspectorLoader) getV1Services(ctx context.Context, i *inspectorState) (map[string]*core.Service, error) { + objs, err := p.getV1ServicesList(ctx, i) + if err != nil { + return nil, err + } + + r := make(map[string]*core.Service, len(objs)) + + for id := range objs { + r[objs[id].GetName()] = objs[id] } + + return r, nil } -func (i *inspector) Service(name string) (*core.Service, bool) { - i.lock.Lock() - defer i.lock.Unlock() +func (p servicesInspectorLoader) getV1ServicesList(ctx context.Context, i *inspectorState) ([]*core.Service, error) { + ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) + defer cancel() + obj, err := i.client.Kubernetes().CoreV1().Services(i.namespace).List(ctxChild, meta.ListOptions{ + Limit: globals.GetGlobals().Kubernetes().RequestBatchSize().Get(), + }) - service, ok := i.services[name] - if !ok { - return 
nil, false + if err != nil { + return nil, err } - return service, true -} + items := obj.Items + cont := obj.Continue + var s = int64(len(items)) -func servicesToMap(ctx context.Context, inspector *inspector, k kubernetes.Interface, namespace string) func() error { - return func() error { - services, err := getServices(ctx, k, namespace, "") - if err != nil { - return err - } + if z := obj.RemainingItemCount; z != nil { + s += *z + } - serviceMap := map[string]*core.Service{} + ptrs := make([]*core.Service, 0, s) - for _, service := range services { - _, exists := serviceMap[service.GetName()] - if exists { - return errors.Newf("Service %s already exists in map, error received", service.GetName()) - } + for { + for id := range items { + ptrs = append(ptrs, &items[id]) + } - serviceMap[service.GetName()] = servicePointer(service) + if cont == "" { + break } - inspector.services = serviceMap + items, cont, err = p.getV1ServicesListRequest(ctx, i, cont) - return nil + if err != nil { + return nil, err + } } -} -func servicePointer(pod core.Service) *core.Service { - return &pod + return ptrs, nil } -func getServices(ctx context.Context, k kubernetes.Interface, namespace, cont string) ([]core.Service, error) { +func (p servicesInspectorLoader) getV1ServicesListRequest(ctx context.Context, i *inspectorState, cont string) ([]core.Service, string, error) { ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) defer cancel() - services, err := k.CoreV1().Services(namespace).List(ctxChild, meta.ListOptions{ + obj, err := i.client.Kubernetes().CoreV1().Services(i.namespace).List(ctxChild, meta.ListOptions{ Limit: globals.GetGlobals().Kubernetes().RequestBatchSize().Get(), Continue: cont, }) if err != nil { - return nil, err + return nil, "", err } - if services.Continue != "" { - nextServicesLayer, err := getServices(ctx, k, namespace, services.Continue) - if err != nil { - return nil, err + return obj.Items, obj.Continue, err +} + +func (p 
servicesInspectorLoader) Verify(i *inspectorState) error { + if err := i.services.v1.err; err != nil { + return err + } + + return nil +} + +func (p servicesInspectorLoader) Copy(from, to *inspectorState, override bool) { + if to.services != nil { + if !override { + return } + } + + to.services = from.services + to.services.state = to +} + +func (p servicesInspectorLoader) Name() string { + return "services" +} + +type servicesInspector struct { + state *inspectorState + + last time.Time + + v1 *servicesInspectorV1 +} + +func (p *servicesInspector) LastRefresh() time.Time { + return p.last +} + +func (p *servicesInspector) IsStatic() bool { + return p.state.IsStatic() +} + +func (p *servicesInspector) Refresh(ctx context.Context) error { + p.Throttle(p.state.throttles).Invalidate() + return p.state.refresh(ctx, servicesInspectorLoaderObj) +} + +func (p servicesInspector) Throttle(c throttle.Components) throttle.Throttle { + return c.Service() +} + +func (p *servicesInspector) validate() error { + if p == nil { + return errors.Newf("ServiceInspector is nil") + } - return append(services.Items, nextServicesLayer...), nil + if p.state == nil { + return errors.Newf("Parent is nil") } - return services.Items, nil + return p.v1.validate() } diff --git a/pkg/deployment/resources/inspector/services_v1.go b/pkg/deployment/resources/inspector/services_v1.go new file mode 100644 index 000000000..89028e66b --- /dev/null +++ b/pkg/deployment/resources/inspector/services_v1.go @@ -0,0 +1,120 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package inspector + +import ( + "context" + + "github.com/arangodb/kube-arangodb/pkg/util/errors" + ins "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/service/v1" + core "k8s.io/api/core/v1" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func (p *servicesInspector) V1() ins.Inspector { + return p.v1 +} + +type servicesInspectorV1 struct { + serviceInspector *servicesInspector + + services map[string]*core.Service + err error +} + +func (p *servicesInspectorV1) validate() error { + if p == nil { + return errors.Newf("ServicesV1Inspector is nil") + } + + if p.serviceInspector == nil { + return errors.Newf("Parent is nil") + } + + if p.services == nil { + return errors.Newf("Services or err should be not nil") + } + + if p.err != nil { + return errors.Newf("Services or err cannot be not nil together") + } + + return nil +} + +func (p *servicesInspectorV1) Services() []*core.Service { + var r []*core.Service + for _, service := range p.services { + r = append(r, service) + } + + return r +} + +func (p *servicesInspectorV1) GetSimple(name string) (*core.Service, bool) { + service, ok := p.services[name] + if !ok { + return nil, false + } + + return service, true +} + +func (p *servicesInspectorV1) Iterate(action ins.Action, filters ...ins.Filter) error { + for _, service := range p.services { + if err := p.iterateService(service, action, filters...); err != nil { + return err 
+ } + } + + return nil +} + +func (p *servicesInspectorV1) iterateService(service *core.Service, action ins.Action, filters ...ins.Filter) error { + for _, f := range filters { + if f == nil { + continue + } + + if !f(service) { + return nil + } + } + + return action(service) +} + +func (p *servicesInspectorV1) Read() ins.ReadInterface { + return p +} + +func (p *servicesInspectorV1) Get(ctx context.Context, name string, opts metav1.GetOptions) (*core.Service, error) { + if s, ok := p.GetSimple(name); !ok { + return nil, apiErrors.NewNotFound(schema.GroupResource{ + Group: core.GroupName, + Resource: "services", + }, name) + } else { + return s, nil + } +} diff --git a/pkg/deployment/resources/inspector/sm.go b/pkg/deployment/resources/inspector/sm.go new file mode 100644 index 000000000..844ce6c48 --- /dev/null +++ b/pkg/deployment/resources/inspector/sm.go @@ -0,0 +1,189 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package inspector + +import ( + "context" + "time" + + "github.com/arangodb/kube-arangodb/pkg/util/errors" + "github.com/arangodb/kube-arangodb/pkg/util/globals" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/throttle" + monitoring "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func init() { + requireRegisterInspectorLoader(serviceMonitorsInspectorLoaderObj) +} + +var serviceMonitorsInspectorLoaderObj = serviceMonitorsInspectorLoader{} + +type serviceMonitorsInspectorLoader struct { +} + +func (p serviceMonitorsInspectorLoader) Component() throttle.Component { + return throttle.ServiceMonitor +} + +func (p serviceMonitorsInspectorLoader) Load(ctx context.Context, i *inspectorState) { + var q serviceMonitorsInspector + p.loadV1(ctx, i, &q) + i.serviceMonitors = &q + q.state = i + q.last = time.Now() +} + +func (p serviceMonitorsInspectorLoader) loadV1(ctx context.Context, i *inspectorState, q *serviceMonitorsInspector) { + var z serviceMonitorsInspectorV1 + + z.serviceMonitorInspector = q + + z.serviceMonitors, z.err = p.getV1ServiceMonitors(ctx, i) + + q.v1 = &z +} + +func (p serviceMonitorsInspectorLoader) getV1ServiceMonitors(ctx context.Context, i *inspectorState) (map[string]*monitoring.ServiceMonitor, error) { + objs, err := p.getV1ServiceMonitorsList(ctx, i) + if err != nil { + return nil, err + } + + r := make(map[string]*monitoring.ServiceMonitor, len(objs)) + + for id := range objs { + r[objs[id].GetName()] = objs[id] + } + + return r, nil +} + +func (p serviceMonitorsInspectorLoader) getV1ServiceMonitorsList(ctx context.Context, i *inspectorState) ([]*monitoring.ServiceMonitor, error) { + ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) + defer cancel() + obj, err := i.client.Monitoring().MonitoringV1().ServiceMonitors(i.namespace).List(ctxChild, meta.ListOptions{ 
+ Limit: globals.GetGlobals().Kubernetes().RequestBatchSize().Get(), + }) + + if err != nil { + return nil, err + } + + items := obj.Items + cont := obj.Continue + var s = int64(len(items)) + + if z := obj.RemainingItemCount; z != nil { + s += *z + } + + ptrs := make([]*monitoring.ServiceMonitor, 0, s) + + for { + for id := range items { + ptrs = append(ptrs, items[id]) + } + + if cont == "" { + break + } + + items, cont, err = p.getV1ServiceMonitorsListRequest(ctx, i, cont) + + if err != nil { + return nil, err + } + } + + return ptrs, nil +} + +func (p serviceMonitorsInspectorLoader) getV1ServiceMonitorsListRequest(ctx context.Context, i *inspectorState, cont string) ([]*monitoring.ServiceMonitor, string, error) { + ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) + defer cancel() + obj, err := i.client.Monitoring().MonitoringV1().ServiceMonitors(i.namespace).List(ctxChild, meta.ListOptions{ + Limit: globals.GetGlobals().Kubernetes().RequestBatchSize().Get(), + Continue: cont, + }) + + if err != nil { + return nil, "", err + } + + return obj.Items, obj.Continue, err +} + +func (p serviceMonitorsInspectorLoader) Verify(i *inspectorState) error { + return nil +} + +func (p serviceMonitorsInspectorLoader) Copy(from, to *inspectorState, override bool) { + if to.serviceMonitors != nil { + if !override { + return + } + } + + to.serviceMonitors = from.serviceMonitors + to.serviceMonitors.state = to +} + +func (p serviceMonitorsInspectorLoader) Name() string { + return "serviceMonitors" +} + +type serviceMonitorsInspector struct { + state *inspectorState + + last time.Time + + v1 *serviceMonitorsInspectorV1 +} + +func (p *serviceMonitorsInspector) LastRefresh() time.Time { + return p.last +} + +func (p *serviceMonitorsInspector) IsStatic() bool { + return p.state.IsStatic() +} + +func (p *serviceMonitorsInspector) Refresh(ctx context.Context) error { + p.Throttle(p.state.throttles).Invalidate() + return p.state.refresh(ctx, 
serviceMonitorsInspectorLoaderObj) +} + +func (p serviceMonitorsInspector) Throttle(c throttle.Components) throttle.Throttle { + return c.ServiceMonitor() +} + +func (p *serviceMonitorsInspector) validate() error { + if p == nil { + return errors.Newf("ServiceMonitorInspector is nil") + } + + if p.state == nil { + return errors.Newf("Parent is nil") + } + + return p.v1.validate() +} diff --git a/pkg/deployment/resources/inspector/sm_v1.go b/pkg/deployment/resources/inspector/sm_v1.go new file mode 100644 index 000000000..221aca0d3 --- /dev/null +++ b/pkg/deployment/resources/inspector/sm_v1.go @@ -0,0 +1,126 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package inspector + +import ( + "context" + + "github.com/arangodb/kube-arangodb/pkg/util/errors" + ins "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/servicemonitor/v1" + core "k8s.io/api/core/v1" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + monitoring "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" +) + +func (p *serviceMonitorsInspector) V1() (ins.Inspector, error) { + if p.v1.err != nil { + return nil, p.v1.err + } + + return p.v1, nil +} + +type serviceMonitorsInspectorV1 struct { + serviceMonitorInspector *serviceMonitorsInspector + + serviceMonitors map[string]*monitoring.ServiceMonitor + err error +} + +func (p *serviceMonitorsInspectorV1) validate() error { + if p == nil { + return errors.Newf("ServiceMonitorsV1Inspector is nil") + } + + if p.serviceMonitorInspector == nil { + return errors.Newf("Parent is nil") + } + + if p.serviceMonitors == nil && p.err == nil { + return errors.Newf("ServiceMonitors or err should be not nil") + } + + if p.serviceMonitors != nil && p.err != nil { + return errors.Newf("ServiceMonitors or err cannot be not nil together") + } + + return nil +} + +func (p *serviceMonitorsInspectorV1) ServiceMonitors() []*monitoring.ServiceMonitor { + var r []*monitoring.ServiceMonitor + for _, serviceMonitor := range p.serviceMonitors { + r = append(r, serviceMonitor) + } + + return r +} + +func (p *serviceMonitorsInspectorV1) GetSimple(name string) (*monitoring.ServiceMonitor, bool) { + serviceMonitor, ok := p.serviceMonitors[name] + if !ok { + return nil, false + } + + return serviceMonitor, true +} + +func (p *serviceMonitorsInspectorV1) Iterate(action ins.Action, filters ...ins.Filter) error { + for _, serviceMonitor := range p.serviceMonitors { + if err := p.iterateServiceMonitor(serviceMonitor, action, filters...); err != nil { + 
return err + } + } + + return nil +} + +func (p *serviceMonitorsInspectorV1) iterateServiceMonitor(serviceMonitor *monitoring.ServiceMonitor, action ins.Action, filters ...ins.Filter) error { + for _, f := range filters { + if f == nil { + continue + } + + if !f(serviceMonitor) { + return nil + } + } + + return action(serviceMonitor) +} + +func (p *serviceMonitorsInspectorV1) Read() ins.ReadInterface { + return p +} + +func (p *serviceMonitorsInspectorV1) Get(ctx context.Context, name string, opts metav1.GetOptions) (*monitoring.ServiceMonitor, error) { + if s, ok := p.GetSimple(name); !ok { + return nil, apiErrors.NewNotFound(schema.GroupResource{ + Group: core.GroupName, + Resource: "serviceMonitors", + }, name) + } else { + return s, nil + } +} diff --git a/pkg/deployment/resources/inspector/sms.go b/pkg/deployment/resources/inspector/sms.go deleted file mode 100644 index f61a2db41..000000000 --- a/pkg/deployment/resources/inspector/sms.go +++ /dev/null @@ -1,152 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// - -package inspector - -import ( - "context" - - "github.com/arangodb/kube-arangodb/pkg/util/globals" - - apiErrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime/schema" - - "github.com/arangodb/kube-arangodb/pkg/util/errors" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/servicemonitor" - monitoringGroup "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring" - monitoring "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" - monitoringClient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned" - meta "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func (i *inspector) IterateServiceMonitors(action servicemonitor.Action, filters ...servicemonitor.Filter) error { - for _, serviceMonitor := range i.ServiceMonitors() { - if err := i.iterateServiceMonitor(serviceMonitor, action, filters...); err != nil { - return err - } - } - return nil -} - -func (i *inspector) iterateServiceMonitor(serviceMonitor *monitoring.ServiceMonitor, action servicemonitor.Action, filters ...servicemonitor.Filter) error { - for _, filter := range filters { - if !filter(serviceMonitor) { - return nil - } - } - - return action(serviceMonitor) -} - -func (i *inspector) ServiceMonitors() []*monitoring.ServiceMonitor { - i.lock.Lock() - defer i.lock.Unlock() - - var r []*monitoring.ServiceMonitor - for _, sms := range i.serviceMonitors { - r = append(r, sms) - } - - return r -} - -func (i *inspector) ServiceMonitor(name string) (*monitoring.ServiceMonitor, bool) { - i.lock.Lock() - defer i.lock.Unlock() - - serviceMonitor, ok := i.serviceMonitors[name] - if !ok { - return nil, false - } - - return serviceMonitor, true -} - -func (i *inspector) ServiceMonitorReadInterface() servicemonitor.ReadInterface { - return &serviceMonitorReadInterface{i: i} -} - -type serviceMonitorReadInterface struct { - i *inspector -} - -func (s 
serviceMonitorReadInterface) Get(ctx context.Context, name string, opts meta.GetOptions) (*monitoring.ServiceMonitor, error) { - if s, ok := s.i.ServiceMonitor(name); !ok { - return nil, apiErrors.NewNotFound(schema.GroupResource{ - Group: monitoringGroup.GroupName, - Resource: "servicemonitors", - }, name) - } else { - return s, nil - } -} - -func serviceMonitorsToMap(ctx context.Context, inspector *inspector, m monitoringClient.Interface, namespace string) func() error { - return func() error { - serviceMonitors := getServiceMonitors(ctx, m, namespace, "") - - serviceMonitorMap := map[string]*monitoring.ServiceMonitor{} - - for _, serviceMonitor := range serviceMonitors { - _, exists := serviceMonitorMap[serviceMonitor.GetName()] - if exists { - return errors.Newf("ServiceMonitor %s already exists in map, error received", serviceMonitor.GetName()) - } - - serviceMonitorMap[serviceMonitor.GetName()] = serviceMonitor - } - - inspector.serviceMonitors = serviceMonitorMap - - return nil - } -} - -func getServiceMonitors(ctx context.Context, m monitoringClient.Interface, namespace, cont string) []*monitoring.ServiceMonitor { - ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) - defer cancel() - serviceMonitors, err := m.MonitoringV1().ServiceMonitors(namespace).List(ctxChild, meta.ListOptions{ - Limit: globals.GetGlobals().Kubernetes().RequestBatchSize().Get(), - Continue: cont, - }) - - if err != nil { - return []*monitoring.ServiceMonitor{} - } - - return serviceMonitors.Items -} - -func FilterServiceMonitorsByLabels(labels map[string]string) servicemonitor.Filter { - return func(serviceMonitor *monitoring.ServiceMonitor) bool { - for key, value := range labels { - v, ok := serviceMonitor.Labels[key] - if !ok { - return false - } - - if v != value { - return false - } - } - - return true - } -} diff --git a/pkg/deployment/resources/labels.go b/pkg/deployment/resources/labels.go index 24c21bca5..da8290b12 100644 --- 
a/pkg/deployment/resources/labels.go +++ b/pkg/deployment/resources/labels.go @@ -70,7 +70,7 @@ func (r *Resources) EnsureLabels(ctx context.Context, cachedStatus inspectorInte func (r *Resources) EnsureSecretLabels(ctx context.Context, cachedStatus inspectorInterface.Inspector) error { changed := false - if err := cachedStatus.IterateSecrets(func(secret *core.Secret) error { + if err := cachedStatus.Secret().V1().Iterate(func(secret *core.Secret) error { if ensureLabelsMap(secret.Kind, secret, r.context.GetSpec(), func(name string, d []byte) error { return globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { _, err := r.context.SecretsModInterface().Patch(ctxChild, @@ -97,7 +97,7 @@ func (r *Resources) EnsureSecretLabels(ctx context.Context, cachedStatus inspect func (r *Resources) EnsureServiceAccountsLabels(ctx context.Context, cachedStatus inspectorInterface.Inspector) error { changed := false - if err := cachedStatus.IterateServiceAccounts(func(serviceAccount *core.ServiceAccount) error { + if err := cachedStatus.ServiceAccount().V1().Iterate(func(serviceAccount *core.ServiceAccount) error { if ensureLabelsMap(serviceAccount.Kind, serviceAccount, r.context.GetSpec(), func(name string, d []byte) error { return globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { _, err := r.context.ServiceAccountsModInterface().Patch(ctxChild, name, types.JSONPatchType, d, meta.PatchOptions{}) @@ -123,7 +123,7 @@ func (r *Resources) EnsureServiceAccountsLabels(ctx context.Context, cachedStatu func (r *Resources) EnsureServicesLabels(ctx context.Context, cachedStatus inspectorInterface.Inspector) error { changed := false - if err := cachedStatus.IterateServices(func(service *core.Service) error { + if err := cachedStatus.Service().V1().Iterate(func(service *core.Service) error { if ensureLabelsMap(service.Kind, service, r.context.GetSpec(), func(name string, d []byte) error { return 
globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { _, err := r.context.ServicesModInterface().Patch(ctxChild, name, types.JSONPatchType, d, meta.PatchOptions{}) @@ -149,7 +149,11 @@ func (r *Resources) EnsureServicesLabels(ctx context.Context, cachedStatus inspe func (r *Resources) EnsureServiceMonitorsLabels(ctx context.Context, cachedStatus inspectorInterface.Inspector) error { changed := false - if err := cachedStatus.IterateServiceMonitors(func(serviceMonitor *monitoring.ServiceMonitor) error { + i, err := cachedStatus.ServiceMonitor().V1() + if err != nil { + return err + } + if err := i.Iterate(func(serviceMonitor *monitoring.ServiceMonitor) error { if ensureLabelsMap(serviceMonitor.Kind, serviceMonitor, r.context.GetSpec(), func(name string, d []byte) error { return globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { _, err := r.context.ServiceMonitorsModInterface().Patch(ctxChild, name, types.JSONPatchType, d, meta.PatchOptions{}) @@ -175,7 +179,7 @@ func (r *Resources) EnsureServiceMonitorsLabels(ctx context.Context, cachedStatu func (r *Resources) EnsurePodsLabels(ctx context.Context, cachedStatus inspectorInterface.Inspector) error { changed := false - if err := cachedStatus.IteratePods(func(pod *core.Pod) error { + if err := cachedStatus.Pod().V1().Iterate(func(pod *core.Pod) error { if ensureGroupLabelsMap(pod.Kind, pod, r.context.GetSpec(), func(name string, d []byte) error { return globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { _, err := r.context.PodsModInterface().Patch(ctxChild, name, types.JSONPatchType, d, meta.PatchOptions{}) @@ -201,7 +205,7 @@ func (r *Resources) EnsurePodsLabels(ctx context.Context, cachedStatus inspector func (r *Resources) EnsurePersistentVolumeClaimsLabels(ctx context.Context, cachedStatus inspectorInterface.Inspector) error { changed := false - if err := 
cachedStatus.IteratePersistentVolumeClaims(func(persistentVolumeClaim *core.PersistentVolumeClaim) error { + if err := cachedStatus.PersistentVolumeClaim().V1().Iterate(func(persistentVolumeClaim *core.PersistentVolumeClaim) error { if ensureGroupLabelsMap(persistentVolumeClaim.Kind, persistentVolumeClaim, r.context.GetSpec(), func(name string, d []byte) error { return globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { _, err := r.context.PersistentVolumeClaimsModInterface().Patch(ctxChild, name, types.JSONPatchType, d, meta.PatchOptions{}) @@ -227,7 +231,11 @@ func (r *Resources) EnsurePersistentVolumeClaimsLabels(ctx context.Context, cach func (r *Resources) EnsurePodDisruptionBudgetsLabels(ctx context.Context, cachedStatus inspectorInterface.Inspector) error { changed := false - if err := cachedStatus.IteratePodDisruptionBudgets(func(budget *policy.PodDisruptionBudget) error { + i, err := cachedStatus.PodDisruptionBudget().V1Beta1() + if err != nil { + return err + } + if err := i.Iterate(func(budget *policy.PodDisruptionBudget) error { if ensureLabelsMap(budget.Kind, budget, r.context.GetSpec(), func(name string, d []byte) error { return globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { _, err := r.context.PodDisruptionBudgetsModInterface().Patch(ctxChild, name, types.JSONPatchType, d, meta.PatchOptions{}) diff --git a/pkg/deployment/resources/license.go b/pkg/deployment/resources/license.go index 916aa103f..082286d6a 100644 --- a/pkg/deployment/resources/license.go +++ b/pkg/deployment/resources/license.go @@ -34,7 +34,7 @@ func (r *Resources) ValidateLicenseKeySecret(cachedStatus inspectorInterface.Ins if spec.HasSecretName() { secretName := spec.GetSecretName() - s, exists := cachedStatus.Secret(secretName) + s, exists := cachedStatus.Secret().V1().GetSimple(secretName) if !exists { return errors.Newf("License secret %s does not exist", s) diff --git 
a/pkg/deployment/resources/member_cleanup.go b/pkg/deployment/resources/member_cleanup.go index 52041cd9c..1ab246b52 100644 --- a/pkg/deployment/resources/member_cleanup.go +++ b/pkg/deployment/resources/member_cleanup.go @@ -24,10 +24,6 @@ import ( "context" "time" - "github.com/arangodb/kube-arangodb/pkg/util/globals" - - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/arangomember" - inspectorInterface "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -38,6 +34,7 @@ import ( memberState "github.com/arangodb/kube-arangodb/pkg/deployment/member" "github.com/arangodb/kube-arangodb/pkg/metrics" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" + arangomemberv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/arangomember/v1" ) const ( @@ -159,22 +156,20 @@ func (r *Resources) cleanupRemovedClusterMembers(ctx context.Context, health mem func (r *Resources) EnsureArangoMembers(ctx context.Context, cachedStatus inspectorInterface.Inspector) error { // Create all missing arangomembers - s, _ := r.context.GetStatus() obj := r.context.GetAPIObject() - reconcileRequired := k8sutil.NewReconcile(cachedStatus) - if err := s.Members.ForeachServerGroup(func(group api.ServerGroup, list api.MemberStatusList) error { for _, member := range list { name := member.ArangoMemberName(r.context.GetAPIObject().GetName(), group) - if m, ok := cachedStatus.ArangoMember(name); !ok { + c := r.context.WithCurrentArangoMember(name) + + if !c.Exists(ctx) { // Create ArangoMember - a := api.ArangoMember{ + obj := &api.ArangoMember{ ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: r.context.GetNamespace(), + Name: name, OwnerReferences: []metav1.OwnerReference{ obj.AsOwner(), }, @@ -186,41 +181,29 @@ func (r *Resources) EnsureArangoMembers(ctx context.Context, cachedStatus inspec }, } - err := globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { - _, 
err := r.context.ArangoMembersModInterface().Create(ctxChild, &a, metav1.CreateOptions{}) - return err - }) - if err != nil { + if err := r.context.WithCurrentArangoMember(name).Create(ctx, obj); err != nil { return err } - reconcileRequired.Required() continue } else { - changed := false - if len(m.OwnerReferences) == 0 { - m.OwnerReferences = []metav1.OwnerReference{ - obj.AsOwner(), + if err := c.Update(ctx, func(m *api.ArangoMember) bool { + changed := false + if len(m.OwnerReferences) == 0 { + m.OwnerReferences = []metav1.OwnerReference{ + obj.AsOwner(), + } + changed = true } - changed = true - } - if m.Spec.DeploymentUID == "" { - m.Spec.DeploymentUID = obj.GetUID() - changed = true - } - if changed { - - err := globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { - _, err := r.context.ArangoMembersModInterface().Update(ctxChild, m, metav1.UpdateOptions{}) - return err - }) - if err != nil { - return err + if m.Spec.DeploymentUID == "" { + m.Spec.DeploymentUID = obj.GetUID() + changed = true } - reconcileRequired.Required() - continue + return changed + }); err != nil { + return err } } } @@ -230,34 +213,18 @@ func (r *Resources) EnsureArangoMembers(ctx context.Context, cachedStatus inspec return err } - if err := reconcileRequired.Reconcile(ctx); err != nil { - return err - } - - if err := cachedStatus.IterateArangoMembers(func(member *api.ArangoMember) error { + if err := cachedStatus.ArangoMember().V1().Iterate(func(member *api.ArangoMember) error { _, g, ok := s.Members.ElementByID(member.Spec.ID) if !ok || g != member.Spec.Group { // Remove member - - err := globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { - return r.context.ArangoMembersModInterface().Delete(ctxChild, member.GetName(), metav1.DeleteOptions{}) - }) - if err != nil { - if !k8sutil.IsNotFound(err) { - return err - } + if err := r.context.WithCurrentArangoMember(member.GetName()).Delete(ctx); 
err != nil { + return err } - - reconcileRequired.Required() } return nil - }, arangomember.FilterByDeploymentUID(obj.GetUID())); err != nil { - return err - } - - if err := reconcileRequired.Reconcile(ctx); err != nil { + }, arangomemberv1.FilterByDeploymentUID(obj.GetUID())); err != nil { return err } diff --git a/pkg/deployment/resources/pdbs.go b/pkg/deployment/resources/pdbs.go index e77bacf49..56af02e56 100644 --- a/pkg/deployment/resources/pdbs.go +++ b/pkg/deployment/resources/pdbs.go @@ -106,6 +106,11 @@ func newPDB(minAvail int, deplname string, group api.ServerGroup, owner metav1.O // ensurePDBForGroup ensure pdb for a specific server group, if wantMinAvail is zero, the PDB is removed and not recreated func (r *Resources) ensurePDBForGroup(ctx context.Context, group api.ServerGroup, wantedMinAvail int) error { + i, err := r.context.GetCachedStatus().PodDisruptionBudget().V1Beta1() + if err != nil { + return err + } + deplname := r.context.GetAPIObject().GetName() pdbname := PDBNameForGroup(deplname, group) log := r.log.With().Str("group", group.AsRole()).Logger() @@ -114,7 +119,7 @@ func (r *Resources) ensurePDBForGroup(ctx context.Context, group api.ServerGroup var pdb *policyv1beta1.PodDisruptionBudget err := globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { var err error - pdb, err = r.context.GetCachedStatus().PodDisruptionBudgetReadInterface().Get(ctxChild, pdbname, metav1.GetOptions{}) + pdb, err = i.Read().Get(ctxChild, pdbname, metav1.GetOptions{}) return err }) if k8sutil.IsNotFound(err) { diff --git a/pkg/deployment/resources/pod_cleanup.go b/pkg/deployment/resources/pod_cleanup.go index 53e5a5c8d..97f26057f 100644 --- a/pkg/deployment/resources/pod_cleanup.go +++ b/pkg/deployment/resources/pod_cleanup.go @@ -24,13 +24,13 @@ import ( "context" "time" - v1 "k8s.io/api/core/v1" + core "k8s.io/api/core/v1" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - 
"github.com/arangodb/kube-arangodb/pkg/deployment/resources/inspector" "github.com/arangodb/kube-arangodb/pkg/util" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" inspectorInterface "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector" + podv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod/v1" ) const ( @@ -46,7 +46,7 @@ func (r *Resources) CleanupTerminatedPods(ctx context.Context, cachedStatus insp // Update member status from all pods found status, _ := r.context.GetStatus() - err := cachedStatus.IteratePods(func(pod *v1.Pod) error { + err := cachedStatus.Pod().V1().Iterate(func(pod *core.Pod) error { if k8sutil.IsArangoDBImageIDAndVersionPod(pod) { // Image ID pods are not relevant to inspect here return nil @@ -91,7 +91,7 @@ func (r *Resources) CleanupTerminatedPods(ctx context.Context, cachedStatus insp } return nil - }, inspector.FilterPodsByLabels(k8sutil.LabelsForDeployment(r.context.GetAPIObject().GetName(), ""))) + }, podv1.FilterPodsByLabels(k8sutil.LabelsForDeployment(r.context.GetAPIObject().GetName(), ""))) if err != nil { return 0, err } diff --git a/pkg/deployment/resources/pod_creator.go b/pkg/deployment/resources/pod_creator.go index 238371513..57bba0a2d 100644 --- a/pkg/deployment/resources/pod_creator.go +++ b/pkg/deployment/resources/pod_creator.go @@ -36,7 +36,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/deployment/member" - podMod "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod" core "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -57,6 +56,7 @@ import ( "github.com/arangodb/kube-arangodb/pkg/deployment/pod" "github.com/arangodb/kube-arangodb/pkg/util/constants" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" + podv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod/v1" ) // createArangodArgsWithUpgrade creates command line arguments for an arangod server upgrade in the given group. 
@@ -337,7 +337,7 @@ func (r *Resources) RenderPodForMember(ctx context.Context, cachedStatus inspect memberName := m.ArangoMemberName(r.context.GetAPIObject().GetName(), group) - member, ok := cachedStatus.ArangoMember(memberName) + member, ok := cachedStatus.ArangoMember().V1().GetSimple(memberName) if !ok { return nil, errors.Newf("ArangoMember %s not found", memberName) } @@ -646,7 +646,7 @@ func RenderArangoPod(ctx context.Context, cachedStatus inspectorInterface.Inspec // CreateArangoPod creates a new Pod with container provided by parameter 'containerCreator' // If the pod already exists, nil is returned. // If another error occurs, that error is returned. -func CreateArangoPod(ctx context.Context, c podMod.ModInterface, deployment k8sutil.APIObject, +func CreateArangoPod(ctx context.Context, c podv1.ModInterface, deployment k8sutil.APIObject, deploymentSpec api.DeploymentSpec, group api.ServerGroup, pod *core.Pod) (string, types.UID, error) { podName, uid, err := k8sutil.CreatePod(ctx, c, pod, deployment.GetNamespace(), deployment.AsOwner()) if err != nil { @@ -693,7 +693,7 @@ func (r *Resources) EnsurePods(ctx context.Context, cachedStatus inspectorInterf continue } - member, ok := cachedStatus.ArangoMember(m.ArangoMemberName(r.context.GetName(), group)) + member, ok := cachedStatus.ArangoMember().V1().GetSimple(m.ArangoMemberName(r.context.GetName(), group)) if !ok { // ArangoMember not found, skip continue diff --git a/pkg/deployment/resources/pod_creator_agent_args_test.go b/pkg/deployment/resources/pod_creator_agent_args_test.go index dab7aa1ab..73d6aeeb5 100644 --- a/pkg/deployment/resources/pod_creator_agent_args_test.go +++ b/pkg/deployment/resources/pod_creator_agent_args_test.go @@ -24,68 +24,49 @@ import ( "path/filepath" "testing" - "github.com/arangodb/kube-arangodb/pkg/deployment/resources/inspector" - inspectorInterface "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector" - "github.com/stretchr/testify/require" - core 
"k8s.io/api/core/v1" - "github.com/arangodb/kube-arangodb/pkg/deployment/pod" + "github.com/stretchr/testify/require" "github.com/stretchr/testify/assert" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "context" + api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/util" "github.com/arangodb/kube-arangodb/pkg/util/constants" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector" + "github.com/arangodb/kube-arangodb/pkg/util/kclient" + "github.com/arangodb/kube-arangodb/pkg/util/tests" + core "k8s.io/api/core/v1" ) -type inspectorMock interface { - AddService(t *testing.T, svc ...*core.Service) inspectorMock - - RegisterMemberStatus(t *testing.T, apiObject *api.ArangoDeployment, group api.ServerGroup, members ...api.MemberStatus) inspectorMock - - Get(t *testing.T) inspectorInterface.Inspector -} - -func newInspectorMock() inspectorMock { - return inspectorMockStruct{ - services: map[string]*core.Service{}, - } -} - -type inspectorMockStruct struct { - services map[string]*core.Service -} - -func (i inspectorMockStruct) RegisterMemberStatus(t *testing.T, apiObject *api.ArangoDeployment, group api.ServerGroup, members ...api.MemberStatus) inspectorMock { - var z inspectorMock = i - for _, member := range members { - memberName := member.ArangoMemberName(apiObject.GetName(), group) +func createClient(f kclient.FakeClientBuilder, apiObject *api.ArangoDeployment, group api.ServerGroup, statuses ...api.MemberStatus) kclient.FakeClientBuilder { + for _, a := range statuses { + memberName := a.ArangoMemberName(apiObject.GetName(), group) svc := core.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: memberName, + Name: memberName, + Namespace: tests.FakeNamespace, }, Spec: core.ServiceSpec{ ClusterIP: "127.0.0.1", }, } - z = z.AddService(t, &svc) - } - return z -} -func (i inspectorMockStruct) AddService(t *testing.T, svc ...*core.Service) inspectorMock { - for _, 
s := range svc { - i.services[s.GetName()] = s + f = f.Add(&svc) } - return i + return f } -func (i inspectorMockStruct) Get(t *testing.T) inspectorInterface.Inspector { - return inspector.NewInspectorFromData(nil, nil, nil, i.services, nil, nil, nil, nil, nil, nil, nil, "") +func createInspector(t *testing.T, f kclient.FakeClientBuilder) inspector.Inspector { + c := f.Client() + i := tests.NewInspector(t, c) + require.NoError(t, i.Refresh(context.Background())) + return i } // TestCreateArangodArgsAgent tests createArangodArgs for agent. @@ -96,7 +77,7 @@ func TestCreateArangodArgsAgent(t *testing.T) { apiObject := &api.ArangoDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: "name", - Namespace: "ns", + Namespace: tests.FakeNamespace, }, Spec: api.DeploymentSpec{ Mode: api.NewMode(api.DeploymentModeCluster), @@ -120,18 +101,19 @@ func TestCreateArangodArgsAgent(t *testing.T) { Member: api.MemberStatus{ID: "a1"}, } - i := newInspectorMock() - i = i.RegisterMemberStatus(t, apiObject, api.ServerGroupAgents, agents...) + f := kclient.NewFakeClientBuilder() + f = createClient(f, apiObject, api.ServerGroupAgents, agents...) + i := createInspector(t, f) - cmdline, err := createArangodArgs(i.Get(t), input) + cmdline, err := createArangodArgs(i, input) require.NoError(t, err) assert.Equal(t, []string{ "--agency.activate=true", "--agency.disaster-recovery-id=a1", - "--agency.endpoint=ssl://name-agent-a2.name-int.ns.svc:8529", - "--agency.endpoint=ssl://name-agent-a3.name-int.ns.svc:8529", - "--agency.my-address=ssl://name-agent-a1.name-int.ns.svc:8529", + "--agency.endpoint=ssl://name-agent-a2.name-int." + tests.FakeNamespace + ".svc:8529", + "--agency.endpoint=ssl://name-agent-a3.name-int." + tests.FakeNamespace + ".svc:8529", + "--agency.my-address=ssl://name-agent-a1.name-int." 
+ tests.FakeNamespace + ".svc:8529", "--agency.size=3", "--agency.supervision=true", "--database.directory=/data", @@ -155,7 +137,7 @@ func TestCreateArangodArgsAgent(t *testing.T) { apiObject := &api.ArangoDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: "name", - Namespace: "ns", + Namespace: tests.FakeNamespace, }, Spec: api.DeploymentSpec{ Mode: api.NewMode(api.DeploymentModeCluster), @@ -180,18 +162,19 @@ func TestCreateArangodArgsAgent(t *testing.T) { Member: api.MemberStatus{ID: "a1"}, } - i := newInspectorMock() - i = i.RegisterMemberStatus(t, apiObject, api.ServerGroupAgents, agents...) + f := kclient.NewFakeClientBuilder() + f = createClient(f, apiObject, api.ServerGroupAgents, agents...) + i := createInspector(t, f) - cmdline, err := createArangodArgsWithUpgrade(i.Get(t), input) + cmdline, err := createArangodArgsWithUpgrade(i, input) require.NoError(t, err) assert.Equal(t, []string{ "--agency.activate=true", "--agency.disaster-recovery-id=a1", - "--agency.endpoint=ssl://name-agent-a2.name-int.ns.svc:8529", - "--agency.endpoint=ssl://name-agent-a3.name-int.ns.svc:8529", - "--agency.my-address=ssl://name-agent-a1.name-int.ns.svc:8529", + "--agency.endpoint=ssl://name-agent-a2.name-int." + tests.FakeNamespace + ".svc:8529", + "--agency.endpoint=ssl://name-agent-a3.name-int." + tests.FakeNamespace + ".svc:8529", + "--agency.my-address=ssl://name-agent-a1.name-int." 
+ tests.FakeNamespace + ".svc:8529", "--agency.size=3", "--agency.supervision=true", "--database.auto-upgrade=true", @@ -216,7 +199,7 @@ func TestCreateArangodArgsAgent(t *testing.T) { apiObject := &api.ArangoDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: "name", - Namespace: "ns", + Namespace: tests.FakeNamespace, }, Spec: api.DeploymentSpec{ Mode: api.NewMode(api.DeploymentModeCluster), @@ -244,18 +227,19 @@ func TestCreateArangodArgsAgent(t *testing.T) { Member: api.MemberStatus{ID: "a1"}, } - i := newInspectorMock() - i = i.RegisterMemberStatus(t, apiObject, api.ServerGroupAgents, agents...) + f := kclient.NewFakeClientBuilder() + f = createClient(f, apiObject, api.ServerGroupAgents, agents...) + i := createInspector(t, f) - cmdline, err := createArangodArgs(i.Get(t), input) + cmdline, err := createArangodArgs(i, input) require.NoError(t, err) assert.Equal(t, []string{ "--agency.activate=true", "--agency.disaster-recovery-id=a1", - "--agency.endpoint=tcp://name-agent-a2.name-int.ns.svc:8529", - "--agency.endpoint=tcp://name-agent-a3.name-int.ns.svc:8529", - "--agency.my-address=tcp://name-agent-a1.name-int.ns.svc:8529", + "--agency.endpoint=tcp://name-agent-a2.name-int." + tests.FakeNamespace + ".svc:8529", + "--agency.endpoint=tcp://name-agent-a3.name-int." + tests.FakeNamespace + ".svc:8529", + "--agency.my-address=tcp://name-agent-a1.name-int." + tests.FakeNamespace + ".svc:8529", "--agency.size=3", "--agency.supervision=true", "--database.directory=/data", @@ -277,7 +261,7 @@ func TestCreateArangodArgsAgent(t *testing.T) { apiObject := &api.ArangoDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: "name", - Namespace: "ns", + Namespace: tests.FakeNamespace, }, Spec: api.DeploymentSpec{ Mode: api.NewMode(api.DeploymentModeCluster), @@ -303,18 +287,19 @@ func TestCreateArangodArgsAgent(t *testing.T) { Member: api.MemberStatus{ID: "a1"}, } - i := newInspectorMock() - i = i.RegisterMemberStatus(t, apiObject, api.ServerGroupAgents, agents...) 
+ f := kclient.NewFakeClientBuilder() + f = createClient(f, apiObject, api.ServerGroupAgents, agents...) + i := createInspector(t, f) - cmdline, err := createArangodArgs(i.Get(t), input) + cmdline, err := createArangodArgs(i, input) require.NoError(t, err) assert.Equal(t, []string{ "--agency.activate=true", "--agency.disaster-recovery-id=a1", - "--agency.endpoint=ssl://name-agent-a2.name-int.ns.svc:8529", - "--agency.endpoint=ssl://name-agent-a3.name-int.ns.svc:8529", - "--agency.my-address=ssl://name-agent-a1.name-int.ns.svc:8529", + "--agency.endpoint=ssl://name-agent-a2.name-int." + tests.FakeNamespace + ".svc:8529", + "--agency.endpoint=ssl://name-agent-a3.name-int." + tests.FakeNamespace + ".svc:8529", + "--agency.my-address=ssl://name-agent-a1.name-int." + tests.FakeNamespace + ".svc:8529", "--agency.size=3", "--agency.supervision=true", "--database.directory=/data", @@ -337,7 +322,7 @@ func TestCreateArangodArgsAgent(t *testing.T) { apiObject := &api.ArangoDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: "name", - Namespace: "ns", + Namespace: tests.FakeNamespace, }, Spec: api.DeploymentSpec{ Mode: api.NewMode(api.DeploymentModeCluster), @@ -362,18 +347,19 @@ func TestCreateArangodArgsAgent(t *testing.T) { Member: api.MemberStatus{ID: "a1"}, } - i := newInspectorMock() - i = i.RegisterMemberStatus(t, apiObject, api.ServerGroupAgents, agents...) + f := kclient.NewFakeClientBuilder() + f = createClient(f, apiObject, api.ServerGroupAgents, agents...) + i := createInspector(t, f) - cmdline, err := createArangodArgs(i.Get(t), input) + cmdline, err := createArangodArgs(i, input) require.NoError(t, err) assert.Equal(t, []string{ "--agency.activate=true", "--agency.disaster-recovery-id=a1", - "--agency.endpoint=ssl://name-agent-a2.name-int.ns.svc:8529", - "--agency.endpoint=ssl://name-agent-a3.name-int.ns.svc:8529", - "--agency.my-address=ssl://name-agent-a1.name-int.ns.svc:8529", + "--agency.endpoint=ssl://name-agent-a2.name-int." 
+ tests.FakeNamespace + ".svc:8529", + "--agency.endpoint=ssl://name-agent-a3.name-int." + tests.FakeNamespace + ".svc:8529", + "--agency.my-address=ssl://name-agent-a1.name-int." + tests.FakeNamespace + ".svc:8529", "--agency.size=3", "--agency.supervision=true", "--database.directory=/data", diff --git a/pkg/deployment/resources/pod_creator_coordinator_args_test.go b/pkg/deployment/resources/pod_creator_coordinator_args_test.go index ff7ca2297..fc8a236b3 100644 --- a/pkg/deployment/resources/pod_creator_coordinator_args_test.go +++ b/pkg/deployment/resources/pod_creator_coordinator_args_test.go @@ -35,6 +35,8 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util" "github.com/arangodb/kube-arangodb/pkg/util/constants" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" + "github.com/arangodb/kube-arangodb/pkg/util/kclient" + "github.com/arangodb/kube-arangodb/pkg/util/tests" ) // TestCreateArangodArgsCoordinator tests createArangodArgs for coordinator. @@ -45,7 +47,7 @@ func TestCreateArangodArgsCoordinator(t *testing.T) { apiObject := &api.ArangoDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: "name", - Namespace: "ns", + Namespace: tests.FakeNamespace, }, Spec: api.DeploymentSpec{ Mode: api.NewMode(api.DeploymentModeCluster), @@ -69,17 +71,19 @@ func TestCreateArangodArgsCoordinator(t *testing.T) { Member: api.MemberStatus{ID: "id1"}, } - i := newInspectorMock() - i = i.RegisterMemberStatus(t, apiObject, api.ServerGroupAgents, agents...).RegisterMemberStatus(t, apiObject, input.Group, input.Member) + f := kclient.NewFakeClientBuilder() + f = createClient(f, apiObject, api.ServerGroupAgents, agents...) 
+ f = createClient(f, apiObject, input.Group, input.Member) + i := createInspector(t, f) - cmdline, err := createArangodArgs(i.Get(t), input) + cmdline, err := createArangodArgs(i, input) require.NoError(t, err) assert.Equal(t, []string{ - "--cluster.agency-endpoint=ssl://name-agent-a1.name-int.ns.svc:8529", - "--cluster.agency-endpoint=ssl://name-agent-a2.name-int.ns.svc:8529", - "--cluster.agency-endpoint=ssl://name-agent-a3.name-int.ns.svc:8529", - "--cluster.my-address=ssl://name-coordinator-id1.name-int.ns.svc:8529", + "--cluster.agency-endpoint=ssl://name-agent-a1.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.agency-endpoint=ssl://name-agent-a2.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.agency-endpoint=ssl://name-agent-a3.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.my-address=ssl://name-coordinator-id1.name-int." + tests.FakeNamespace + ".svc:8529", "--cluster.my-role=COORDINATOR", "--database.directory=/data", "--foxx.queues=true", @@ -102,7 +106,7 @@ func TestCreateArangodArgsCoordinator(t *testing.T) { apiObject := &api.ArangoDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: "name", - Namespace: "ns", + Namespace: tests.FakeNamespace, }, Spec: api.DeploymentSpec{ Mode: api.NewMode(api.DeploymentModeCluster), @@ -126,17 +130,19 @@ func TestCreateArangodArgsCoordinator(t *testing.T) { Member: api.MemberStatus{ID: "id1"}, } - i := newInspectorMock() - i = i.RegisterMemberStatus(t, apiObject, api.ServerGroupAgents, agents...).RegisterMemberStatus(t, apiObject, input.Group, input.Member) + f := kclient.NewFakeClientBuilder() + f = createClient(f, apiObject, api.ServerGroupAgents, agents...) 
+ f = createClient(f, apiObject, input.Group, input.Member) + i := createInspector(t, f) - cmdline, err := createArangodArgsWithUpgrade(i.Get(t), input) + cmdline, err := createArangodArgsWithUpgrade(i, input) require.NoError(t, err) assert.Equal(t, []string{ - "--cluster.agency-endpoint=ssl://name-agent-a1.name-int.ns.svc:8529", - "--cluster.agency-endpoint=ssl://name-agent-a2.name-int.ns.svc:8529", - "--cluster.agency-endpoint=ssl://name-agent-a3.name-int.ns.svc:8529", - "--cluster.my-address=ssl://name-coordinator-id1.name-int.ns.svc:8529", + "--cluster.agency-endpoint=ssl://name-agent-a1.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.agency-endpoint=ssl://name-agent-a2.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.agency-endpoint=ssl://name-agent-a3.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.my-address=ssl://name-coordinator-id1.name-int." + tests.FakeNamespace + ".svc:8529", "--cluster.my-role=COORDINATOR", "--database.auto-upgrade=true", "--database.directory=/data", @@ -160,7 +166,7 @@ func TestCreateArangodArgsCoordinator(t *testing.T) { apiObject := &api.ArangoDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: "name", - Namespace: "ns", + Namespace: tests.FakeNamespace, }, Spec: api.DeploymentSpec{ Mode: api.NewMode(api.DeploymentModeCluster), @@ -184,17 +190,19 @@ func TestCreateArangodArgsCoordinator(t *testing.T) { Member: api.MemberStatus{ID: "id1"}, } - i := newInspectorMock() - i = i.RegisterMemberStatus(t, apiObject, api.ServerGroupAgents, agents...).RegisterMemberStatus(t, apiObject, input.Group, input.Member) + f := kclient.NewFakeClientBuilder() + f = createClient(f, apiObject, api.ServerGroupAgents, agents...) 
+ f = createClient(f, apiObject, input.Group, input.Member) + i := createInspector(t, f) - cmdline, err := createArangodArgsWithUpgrade(i.Get(t), input) + cmdline, err := createArangodArgsWithUpgrade(i, input) require.NoError(t, err) assert.Equal(t, []string{ - "--cluster.agency-endpoint=ssl://name-agent-a1.name-int.ns.svc:8529", - "--cluster.agency-endpoint=ssl://name-agent-a2.name-int.ns.svc:8529", - "--cluster.agency-endpoint=ssl://name-agent-a3.name-int.ns.svc:8529", - "--cluster.my-address=ssl://name-coordinator-id1.name-int.ns.svc:8529", + "--cluster.agency-endpoint=ssl://name-agent-a1.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.agency-endpoint=ssl://name-agent-a2.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.agency-endpoint=ssl://name-agent-a3.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.my-address=ssl://name-coordinator-id1.name-int." + tests.FakeNamespace + ".svc:8529", "--cluster.my-role=COORDINATOR", "--cluster.upgrade=online", "--database.directory=/data", @@ -218,7 +226,7 @@ func TestCreateArangodArgsCoordinator(t *testing.T) { apiObject := &api.ArangoDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: "name", - Namespace: "ns", + Namespace: tests.FakeNamespace, }, Spec: api.DeploymentSpec{ Mode: api.NewMode(api.DeploymentModeCluster), @@ -245,17 +253,19 @@ func TestCreateArangodArgsCoordinator(t *testing.T) { Member: api.MemberStatus{ID: "id1"}, } - i := newInspectorMock() - i = i.RegisterMemberStatus(t, apiObject, api.ServerGroupAgents, agents...).RegisterMemberStatus(t, apiObject, input.Group, input.Member) + f := kclient.NewFakeClientBuilder() + f = createClient(f, apiObject, api.ServerGroupAgents, agents...) 
+ f = createClient(f, apiObject, input.Group, input.Member) + i := createInspector(t, f) - cmdline, err := createArangodArgs(i.Get(t), input) + cmdline, err := createArangodArgs(i, input) require.NoError(t, err) assert.Equal(t, []string{ - "--cluster.agency-endpoint=tcp://name-agent-a1.name-int.ns.svc:8529", - "--cluster.agency-endpoint=tcp://name-agent-a2.name-int.ns.svc:8529", - "--cluster.agency-endpoint=tcp://name-agent-a3.name-int.ns.svc:8529", - "--cluster.my-address=tcp://name-coordinator-id1.name-int.ns.svc:8529", + "--cluster.agency-endpoint=tcp://name-agent-a1.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.agency-endpoint=tcp://name-agent-a2.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.agency-endpoint=tcp://name-agent-a3.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.my-address=tcp://name-coordinator-id1.name-int." + tests.FakeNamespace + ".svc:8529", "--cluster.my-role=COORDINATOR", "--database.directory=/data", "--foxx.queues=true", @@ -276,7 +286,7 @@ func TestCreateArangodArgsCoordinator(t *testing.T) { apiObject := &api.ArangoDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: "name", - Namespace: "ns", + Namespace: tests.FakeNamespace, }, Spec: api.DeploymentSpec{ Mode: api.NewMode(api.DeploymentModeCluster), @@ -301,17 +311,19 @@ func TestCreateArangodArgsCoordinator(t *testing.T) { Member: api.MemberStatus{ID: "id1"}, } - i := newInspectorMock() - i = i.RegisterMemberStatus(t, apiObject, api.ServerGroupAgents, agents...).RegisterMemberStatus(t, apiObject, input.Group, input.Member) + f := kclient.NewFakeClientBuilder() + f = createClient(f, apiObject, api.ServerGroupAgents, agents...) 
+ f = createClient(f, apiObject, input.Group, input.Member) + i := createInspector(t, f) - cmdline, err := createArangodArgs(i.Get(t), input) + cmdline, err := createArangodArgs(i, input) require.NoError(t, err) assert.Equal(t, []string{ - "--cluster.agency-endpoint=ssl://name-agent-a1.name-int.ns.svc:8529", - "--cluster.agency-endpoint=ssl://name-agent-a2.name-int.ns.svc:8529", - "--cluster.agency-endpoint=ssl://name-agent-a3.name-int.ns.svc:8529", - "--cluster.my-address=ssl://name-coordinator-id1.name-int.ns.svc:8529", + "--cluster.agency-endpoint=ssl://name-agent-a1.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.agency-endpoint=ssl://name-agent-a2.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.agency-endpoint=ssl://name-agent-a3.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.my-address=ssl://name-coordinator-id1.name-int." + tests.FakeNamespace + ".svc:8529", "--cluster.my-role=COORDINATOR", "--database.directory=/data", "--foxx.queues=true", @@ -333,7 +345,7 @@ func TestCreateArangodArgsCoordinator(t *testing.T) { apiObject := &api.ArangoDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: "name", - Namespace: "ns", + Namespace: tests.FakeNamespace, }, Spec: api.DeploymentSpec{ Mode: api.NewMode(api.DeploymentModeCluster), @@ -359,17 +371,19 @@ func TestCreateArangodArgsCoordinator(t *testing.T) { Member: api.MemberStatus{ID: "id1"}, } - i := newInspectorMock() - i = i.RegisterMemberStatus(t, apiObject, api.ServerGroupAgents, agents...).RegisterMemberStatus(t, apiObject, input.Group, input.Member) + f := kclient.NewFakeClientBuilder() + f = createClient(f, apiObject, api.ServerGroupAgents, agents...) 
+ f = createClient(f, apiObject, input.Group, input.Member) + i := createInspector(t, f) - cmdline, err := createArangodArgs(i.Get(t), input) + cmdline, err := createArangodArgs(i, input) require.NoError(t, err) assert.Equal(t, []string{ - "--cluster.agency-endpoint=ssl://name-agent-a1.name-int.ns.svc:8529", - "--cluster.agency-endpoint=ssl://name-agent-a2.name-int.ns.svc:8529", - "--cluster.agency-endpoint=ssl://name-agent-a3.name-int.ns.svc:8529", - "--cluster.my-address=ssl://name-coordinator-id1.name-int.ns.svc:8529", + "--cluster.agency-endpoint=ssl://name-agent-a1.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.agency-endpoint=ssl://name-agent-a2.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.agency-endpoint=ssl://name-agent-a3.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.my-address=ssl://name-coordinator-id1.name-int." + tests.FakeNamespace + ".svc:8529", "--cluster.my-role=COORDINATOR", "--database.directory=/data", "--foxx.queues=true", diff --git a/pkg/deployment/resources/pod_creator_dbserver_args_test.go b/pkg/deployment/resources/pod_creator_dbserver_args_test.go index 59756cf3c..8c41d5000 100644 --- a/pkg/deployment/resources/pod_creator_dbserver_args_test.go +++ b/pkg/deployment/resources/pod_creator_dbserver_args_test.go @@ -35,6 +35,8 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util" "github.com/arangodb/kube-arangodb/pkg/util/constants" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" + "github.com/arangodb/kube-arangodb/pkg/util/kclient" + "github.com/arangodb/kube-arangodb/pkg/util/tests" ) // TestCreateArangodArgsDBServer tests createArangodArgs for dbserver. 
@@ -45,7 +47,7 @@ func TestCreateArangodArgsDBServer(t *testing.T) { apiObject := &api.ArangoDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: "name", - Namespace: "ns", + Namespace: tests.FakeNamespace, }, Spec: api.DeploymentSpec{ Mode: api.NewMode(api.DeploymentModeCluster), @@ -69,17 +71,19 @@ func TestCreateArangodArgsDBServer(t *testing.T) { Member: api.MemberStatus{ID: "id1"}, } - i := newInspectorMock() - i = i.RegisterMemberStatus(t, apiObject, api.ServerGroupAgents, agents...).RegisterMemberStatus(t, apiObject, input.Group, input.Member) + f := kclient.NewFakeClientBuilder() + f = createClient(f, apiObject, api.ServerGroupAgents, agents...) + f = createClient(f, apiObject, input.Group, input.Member) + i := createInspector(t, f) - cmdline, err := createArangodArgs(i.Get(t), input) + cmdline, err := createArangodArgs(i, input) require.NoError(t, err) assert.Equal(t, []string{ - "--cluster.agency-endpoint=ssl://name-agent-a1.name-int.ns.svc:8529", - "--cluster.agency-endpoint=ssl://name-agent-a2.name-int.ns.svc:8529", - "--cluster.agency-endpoint=ssl://name-agent-a3.name-int.ns.svc:8529", - "--cluster.my-address=ssl://name-dbserver-id1.name-int.ns.svc:8529", + "--cluster.agency-endpoint=ssl://name-agent-a1.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.agency-endpoint=ssl://name-agent-a2.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.agency-endpoint=ssl://name-agent-a3.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.my-address=ssl://name-dbserver-id1.name-int." 
+ tests.FakeNamespace + ".svc:8529", "--cluster.my-role=PRIMARY", "--database.directory=/data", "--foxx.queues=false", @@ -102,7 +106,7 @@ func TestCreateArangodArgsDBServer(t *testing.T) { apiObject := &api.ArangoDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: "name", - Namespace: "ns", + Namespace: tests.FakeNamespace, }, Spec: api.DeploymentSpec{ Mode: api.NewMode(api.DeploymentModeCluster), @@ -126,17 +130,19 @@ func TestCreateArangodArgsDBServer(t *testing.T) { Member: api.MemberStatus{ID: "id1"}, } - i := newInspectorMock() - i = i.RegisterMemberStatus(t, apiObject, api.ServerGroupAgents, agents...).RegisterMemberStatus(t, apiObject, input.Group, input.Member) + f := kclient.NewFakeClientBuilder() + f = createClient(f, apiObject, api.ServerGroupAgents, agents...) + f = createClient(f, apiObject, input.Group, input.Member) + i := createInspector(t, f) - cmdline, err := createArangodArgsWithUpgrade(i.Get(t), input) + cmdline, err := createArangodArgsWithUpgrade(i, input) require.NoError(t, err) assert.Equal(t, []string{ - "--cluster.agency-endpoint=ssl://name-agent-a1.name-int.ns.svc:8529", - "--cluster.agency-endpoint=ssl://name-agent-a2.name-int.ns.svc:8529", - "--cluster.agency-endpoint=ssl://name-agent-a3.name-int.ns.svc:8529", - "--cluster.my-address=ssl://name-dbserver-id1.name-int.ns.svc:8529", + "--cluster.agency-endpoint=ssl://name-agent-a1.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.agency-endpoint=ssl://name-agent-a2.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.agency-endpoint=ssl://name-agent-a3.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.my-address=ssl://name-dbserver-id1.name-int." 
+ tests.FakeNamespace + ".svc:8529", "--cluster.my-role=PRIMARY", "--database.auto-upgrade=true", "--database.directory=/data", @@ -160,7 +166,7 @@ func TestCreateArangodArgsDBServer(t *testing.T) { apiObject := &api.ArangoDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: "name", - Namespace: "ns", + Namespace: tests.FakeNamespace, }, Spec: api.DeploymentSpec{ Mode: api.NewMode(api.DeploymentModeCluster), @@ -185,17 +191,19 @@ func TestCreateArangodArgsDBServer(t *testing.T) { Member: api.MemberStatus{ID: "id1"}, } - i := newInspectorMock() - i = i.RegisterMemberStatus(t, apiObject, api.ServerGroupAgents, agents...).RegisterMemberStatus(t, apiObject, input.Group, input.Member) + f := kclient.NewFakeClientBuilder() + f = createClient(f, apiObject, api.ServerGroupAgents, agents...) + f = createClient(f, apiObject, input.Group, input.Member) + i := createInspector(t, f) - cmdline, err := createArangodArgsWithUpgrade(i.Get(t), input) + cmdline, err := createArangodArgsWithUpgrade(i, input) require.NoError(t, err) assert.Equal(t, []string{ - "--cluster.agency-endpoint=ssl://name-agent-a1.name-int.ns.svc.cluster.local:8529", - "--cluster.agency-endpoint=ssl://name-agent-a2.name-int.ns.svc.cluster.local:8529", - "--cluster.agency-endpoint=ssl://name-agent-a3.name-int.ns.svc.cluster.local:8529", - "--cluster.my-address=ssl://name-dbserver-id1.name-int.ns.svc.cluster.local:8529", + "--cluster.agency-endpoint=ssl://name-agent-a1.name-int." + tests.FakeNamespace + ".svc.cluster.local:8529", + "--cluster.agency-endpoint=ssl://name-agent-a2.name-int." + tests.FakeNamespace + ".svc.cluster.local:8529", + "--cluster.agency-endpoint=ssl://name-agent-a3.name-int." + tests.FakeNamespace + ".svc.cluster.local:8529", + "--cluster.my-address=ssl://name-dbserver-id1.name-int." 
+ tests.FakeNamespace + ".svc.cluster.local:8529", "--cluster.my-role=PRIMARY", "--database.auto-upgrade=true", "--database.directory=/data", @@ -219,7 +227,7 @@ func TestCreateArangodArgsDBServer(t *testing.T) { apiObject := &api.ArangoDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: "name", - Namespace: "ns", + Namespace: tests.FakeNamespace, }, Spec: api.DeploymentSpec{ Mode: api.NewMode(api.DeploymentModeCluster), @@ -246,17 +254,19 @@ func TestCreateArangodArgsDBServer(t *testing.T) { Member: api.MemberStatus{ID: "id1"}, } - i := newInspectorMock() - i = i.RegisterMemberStatus(t, apiObject, api.ServerGroupAgents, agents...).RegisterMemberStatus(t, apiObject, input.Group, input.Member) + f := kclient.NewFakeClientBuilder() + f = createClient(f, apiObject, api.ServerGroupAgents, agents...) + f = createClient(f, apiObject, input.Group, input.Member) + i := createInspector(t, f) - cmdline, err := createArangodArgs(i.Get(t), input) + cmdline, err := createArangodArgs(i, input) require.NoError(t, err) assert.Equal(t, []string{ - "--cluster.agency-endpoint=tcp://name-agent-a1.name-int.ns.svc:8529", - "--cluster.agency-endpoint=tcp://name-agent-a2.name-int.ns.svc:8529", - "--cluster.agency-endpoint=tcp://name-agent-a3.name-int.ns.svc:8529", - "--cluster.my-address=tcp://name-dbserver-id1.name-int.ns.svc:8529", + "--cluster.agency-endpoint=tcp://name-agent-a1.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.agency-endpoint=tcp://name-agent-a2.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.agency-endpoint=tcp://name-agent-a3.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.my-address=tcp://name-dbserver-id1.name-int." 
+ tests.FakeNamespace + ".svc:8529", "--cluster.my-role=PRIMARY", "--database.directory=/data", "--foxx.queues=false", @@ -277,7 +287,7 @@ func TestCreateArangodArgsDBServer(t *testing.T) { apiObject := &api.ArangoDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: "name", - Namespace: "ns", + Namespace: tests.FakeNamespace, }, Spec: api.DeploymentSpec{ Mode: api.NewMode(api.DeploymentModeCluster), @@ -302,17 +312,19 @@ func TestCreateArangodArgsDBServer(t *testing.T) { Member: api.MemberStatus{ID: "id1"}, } - i := newInspectorMock() - i = i.RegisterMemberStatus(t, apiObject, api.ServerGroupAgents, agents...).RegisterMemberStatus(t, apiObject, input.Group, input.Member) + f := kclient.NewFakeClientBuilder() + f = createClient(f, apiObject, api.ServerGroupAgents, agents...) + f = createClient(f, apiObject, input.Group, input.Member) + i := createInspector(t, f) - cmdline, err := createArangodArgs(i.Get(t), input) + cmdline, err := createArangodArgs(i, input) require.NoError(t, err) assert.Equal(t, []string{ - "--cluster.agency-endpoint=ssl://name-agent-a1.name-int.ns.svc:8529", - "--cluster.agency-endpoint=ssl://name-agent-a2.name-int.ns.svc:8529", - "--cluster.agency-endpoint=ssl://name-agent-a3.name-int.ns.svc:8529", - "--cluster.my-address=ssl://name-dbserver-id1.name-int.ns.svc:8529", + "--cluster.agency-endpoint=ssl://name-agent-a1.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.agency-endpoint=ssl://name-agent-a2.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.agency-endpoint=ssl://name-agent-a3.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.my-address=ssl://name-dbserver-id1.name-int." 
+ tests.FakeNamespace + ".svc:8529", "--cluster.my-role=PRIMARY", "--database.directory=/data", "--foxx.queues=false", @@ -334,7 +346,7 @@ func TestCreateArangodArgsDBServer(t *testing.T) { apiObject := &api.ArangoDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: "name", - Namespace: "ns", + Namespace: tests.FakeNamespace, }, Spec: api.DeploymentSpec{ Mode: api.NewMode(api.DeploymentModeCluster), @@ -360,17 +372,19 @@ func TestCreateArangodArgsDBServer(t *testing.T) { Member: api.MemberStatus{ID: "id1"}, } - i := newInspectorMock() - i = i.RegisterMemberStatus(t, apiObject, api.ServerGroupAgents, agents...).RegisterMemberStatus(t, apiObject, input.Group, input.Member) + f := kclient.NewFakeClientBuilder() + f = createClient(f, apiObject, api.ServerGroupAgents, agents...) + f = createClient(f, apiObject, input.Group, input.Member) + i := createInspector(t, f) - cmdline, err := createArangodArgs(i.Get(t), input) + cmdline, err := createArangodArgs(i, input) require.NoError(t, err) assert.Equal(t, []string{ - "--cluster.agency-endpoint=ssl://name-agent-a1.name-int.ns.svc:8529", - "--cluster.agency-endpoint=ssl://name-agent-a2.name-int.ns.svc:8529", - "--cluster.agency-endpoint=ssl://name-agent-a3.name-int.ns.svc:8529", - "--cluster.my-address=ssl://name-dbserver-id1.name-int.ns.svc:8529", + "--cluster.agency-endpoint=ssl://name-agent-a1.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.agency-endpoint=ssl://name-agent-a2.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.agency-endpoint=ssl://name-agent-a3.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.my-address=ssl://name-dbserver-id1.name-int." 
+ tests.FakeNamespace + ".svc:8529", "--cluster.my-role=PRIMARY", "--database.directory=/data", "--foxx.queues=false", diff --git a/pkg/deployment/resources/pod_creator_single_args_test.go b/pkg/deployment/resources/pod_creator_single_args_test.go index a9f47ed53..ee8b9d184 100644 --- a/pkg/deployment/resources/pod_creator_single_args_test.go +++ b/pkg/deployment/resources/pod_creator_single_args_test.go @@ -34,6 +34,8 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util" "github.com/arangodb/kube-arangodb/pkg/util/constants" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" + "github.com/arangodb/kube-arangodb/pkg/util/kclient" + "github.com/arangodb/kube-arangodb/pkg/util/tests" "github.com/stretchr/testify/assert" ) @@ -59,9 +61,11 @@ func TestCreateArangodArgsSingle(t *testing.T) { Member: api.MemberStatus{ID: "a1"}, } - i := newInspectorMock().RegisterMemberStatus(t, apiObject, input.Group, input.Member) + f := kclient.NewFakeClientBuilder() + f = createClient(f, apiObject, input.Group, input.Member) + i := createInspector(t, f) - cmdline, err := createArangodArgs(i.Get(t), input) + cmdline, err := createArangodArgs(i, input) require.NoError(t, err) assert.Equal(t, []string{ @@ -100,9 +104,11 @@ func TestCreateArangodArgsSingle(t *testing.T) { Member: api.MemberStatus{ID: "a1"}, } - i := newInspectorMock().RegisterMemberStatus(t, apiObject, input.Group, input.Member) + f := kclient.NewFakeClientBuilder() + f = createClient(f, apiObject, input.Group, input.Member) + i := createInspector(t, f) - cmdline, err := createArangodArgsWithUpgrade(i.Get(t), input) + cmdline, err := createArangodArgsWithUpgrade(i, input) require.NoError(t, err) assert.Equal(t, []string{ @@ -145,9 +151,11 @@ func TestCreateArangodArgsSingle(t *testing.T) { Member: api.MemberStatus{ID: "a1"}, } - i := newInspectorMock().RegisterMemberStatus(t, apiObject, input.Group, input.Member) + f := kclient.NewFakeClientBuilder() + f = createClient(f, apiObject, input.Group, input.Member) + i := 
createInspector(t, f) - cmdline, err := createArangodArgs(i.Get(t), input) + cmdline, err := createArangodArgs(i, input) require.NoError(t, err) assert.Equal(t, []string{ @@ -185,9 +193,11 @@ func TestCreateArangodArgsSingle(t *testing.T) { Member: api.MemberStatus{ID: "a1"}, } - i := newInspectorMock().RegisterMemberStatus(t, apiObject, input.Group, input.Member) + f := kclient.NewFakeClientBuilder() + f = createClient(f, apiObject, input.Group, input.Member) + i := createInspector(t, f) - cmdline, err := createArangodArgs(i.Get(t), input) + cmdline, err := createArangodArgs(i, input) require.NoError(t, err) assert.Equal(t, []string{ @@ -227,9 +237,11 @@ func TestCreateArangodArgsSingle(t *testing.T) { Member: api.MemberStatus{ID: "a1"}, } - i := newInspectorMock().RegisterMemberStatus(t, apiObject, input.Group, input.Member) + f := kclient.NewFakeClientBuilder() + f = createClient(f, apiObject, input.Group, input.Member) + i := createInspector(t, f) - cmdline, err := createArangodArgs(i.Get(t), input) + cmdline, err := createArangodArgs(i, input) require.NoError(t, err) assert.Equal(t, []string{ @@ -268,9 +280,11 @@ func TestCreateArangodArgsSingle(t *testing.T) { Member: api.MemberStatus{ID: "a1"}, } - i := newInspectorMock().RegisterMemberStatus(t, apiObject, input.Group, input.Member) + f := kclient.NewFakeClientBuilder() + f = createClient(f, apiObject, input.Group, input.Member) + i := createInspector(t, f) - cmdline, err := createArangodArgs(i.Get(t), input) + cmdline, err := createArangodArgs(i, input) require.NoError(t, err) assert.Equal(t, []string{ @@ -297,7 +311,7 @@ func TestCreateArangodArgsSingle(t *testing.T) { apiObject := &api.ArangoDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: "name", - Namespace: "ns", + Namespace: tests.FakeNamespace, }, Spec: api.DeploymentSpec{ Mode: api.NewMode(api.DeploymentModeActiveFailover), @@ -321,17 +335,19 @@ func TestCreateArangodArgsSingle(t *testing.T) { Member: api.MemberStatus{ID: "id1"}, } - i := 
newInspectorMock().RegisterMemberStatus(t, apiObject, input.Group, input.Member) - i = i.RegisterMemberStatus(t, apiObject, api.ServerGroupAgents, agents...) + f := kclient.NewFakeClientBuilder() + f = createClient(f, apiObject, api.ServerGroupAgents, agents...) + f = createClient(f, apiObject, input.Group, input.Member) + i := createInspector(t, f) - cmdline, err := createArangodArgs(i.Get(t), input) + cmdline, err := createArangodArgs(i, input) require.NoError(t, err) assert.Equal(t, []string{ - "--cluster.agency-endpoint=ssl://name-agent-a1.name-int.ns.svc:8529", - "--cluster.agency-endpoint=ssl://name-agent-a2.name-int.ns.svc:8529", - "--cluster.agency-endpoint=ssl://name-agent-a3.name-int.ns.svc:8529", - "--cluster.my-address=ssl://name-single-id1.name-int.ns.svc:8529", + "--cluster.agency-endpoint=ssl://name-agent-a1.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.agency-endpoint=ssl://name-agent-a2.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.agency-endpoint=ssl://name-agent-a3.name-int." + tests.FakeNamespace + ".svc:8529", + "--cluster.my-address=ssl://name-single-id1.name-int." 
+ tests.FakeNamespace + ".svc:8529", "--cluster.my-role=SINGLE", "--database.directory=/data", "--foxx.queues=true", diff --git a/pkg/deployment/resources/pod_creator_sync.go b/pkg/deployment/resources/pod_creator_sync.go index e74e254da..7ea16a16a 100644 --- a/pkg/deployment/resources/pod_creator_sync.go +++ b/pkg/deployment/resources/pod_creator_sync.go @@ -319,7 +319,7 @@ func (m *MemberSyncPod) Init(ctx context.Context, cachedStatus interfaces.Inspec m.masterJWTSecretName = m.spec.Sync.Authentication.GetJWTSecretName() err := globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { - return k8sutil.ValidateTokenSecret(ctxChild, cachedStatus.SecretReadInterface(), m.masterJWTSecretName) + return k8sutil.ValidateTokenSecret(ctxChild, cachedStatus.Secret().V1().Read(), m.masterJWTSecretName) }) if err != nil { return errors.Wrapf(err, "Master JWT secret validation failed") @@ -327,7 +327,7 @@ func (m *MemberSyncPod) Init(ctx context.Context, cachedStatus interfaces.Inspec monitoringTokenSecretName := m.spec.Sync.Monitoring.GetTokenSecretName() err = globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { - return k8sutil.ValidateTokenSecret(ctxChild, cachedStatus.SecretReadInterface(), monitoringTokenSecretName) + return k8sutil.ValidateTokenSecret(ctxChild, cachedStatus.Secret().V1().Read(), monitoringTokenSecretName) }) if err != nil { return errors.Wrapf(err, "Monitoring token secret validation failed") @@ -340,7 +340,7 @@ func (m *MemberSyncPod) Init(ctx context.Context, cachedStatus interfaces.Inspec if m.spec.IsAuthenticated() { m.clusterJWTSecretName = m.spec.Authentication.GetJWTSecretName() err = globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { - return k8sutil.ValidateTokenSecret(ctxChild, cachedStatus.SecretReadInterface(), m.clusterJWTSecretName) + return k8sutil.ValidateTokenSecret(ctxChild, 
cachedStatus.Secret().V1().Read(), m.clusterJWTSecretName) }) if err != nil { return errors.Wrapf(err, "Cluster JWT secret validation failed") @@ -349,7 +349,7 @@ func (m *MemberSyncPod) Init(ctx context.Context, cachedStatus interfaces.Inspec // Check client-auth CA certificate secret m.clientAuthCASecretName = m.spec.Sync.Authentication.GetClientCASecretName() err = globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { - return k8sutil.ValidateCACertificateSecret(ctxChild, cachedStatus.SecretReadInterface(), m.clientAuthCASecretName) + return k8sutil.ValidateCACertificateSecret(ctxChild, cachedStatus.Secret().V1().Read(), m.clientAuthCASecretName) }) if err != nil { return errors.Wrapf(err, "Client authentication CA certificate secret validation failed") diff --git a/pkg/deployment/resources/pod_inspector.go b/pkg/deployment/resources/pod_inspector.go index 16443a221..26ee0e6f8 100644 --- a/pkg/deployment/resources/pod_inspector.go +++ b/pkg/deployment/resources/pod_inspector.go @@ -30,8 +30,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/errors" inspectorInterface "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector" - "github.com/arangodb/kube-arangodb/pkg/deployment/resources/inspector" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -41,6 +39,7 @@ import ( "github.com/arangodb/kube-arangodb/pkg/metrics" "github.com/arangodb/kube-arangodb/pkg/util" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" + podv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod/v1" ) var ( @@ -70,7 +69,7 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter var podNamesWithScheduleTimeout []string var unscheduledPodNames []string - err := cachedStatus.IteratePods(func(pod *v1.Pod) error { + err := cachedStatus.Pod().V1().Iterate(func(pod *v1.Pod) error { if k8sutil.IsArangoDBImageIDAndVersionPod(pod) { // Image ID pods are not relevant to 
inspect here return nil @@ -239,9 +238,9 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter log.Debug().Str("pod-name", pod.GetName()).Msg("Updating member condition Ready, Started & Serving to true") if status.Topology.IsTopologyOwned(memberStatus.Topology) { - nodes, ok := cachedStatus.GetNodes() - if ok { - node, ok := nodes.Node(pod.Spec.NodeName) + nodes, err := cachedStatus.Node().V1() + if err == nil { + node, ok := nodes.GetSimple(pod.Spec.NodeName) if ok { label, ok := node.Labels[status.Topology.Label] if ok { @@ -306,7 +305,7 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter } return nil - }, inspector.FilterPodsByLabels(k8sutil.LabelsForDeployment(deploymentName, ""))) + }, podv1.FilterPodsByLabels(k8sutil.LabelsForDeployment(deploymentName, ""))) if err != nil { return 0, err } @@ -315,7 +314,7 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter status.Members.ForeachServerGroup(func(group api.ServerGroup, members api.MemberStatusList) error { for _, m := range members { if podName := m.PodName; podName != "" { - if _, exists := cachedStatus.Pod(podName); !exists { + if _, exists := cachedStatus.Pod().V1().GetSimple(podName); !exists { log.Debug().Str("pod-name", podName).Msg("Does not exist") switch m.Phase { case api.MemberPhaseNone, api.MemberPhasePending: diff --git a/pkg/deployment/resources/pod_termination.go b/pkg/deployment/resources/pod_termination.go index b5ddaca8e..e57d1eac2 100644 --- a/pkg/deployment/resources/pod_termination.go +++ b/pkg/deployment/resources/pod_termination.go @@ -57,9 +57,9 @@ func (r *Resources) prepareAgencyPodTermination(ctx context.Context, log zerolog // Check node the pod is scheduled on. 
Only if not in namespaced scope agentDataWillBeGone := false - if nodes, ok := r.context.GetCachedStatus().GetNodes(); ok { + if nodes, err := r.context.GetCachedStatus().Node().V1(); err == nil { if !r.context.GetScope().IsNamespaced() && p.Spec.NodeName != "" { - node, ok := nodes.Node(p.Spec.NodeName) + node, ok := nodes.GetSimple(p.Spec.NodeName) if !ok { log.Warn().Msg("Node not found") } else if node.Spec.Unschedulable { @@ -71,7 +71,7 @@ func (r *Resources) prepareAgencyPodTermination(ctx context.Context, log zerolog // Check PVC ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) defer cancel() - pvc, err := r.context.GetCachedStatus().PersistentVolumeClaimReadInterface().Get(ctxChild, memberStatus.PersistentVolumeClaimName, metav1.GetOptions{}) + pvc, err := r.context.GetCachedStatus().PersistentVolumeClaim().V1().Read().Get(ctxChild, memberStatus.PersistentVolumeClaimName, metav1.GetOptions{}) if err != nil { log.Warn().Err(err).Msg("Failed to get PVC for member") return errors.WithStack(err) @@ -142,8 +142,8 @@ func (r *Resources) prepareDBServerPodTermination(ctx context.Context, log zerol // Check node the pod is scheduled on dbserverDataWillBeGone := false - if nodes, ok := r.context.GetCachedStatus().GetNodes(); ok { - node, ok := nodes.Node(p.Spec.NodeName) + if nodes, err := r.context.GetCachedStatus().Node().V1(); err == nil { + node, ok := nodes.GetSimple(p.Spec.NodeName) if !ok { log.Warn().Msg("Node not found") } else if node.Spec.Unschedulable { @@ -156,7 +156,7 @@ func (r *Resources) prepareDBServerPodTermination(ctx context.Context, log zerol // Check PVC ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) defer cancel() - pvc, err := r.context.GetCachedStatus().PersistentVolumeClaimReadInterface().Get(ctxChild, memberStatus.PersistentVolumeClaimName, metav1.GetOptions{}) + pvc, err := r.context.GetCachedStatus().PersistentVolumeClaim().V1().Read().Get(ctxChild, 
memberStatus.PersistentVolumeClaimName, metav1.GetOptions{}) if err != nil { log.Warn().Err(err).Msg("Failed to get PVC for member") return errors.WithStack(err) diff --git a/pkg/deployment/resources/pvc_inspector.go b/pkg/deployment/resources/pvc_inspector.go index a35a718fa..8d89d8625 100644 --- a/pkg/deployment/resources/pvc_inspector.go +++ b/pkg/deployment/resources/pvc_inspector.go @@ -27,12 +27,12 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/errors" inspectorInterface "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector" - "github.com/arangodb/kube-arangodb/pkg/deployment/resources/inspector" v1 "k8s.io/api/core/v1" "github.com/arangodb/kube-arangodb/pkg/metrics" "github.com/arangodb/kube-arangodb/pkg/util" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" + pvcv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/persistentvolumeclaim/v1" ) var ( @@ -55,7 +55,7 @@ func (r *Resources) InspectPVCs(ctx context.Context, cachedStatus inspectorInter // Update member status from all pods found status, _ := r.context.GetStatus() - if err := cachedStatus.IteratePersistentVolumeClaims(func(pvc *v1.PersistentVolumeClaim) error { + if err := cachedStatus.PersistentVolumeClaim().V1().Iterate(func(pvc *v1.PersistentVolumeClaim) error { // PVC belongs to this deployment, update metric inspectedPVCsCounters.WithLabelValues(deploymentName).Inc() @@ -87,7 +87,7 @@ func (r *Resources) InspectPVCs(ctx context.Context, cachedStatus inspectorInter } return nil - }, inspector.FilterPersistentVolumeClaimsByLabels(k8sutil.LabelsForDeployment(deploymentName, ""))); err != nil { + }, pvcv1.FilterPersistentVolumeClaimsByLabels(k8sutil.LabelsForDeployment(deploymentName, ""))); err != nil { return 0, err } diff --git a/pkg/deployment/resources/pvcs.go b/pkg/deployment/resources/pvcs.go index f5f3f7a75..3ba8e1447 100644 --- a/pkg/deployment/resources/pvcs.go +++ b/pkg/deployment/resources/pvcs.go @@ -52,7 +52,7 @@ func (r *Resources) EnsurePVCs(ctx 
context.Context, cachedStatus inspectorInterf continue } - if _, exists := cachedStatus.PersistentVolumeClaim(m.PersistentVolumeClaimName); exists { + if _, exists := cachedStatus.PersistentVolumeClaim().V1().GetSimple(m.PersistentVolumeClaimName); exists { continue } diff --git a/pkg/deployment/resources/secret_hashes.go b/pkg/deployment/resources/secret_hashes.go index 3a777ac73..4f53b3609 100644 --- a/pkg/deployment/resources/secret_hashes.go +++ b/pkg/deployment/resources/secret_hashes.go @@ -147,7 +147,7 @@ func (r *Resources) ValidateSecretHashes(ctx context.Context, cachedStatus inspe badSecretNames = append(badSecretNames, secretName) } } else { - if _, exists := cachedStatus.Secret(pod.JWTSecretFolder(deploymentName)); !exists { + if _, exists := cachedStatus.Secret().V1().GetSimple(pod.JWTSecretFolder(deploymentName)); !exists { secretName := spec.Authentication.GetJWTSecretName() getExpectedHash := func() string { return getHashes().AuthJWT } setExpectedHash := func(h string) error { @@ -174,7 +174,7 @@ func (r *Resources) ValidateSecretHashes(ctx context.Context, cachedStatus inspe badSecretNames = append(badSecretNames, secretName) } } else { - if _, exists := cachedStatus.Secret(pod.GetEncryptionFolderSecretName(deploymentName)); !exists { + if _, exists := cachedStatus.Secret().V1().GetSimple(pod.GetEncryptionFolderSecretName(deploymentName)); !exists { secretName := spec.RocksDB.Encryption.GetKeySecretName() getExpectedHash := func() string { return getHashes().RocksDBEncryptionKey } setExpectedHash := func(h string) error { @@ -231,7 +231,7 @@ func (r *Resources) ValidateSecretHashes(ctx context.Context, cachedStatus inspe // getSecretHash fetches a secret with given name and returns a hash over its value. 
func (r *Resources) getSecretHash(cachedStatus inspectorInterface.Inspector, secretName string) (*core.Secret, string, bool) { - s, exists := cachedStatus.Secret(secretName) + s, exists := cachedStatus.Secret().V1().GetSimple(secretName) if !exists { return nil, "", false } diff --git a/pkg/deployment/resources/secrets.go b/pkg/deployment/resources/secrets.go index 0f227c710..847a34394 100644 --- a/pkg/deployment/resources/secrets.go +++ b/pkg/deployment/resources/secrets.go @@ -30,8 +30,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/globals" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/tls" "github.com/arangodb/kube-arangodb/pkg/util/errors" @@ -57,6 +55,7 @@ import ( "github.com/arangodb/kube-arangodb/pkg/metrics" "github.com/arangodb/kube-arangodb/pkg/util/constants" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" + secretv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret/v1" jg "github.com/golang-jwt/jwt" "k8s.io/apimachinery/pkg/api/equality" ) @@ -142,18 +141,18 @@ func (r *Resources) EnsureSecrets(ctx context.Context, log zerolog.Logger, cache memberName := members[id].Member.ArangoMemberName(r.context.GetAPIObject().GetName(), members[id].Group) - member, ok := cachedStatus.ArangoMember(memberName) + member, ok := cachedStatus.ArangoMember().V1().GetSimple(memberName) if !ok { return errors.Newf("Member %s not found", memberName) } - service, ok := cachedStatus.Service(memberName) + service, ok := cachedStatus.Service().V1().GetSimple(memberName) if !ok { return errors.Newf("Service of member %s not found", memberName) } tlsKeyfileSecretName := k8sutil.AppendTLSKeyfileSecretPostfix(member.GetName()) - if _, exists := cachedStatus.Secret(tlsKeyfileSecretName); !exists { + if _, exists := cachedStatus.Secret().V1().GetSimple(tlsKeyfileSecretName); !exists { serverNames, err := tls.GetServerAltNames(apiObject, spec, spec.TLS, service, 
members[id].Group, members[id].Member) if err != nil { return errors.WithStack(errors.Wrapf(err, "Failed to render alt names")) @@ -198,10 +197,10 @@ func (r *Resources) EnsureSecrets(ctx context.Context, log zerolog.Logger, cache return reconcileRequired.Reconcile(ctx) } -func (r *Resources) ensureTokenSecretFolder(ctx context.Context, cachedStatus inspectorInterface.Inspector, secrets secret.ModInterface, secretName, folderSecretName string) error { - if f, exists := cachedStatus.Secret(folderSecretName); exists { +func (r *Resources) ensureTokenSecretFolder(ctx context.Context, cachedStatus inspectorInterface.Inspector, secrets secretv1.ModInterface, secretName, folderSecretName string) error { + if f, exists := cachedStatus.Secret().V1().GetSimple(folderSecretName); exists { if len(f.Data) == 0 { - s, exists := cachedStatus.Secret(secretName) + s, exists := cachedStatus.Secret().V1().GetSimple(secretName) if !exists { return errors.Newf("Token secret does not exist") } @@ -275,7 +274,7 @@ func (r *Resources) ensureTokenSecretFolder(ctx context.Context, cachedStatus in return nil } - s, exists := cachedStatus.Secret(secretName) + s, exists := cachedStatus.Secret().V1().GetSimple(secretName) if !exists { return errors.Newf("Token secret does not exist") } @@ -296,23 +295,23 @@ func (r *Resources) ensureTokenSecretFolder(ctx context.Context, cachedStatus in return nil } -func (r *Resources) ensureTokenSecret(ctx context.Context, cachedStatus inspectorInterface.Inspector, secrets secret.ModInterface, secretName string) error { - if _, exists := cachedStatus.Secret(secretName); !exists { +func (r *Resources) ensureTokenSecret(ctx context.Context, cachedStatus inspectorInterface.Inspector, secrets secretv1.ModInterface, secretName string) error { + if _, exists := cachedStatus.Secret().V1().GetSimple(secretName); !exists { return r.createTokenSecret(ctx, secrets, secretName) } return nil } -func (r *Resources) ensureSecretWithEmptyKey(ctx context.Context, 
cachedStatus inspectorInterface.Inspector, secrets secret.ModInterface, secretName, keyName string) error { - if _, exists := cachedStatus.Secret(secretName); !exists { +func (r *Resources) ensureSecretWithEmptyKey(ctx context.Context, cachedStatus inspectorInterface.Inspector, secrets secretv1.ModInterface, secretName, keyName string) error { + if _, exists := cachedStatus.Secret().V1().GetSimple(secretName); !exists { return r.createSecretWithKey(ctx, secrets, secretName, keyName, nil) } return nil } -func (r *Resources) createSecretWithMod(ctx context.Context, secrets secret.ModInterface, secretName string, f func(s *core.Secret)) error { +func (r *Resources) createSecretWithMod(ctx context.Context, secrets secretv1.ModInterface, secretName string, f func(s *core.Secret)) error { // Create secret secret := &core.Secret{ ObjectMeta: meta.ObjectMeta{ @@ -338,13 +337,13 @@ func (r *Resources) createSecretWithMod(ctx context.Context, secrets secret.ModI return operatorErrors.Reconcile() } -func (r *Resources) createSecretWithKey(ctx context.Context, secrets secret.ModInterface, secretName, keyName string, value []byte) error { +func (r *Resources) createSecretWithKey(ctx context.Context, secrets secretv1.ModInterface, secretName, keyName string, value []byte) error { return r.createSecretWithMod(ctx, secrets, secretName, func(s *core.Secret) { s.Data[keyName] = value }) } -func (r *Resources) createTokenSecret(ctx context.Context, secrets secret.ModInterface, secretName string) error { +func (r *Resources) createTokenSecret(ctx context.Context, secrets secretv1.ModInterface, secretName string) error { tokenData := make([]byte, 32) rand.Read(tokenData) token := hex.EncodeToString(tokenData) @@ -365,10 +364,10 @@ func (r *Resources) createTokenSecret(ctx context.Context, secrets secret.ModInt return operatorErrors.Reconcile() } -func (r *Resources) ensureEncryptionKeyfolderSecret(ctx context.Context, cachedStatus inspectorInterface.Inspector, secrets 
secret.ModInterface, keyfileSecretName, secretName string) error { - _, folderExists := cachedStatus.Secret(secretName) +func (r *Resources) ensureEncryptionKeyfolderSecret(ctx context.Context, cachedStatus inspectorInterface.Inspector, secrets secretv1.ModInterface, keyfileSecretName, secretName string) error { + _, folderExists := cachedStatus.Secret().V1().GetSimple(secretName) - keyfile, exists := cachedStatus.Secret(keyfileSecretName) + keyfile, exists := cachedStatus.Secret().V1().GetSimple(keyfileSecretName) if !exists { if folderExists { return nil @@ -402,9 +401,9 @@ func (r *Resources) ensureEncryptionKeyfolderSecret(ctx context.Context, cachedS } func AppendKeyfileToKeyfolder(ctx context.Context, cachedStatus inspectorInterface.Inspector, - secrets secret.ModInterface, ownerRef *meta.OwnerReference, secretName string, encryptionKey []byte) error { + secrets secretv1.ModInterface, ownerRef *meta.OwnerReference, secretName string, encryptionKey []byte) error { encSha := fmt.Sprintf("%0x", sha256.Sum256(encryptionKey)) - if _, exists := cachedStatus.Secret(secretName); !exists { + if _, exists := cachedStatus.Secret().V1().GetSimple(secretName); !exists { // Create secret secret := &core.Secret{ @@ -441,14 +440,14 @@ var ( // ensureExporterTokenSecret checks if a secret with given name exists in the namespace // of the deployment. If not, it will add such a secret with correct access. 
func (r *Resources) ensureExporterTokenSecret(ctx context.Context, cachedStatus inspectorInterface.Inspector, - secrets secret.ModInterface, tokenSecretName, secretSecretName string) error { + secrets secretv1.ModInterface, tokenSecretName, secretSecretName string) error { if update, exists, err := r.ensureExporterTokenSecretCreateRequired(cachedStatus, tokenSecretName, secretSecretName); err != nil { return err } else if update { // Create secret if !exists { owner := r.context.GetAPIObject().AsOwner() - err = k8sutil.CreateJWTFromSecret(ctx, cachedStatus.SecretReadInterface(), secrets, tokenSecretName, secretSecretName, exporterTokenClaims, &owner) + err = k8sutil.CreateJWTFromSecret(ctx, cachedStatus.Secret().V1().Read(), secrets, tokenSecretName, secretSecretName, exporterTokenClaims, &owner) if k8sutil.IsAlreadyExists(err) { // Secret added while we tried it also return nil @@ -464,7 +463,7 @@ func (r *Resources) ensureExporterTokenSecret(ctx context.Context, cachedStatus } func (r *Resources) ensureExporterTokenSecretCreateRequired(cachedStatus inspectorInterface.Inspector, tokenSecretName, secretSecretName string) (bool, bool, error) { - if secret, exists := cachedStatus.Secret(tokenSecretName); !exists { + if secret, exists := cachedStatus.Secret().V1().GetSimple(tokenSecretName); !exists { return true, false, nil } else { // Check if claims are fine @@ -473,7 +472,7 @@ func (r *Resources) ensureExporterTokenSecretCreateRequired(cachedStatus inspect return true, true, nil } - jwtSecret, exists := cachedStatus.Secret(secretSecretName) + jwtSecret, exists := cachedStatus.Secret().V1().GetSimple(secretSecretName) if !exists { return true, true, errors.Newf("Secret %s does not exists", secretSecretName) } @@ -502,8 +501,8 @@ func (r *Resources) ensureExporterTokenSecretCreateRequired(cachedStatus inspect // ensureTLSCACertificateSecret checks if a secret with given name exists in the namespace // of the deployment. 
If not, it will add such a secret with a generated CA certificate. -func (r *Resources) ensureTLSCACertificateSecret(ctx context.Context, cachedStatus inspectorInterface.Inspector, secrets secret.ModInterface, spec api.TLSSpec) error { - if _, exists := cachedStatus.Secret(spec.GetCASecretName()); !exists { +func (r *Resources) ensureTLSCACertificateSecret(ctx context.Context, cachedStatus inspectorInterface.Inspector, secrets secretv1.ModInterface, spec api.TLSSpec) error { + if _, exists := cachedStatus.Secret().V1().GetSimple(spec.GetCASecretName()); !exists { // Secret not found, create it apiObject := r.context.GetAPIObject() owner := apiObject.AsOwner() @@ -526,8 +525,8 @@ func (r *Resources) ensureTLSCACertificateSecret(ctx context.Context, cachedStat // ensureClientAuthCACertificateSecret checks if a secret with given name exists in the namespace // of the deployment. If not, it will add such a secret with a generated CA certificate. -func (r *Resources) ensureClientAuthCACertificateSecret(ctx context.Context, cachedStatus inspectorInterface.Inspector, secrets secret.ModInterface, spec api.SyncAuthenticationSpec) error { - if _, exists := cachedStatus.Secret(spec.GetClientCASecretName()); !exists { +func (r *Resources) ensureClientAuthCACertificateSecret(ctx context.Context, cachedStatus inspectorInterface.Inspector, secrets secretv1.ModInterface, spec api.SyncAuthenticationSpec) error { + if _, exists := cachedStatus.Secret().V1().GetSimple(spec.GetClientCASecretName()); !exists { // Secret not found, create it apiObject := r.context.GetAPIObject() owner := apiObject.AsOwner() @@ -554,7 +553,7 @@ func (r *Resources) getJWTSecret(spec api.DeploymentSpec) (string, error) { return "", nil } secretName := spec.Authentication.GetJWTSecretName() - s, err := k8sutil.GetTokenSecret(context.Background(), r.context.GetCachedStatus().SecretReadInterface(), secretName) + s, err := k8sutil.GetTokenSecret(context.Background(), 
r.context.GetCachedStatus().Secret().V1().Read(), secretName) if err != nil { r.log.Debug().Err(err).Str("secret-name", secretName).Msg("Failed to get JWT secret") return "", errors.WithStack(err) @@ -565,7 +564,7 @@ func (r *Resources) getJWTSecret(spec api.DeploymentSpec) (string, error) { // getSyncJWTSecret loads the JWT secret used for syncmasters from a Secret configured in apiObject.Spec.Sync.Authentication.JWTSecretName. func (r *Resources) getSyncJWTSecret(spec api.DeploymentSpec) (string, error) { secretName := spec.Sync.Authentication.GetJWTSecretName() - s, err := k8sutil.GetTokenSecret(context.Background(), r.context.GetCachedStatus().SecretReadInterface(), secretName) + s, err := k8sutil.GetTokenSecret(context.Background(), r.context.GetCachedStatus().Secret().V1().Read(), secretName) if err != nil { r.log.Debug().Err(err).Str("secret-name", secretName).Msg("Failed to get sync JWT secret") return "", errors.WithStack(err) @@ -576,7 +575,7 @@ func (r *Resources) getSyncJWTSecret(spec api.DeploymentSpec) (string, error) { // getSyncMonitoringToken loads the token secret used for monitoring sync masters & workers. 
func (r *Resources) getSyncMonitoringToken(spec api.DeploymentSpec) (string, error) { secretName := spec.Sync.Monitoring.GetTokenSecretName() - s, err := k8sutil.GetTokenSecret(context.Background(), r.context.GetCachedStatus().SecretReadInterface(), secretName) + s, err := k8sutil.GetTokenSecret(context.Background(), r.context.GetCachedStatus().Secret().V1().Read(), secretName) if err != nil { r.log.Debug().Err(err).Str("secret-name", secretName).Msg("Failed to get sync monitoring secret") return "", errors.WithStack(err) diff --git a/pkg/deployment/resources/services.go b/pkg/deployment/resources/services.go index e67d363f0..ed4424f24 100644 --- a/pkg/deployment/resources/services.go +++ b/pkg/deployment/resources/services.go @@ -27,8 +27,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/globals" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/service" - "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/util/intstr" @@ -41,6 +39,7 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/metrics" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" + servicev1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/service/v1" "github.com/rs/zerolog" ) @@ -71,12 +70,12 @@ func (r *Resources) EnsureServices(ctx context.Context, cachedStatus inspectorIn for _, m := range list { memberName := m.ArangoMemberName(r.context.GetAPIObject().GetName(), group) - member, ok := cachedStatus.ArangoMember(memberName) + member, ok := cachedStatus.ArangoMember().V1().GetSimple(memberName) if !ok { return errors.Newf("Member %s not found", memberName) } - if s, ok := cachedStatus.Service(member.GetName()); !ok { + if s, ok := cachedStatus.Service().V1().GetSimple(member.GetName()); !ok { s = &core.Service{ ObjectMeta: metav1.ObjectMeta{ Name: member.GetName(), @@ -151,7 +150,7 @@ func (r *Resources) EnsureServices(ctx context.Context, cachedStatus inspectorIn // Headless service 
counterMetric.Inc() - if _, exists := cachedStatus.Service(k8sutil.CreateHeadlessServiceName(deploymentName)); !exists { + if _, exists := cachedStatus.Service().V1().GetSimple(k8sutil.CreateHeadlessServiceName(deploymentName)); !exists { ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) defer cancel() svcName, newlyCreated, err := k8sutil.CreateHeadlessService(ctxChild, svcs, apiObject, owner) @@ -167,7 +166,7 @@ func (r *Resources) EnsureServices(ctx context.Context, cachedStatus inspectorIn // Internal database client service single := spec.GetMode().HasSingleServers() counterMetric.Inc() - if _, exists := cachedStatus.Service(k8sutil.CreateDatabaseClientServiceName(deploymentName)); !exists { + if _, exists := cachedStatus.Service().V1().GetSimple(k8sutil.CreateDatabaseClientServiceName(deploymentName)); !exists { ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) defer cancel() svcName, newlyCreated, err := k8sutil.CreateDatabaseClientService(ctxChild, svcs, apiObject, single, owner) @@ -238,13 +237,13 @@ func (r *Resources) EnsureServices(ctx context.Context, cachedStatus inspectorIn // EnsureServices creates all services needed to service the deployment func (r *Resources) ensureExternalAccessServices(ctx context.Context, cachedStatus inspectorInterface.Inspector, - svcs service.ModInterface, eaServiceName, svcRole, title string, port int, noneIsClusterIP bool, + svcs servicev1.ModInterface, eaServiceName, svcRole, title string, port int, noneIsClusterIP bool, spec api.ExternalAccessSpec, apiObject k8sutil.APIObject, log zerolog.Logger) error { // Database external access service createExternalAccessService := false deleteExternalAccessService := false eaServiceType := spec.GetType().AsServiceType() // Note: Type auto defaults to ServiceTypeLoadBalancer - if existing, exists := cachedStatus.Service(eaServiceName); exists { + if existing, exists := cachedStatus.Service().V1().GetSimple(eaServiceName); 
exists { // External access service exists updateExternalAccessService := false loadBalancerIP := spec.GetLoadBalancerIP() diff --git a/pkg/deployment/server_api.go b/pkg/deployment/server_api.go index 44dd02e76..5ffa8de12 100644 --- a/pkg/deployment/server_api.go +++ b/pkg/deployment/server_api.go @@ -24,9 +24,7 @@ import ( "context" "sort" - "github.com/arangodb/kube-arangodb/pkg/util/errors" - - v1 "k8s.io/api/core/v1" + core "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" @@ -153,7 +151,7 @@ func (d *Deployment) ReadyVolumeCount() int { // Find status for _, pvc := range pvcs { if pvc.Name == m.PersistentVolumeClaimName { - if pvc.Status.Phase == v1.ClaimBound { + if pvc.Status.Phase == core.ClaimBound { count++ } } @@ -190,8 +188,7 @@ func (d *Deployment) StorageClasses() []string { // Empty string means that the database is not reachable outside the Kubernetes cluster. func (d *Deployment) DatabaseURL() string { eaSvcName := k8sutil.CreateDatabaseExternalAccessServiceName(d.Name()) - ns := d.apiObject.Namespace - svc, err := d.deps.Client.Kubernetes().CoreV1().Services(ns).Get(context.Background(), eaSvcName, metav1.GetOptions{}) + svc, err := d.currentState.Service().V1().Read().Get(context.Background(), eaSvcName, metav1.GetOptions{}) if err != nil { return "" } @@ -199,14 +196,14 @@ func (d *Deployment) DatabaseURL() string { if !d.GetSpec().IsSecure() { scheme = "http" } - nodeFetcher := func() (v1.NodeList, error) { - result, err := d.deps.Client.Kubernetes().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) - if err != nil { - return v1.NodeList{}, errors.WithStack(err) + nodeFetcher := func() ([]*core.Node, error) { + if n, err := d.currentState.Node().V1(); err != nil { + return nil, nil + } else { + return n.ListSimple(), nil } - return *result, nil } - portPredicate := func(p v1.ServicePort) bool { + portPredicate := func(p core.ServicePort) bool { 
return p.TargetPort.IntValue() == k8sutil.ArangoPort } url, err := k8sutil.CreateServiceURL(*svc, scheme, portPredicate, nodeFetcher) diff --git a/pkg/handlers/clustersync/handler.go b/pkg/handlers/clustersync/handler.go index b41033349..b359ffe1a 100644 --- a/pkg/handlers/clustersync/handler.go +++ b/pkg/handlers/clustersync/handler.go @@ -59,13 +59,13 @@ func (h *handler) Handle(item operation.Item) error { if k8sutil.IsNotFound(err) { return nil } - h.operator.GetLogger().Error().Msgf("ArangoClusterSynchronizations fetch error %v", err) + h.operator.GetLogger().Error().Msgf("ArangoClusterSynchronizations fetch error %v", err) return err } // Update status on object if _, err = h.client.DatabaseV1().ArangoClusterSynchronizations(item.Namespace).UpdateStatus(context.Background(), clusterSync, meta.UpdateOptions{}); err != nil { - h.operator.GetLogger().Error().Msgf("ArangoClusterSynchronizations status update error %v", err) + h.operator.GetLogger().Error().Msgf("ArangoClusterSynchronizations status update error %v", err) return err } diff --git a/pkg/logging/const.go b/pkg/logging/const.go index 52884ba80..92046f593 100644 --- a/pkg/logging/const.go +++ b/pkg/logging/const.go @@ -20,9 +20,12 @@ package logging +import "github.com/rs/zerolog" + const ( LoggerNameOperator = "operator" LoggerNameDeployment = "deployment" + LoggerNameInspector = "inspector" LoggerNameKLog = "klog" LoggerNameServer = "server" LoggerNameDeploymentReplication = "deployment-replication" @@ -32,10 +35,15 @@ const ( LoggerNameEventRecorder = "event-recorder" ) +var defaultLogLevels = map[string]zerolog.Level{ + LoggerNameInspector: zerolog.WarnLevel, +} + func LoggerNames() []string { return []string{ LoggerNameOperator, LoggerNameDeployment, + LoggerNameInspector, LoggerNameKLog, LoggerNameServer, LoggerNameDeploymentReplication, diff --git a/pkg/logging/logger.go b/pkg/logging/logger.go index 65a9eb7b5..3f40f7673 100644 --- a/pkg/logging/logger.go +++ b/pkg/logging/logger.go @@ -31,6 +31,45 @@ import ( 
"github.com/rs/zerolog" ) +var ( + globalLoggerLock sync.Mutex + globalLogger Service +) + +func GlobalLogger() Service { + globalLoggerLock.Lock() + defer globalLoggerLock.Unlock() + + if globalLogger == nil { + if err := initGlobalLogger("info", nil); err != nil { + panic(err) + } + } + + return globalLogger +} + +func InitGlobalLogger(defaultLevel string, overrides []string) error { + globalLoggerLock.Lock() + defer globalLoggerLock.Unlock() + + return initGlobalLogger(defaultLevel, overrides) +} + +func initGlobalLogger(defaultLevel string, overrides []string) error { + if globalLogger != nil { + return errors.Newf("GlobalLogger already created") + } + + s, err := newService(defaultLevel, overrides) + if err != nil { + return err + } + + globalLogger = s + return nil +} + // Service exposes the interfaces for a logger service // that supports different loggers with different levels. type Service interface { @@ -59,8 +98,8 @@ func NewRootLogger() zerolog.Logger { }).With().Timestamp().Logger() } -// NewService creates a new Service. -func NewService(defaultLevel string, overrides []string) (Service, error) { +// newService creates a new Service. 
+func newService(defaultLevel string, overrides []string) (Service, error) { l, err := stringToLevel(defaultLevel) if err != nil { return nil, errors.WithStack(err) @@ -72,6 +111,10 @@ func NewService(defaultLevel string, overrides []string) (Service, error) { levels: make(map[string]zerolog.Level), } + for k, v := range defaultLogLevels { + s.levels[k] = v + } + for _, override := range overrides { levelParts := strings.Split(override, "=") switch size := len(levelParts); size { diff --git a/pkg/operator/server_discovery_api.go b/pkg/operator/server_discovery_api.go index c11a04c6f..cac775833 100644 --- a/pkg/operator/server_discovery_api.go +++ b/pkg/operator/server_discovery_api.go @@ -27,7 +27,7 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/errors" "github.com/rs/zerolog" - v1 "k8s.io/api/core/v1" + core "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/arangodb/kube-arangodb/pkg/server" @@ -99,15 +99,22 @@ func (o *Operator) findOtherOperatorsInNamespace(log zerolog.Logger, namespace s log.Debug().Err(err).Msg("Failed to list services") return nil } - nodeFetcher := func() (v1.NodeList, error) { + nodeFetcher := func() ([]*core.Node, error) { if o.Scope.IsNamespaced() { - return v1.NodeList{}, nil + return nil, nil } result, err := o.Dependencies.Client.Kubernetes().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) if err != nil { - return v1.NodeList{}, errors.WithStack(err) + return nil, errors.WithStack(err) } - return *result, nil + + r := make([]*core.Node, len(result.Items)) + + for id := range result.Items { + r[id] = &result.Items[id] + } + + return r, nil } for _, svc := range services.Items { // Filter out unwanted services @@ -133,7 +140,7 @@ func (o *Operator) findOtherOperatorsInNamespace(log zerolog.Logger, namespace s } var url string switch svc.Spec.Type { - case v1.ServiceTypeNodePort, v1.ServiceTypeLoadBalancer: + case core.ServiceTypeNodePort, core.ServiceTypeLoadBalancer: if x, err := 
k8sutil.CreateServiceURL(svc, "https", nil, nodeFetcher); err == nil { url = x } else { diff --git a/pkg/util/k8sutil/finalizers.go b/pkg/util/k8sutil/finalizers.go index dc28e7e9f..ce89f8ccb 100644 --- a/pkg/util/k8sutil/finalizers.go +++ b/pkg/util/k8sutil/finalizers.go @@ -29,6 +29,8 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod" "github.com/arangodb/kube-arangodb/pkg/util/errors" + persistentvolumeclaimv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/persistentvolumeclaim/v1" + podv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod/v1" "github.com/rs/zerolog" core "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -39,13 +41,13 @@ const ( ) // RemovePodFinalizers removes the given finalizers from the given pod. -func RemovePodFinalizers(ctx context.Context, cachedStatus pod.Inspector, log zerolog.Logger, c pod.ModInterface, p *core.Pod, +func RemovePodFinalizers(ctx context.Context, cachedStatus pod.Inspector, log zerolog.Logger, c podv1.ModInterface, p *core.Pod, finalizers []string, ignoreNotFound bool) (int, error) { getFunc := func() (metav1.Object, error) { ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) defer cancel() - result, err := cachedStatus.PodReadInterface().Get(ctxChild, p.GetName(), metav1.GetOptions{}) + result, err := cachedStatus.Pod().V1().Read().Get(ctxChild, p.GetName(), metav1.GetOptions{}) if err != nil { return nil, errors.WithStack(err) } @@ -71,13 +73,13 @@ func RemovePodFinalizers(ctx context.Context, cachedStatus pod.Inspector, log ze } // RemovePVCFinalizers removes the given finalizers from the given PVC. 
-func RemovePVCFinalizers(ctx context.Context, cachedStatus persistentvolumeclaim.Inspector, log zerolog.Logger, c persistentvolumeclaim.ModInterface, +func RemovePVCFinalizers(ctx context.Context, cachedStatus persistentvolumeclaim.Inspector, log zerolog.Logger, c persistentvolumeclaimv1.ModInterface, p *core.PersistentVolumeClaim, finalizers []string, ignoreNotFound bool) (int, error) { getFunc := func() (metav1.Object, error) { ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) defer cancel() - result, err := cachedStatus.PersistentVolumeClaimReadInterface().Get(ctxChild, p.GetName(), metav1.GetOptions{}) + result, err := cachedStatus.PersistentVolumeClaim().V1().Read().Get(ctxChild, p.GetName(), metav1.GetOptions{}) if err != nil { return nil, errors.WithStack(err) } diff --git a/pkg/util/k8sutil/inspector/arangoclustersynchronization/definition.go b/pkg/util/k8sutil/inspector/arangoclustersynchronization/definition.go new file mode 100644 index 000000000..19ad12779 --- /dev/null +++ b/pkg/util/k8sutil/inspector/arangoclustersynchronization/definition.go @@ -0,0 +1,36 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package arangoclustersynchronization + +import ( + v1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/arangoclustersynchronization/v1" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/refresh" +) + +type Inspector interface { + ArangoClusterSynchronization() Definition +} + +type Definition interface { + refresh.Inspector + + V1() (v1.Inspector, error) +} diff --git a/pkg/util/k8sutil/inspector/arangoclustersynchronization/reader.go b/pkg/util/k8sutil/inspector/arangoclustersynchronization/reader.go deleted file mode 100644 index b2a4bf064..000000000 --- a/pkg/util/k8sutil/inspector/arangoclustersynchronization/reader.go +++ /dev/null @@ -1,38 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// - -package arangoclustersynchronization - -import ( - "context" - - api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - meta "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Interface has methods to work with Node resources. -type Interface interface { - ReadInterface -} - -// ReadInterface has methods to work with Node resources with ReadOnly mode. 
-type ReadInterface interface { - Get(ctx context.Context, name string, opts meta.GetOptions) (*api.ArangoClusterSynchronization, error) -} diff --git a/pkg/util/k8sutil/inspector/arangoclustersynchronization/loader.go b/pkg/util/k8sutil/inspector/arangoclustersynchronization/v1/loader.go similarity index 64% rename from pkg/util/k8sutil/inspector/arangoclustersynchronization/loader.go rename to pkg/util/k8sutil/inspector/arangoclustersynchronization/v1/loader.go index 754bab793..ac9d081be 100644 --- a/pkg/util/k8sutil/inspector/arangoclustersynchronization/loader.go +++ b/pkg/util/k8sutil/inspector/arangoclustersynchronization/v1/loader.go @@ -18,23 +18,33 @@ // Copyright holder is ArangoDB GmbH, Cologne, Germany // -package arangoclustersynchronization +package v1 import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" ) -type Loader interface { - GetArangoClusterSynchronizations() (Inspector, bool) -} - type Inspector interface { - ArangoClusterSynchronizations() []*api.ArangoClusterSynchronization - ArangoClusterSynchronization(name string) (*api.ArangoClusterSynchronization, bool) - FilterArangoClusterSynchronizations(filters ...Filter) []*api.ArangoClusterSynchronization - IterateArangoClusterSynchronizations(action Action, filters ...Filter) error - ArangoClusterSynchronizationReadInterface() ReadInterface + ListSimple() []*api.ArangoClusterSynchronization + GetSimple(name string) (*api.ArangoClusterSynchronization, bool) + Filter(filters ...Filter) []*api.ArangoClusterSynchronization + Iterate(action Action, filters ...Filter) error + Read() ReadInterface } type Filter func(acs *api.ArangoClusterSynchronization) bool type Action func(acs *api.ArangoClusterSynchronization) error + +func FilterObject(acs *api.ArangoClusterSynchronization, filters ...Filter) bool { + for _, f := range filters { + if f == nil { + continue + } + + if !f(acs) { + return false + } + } + + return true +} diff --git 
a/pkg/util/k8sutil/inspector/arangoclustersynchronization/v1/reader.go b/pkg/util/k8sutil/inspector/arangoclustersynchronization/v1/reader.go new file mode 100644 index 000000000..33e6ad24d --- /dev/null +++ b/pkg/util/k8sutil/inspector/arangoclustersynchronization/v1/reader.go @@ -0,0 +1,49 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package v1 + +import ( + "context" + + api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +// ModInterface has methods to work with ArangoClusterSynchronization resources only for creation +type ModInterface interface { + Create(ctx context.Context, acs *api.ArangoClusterSynchronization, opts meta.CreateOptions) (*api.ArangoClusterSynchronization, error) + Update(ctx context.Context, acs *api.ArangoClusterSynchronization, opts meta.UpdateOptions) (*api.ArangoClusterSynchronization, error) + UpdateStatus(ctx context.Context, acs *api.ArangoClusterSynchronization, opts meta.UpdateOptions) (*api.ArangoClusterSynchronization, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts meta.PatchOptions, subresources ...string) (result *api.ArangoClusterSynchronization, err error) + Delete(ctx context.Context, name string, opts meta.DeleteOptions) error +} + +// Interface has 
methods to work with ArangoClusterSynchronization resources. +type Interface interface { + ModInterface + ReadInterface +} + +// ReadInterface has methods to work with ArangoClusterSynchronization resources with ReadOnly mode. +type ReadInterface interface { + Get(ctx context.Context, name string, opts meta.GetOptions) (*api.ArangoClusterSynchronization, error) +} diff --git a/pkg/util/k8sutil/inspector/arangomember/definition.go b/pkg/util/k8sutil/inspector/arangomember/definition.go new file mode 100644 index 000000000..1db66b43c --- /dev/null +++ b/pkg/util/k8sutil/inspector/arangomember/definition.go @@ -0,0 +1,36 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package arangomember + +import ( + v1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/arangomember/v1" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/refresh" +) + +type Inspector interface { + ArangoMember() Definition +} + +type Definition interface { + refresh.Inspector + + V1() v1.Inspector +} diff --git a/pkg/util/k8sutil/inspector/arangomember/member.go b/pkg/util/k8sutil/inspector/arangomember/v1/filters.go similarity index 76% rename from pkg/util/k8sutil/inspector/arangomember/member.go rename to pkg/util/k8sutil/inspector/arangomember/v1/filters.go index 1030b8682..a972a4d3a 100644 --- a/pkg/util/k8sutil/inspector/arangomember/member.go +++ b/pkg/util/k8sutil/inspector/arangomember/v1/filters.go @@ -18,22 +18,13 @@ // Copyright holder is ArangoDB GmbH, Cologne, Germany // -package arangomember +package v1 import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "k8s.io/apimachinery/pkg/types" ) -type Inspector interface { - ArangoMember(name string) (*api.ArangoMember, bool) - IterateArangoMembers(action Action, filters ...Filter) error - ArangoMemberReadInterface() ReadInterface -} - -type Filter func(pod *api.ArangoMember) bool -type Action func(pod *api.ArangoMember) error - func FilterByDeploymentUID(uid types.UID) Filter { return func(pod *api.ArangoMember) bool { return pod.Spec.DeploymentUID == "" || pod.Spec.DeploymentUID == uid diff --git a/pkg/util/k8sutil/inspector/arangomember/v1/loader.go b/pkg/util/k8sutil/inspector/arangomember/v1/loader.go new file mode 100644 index 000000000..7e53568ba --- /dev/null +++ b/pkg/util/k8sutil/inspector/arangomember/v1/loader.go @@ -0,0 +1,34 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package v1 + +import ( + api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" +) + +type Inspector interface { + GetSimple(name string) (*api.ArangoMember, bool) + Iterate(action Action, filters ...Filter) error + Read() ReadInterface +} + +type Filter func(pod *api.ArangoMember) bool +type Action func(pod *api.ArangoMember) error diff --git a/pkg/util/k8sutil/inspector/arangomember/reader.go b/pkg/util/k8sutil/inspector/arangomember/v1/reader.go similarity index 98% rename from pkg/util/k8sutil/inspector/arangomember/reader.go rename to pkg/util/k8sutil/inspector/arangomember/v1/reader.go index 4dbada59e..164d36bd6 100644 --- a/pkg/util/k8sutil/inspector/arangomember/reader.go +++ b/pkg/util/k8sutil/inspector/arangomember/v1/reader.go @@ -18,7 +18,7 @@ // Copyright holder is ArangoDB GmbH, Cologne, Germany // -package arangomember +package v1 import ( "context" diff --git a/pkg/util/k8sutil/inspector/arangotask/definition.go b/pkg/util/k8sutil/inspector/arangotask/definition.go new file mode 100644 index 000000000..8aae54224 --- /dev/null +++ b/pkg/util/k8sutil/inspector/arangotask/definition.go @@ -0,0 +1,36 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package arangotask + +import ( + v1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/arangotask/v1" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/refresh" +) + +type Inspector interface { + ArangoTask() Definition +} + +type Definition interface { + refresh.Inspector + + V1() (v1.Inspector, error) +} diff --git a/pkg/util/k8sutil/inspector/arangotask/reader.go b/pkg/util/k8sutil/inspector/arangotask/reader.go deleted file mode 100644 index de0049a59..000000000 --- a/pkg/util/k8sutil/inspector/arangotask/reader.go +++ /dev/null @@ -1,38 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// - -package arangotask - -import ( - "context" - - api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - meta "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Interface has methods to work with Node resources. -type Interface interface { - ReadInterface -} - -// ReadInterface has methods to work with Node resources with ReadOnly mode. -type ReadInterface interface { - Get(ctx context.Context, name string, opts meta.GetOptions) (*api.ArangoTask, error) -} diff --git a/pkg/util/k8sutil/inspector/arangotask/loader.go b/pkg/util/k8sutil/inspector/arangotask/v1/loader.go similarity index 63% rename from pkg/util/k8sutil/inspector/arangotask/loader.go rename to pkg/util/k8sutil/inspector/arangotask/v1/loader.go index 5a3795a10..c94534530 100644 --- a/pkg/util/k8sutil/inspector/arangotask/loader.go +++ b/pkg/util/k8sutil/inspector/arangotask/v1/loader.go @@ -18,23 +18,33 @@ // Copyright holder is ArangoDB GmbH, Cologne, Germany // -package arangotask +package v1 import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" ) -type Loader interface { - GetArangoTasks() (Inspector, bool) -} - type Inspector interface { - ArangoTasks() []*api.ArangoTask - ArangoTask(name string) (*api.ArangoTask, bool) - FilterArangoTasks(filters ...Filter) []*api.ArangoTask - IterateArangoTasks(action Action, filters ...Filter) error - ArangoTaskReadInterface() ReadInterface + ListSimple() []*api.ArangoTask + GetSimple(name string) (*api.ArangoTask, bool) + Filter(filters ...Filter) []*api.ArangoTask + Iterate(action Action, filters ...Filter) error + Read() ReadInterface } -type Filter func(acs *api.ArangoTask) bool -type Action func(acs *api.ArangoTask) error +type Filter func(at *api.ArangoTask) bool +type Action func(at *api.ArangoTask) error + +func FilterObject(at *api.ArangoTask, filters ...Filter) bool { + for _, f := range filters { + if f == nil { + continue + } + + if !f(at) { + return false + } + 
} + + return true +} diff --git a/pkg/util/k8sutil/inspector/arangotask/v1/reader.go b/pkg/util/k8sutil/inspector/arangotask/v1/reader.go new file mode 100644 index 000000000..c7c222c50 --- /dev/null +++ b/pkg/util/k8sutil/inspector/arangotask/v1/reader.go @@ -0,0 +1,49 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package v1 + +import ( + "context" + + api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +// ModInterface has methods to work with ArangoTask resources only for creation +type ModInterface interface { + Create(ctx context.Context, arangotask *api.ArangoTask, opts meta.CreateOptions) (*api.ArangoTask, error) + Update(ctx context.Context, arangotask *api.ArangoTask, opts meta.UpdateOptions) (*api.ArangoTask, error) + UpdateStatus(ctx context.Context, arangotask *api.ArangoTask, opts meta.UpdateOptions) (*api.ArangoTask, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts meta.PatchOptions, subresources ...string) (result *api.ArangoTask, err error) + Delete(ctx context.Context, name string, opts meta.DeleteOptions) error +} + +// Interface has methods to work with ArangoTask resources. 
+type Interface interface { + ModInterface + ReadInterface +} + +// ReadInterface has methods to work with ArangoTask resources with ReadOnly mode. +type ReadInterface interface { + Get(ctx context.Context, name string, opts meta.GetOptions) (*api.ArangoTask, error) +} diff --git a/pkg/util/k8sutil/inspector/inspector.go b/pkg/util/k8sutil/inspector/inspector.go index be0f2ece8..0211c220a 100644 --- a/pkg/util/k8sutil/inspector/inspector.go +++ b/pkg/util/k8sutil/inspector/inspector.go @@ -35,10 +35,17 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/service" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/serviceaccount" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/servicemonitor" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/throttle" + "github.com/arangodb/kube-arangodb/pkg/util/kclient" ) type Inspector interface { + Client() kclient.Client + Namespace() string + refresh.Inspector + throttle.Inspector + pod.Inspector secret.Inspector persistentvolumeclaim.Inspector @@ -49,7 +56,7 @@ type Inspector interface { arangomember.Inspector server.Inspector - node.Loader - arangoclustersynchronization.Loader - arangotask.Loader + node.Inspector + arangoclustersynchronization.Inspector + arangotask.Inspector } diff --git a/pkg/util/k8sutil/inspector/node/definition.go b/pkg/util/k8sutil/inspector/node/definition.go new file mode 100644 index 000000000..eead59e0c --- /dev/null +++ b/pkg/util/k8sutil/inspector/node/definition.go @@ -0,0 +1,36 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package node + +import ( + v1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/node/v1" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/refresh" +) + +type Inspector interface { + Node() Definition +} + +type Definition interface { + refresh.Inspector + + V1() (v1.Inspector, error) +} diff --git a/pkg/util/k8sutil/inspector/node/node.go b/pkg/util/k8sutil/inspector/node/v1/loader.go similarity index 79% rename from pkg/util/k8sutil/inspector/node/node.go rename to pkg/util/k8sutil/inspector/node/v1/loader.go index d50fbec0e..cc613a4e4 100644 --- a/pkg/util/k8sutil/inspector/node/node.go +++ b/pkg/util/k8sutil/inspector/node/v1/loader.go @@ -18,21 +18,17 @@ // Copyright holder is ArangoDB GmbH, Cologne, Germany // -package node +package v1 import ( core "k8s.io/api/core/v1" ) -type Loader interface { - GetNodes() (Inspector, bool) -} - type Inspector interface { - Nodes() []*core.Node - Node(name string) (*core.Node, bool) - IterateNodes(action Action, filters ...Filter) error - NodeReadInterface() ReadInterface + ListSimple() []*core.Node + GetSimple(name string) (*core.Node, bool) + Iterate(action Action, filters ...Filter) error + Read() ReadInterface } type Filter func(podDisruptionBudget *core.Node) bool diff --git a/pkg/util/k8sutil/inspector/node/reader.go b/pkg/util/k8sutil/inspector/node/v1/reader.go similarity index 98% rename from pkg/util/k8sutil/inspector/node/reader.go rename to pkg/util/k8sutil/inspector/node/v1/reader.go index 
c2cab5a7f..a3661d1a8 100644 --- a/pkg/util/k8sutil/inspector/node/reader.go +++ b/pkg/util/k8sutil/inspector/node/v1/reader.go @@ -18,7 +18,7 @@ // Copyright holder is ArangoDB GmbH, Cologne, Germany // -package node +package v1 import ( "context" diff --git a/pkg/util/k8sutil/inspector/persistentvolumeclaim/definition.go b/pkg/util/k8sutil/inspector/persistentvolumeclaim/definition.go new file mode 100644 index 000000000..223910856 --- /dev/null +++ b/pkg/util/k8sutil/inspector/persistentvolumeclaim/definition.go @@ -0,0 +1,36 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package persistentvolumeclaim + +import ( + v1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/persistentvolumeclaim/v1" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/refresh" +) + +type Inspector interface { + PersistentVolumeClaim() Definition +} + +type Definition interface { + refresh.Inspector + + V1() v1.Inspector +} diff --git a/pkg/util/k8sutil/inspector/persistentvolumeclaim/v1/filters.go b/pkg/util/k8sutil/inspector/persistentvolumeclaim/v1/filters.go new file mode 100644 index 000000000..2741880db --- /dev/null +++ b/pkg/util/k8sutil/inspector/persistentvolumeclaim/v1/filters.go @@ -0,0 +1,42 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package v1 + +import ( + core "k8s.io/api/core/v1" +) + +func FilterPersistentVolumeClaimsByLabels(labels map[string]string) Filter { + return func(pvc *core.PersistentVolumeClaim) bool { + for key, value := range labels { + v, ok := pvc.Labels[key] + if !ok { + return false + } + + if v != value { + return false + } + } + + return true + } +} diff --git a/pkg/util/k8sutil/inspector/persistentvolumeclaim/pvc.go b/pkg/util/k8sutil/inspector/persistentvolumeclaim/v1/loader.go similarity index 70% rename from pkg/util/k8sutil/inspector/persistentvolumeclaim/pvc.go rename to pkg/util/k8sutil/inspector/persistentvolumeclaim/v1/loader.go index 36a0a20fc..6738b3148 100644 --- a/pkg/util/k8sutil/inspector/persistentvolumeclaim/pvc.go +++ b/pkg/util/k8sutil/inspector/persistentvolumeclaim/v1/loader.go @@ -18,20 +18,17 @@ // Copyright holder is ArangoDB GmbH, Cologne, Germany // -package persistentvolumeclaim +package v1 import ( - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/refresh" core "k8s.io/api/core/v1" ) type Inspector interface { - refresh.Inspector - - PersistentVolumeClaims() []*core.PersistentVolumeClaim - PersistentVolumeClaim(name string) (*core.PersistentVolumeClaim, bool) - IteratePersistentVolumeClaims(action Action, filters ...Filter) error - PersistentVolumeClaimReadInterface() ReadInterface + ListSimple() []*core.PersistentVolumeClaim + GetSimple(name string) (*core.PersistentVolumeClaim, bool) + Iterate(action Action, filters ...Filter) error + Read() ReadInterface } type Filter func(pvc *core.PersistentVolumeClaim) bool diff --git a/pkg/util/k8sutil/inspector/persistentvolumeclaim/reader.go b/pkg/util/k8sutil/inspector/persistentvolumeclaim/v1/reader.go similarity index 98% rename from pkg/util/k8sutil/inspector/persistentvolumeclaim/reader.go rename to pkg/util/k8sutil/inspector/persistentvolumeclaim/v1/reader.go index 1499715c8..c0d825b6e 100644 --- 
a/pkg/util/k8sutil/inspector/persistentvolumeclaim/reader.go +++ b/pkg/util/k8sutil/inspector/persistentvolumeclaim/v1/reader.go @@ -18,7 +18,7 @@ // Copyright holder is ArangoDB GmbH, Cologne, Germany // -package persistentvolumeclaim +package v1 import ( "context" diff --git a/pkg/util/k8sutil/inspector/pod/pod.go b/pkg/util/k8sutil/inspector/pod/definition.go similarity index 77% rename from pkg/util/k8sutil/inspector/pod/pod.go rename to pkg/util/k8sutil/inspector/pod/definition.go index ec0fa8418..7b4df666a 100644 --- a/pkg/util/k8sutil/inspector/pod/pod.go +++ b/pkg/util/k8sutil/inspector/pod/definition.go @@ -21,18 +21,16 @@ package pod import ( + v1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod/v1" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/refresh" - core "k8s.io/api/core/v1" ) type Inspector interface { + Pod() Definition +} + +type Definition interface { refresh.Inspector - Pods() []*core.Pod - Pod(name string) (*core.Pod, bool) - IteratePods(action Action, filters ...Filter) error - PodReadInterface() ReadInterface + V1() v1.Inspector } - -type Filter func(pod *core.Pod) bool -type Action func(pod *core.Pod) error diff --git a/pkg/util/k8sutil/inspector/pod/v1/filters.go b/pkg/util/k8sutil/inspector/pod/v1/filters.go new file mode 100644 index 000000000..201242799 --- /dev/null +++ b/pkg/util/k8sutil/inspector/pod/v1/filters.go @@ -0,0 +1,42 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package v1 + +import ( + core "k8s.io/api/core/v1" +) + +func FilterPodsByLabels(labels map[string]string) Filter { + return func(pod *core.Pod) bool { + for key, value := range labels { + v, ok := pod.Labels[key] + if !ok { + return false + } + + if v != value { + return false + } + } + + return true + } +} diff --git a/pkg/util/k8sutil/inspector/pod/v1/loader.go b/pkg/util/k8sutil/inspector/pod/v1/loader.go new file mode 100644 index 000000000..76b77aaba --- /dev/null +++ b/pkg/util/k8sutil/inspector/pod/v1/loader.go @@ -0,0 +1,35 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package v1 + +import ( + core "k8s.io/api/core/v1" +) + +type Inspector interface { + ListSimple() []*core.Pod + GetSimple(name string) (*core.Pod, bool) + Iterate(action Action, filters ...Filter) error + Read() ReadInterface +} + +type Filter func(pod *core.Pod) bool +type Action func(pod *core.Pod) error diff --git a/pkg/util/k8sutil/inspector/pod/reader.go b/pkg/util/k8sutil/inspector/pod/v1/reader.go similarity index 99% rename from pkg/util/k8sutil/inspector/pod/reader.go rename to pkg/util/k8sutil/inspector/pod/v1/reader.go index 51b56215e..a344515e7 100644 --- a/pkg/util/k8sutil/inspector/pod/reader.go +++ b/pkg/util/k8sutil/inspector/pod/v1/reader.go @@ -18,7 +18,7 @@ // Copyright holder is ArangoDB GmbH, Cologne, Germany // -package pod +package v1 import ( "context" diff --git a/pkg/util/k8sutil/inspector/poddisruptionbudget/definition.go b/pkg/util/k8sutil/inspector/poddisruptionbudget/definition.go new file mode 100644 index 000000000..c34d00524 --- /dev/null +++ b/pkg/util/k8sutil/inspector/poddisruptionbudget/definition.go @@ -0,0 +1,38 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package poddisruptionbudget + +import ( + v1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/poddisruptionbudget/v1" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/poddisruptionbudget/v1beta1" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/refresh" +) + +type Inspector interface { + PodDisruptionBudget() Definition +} + +type Definition interface { + refresh.Inspector + + V1() (v1.Inspector, error) + V1Beta1() (v1beta1.Inspector, error) +} diff --git a/pkg/util/k8sutil/inspector/poddisruptionbudget/v1/loader.go b/pkg/util/k8sutil/inspector/poddisruptionbudget/v1/loader.go new file mode 100644 index 000000000..a882fe4f4 --- /dev/null +++ b/pkg/util/k8sutil/inspector/poddisruptionbudget/v1/loader.go @@ -0,0 +1,32 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package v1 + +import policy "k8s.io/api/policy/v1" + +type Inspector interface { + GetSimple(name string) (*policy.PodDisruptionBudget, bool) + Iterate(action Action, filters ...Filter) error + Read() ReadInterface +} + +type Filter func(podDisruptionBudget *policy.PodDisruptionBudget) bool +type Action func(podDisruptionBudget *policy.PodDisruptionBudget) error diff --git a/pkg/util/k8sutil/inspector/poddisruptionbudget/v1/reader.go b/pkg/util/k8sutil/inspector/poddisruptionbudget/v1/reader.go new file mode 100644 index 000000000..b430ed08c --- /dev/null +++ b/pkg/util/k8sutil/inspector/poddisruptionbudget/v1/reader.go @@ -0,0 +1,48 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package v1 + +import ( + "context" + + policy "k8s.io/api/policy/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +// ModInterface has methods to work with PodDisruptionBudget resources only for creation +type ModInterface interface { + Create(ctx context.Context, poddisruptionbudget *policy.PodDisruptionBudget, opts meta.CreateOptions) (*policy.PodDisruptionBudget, error) + Update(ctx context.Context, poddisruptionbudget *policy.PodDisruptionBudget, opts meta.UpdateOptions) (*policy.PodDisruptionBudget, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts meta.PatchOptions, subresources ...string) (result *policy.PodDisruptionBudget, err error) + Delete(ctx context.Context, name string, opts meta.DeleteOptions) error +} + +// Interface has methods to work with PodDisruptionBudget resources. +type Interface interface { + ModInterface + ReadInterface +} + +// ReadInterface has methods to work with PodDisruptionBudget resources with ReadOnly mode. 
+type ReadInterface interface { + Get(ctx context.Context, name string, opts meta.GetOptions) (*policy.PodDisruptionBudget, error) +} diff --git a/pkg/util/k8sutil/inspector/poddisruptionbudget/pdb.go b/pkg/util/k8sutil/inspector/poddisruptionbudget/v1beta1/loader.go similarity index 80% rename from pkg/util/k8sutil/inspector/poddisruptionbudget/pdb.go rename to pkg/util/k8sutil/inspector/poddisruptionbudget/v1beta1/loader.go index c4f9f0aa9..4a683fc68 100644 --- a/pkg/util/k8sutil/inspector/poddisruptionbudget/pdb.go +++ b/pkg/util/k8sutil/inspector/poddisruptionbudget/v1beta1/loader.go @@ -18,14 +18,14 @@ // Copyright holder is ArangoDB GmbH, Cologne, Germany // -package poddisruptionbudget +package v1beta1 import policy "k8s.io/api/policy/v1beta1" type Inspector interface { - PodDisruptionBudget(name string) (*policy.PodDisruptionBudget, bool) - IteratePodDisruptionBudgets(action Action, filters ...Filter) error - PodDisruptionBudgetReadInterface() ReadInterface + GetSimple(name string) (*policy.PodDisruptionBudget, bool) + Iterate(action Action, filters ...Filter) error + Read() ReadInterface } type Filter func(podDisruptionBudget *policy.PodDisruptionBudget) bool diff --git a/pkg/util/k8sutil/inspector/poddisruptionbudget/reader.go b/pkg/util/k8sutil/inspector/poddisruptionbudget/v1beta1/reader.go similarity index 98% rename from pkg/util/k8sutil/inspector/poddisruptionbudget/reader.go rename to pkg/util/k8sutil/inspector/poddisruptionbudget/v1beta1/reader.go index 7347db367..4e1b49270 100644 --- a/pkg/util/k8sutil/inspector/poddisruptionbudget/reader.go +++ b/pkg/util/k8sutil/inspector/poddisruptionbudget/v1beta1/reader.go @@ -18,7 +18,7 @@ // Copyright holder is ArangoDB GmbH, Cologne, Germany // -package poddisruptionbudget +package v1beta1 import ( "context" diff --git a/pkg/util/k8sutil/inspector/refresh/refresh.go b/pkg/util/k8sutil/inspector/refresh/refresh.go index c0fda2c07..3ed42ba81 100644 --- a/pkg/util/k8sutil/inspector/refresh/refresh.go +++ 
b/pkg/util/k8sutil/inspector/refresh/refresh.go @@ -22,9 +22,11 @@ package refresh import ( "context" + "time" ) type Inspector interface { IsStatic() bool Refresh(ctx context.Context) error + LastRefresh() time.Time } diff --git a/pkg/util/k8sutil/inspector/secret/definition.go b/pkg/util/k8sutil/inspector/secret/definition.go new file mode 100644 index 000000000..7a7d08aa1 --- /dev/null +++ b/pkg/util/k8sutil/inspector/secret/definition.go @@ -0,0 +1,36 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package secret + +import ( + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/refresh" + v1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret/v1" +) + +type Inspector interface { + Secret() Definition +} + +type Definition interface { + refresh.Inspector + + V1() v1.Inspector +} diff --git a/pkg/util/k8sutil/inspector/secret/secret.go b/pkg/util/k8sutil/inspector/secret/v1/loader.go similarity index 84% rename from pkg/util/k8sutil/inspector/secret/secret.go rename to pkg/util/k8sutil/inspector/secret/v1/loader.go index 906b69971..7ad0337b6 100644 --- a/pkg/util/k8sutil/inspector/secret/secret.go +++ b/pkg/util/k8sutil/inspector/secret/v1/loader.go @@ -17,7 +17,8 @@ // // Copyright holder is ArangoDB GmbH, Cologne, Germany // -package secret + +package v1 import ( core "k8s.io/api/core/v1" @@ -25,9 +26,10 @@ import ( // Inspector for secrets type Inspector interface { - Secret(name string) (*core.Secret, bool) - IterateSecrets(action Action, filters ...Filter) error - SecretReadInterface() ReadInterface + ListSimple() []*core.Secret + GetSimple(name string) (*core.Secret, bool) + Iterate(action Action, filters ...Filter) error + Read() ReadInterface } type Filter func(pod *core.Secret) bool diff --git a/pkg/util/k8sutil/inspector/secret/reader.go b/pkg/util/k8sutil/inspector/secret/v1/reader.go similarity index 99% rename from pkg/util/k8sutil/inspector/secret/reader.go rename to pkg/util/k8sutil/inspector/secret/v1/reader.go index 19622824b..a234e3b30 100644 --- a/pkg/util/k8sutil/inspector/secret/reader.go +++ b/pkg/util/k8sutil/inspector/secret/v1/reader.go @@ -17,8 +17,7 @@ // // Copyright holder is ArangoDB GmbH, Cologne, Germany // - -package secret +package v1 import ( "context" diff --git a/pkg/util/k8sutil/inspector/service/definition.go b/pkg/util/k8sutil/inspector/service/definition.go new file mode 100644 index 000000000..2cfd2c0b3 --- /dev/null +++ 
b/pkg/util/k8sutil/inspector/service/definition.go @@ -0,0 +1,36 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package service + +import ( + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/refresh" + v1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/service/v1" +) + +type Inspector interface { + Service() Definition +} + +type Definition interface { + refresh.Inspector + + V1() v1.Inspector +} diff --git a/pkg/util/k8sutil/inspector/service/service.go b/pkg/util/k8sutil/inspector/service/v1/loader.go similarity index 84% rename from pkg/util/k8sutil/inspector/service/service.go rename to pkg/util/k8sutil/inspector/service/v1/loader.go index 00cd77e0a..72015a1f2 100644 --- a/pkg/util/k8sutil/inspector/service/service.go +++ b/pkg/util/k8sutil/inspector/service/v1/loader.go @@ -18,16 +18,16 @@ // Copyright holder is ArangoDB GmbH, Cologne, Germany // -package service +package v1 import ( core "k8s.io/api/core/v1" ) type Inspector interface { - Service(name string) (*core.Service, bool) - IterateServices(action Action, filters ...Filter) error - ServiceReadInterface() ReadInterface + GetSimple(name string) (*core.Service, bool) + Iterate(action Action, filters ...Filter) error + Read() ReadInterface } type Filter func(pod *core.Service) bool diff --git 
a/pkg/util/k8sutil/inspector/service/reader.go b/pkg/util/k8sutil/inspector/service/v1/reader.go similarity index 99% rename from pkg/util/k8sutil/inspector/service/reader.go rename to pkg/util/k8sutil/inspector/service/v1/reader.go index cc6bc0980..aeb2fc0b0 100644 --- a/pkg/util/k8sutil/inspector/service/reader.go +++ b/pkg/util/k8sutil/inspector/service/v1/reader.go @@ -18,7 +18,7 @@ // Copyright holder is ArangoDB GmbH, Cologne, Germany // -package service +package v1 import ( "context" diff --git a/pkg/util/k8sutil/inspector/serviceaccount/definition.go b/pkg/util/k8sutil/inspector/serviceaccount/definition.go new file mode 100644 index 000000000..d6202b422 --- /dev/null +++ b/pkg/util/k8sutil/inspector/serviceaccount/definition.go @@ -0,0 +1,36 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package serviceaccount + +import ( + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/refresh" + v1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/serviceaccount/v1" +) + +type Inspector interface { + ServiceAccount() Definition +} + +type Definition interface { + refresh.Inspector + + V1() v1.Inspector +} diff --git a/pkg/util/k8sutil/inspector/serviceaccount/sa.go b/pkg/util/k8sutil/inspector/serviceaccount/v1/loader.go similarity index 81% rename from pkg/util/k8sutil/inspector/serviceaccount/sa.go rename to pkg/util/k8sutil/inspector/serviceaccount/v1/loader.go index 28b85a5ea..62a5121ba 100644 --- a/pkg/util/k8sutil/inspector/serviceaccount/sa.go +++ b/pkg/util/k8sutil/inspector/serviceaccount/v1/loader.go @@ -18,14 +18,14 @@ // Copyright holder is ArangoDB GmbH, Cologne, Germany // -package serviceaccount +package v1 import core "k8s.io/api/core/v1" type Inspector interface { - ServiceAccount(name string) (*core.ServiceAccount, bool) - IterateServiceAccounts(action Action, filters ...Filter) error - ServiceAccountReadInterface() ReadInterface + GetSimple(name string) (*core.ServiceAccount, bool) + Iterate(action Action, filters ...Filter) error + Read() ReadInterface } type Filter func(pod *core.ServiceAccount) bool diff --git a/pkg/util/k8sutil/inspector/serviceaccount/reader.go b/pkg/util/k8sutil/inspector/serviceaccount/v1/reader.go similarity index 98% rename from pkg/util/k8sutil/inspector/serviceaccount/reader.go rename to pkg/util/k8sutil/inspector/serviceaccount/v1/reader.go index 87b630871..0e2192c4f 100644 --- a/pkg/util/k8sutil/inspector/serviceaccount/reader.go +++ b/pkg/util/k8sutil/inspector/serviceaccount/v1/reader.go @@ -18,7 +18,7 @@ // Copyright holder is ArangoDB GmbH, Cologne, Germany // -package serviceaccount +package v1 import ( "context" diff --git a/pkg/util/k8sutil/inspector/servicemonitor/definition.go 
b/pkg/util/k8sutil/inspector/servicemonitor/definition.go new file mode 100644 index 000000000..c13bd54fd --- /dev/null +++ b/pkg/util/k8sutil/inspector/servicemonitor/definition.go @@ -0,0 +1,36 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package servicemonitor + +import ( + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/refresh" + v1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/servicemonitor/v1" +) + +type Inspector interface { + ServiceMonitor() Definition +} + +type Definition interface { + refresh.Inspector + + V1() (v1.Inspector, error) +} diff --git a/pkg/util/k8sutil/inspector/servicemonitor/sm.go b/pkg/util/k8sutil/inspector/servicemonitor/v1/loader.go similarity index 82% rename from pkg/util/k8sutil/inspector/servicemonitor/sm.go rename to pkg/util/k8sutil/inspector/servicemonitor/v1/loader.go index 856445f5d..b5a73d5ae 100644 --- a/pkg/util/k8sutil/inspector/servicemonitor/sm.go +++ b/pkg/util/k8sutil/inspector/servicemonitor/v1/loader.go @@ -18,14 +18,14 @@ // Copyright holder is ArangoDB GmbH, Cologne, Germany // -package servicemonitor +package v1 import monitoring "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" type Inspector interface { - ServiceMonitor(name string) (*monitoring.ServiceMonitor, bool) - 
IterateServiceMonitors(action Action, filters ...Filter) error - ServiceMonitorReadInterface() ReadInterface + GetSimple(name string) (*monitoring.ServiceMonitor, bool) + Iterate(action Action, filters ...Filter) error + Read() ReadInterface } type Filter func(serviceMonitor *monitoring.ServiceMonitor) bool diff --git a/pkg/util/k8sutil/inspector/servicemonitor/reader.go b/pkg/util/k8sutil/inspector/servicemonitor/v1/reader.go similarity index 98% rename from pkg/util/k8sutil/inspector/servicemonitor/reader.go rename to pkg/util/k8sutil/inspector/servicemonitor/v1/reader.go index 4007e765c..6eb6ff10e 100644 --- a/pkg/util/k8sutil/inspector/servicemonitor/reader.go +++ b/pkg/util/k8sutil/inspector/servicemonitor/v1/reader.go @@ -18,7 +18,7 @@ // Copyright holder is ArangoDB GmbH, Cologne, Germany // -package servicemonitor +package v1 import ( "context" diff --git a/pkg/util/k8sutil/inspector/throttle/throttle.go b/pkg/util/k8sutil/inspector/throttle/throttle.go new file mode 100644 index 000000000..834223034 --- /dev/null +++ b/pkg/util/k8sutil/inspector/throttle/throttle.go @@ -0,0 +1,318 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package throttle + +import ( + "sync" + "time" +) + +type Inspector interface { + GetThrottles() Components +} + +func NewAlwaysThrottleComponents() Components { + return NewThrottleComponents(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) +} + +func NewThrottleComponents(acs, am, at, node, pvc, pod, pdb, secret, service, serviceAccount, sm time.Duration) Components { + return &throttleComponents{ + arangoClusterSynchronization: NewThrottle(acs), + arangoMember: NewThrottle(am), + arangoTask: NewThrottle(at), + node: NewThrottle(node), + persistentVolumeClaim: NewThrottle(pvc), + pod: NewThrottle(pod), + podDisruptionBudget: NewThrottle(pdb), + secret: NewThrottle(secret), + service: NewThrottle(service), + serviceAccount: NewThrottle(serviceAccount), + serviceMonitor: NewThrottle(sm), + } +} + +type ComponentCount map[Component]int + +type Component string + +const ( + ArangoClusterSynchronization Component = "ArangoClusterSynchronization" + ArangoMember Component = "ArangoMember" + ArangoTask Component = "ArangoTask" + Node Component = "Node" + PersistentVolumeClaim Component = "PersistentVolumeClaim" + Pod Component = "Pod" + PodDisruptionBudget Component = "PodDisruptionBudget" + Secret Component = "Secret" + Service Component = "Service" + ServiceAccount Component = "ServiceAccount" + ServiceMonitor Component = "ServiceMonitor" +) + +func AllComponents() []Component { + return []Component{ + ArangoClusterSynchronization, + ArangoMember, + ArangoTask, + Node, + PersistentVolumeClaim, + Pod, + PodDisruptionBudget, + Secret, + Service, + ServiceAccount, + ServiceMonitor, + } +} + +type Components interface { + ArangoClusterSynchronization() Throttle + ArangoMember() Throttle + ArangoTask() Throttle + Node() Throttle + PersistentVolumeClaim() Throttle + Pod() Throttle + PodDisruptionBudget() Throttle + Secret() Throttle + Service() Throttle + ServiceAccount() Throttle + ServiceMonitor() Throttle + + Get(c 
Component) Throttle + Invalidate(components ...Component) + + Counts() ComponentCount + Copy() Components +} + +type throttleComponents struct { + arangoClusterSynchronization Throttle + arangoMember Throttle + arangoTask Throttle + node Throttle + persistentVolumeClaim Throttle + pod Throttle + podDisruptionBudget Throttle + secret Throttle + service Throttle + serviceAccount Throttle + serviceMonitor Throttle +} + +func (t *throttleComponents) Counts() ComponentCount { + z := ComponentCount{} + + for _, c := range AllComponents() { + z[c] = t.Get(c).Count() + } + + return z +} + +func (t *throttleComponents) Invalidate(components ...Component) { + for _, c := range components { + t.Get(c).Invalidate() + } +} + +func (t *throttleComponents) Get(c Component) Throttle { + if t == nil { + return NewAlwaysThrottle() + } + switch c { + case ArangoClusterSynchronization: + return t.arangoClusterSynchronization + case ArangoMember: + return t.arangoMember + case ArangoTask: + return t.arangoTask + case Node: + return t.node + case PersistentVolumeClaim: + return t.persistentVolumeClaim + case Pod: + return t.pod + case PodDisruptionBudget: + return t.podDisruptionBudget + case Secret: + return t.secret + case Service: + return t.service + case ServiceAccount: + return t.serviceAccount + case ServiceMonitor: + return t.serviceMonitor + default: + return NewAlwaysThrottle() + } +} + +func (t *throttleComponents) Copy() Components { + return &throttleComponents{ + arangoClusterSynchronization: t.arangoClusterSynchronization.Copy(), + arangoMember: t.arangoMember.Copy(), + arangoTask: t.arangoTask.Copy(), + node: t.node.Copy(), + persistentVolumeClaim: t.persistentVolumeClaim.Copy(), + pod: t.pod.Copy(), + podDisruptionBudget: t.podDisruptionBudget.Copy(), + secret: t.secret.Copy(), + service: t.service.Copy(), + serviceAccount: t.serviceAccount.Copy(), + serviceMonitor: t.serviceMonitor.Copy(), + } +} + +func (t *throttleComponents) ArangoClusterSynchronization() Throttle { 
+ return t.arangoClusterSynchronization +} + +func (t *throttleComponents) ArangoMember() Throttle { + return t.arangoMember +} + +func (t *throttleComponents) ArangoTask() Throttle { + return t.arangoTask +} + +func (t *throttleComponents) Node() Throttle { + return t.node +} + +func (t *throttleComponents) PersistentVolumeClaim() Throttle { + return t.persistentVolumeClaim +} + +func (t *throttleComponents) Pod() Throttle { + return t.pod +} + +func (t *throttleComponents) PodDisruptionBudget() Throttle { + return t.podDisruptionBudget +} + +func (t *throttleComponents) Secret() Throttle { + return t.secret +} + +func (t *throttleComponents) Service() Throttle { + return t.service +} + +func (t *throttleComponents) ServiceAccount() Throttle { + return t.serviceAccount +} + +func (t *throttleComponents) ServiceMonitor() Throttle { + return t.serviceMonitor +} + +type Throttle interface { + Invalidate() + Throttle() bool + Delay() + + Copy() Throttle + + Count() int +} + +func NewAlwaysThrottle() Throttle { + return &alwaysThrottle{} +} + +type alwaysThrottle struct { + count int +} + +func (a alwaysThrottle) Count() int { + return a.count +} + +func (a *alwaysThrottle) Copy() Throttle { + return a +} + +func (a alwaysThrottle) Invalidate() { + +} + +func (a alwaysThrottle) Throttle() bool { + return true +} + +func (a *alwaysThrottle) Delay() { + a.count++ +} + +func NewThrottle(delay time.Duration) Throttle { + if delay == 0 { + return NewAlwaysThrottle() + } + return &throttle{ + delay: delay, + } +} + +type throttle struct { + lock sync.Mutex + + delay time.Duration + next time.Time + count int +} + +func (t *throttle) Count() int { + t.lock.Lock() + defer t.lock.Unlock() + + return t.count +} + +func (t *throttle) Copy() Throttle { + return &throttle{ + delay: t.delay, + next: t.next, + count: t.count, + } +} + +func (t *throttle) Delay() { + t.lock.Lock() + defer t.lock.Unlock() + + t.next = time.Now().Add(t.delay) + t.count++ +} + +func (t *throttle) 
Throttle() bool { + t.lock.Lock() + defer t.lock.Unlock() + + return t.next.IsZero() || t.next.Before(time.Now()) +} + +func (t *throttle) Invalidate() { + t.lock.Lock() + defer t.lock.Unlock() + + t.next = time.UnixMilli(0) +} diff --git a/pkg/util/k8sutil/license.go b/pkg/util/k8sutil/license.go index d050a78fa..3552d0580 100644 --- a/pkg/util/k8sutil/license.go +++ b/pkg/util/k8sutil/license.go @@ -42,7 +42,7 @@ type LicenseSecret struct { } func GetLicenseFromSecret(secret secret.Inspector, name string) (LicenseSecret, bool) { - s, ok := secret.Secret(name) + s, ok := secret.Secret().V1().GetSimple(name) if !ok { return LicenseSecret{}, false } diff --git a/pkg/util/k8sutil/pods.go b/pkg/util/k8sutil/pods.go index d60d83306..efb9ffdb4 100644 --- a/pkg/util/k8sutil/pods.go +++ b/pkg/util/k8sutil/pods.go @@ -38,7 +38,7 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util" "github.com/arangodb/kube-arangodb/pkg/util/constants" "github.com/arangodb/kube-arangodb/pkg/util/errors" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod" + podv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod/v1" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/interfaces" ) @@ -587,7 +587,7 @@ func GetPodSpecChecksum(podSpec core.PodSpec) (string, error) { // CreatePod adds an owner to the given pod and calls the k8s api-server to created it. // If the pod already exists, nil is returned. // If another error occurs, that error is returned. 
-func CreatePod(ctx context.Context, c pod.ModInterface, pod *core.Pod, ns string, +func CreatePod(ctx context.Context, c podv1.ModInterface, pod *core.Pod, ns string, owner metav1.OwnerReference) (string, types.UID, error) { AddOwnerRefToObject(pod.GetObjectMeta(), &owner) diff --git a/pkg/util/k8sutil/pvc.go b/pkg/util/k8sutil/pvc.go index 80fa2fcb3..3e109336c 100644 --- a/pkg/util/k8sutil/pvc.go +++ b/pkg/util/k8sutil/pvc.go @@ -24,14 +24,13 @@ import ( "context" "strconv" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/persistentvolumeclaim" - "github.com/arangodb/kube-arangodb/pkg/util/errors" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/arangodb/kube-arangodb/pkg/util/constants" + persistentvolumeclaimv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/persistentvolumeclaim/v1" ) // IsPersistentVolumeClaimMarkedForDeletion returns true if the pvc has been marked for deletion. @@ -78,7 +77,7 @@ func ExtractStorageResourceRequirement(resources v1.ResourceRequirements) v1.Res // CreatePersistentVolumeClaim creates a persistent volume claim with given name and configuration. // If the pvc already exists, nil is returned. // If another error occurs, that error is returned. 
-func CreatePersistentVolumeClaim(ctx context.Context, pvcs persistentvolumeclaim.ModInterface, pvcName, deploymentName, +func CreatePersistentVolumeClaim(ctx context.Context, pvcs persistentvolumeclaimv1.ModInterface, pvcName, deploymentName, storageClassName, role string, enforceAntiAffinity bool, resources v1.ResourceRequirements, vct *v1.PersistentVolumeClaim, finalizers []string, owner metav1.OwnerReference) error { labels := LabelsForDeployment(deploymentName, role) diff --git a/pkg/util/k8sutil/secrets.go b/pkg/util/k8sutil/secrets.go index 3faff2f5f..b981fcb6e 100644 --- a/pkg/util/k8sutil/secrets.go +++ b/pkg/util/k8sutil/secrets.go @@ -25,10 +25,9 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/globals" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret" - "github.com/arangodb/kube-arangodb/pkg/util/constants" "github.com/arangodb/kube-arangodb/pkg/util/errors" + secretv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret/v1" jg "github.com/golang-jwt/jwt" core "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -36,7 +35,7 @@ import ( // ValidateEncryptionKeySecret checks that a secret with given name in given namespace // exists and it contains a 'key' data field of exactly 32 bytes. -func ValidateEncryptionKeySecret(secrets secret.Interface, secretName string) error { +func ValidateEncryptionKeySecret(secrets secretv1.Interface, secretName string) error { s, err := secrets.Get(context.Background(), secretName, meta.GetOptions{}) if err != nil { return errors.WithStack(err) @@ -57,7 +56,7 @@ func ValidateEncryptionKeyFromSecret(s *core.Secret) error { } // CreateEncryptionKeySecret creates a secret used to store a RocksDB encryption key. 
-func CreateEncryptionKeySecret(secrets secret.ModInterface, secretName string, key []byte) error { +func CreateEncryptionKeySecret(secrets secretv1.ModInterface, secretName string, key []byte) error { if len(key) != 32 { return errors.WithStack(errors.Newf("Key in secret '%s' is expected to be 32 bytes long, got %d", secretName, len(key))) } @@ -79,7 +78,7 @@ func CreateEncryptionKeySecret(secrets secret.ModInterface, secretName string, k // ValidateCACertificateSecret checks that a secret with given name in given namespace // exists and it contains a 'ca.crt' data field. -func ValidateCACertificateSecret(ctx context.Context, secrets secret.ReadInterface, secretName string) error { +func ValidateCACertificateSecret(ctx context.Context, secrets secretv1.ReadInterface, secretName string) error { s, err := secrets.Get(ctx, secretName, meta.GetOptions{}) if err != nil { return errors.WithStack(err) @@ -97,7 +96,7 @@ func ValidateCACertificateSecret(ctx context.Context, secrets secret.ReadInterfa // If the secret does not exists the field is missing, // an error is returned. // Returns: certificate, error -func GetCACertficateSecret(ctx context.Context, secrets secret.Interface, secretName string) (string, error) { +func GetCACertficateSecret(ctx context.Context, secrets secretv1.ReadInterface, secretName string) (string, error) { ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) defer cancel() @@ -118,7 +117,7 @@ func GetCACertficateSecret(ctx context.Context, secrets secret.Interface, secret // If the secret does not exists or one of the fields is missing, // an error is returned. 
// Returns: certificate, private-key, isOwnedByDeployment, error -func GetCASecret(ctx context.Context, secrets secret.ReadInterface, secretName string, +func GetCASecret(ctx context.Context, secrets secretv1.ReadInterface, secretName string, ownerRef *meta.OwnerReference) (string, string, bool, error) { s, err := secrets.Get(ctx, secretName, meta.GetOptions{}) if err != nil { @@ -150,7 +149,7 @@ func GetCAFromSecret(s *core.Secret, ownerRef *meta.OwnerReference) (string, str } // CreateCASecret creates a secret used to store a PEM encoded CA certificate & private key. -func CreateCASecret(ctx context.Context, secrets secret.ModInterface, secretName string, certificate, key string, +func CreateCASecret(ctx context.Context, secrets secretv1.ModInterface, secretName string, certificate, key string, ownerRef *meta.OwnerReference) error { // Create secret secret := &core.Secret{ @@ -174,7 +173,7 @@ func CreateCASecret(ctx context.Context, secrets secret.ModInterface, secretName // GetTLSKeyfileSecret loads a secret used to store a PEM encoded keyfile // in the format ArangoDB accepts it for its `--ssl.keyfile` option. // Returns: keyfile (pem encoded), error -func GetTLSKeyfileSecret(secrets secret.ReadInterface, secretName string) (string, error) { +func GetTLSKeyfileSecret(secrets secretv1.ReadInterface, secretName string) (string, error) { s, err := secrets.Get(context.Background(), secretName, meta.GetOptions{}) if err != nil { return "", errors.WithStack(err) @@ -193,7 +192,7 @@ func GetTLSKeyfileFromSecret(s *core.Secret) (string, error) { // CreateTLSKeyfileSecret creates a secret used to store a PEM encoded keyfile // in the format ArangoDB accepts it for its `--ssl.keyfile` option. 
-func CreateTLSKeyfileSecret(ctx context.Context, secrets secret.ModInterface, secretName string, keyfile string, +func CreateTLSKeyfileSecret(ctx context.Context, secrets secretv1.ModInterface, secretName string, keyfile string, ownerRef *meta.OwnerReference) error { // Create secret secret := &core.Secret{ @@ -215,7 +214,7 @@ func CreateTLSKeyfileSecret(ctx context.Context, secrets secret.ModInterface, se // ValidateTokenSecret checks that a secret with given name in given namespace // exists and it contains a 'token' data field. -func ValidateTokenSecret(ctx context.Context, secrets secret.ReadInterface, secretName string) error { +func ValidateTokenSecret(ctx context.Context, secrets secretv1.ReadInterface, secretName string) error { s, err := secrets.Get(ctx, secretName, meta.GetOptions{}) if err != nil { return errors.WithStack(err) @@ -233,7 +232,7 @@ func ValidateTokenFromSecret(s *core.Secret) error { } // GetTokenSecret loads the token secret from a Secret with given name. -func GetTokenSecret(ctx context.Context, secrets secret.ReadInterface, secretName string) (string, error) { +func GetTokenSecret(ctx context.Context, secrets secretv1.ReadInterface, secretName string) (string, error) { s, err := secrets.Get(ctx, secretName, meta.GetOptions{}) if err != nil { return "", errors.WithStack(err) @@ -253,7 +252,7 @@ func GetTokenFromSecret(s *core.Secret) (string, error) { // CreateTokenSecret creates a secret with given name in given namespace // with a given token as value. 
-func CreateTokenSecret(ctx context.Context, secrets secret.ModInterface, secretName, token string, +func CreateTokenSecret(ctx context.Context, secrets secretv1.ModInterface, secretName, token string, ownerRef *meta.OwnerReference) error { // Create secret secret := &core.Secret{ @@ -290,7 +289,7 @@ func CreateJWTTokenFromSecret(secret string, claims map[string]interface{}) (str // CreateJWTFromSecret creates a JWT using the secret stored in secretSecretName and stores the // result in a new secret called tokenSecretName -func CreateJWTFromSecret(ctx context.Context, cachedSecrets secret.ReadInterface, secrets secret.ModInterface, tokenSecretName, secretSecretName string, claims map[string]interface{}, ownerRef *meta.OwnerReference) error { +func CreateJWTFromSecret(ctx context.Context, cachedSecrets secretv1.ReadInterface, secrets secretv1.ModInterface, tokenSecretName, secretSecretName string, claims map[string]interface{}, ownerRef *meta.OwnerReference) error { secret, err := GetTokenSecret(ctx, cachedSecrets, secretSecretName) if err != nil { return errors.WithStack(err) @@ -312,7 +311,7 @@ func CreateJWTFromSecret(ctx context.Context, cachedSecrets secret.ReadInterface // CreateBasicAuthSecret creates a secret with given name in given namespace // with a given username and password as value. -func CreateBasicAuthSecret(ctx context.Context, secrets secret.ModInterface, secretName, username, password string, +func CreateBasicAuthSecret(ctx context.Context, secrets secretv1.ModInterface, secretName, username, password string, ownerRef *meta.OwnerReference) error { // Create secret secret := &core.Secret{ @@ -342,7 +341,7 @@ func CreateBasicAuthSecret(ctx context.Context, secrets secret.ModInterface, sec // If the secret does not exists or one of the fields is missing, // an error is returned. 
// Returns: username, password, error -func GetBasicAuthSecret(secrets secret.Interface, secretName string) (string, string, error) { +func GetBasicAuthSecret(secrets secretv1.Interface, secretName string) (string, string, error) { s, err := secrets.Get(context.Background(), secretName, meta.GetOptions{}) if err != nil { return "", "", errors.WithStack(err) diff --git a/pkg/util/k8sutil/services.go b/pkg/util/k8sutil/services.go index 28402b25e..a68734ae7 100644 --- a/pkg/util/k8sutil/services.go +++ b/pkg/util/k8sutil/services.go @@ -31,6 +31,7 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/errors" + servicev1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/service/v1" core "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -66,14 +67,14 @@ func CreateExporterClientServiceName(deploymentName string) string { } // CreateExporterService -func CreateExporterService(ctx context.Context, cachedStatus service.Inspector, svcs service.ModInterface, +func CreateExporterService(ctx context.Context, cachedStatus service.Inspector, svcs servicev1.ModInterface, deployment metav1.Object, owner metav1.OwnerReference) (string, bool, error) { deploymentName := deployment.GetName() svcName := CreateExporterClientServiceName(deploymentName) selectorLabels := LabelsForExporterServiceSelector(deploymentName) - if _, exists := cachedStatus.Service(svcName); exists { + if _, exists := cachedStatus.Service().V1().GetSimple(svcName); exists { return svcName, false, nil } @@ -85,7 +86,7 @@ func CreateExporterService(ctx context.Context, cachedStatus service.Inspector, Spec: core.ServiceSpec{ ClusterIP: core.ClusterIPNone, Ports: []core.ServicePort{ - core.ServicePort{ + { Name: "exporter", Protocol: core.ProtocolTCP, Port: ArangoExporterPort, @@ -108,12 +109,12 @@ func CreateExporterService(ctx context.Context, cachedStatus service.Inspector, // If the service already exists, nil is returned. // If another error occurs, that error is returned. 
// The returned bool is true if the service is created, or false when the service already existed. -func CreateHeadlessService(ctx context.Context, svcs service.ModInterface, deployment metav1.Object, +func CreateHeadlessService(ctx context.Context, svcs servicev1.ModInterface, deployment metav1.Object, owner metav1.OwnerReference) (string, bool, error) { deploymentName := deployment.GetName() svcName := CreateHeadlessServiceName(deploymentName) ports := []core.ServicePort{ - core.ServicePort{ + { Name: "server", Protocol: core.ProtocolTCP, Port: ArangoPort, @@ -132,12 +133,12 @@ func CreateHeadlessService(ctx context.Context, svcs service.ModInterface, deplo // If the service already exists, nil is returned. // If another error occurs, that error is returned. // The returned bool is true if the service is created, or false when the service already existed. -func CreateDatabaseClientService(ctx context.Context, svcs service.ModInterface, deployment metav1.Object, single bool, +func CreateDatabaseClientService(ctx context.Context, svcs servicev1.ModInterface, deployment metav1.Object, single bool, owner metav1.OwnerReference) (string, bool, error) { deploymentName := deployment.GetName() svcName := CreateDatabaseClientServiceName(deploymentName) ports := []core.ServicePort{ - core.ServicePort{ + { Name: "server", Protocol: core.ProtocolTCP, Port: ArangoPort, @@ -162,12 +163,12 @@ func CreateDatabaseClientService(ctx context.Context, svcs service.ModInterface, // If the service already exists, nil is returned. // If another error occurs, that error is returned. // The returned bool is true if the service is created, or false when the service already existed. 
-func CreateExternalAccessService(ctx context.Context, svcs service.ModInterface, svcName, role string, +func CreateExternalAccessService(ctx context.Context, svcs servicev1.ModInterface, svcName, role string, deployment metav1.Object, serviceType core.ServiceType, port, nodePort int, loadBalancerIP string, loadBalancerSourceRanges []string, owner metav1.OwnerReference) (string, bool, error) { deploymentName := deployment.GetName() ports := []core.ServicePort{ - core.ServicePort{ + { Name: "server", Protocol: core.ProtocolTCP, Port: int32(port), @@ -186,7 +187,7 @@ func CreateExternalAccessService(ctx context.Context, svcs service.ModInterface, // If the service already exists, nil is returned. // If another error occurs, that error is returned. // The returned bool is true if the service is created, or false when the service already existed. -func createService(ctx context.Context, svcs service.ModInterface, svcName, deploymentName, clusterIP, role string, +func createService(ctx context.Context, svcs servicev1.ModInterface, svcName, deploymentName, clusterIP, role string, serviceType core.ServiceType, ports []core.ServicePort, loadBalancerIP string, loadBalancerSourceRanges []string, publishNotReadyAddresses bool, owner metav1.OwnerReference) (bool, error) { labels := LabelsForDeployment(deploymentName, role) @@ -216,7 +217,7 @@ func createService(ctx context.Context, svcs service.ModInterface, svcName, depl } // CreateServiceURL creates a URL used to reach the given service. 
-func CreateServiceURL(svc core.Service, scheme string, portPredicate func(core.ServicePort) bool, nodeFetcher func() (core.NodeList, error)) (string, error) { +func CreateServiceURL(svc core.Service, scheme string, portPredicate func(core.ServicePort) bool, nodeFetcher func() ([]*core.Node, error)) (string, error) { var port int32 var nodePort int32 portFound := false @@ -255,10 +256,10 @@ func CreateServiceURL(svc core.Service, scheme string, portPredicate func(core.S if err != nil { return "", errors.WithStack(err) } - if len(nodeList.Items) == 0 { + if len(nodeList) == 0 { return "", errors.WithStack(errors.Newf("No nodes found")) } - node := nodeList.Items[rand.Intn(len(nodeList.Items))] + node := nodeList[rand.Intn(len(nodeList))] if len(node.Status.Addresses) > 0 { host = node.Status.Addresses[0].Address } diff --git a/pkg/util/kclient/fake.go b/pkg/util/kclient/fake.go index b73efd715..d78bf014c 100644 --- a/pkg/util/kclient/fake.go +++ b/pkg/util/kclient/fake.go @@ -23,9 +23,14 @@ package kclient import ( "sync" + api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" versionedFake "github.com/arangodb/kube-arangodb/pkg/generated/clientset/versioned/fake" + monitoring "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" monitoringFake "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/fake" + core "k8s.io/api/core/v1" + policy "k8s.io/api/policy/v1beta1" apiextensionsclientFake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" kubernetesFake "k8s.io/client-go/kubernetes/fake" ) @@ -35,10 +40,7 @@ func NewFakeClient() Client { } type FakeClientBuilder interface { - Kubernetes(objects ...runtime.Object) FakeClientBuilder - KubernetesExtensions(objects ...runtime.Object) FakeClientBuilder - Arango(objects ...runtime.Object) FakeClientBuilder - Monitoring(objects ...runtime.Object) FakeClientBuilder + 
Add(objects ...runtime.Object) FakeClientBuilder Client() Client } @@ -50,52 +52,171 @@ func NewFakeClientBuilder() FakeClientBuilder { type fakeClientBuilder struct { lock sync.Mutex - kubernetes []runtime.Object - kubernetesExtensions []runtime.Object - arango []runtime.Object - monitoring []runtime.Object + all []runtime.Object } -func (f *fakeClientBuilder) Kubernetes(objects ...runtime.Object) FakeClientBuilder { +func (f *fakeClientBuilder) Add(objects ...runtime.Object) FakeClientBuilder { f.lock.Lock() defer f.lock.Unlock() - f.kubernetes = append(f.kubernetes, objects...) + f.all = append(f.all, objects...) return f } -func (f *fakeClientBuilder) KubernetesExtensions(objects ...runtime.Object) FakeClientBuilder { - f.lock.Lock() - defer f.lock.Unlock() +func (f *fakeClientBuilder) filter(reg func(s *runtime.Scheme) error) []runtime.Object { + s := runtime.NewScheme() - f.kubernetesExtensions = append(f.kubernetesExtensions, objects...) + r := make([]runtime.Object, 0, len(f.all)) - return f -} + if err := reg(s); err != nil { + panic(err) + } -func (f *fakeClientBuilder) Arango(objects ...runtime.Object) FakeClientBuilder { - f.lock.Lock() - defer f.lock.Unlock() - - f.arango = append(f.arango, objects...) + for _, o := range f.all { + if o == nil { + continue + } + if _, _, err := s.ObjectKinds(o); err == nil { + r = append(r, o) + } + } - return f + return r } -func (f *fakeClientBuilder) Monitoring(objects ...runtime.Object) FakeClientBuilder { - f.lock.Lock() - defer f.lock.Unlock() +func (f *fakeClientBuilder) Client() Client { + return NewStaticClient( + kubernetesFake.NewSimpleClientset(f.filter(kubernetesFake.AddToScheme)...), + apiextensionsclientFake.NewSimpleClientset(f.filter(apiextensionsclientFake.AddToScheme)...), + versionedFake.NewSimpleClientset(f.filter(versionedFake.AddToScheme)...), + monitoringFake.NewSimpleClientset(f.filter(monitoringFake.AddToScheme)...)) +} - f.monitoring = append(f.monitoring, objects...) 
+type FakeDataInput struct { + Namespace string + + Pods map[string]*core.Pod + Secrets map[string]*core.Secret + Services map[string]*core.Service + PVCS map[string]*core.PersistentVolumeClaim + ServiceAccounts map[string]*core.ServiceAccount + PDBS map[string]*policy.PodDisruptionBudget + ServiceMonitors map[string]*monitoring.ServiceMonitor + ArangoMembers map[string]*api.ArangoMember + Nodes map[string]*core.Node + ACS map[string]*api.ArangoClusterSynchronization + AT map[string]*api.ArangoTask +} - return f +func (f FakeDataInput) asList() []runtime.Object { + var r []runtime.Object + + for k, v := range f.Pods { + c := v.DeepCopy() + c.SetName(k) + if c.GetNamespace() == "" && f.Namespace != "" { + c.SetNamespace(f.Namespace) + } + r = append(r, c) + } + for k, v := range f.Secrets { + c := v.DeepCopy() + c.SetName(k) + if c.GetNamespace() == "" && f.Namespace != "" { + c.SetNamespace(f.Namespace) + } + r = append(r, c) + } + for k, v := range f.Services { + c := v.DeepCopy() + c.SetName(k) + if c.GetNamespace() == "" && f.Namespace != "" { + c.SetNamespace(f.Namespace) + } + r = append(r, c) + } + for k, v := range f.PVCS { + c := v.DeepCopy() + c.SetName(k) + if c.GetNamespace() == "" && f.Namespace != "" { + c.SetNamespace(f.Namespace) + } + r = append(r, c) + } + for k, v := range f.ServiceAccounts { + c := v.DeepCopy() + c.SetName(k) + if c.GetNamespace() == "" && f.Namespace != "" { + c.SetNamespace(f.Namespace) + } + r = append(r, c) + } + for k, v := range f.PDBS { + c := v.DeepCopy() + c.SetName(k) + if c.GetNamespace() == "" && f.Namespace != "" { + c.SetNamespace(f.Namespace) + } + r = append(r, c) + } + for k, v := range f.ServiceMonitors { + c := v.DeepCopy() + c.SetName(k) + if c.GetNamespace() == "" && f.Namespace != "" { + c.SetNamespace(f.Namespace) + } + r = append(r, c) + } + for k, v := range f.ArangoMembers { + c := v.DeepCopy() + c.SetName(k) + if c.GetNamespace() == "" && f.Namespace != "" { + c.SetNamespace(f.Namespace) + } + r = 
append(r, c) + } + for k, v := range f.Nodes { + c := v.DeepCopy() + c.SetName(k) + if c.GetNamespace() == "" && f.Namespace != "" { + c.SetNamespace(f.Namespace) + } + r = append(r, c) + } + for k, v := range f.ACS { + c := v.DeepCopy() + c.SetName(k) + if c.GetNamespace() == "" && f.Namespace != "" { + c.SetNamespace(f.Namespace) + } + r = append(r, c) + } + for k, v := range f.AT { + c := v.DeepCopy() + c.SetName(k) + if c.GetNamespace() == "" && f.Namespace != "" { + c.SetNamespace(f.Namespace) + } + r = append(r, c) + } + + for _, o := range r { + if f.Namespace != "" { + if m, ok := o.(meta.Object); ok { + if m.GetName() == "" { + panic("Invalid data") + } + if n := m.GetNamespace(); n == "" { + m.SetNamespace(f.Namespace) + } + } + } + } + + return r } -func (f *fakeClientBuilder) Client() Client { - return NewStaticClient( - kubernetesFake.NewSimpleClientset(f.kubernetes...), - apiextensionsclientFake.NewSimpleClientset(f.kubernetesExtensions...), - versionedFake.NewSimpleClientset(f.arango...), - monitoringFake.NewSimpleClientset(f.monitoring...)) +func (f FakeDataInput) Client() Client { + return NewFakeClientBuilder().Add(f.asList()...).Client() } diff --git a/pkg/util/kclient/helpers/secret.go b/pkg/util/kclient/helpers/secret.go index 7f2c78753..6a6ac7f22 100644 --- a/pkg/util/kclient/helpers/secret.go +++ b/pkg/util/kclient/helpers/secret.go @@ -31,7 +31,7 @@ import ( func SecretConfigGetter(s secret.Inspector, name, key string) kclient.ConfigGetter { return func() (*rest.Config, string, error) { - secret, ok := s.Secret(name) + secret, ok := s.Secret().V1().GetSimple(name) if !ok { return nil, "", errors.Errorf("Secret %s not found", name) } diff --git a/pkg/util/kclient/helpers/secret_test.go b/pkg/util/kclient/helpers/secret_test.go index 7bcec2fca..05c4a86ca 100644 --- a/pkg/util/kclient/helpers/secret_test.go +++ b/pkg/util/kclient/helpers/secret_test.go @@ -25,6 +25,7 @@ import ( "testing" 
"github.com/arangodb/kube-arangodb/pkg/deployment/resources/inspector" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/throttle" "github.com/arangodb/kube-arangodb/pkg/util/kclient" "github.com/stretchr/testify/require" core "k8s.io/api/core/v1" @@ -35,10 +36,10 @@ func Test_SecretConfigGetter(t *testing.T) { t.Run("Missing secret", func(t *testing.T) { c := kclient.NewFakeClient() - i, err := inspector.NewInspector(context.Background(), c, "default") - require.NoError(t, err) + i := inspector.NewInspector(throttle.NewAlwaysThrottleComponents(), c, "default") + require.NoError(t, i.Refresh(context.Background())) - _, _, err = SecretConfigGetter(i, "secret", "key")() + _, _, err := SecretConfigGetter(i, "secret", "key")() require.EqualError(t, err, "Secret secret not found") }) @@ -55,8 +56,8 @@ func Test_SecretConfigGetter(t *testing.T) { _, err := c.Kubernetes().CoreV1().Secrets("default").Create(context.Background(), &s, meta.CreateOptions{}) require.NoError(t, err) - i, err := inspector.NewInspector(context.Background(), c, "default") - require.NoError(t, err) + i := inspector.NewInspector(throttle.NewAlwaysThrottleComponents(), c, "default") + require.NoError(t, i.Refresh(context.Background())) _, _, err = SecretConfigGetter(i, "secret", "key")() require.EqualError(t, err, "Key secret/key not found") @@ -80,8 +81,8 @@ random data _, err := c.Kubernetes().CoreV1().Secrets("default").Create(context.Background(), &s, meta.CreateOptions{}) require.NoError(t, err) - i, err := inspector.NewInspector(context.Background(), c, "default") - require.NoError(t, err) + i := inspector.NewInspector(throttle.NewAlwaysThrottleComponents(), c, "default") + require.NoError(t, i.Refresh(context.Background())) _, _, err = SecretConfigGetter(i, "secret", "key")() require.Error(t, err, "Key secret/key not found") @@ -122,8 +123,8 @@ users: _, err := c.Kubernetes().CoreV1().Secrets("default").Create(context.Background(), &s, meta.CreateOptions{}) require.NoError(t, 
err) - i, err := inspector.NewInspector(context.Background(), c, "default") - require.NoError(t, err) + i := inspector.NewInspector(throttle.NewAlwaysThrottleComponents(), c, "default") + require.NoError(t, i.Refresh(context.Background())) _, _, err = SecretConfigGetter(i, "secret", "key")() require.NoError(t, err) diff --git a/pkg/util/kclient/mod.go b/pkg/util/kclient/mod.go index c353d0f17..082544bef 100644 --- a/pkg/util/kclient/mod.go +++ b/pkg/util/kclient/mod.go @@ -21,14 +21,14 @@ package kclient import ( - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/arangomember" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/persistentvolumeclaim" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/poddisruptionbudget" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/service" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/serviceaccount" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/servicemonitor" + arangomemberv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/arangomember/v1" + persistentvolumeclaimv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/persistentvolumeclaim/v1" + podv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod/v1" + poddisruptionbudgetv1beta1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/poddisruptionbudget/v1beta1" + secretv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret/v1" + servicev1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/service/v1" + serviceaccountv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/serviceaccount/v1" + servicemonitorv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/servicemonitor/v1" ) func NewModInterface(client Client, namespace string) ModInterface { @@ 
-39,14 +39,14 @@ func NewModInterface(client Client, namespace string) ModInterface { } type ModInterface interface { - Secrets() secret.ModInterface - Pods() pod.ModInterface - Services() service.ModInterface - ServiceAccounts() serviceaccount.ModInterface - PersistentVolumeClaims() persistentvolumeclaim.ModInterface - PodDisruptionBudgets() poddisruptionbudget.ModInterface - ServiceMonitors() servicemonitor.ModInterface - ArangoMembers() arangomember.ModInterface + Secrets() secretv1.ModInterface + Pods() podv1.ModInterface + Services() servicev1.ModInterface + ServiceAccounts() serviceaccountv1.ModInterface + PersistentVolumeClaims() persistentvolumeclaimv1.ModInterface + PodDisruptionBudgets() poddisruptionbudgetv1beta1.ModInterface + ServiceMonitors() servicemonitorv1.ModInterface + ArangoMembers() arangomemberv1.ModInterface } type modInterface struct { @@ -54,34 +54,34 @@ type modInterface struct { namespace string } -func (m modInterface) PersistentVolumeClaims() persistentvolumeclaim.ModInterface { +func (m modInterface) PersistentVolumeClaims() persistentvolumeclaimv1.ModInterface { return m.client.Kubernetes().CoreV1().PersistentVolumeClaims(m.namespace) } -func (m modInterface) PodDisruptionBudgets() poddisruptionbudget.ModInterface { +func (m modInterface) PodDisruptionBudgets() poddisruptionbudgetv1beta1.ModInterface { return m.client.Kubernetes().PolicyV1beta1().PodDisruptionBudgets(m.namespace) } -func (m modInterface) ServiceMonitors() servicemonitor.ModInterface { +func (m modInterface) ServiceMonitors() servicemonitorv1.ModInterface { return m.client.Monitoring().MonitoringV1().ServiceMonitors(m.namespace) } -func (m modInterface) ArangoMembers() arangomember.ModInterface { +func (m modInterface) ArangoMembers() arangomemberv1.ModInterface { return m.client.Arango().DatabaseV1().ArangoMembers(m.namespace) } -func (m modInterface) Services() service.ModInterface { +func (m modInterface) Services() servicev1.ModInterface { return 
m.client.Kubernetes().CoreV1().Services(m.namespace) } -func (m modInterface) ServiceAccounts() serviceaccount.ModInterface { +func (m modInterface) ServiceAccounts() serviceaccountv1.ModInterface { return m.client.Kubernetes().CoreV1().ServiceAccounts(m.namespace) } -func (m modInterface) Pods() pod.ModInterface { +func (m modInterface) Pods() podv1.ModInterface { return m.client.Kubernetes().CoreV1().Pods(m.namespace) } -func (m modInterface) Secrets() secret.ModInterface { +func (m modInterface) Secrets() secretv1.ModInterface { return m.client.Kubernetes().CoreV1().Secrets(m.namespace) } diff --git a/pkg/util/parallel.go b/pkg/util/parallel.go index e5cf94b03..01048c7be 100644 --- a/pkg/util/parallel.go +++ b/pkg/util/parallel.go @@ -24,17 +24,10 @@ import "sync" // RunParallel runs actions parallelly throttling them to the given maximum number. func RunParallel(max int, actions ...func() error) error { - c := make(chan int, max) - errors := make([]error, len(actions)) - defer func() { - close(c) - for range c { - } - }() + c, close := ParallelThread(max) + defer close() - for i := 0; i < max; i++ { - c <- 0 - } + errors := make([]error, len(actions)) var wg sync.WaitGroup @@ -42,7 +35,7 @@ func RunParallel(max int, actions ...func() error) error { for id, i := range actions { go func(id int, action func() error) { defer func() { - c <- 0 + c <- struct{}{} wg.Done() }() <-c @@ -61,3 +54,17 @@ func RunParallel(max int, actions ...func() error) error { return nil } + +func ParallelThread(max int) (chan struct{}, func()) { + c := make(chan struct{}, max) + + for i := 0; i < max; i++ { + c <- struct{}{} + } + + return c, func() { + close(c) + for range c { + } + } +} diff --git a/pkg/util/tests/inspector.go b/pkg/util/tests/inspector.go index ae24b1440..61352daf0 100644 --- a/pkg/util/tests/inspector.go +++ b/pkg/util/tests/inspector.go @@ -27,13 +27,20 @@ import ( "context" "github.com/stretchr/testify/require" inspectorInterface 
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector" + "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/throttle" ) const FakeNamespace = "fake" func NewInspector(t *testing.T, c kclient.Client) inspectorInterface.Inspector { - i, err := inspector.NewInspector(context.Background(), c, FakeNamespace) - require.NoError(t, err) + i := inspector.NewInspector(throttle.NewAlwaysThrottleComponents(), c, FakeNamespace) + require.NoError(t, i.Refresh(context.Background())) return i } + +func NewEmptyInspector(t *testing.T) inspectorInterface.Inspector { + c := kclient.NewFakeClient() + + return NewInspector(t, c) +}