diff --git a/apis/v1alpha1/ack-generate-metadata.yaml b/apis/v1alpha1/ack-generate-metadata.yaml
index 2094eb1..cf0755e 100755
--- a/apis/v1alpha1/ack-generate-metadata.yaml
+++ b/apis/v1alpha1/ack-generate-metadata.yaml
@@ -1,8 +1,8 @@
 ack_generate_info:
-  build_date: "2025-09-19T17:10:48Z"
-  build_hash: 6b4211163dcc34776b01da9a18217bac0f4103fd
-  go_version: go1.24.6
-  version: v0.52.0
+  build_date: "2025-09-25T05:32:41Z"
+  build_hash: 9c388d9668ea19d0b1b65566d492c4f67c6e64c8
+  go_version: go1.24.7
+  version: 9c388d9
 api_directory_checksum: 65127f2f0a24a801fad4e043be37857f0e6bcfb9
 api_version: v1alpha1
 aws_sdk_go_version: v1.32.6
diff --git a/config/crd/bases/elasticache.services.k8s.aws_cacheclusters.yaml b/config/crd/bases/elasticache.services.k8s.aws_cacheclusters.yaml
index 4c3bc98..f77ffb4 100644
--- a/config/crd/bases/elasticache.services.k8s.aws_cacheclusters.yaml
+++ b/config/crd/bases/elasticache.services.k8s.aws_cacheclusters.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.2
+    controller-gen.kubebuilder.io/version: v0.19.0
   name: cacheclusters.elasticache.services.k8s.aws
 spec:
   group: elasticache.services.k8s.aws
diff --git a/config/crd/bases/elasticache.services.k8s.aws_cacheparametergroups.yaml b/config/crd/bases/elasticache.services.k8s.aws_cacheparametergroups.yaml
index 07453f3..af8345e 100644
--- a/config/crd/bases/elasticache.services.k8s.aws_cacheparametergroups.yaml
+++ b/config/crd/bases/elasticache.services.k8s.aws_cacheparametergroups.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.2
+    controller-gen.kubebuilder.io/version: v0.19.0
   name: cacheparametergroups.elasticache.services.k8s.aws
 spec:
   group: elasticache.services.k8s.aws
diff --git a/config/crd/bases/elasticache.services.k8s.aws_cachesubnetgroups.yaml b/config/crd/bases/elasticache.services.k8s.aws_cachesubnetgroups.yaml
index e92d339..93bb085 100644
--- a/config/crd/bases/elasticache.services.k8s.aws_cachesubnetgroups.yaml
+++ b/config/crd/bases/elasticache.services.k8s.aws_cachesubnetgroups.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.2
+    controller-gen.kubebuilder.io/version: v0.19.0
   name: cachesubnetgroups.elasticache.services.k8s.aws
 spec:
   group: elasticache.services.k8s.aws
diff --git a/config/crd/bases/elasticache.services.k8s.aws_replicationgroups.yaml b/config/crd/bases/elasticache.services.k8s.aws_replicationgroups.yaml
index ff5c41b..d41c7d9 100644
--- a/config/crd/bases/elasticache.services.k8s.aws_replicationgroups.yaml
+++ b/config/crd/bases/elasticache.services.k8s.aws_replicationgroups.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.2
+    controller-gen.kubebuilder.io/version: v0.19.0
   name: replicationgroups.elasticache.services.k8s.aws
 spec:
   group: elasticache.services.k8s.aws
diff --git a/config/crd/bases/elasticache.services.k8s.aws_serverlesscaches.yaml b/config/crd/bases/elasticache.services.k8s.aws_serverlesscaches.yaml
index 0024598..d69699c 100644
--- a/config/crd/bases/elasticache.services.k8s.aws_serverlesscaches.yaml
+++ b/config/crd/bases/elasticache.services.k8s.aws_serverlesscaches.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.2
+    controller-gen.kubebuilder.io/version: v0.19.0
   name: serverlesscaches.elasticache.services.k8s.aws
 spec:
   group: elasticache.services.k8s.aws
diff --git a/config/crd/bases/elasticache.services.k8s.aws_serverlesscachesnapshots.yaml b/config/crd/bases/elasticache.services.k8s.aws_serverlesscachesnapshots.yaml
index d75a44d..33c4a9f 100644
--- a/config/crd/bases/elasticache.services.k8s.aws_serverlesscachesnapshots.yaml
+++ b/config/crd/bases/elasticache.services.k8s.aws_serverlesscachesnapshots.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.2
+    controller-gen.kubebuilder.io/version: v0.19.0
   name: serverlesscachesnapshots.elasticache.services.k8s.aws
 spec:
   group: elasticache.services.k8s.aws
diff --git a/config/crd/bases/elasticache.services.k8s.aws_snapshots.yaml b/config/crd/bases/elasticache.services.k8s.aws_snapshots.yaml
index fb2c9b2..21aed4b 100644
--- a/config/crd/bases/elasticache.services.k8s.aws_snapshots.yaml
+++ b/config/crd/bases/elasticache.services.k8s.aws_snapshots.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.2
+    controller-gen.kubebuilder.io/version: v0.19.0
   name: snapshots.elasticache.services.k8s.aws
 spec:
   group: elasticache.services.k8s.aws
diff --git a/config/crd/bases/elasticache.services.k8s.aws_usergroups.yaml b/config/crd/bases/elasticache.services.k8s.aws_usergroups.yaml
index c3a4cc9..47baed9 100644
--- a/config/crd/bases/elasticache.services.k8s.aws_usergroups.yaml
+++ b/config/crd/bases/elasticache.services.k8s.aws_usergroups.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.2
+    controller-gen.kubebuilder.io/version: v0.19.0
   name: usergroups.elasticache.services.k8s.aws
 spec:
   group: elasticache.services.k8s.aws
diff --git a/config/crd/bases/elasticache.services.k8s.aws_users.yaml b/config/crd/bases/elasticache.services.k8s.aws_users.yaml
index f07896c..658a5f8 100644
--- a/config/crd/bases/elasticache.services.k8s.aws_users.yaml
+++ b/config/crd/bases/elasticache.services.k8s.aws_users.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.2
+    controller-gen.kubebuilder.io/version: v0.19.0
   name: users.elasticache.services.k8s.aws
 spec:
   group: elasticache.services.k8s.aws
diff --git a/go.mod b/go.mod
index 83263cd..d5316a2 100644
--- a/go.mod
+++ b/go.mod
@@ -91,3 +91,5 @@ require (
 	sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
 	sigs.k8s.io/yaml v1.4.0 // indirect
 )
+
+replace github.com/aws-controllers-k8s/runtime => github.com/gustavodiaz7722/ack-runtime v0.57.0
diff --git a/go.sum b/go.sum
index 1159aa3..b4092fb 100644
--- a/go.sum
+++ b/go.sum
@@ -2,8 +2,6 @@ github.com/aws-controllers-k8s/ec2-controller v1.0.7 h1:7MDu2bq8NFKbgzzgHYPFRT7b
 github.com/aws-controllers-k8s/ec2-controller v1.0.7/go.mod h1:PvsQehgncHgcu9FiY13M45+GkVsKI98g7G83SrgH7vY=
 github.com/aws-controllers-k8s/kms-controller v1.0.2 h1:v8nh/oaX/U6spCwBDaWyem7XXpzoP/MnkJyEjNOZN9s=
 github.com/aws-controllers-k8s/kms-controller v1.0.2/go.mod h1:BeoijsyGjJ9G5VcDjpFdxBW0IxaeKXYX497XmUJiPSQ=
-github.com/aws-controllers-k8s/runtime v0.52.0 h1:Q5UIAn6SSBr60t/DiU/zr6NLBlUuK2AG3yy2ma/9gDU=
-github.com/aws-controllers-k8s/runtime v0.52.0/go.mod h1:OkUJN+Ds799JLYZsMJrO2vDJ4snxUeHK2MgrQHbU+Qc=
 github.com/aws-controllers-k8s/sns-controller v1.0.11 h1:nnkywTHzO64y7RrrfoPNyYf1TOkkQHtlg+S0jEPKUZ8=
 github.com/aws-controllers-k8s/sns-controller v1.0.11/go.mod h1:ODQIDZR3hHQqcyif4UXVFQfEzTaWU1jqFtVr83K2p9M=
 github.com/aws/aws-sdk-go v1.49.0 h1:g9BkW1fo9GqKfwg2+zCD+TW/D36Ux+vtfJ8guF4AYmY=
@@ -90,6 +88,8 @@ github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgY
 github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gustavodiaz7722/ack-runtime v0.57.0 h1:85zJyvdPpzOTaWE0icljJcMRf0qlP0oWdOT05hMZ6Z0=
+github.com/gustavodiaz7722/ack-runtime v0.57.0/go.mod h1:OkUJN+Ds799JLYZsMJrO2vDJ4snxUeHK2MgrQHbU+Qc=
 github.com/itchyny/gojq v0.12.6 h1:VjaFn59Em2wTxDNGcrRkDK9ZHMNa8IksOgL13sLL4d0=
 github.com/itchyny/gojq v0.12.6/go.mod h1:ZHrkfu7A+RbZLy5J1/JKpS4poEqrzItSTGDItqsfP0A=
 github.com/itchyny/timefmt-go v0.1.3 h1:7M3LGVDsqcd0VZH2U+x393obrzZisp7C0uEe921iRkU=
diff --git a/helm/crds/elasticache.services.k8s.aws_cacheclusters.yaml b/helm/crds/elasticache.services.k8s.aws_cacheclusters.yaml
index 6879901..5cb8d73 100644
--- a/helm/crds/elasticache.services.k8s.aws_cacheclusters.yaml
+++ b/helm/crds/elasticache.services.k8s.aws_cacheclusters.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.2
+    controller-gen.kubebuilder.io/version: v0.19.0
   name: cacheclusters.elasticache.services.k8s.aws
 spec:
   group: elasticache.services.k8s.aws
diff --git a/helm/crds/elasticache.services.k8s.aws_cacheparametergroups.yaml b/helm/crds/elasticache.services.k8s.aws_cacheparametergroups.yaml
index 07453f3..af8345e 100644
--- a/helm/crds/elasticache.services.k8s.aws_cacheparametergroups.yaml
+++ b/helm/crds/elasticache.services.k8s.aws_cacheparametergroups.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.2
+    controller-gen.kubebuilder.io/version: v0.19.0
   name: cacheparametergroups.elasticache.services.k8s.aws
 spec:
   group: elasticache.services.k8s.aws
diff --git a/helm/crds/elasticache.services.k8s.aws_cachesubnetgroups.yaml b/helm/crds/elasticache.services.k8s.aws_cachesubnetgroups.yaml
index 56b43ae..1c2afcd 100644
--- a/helm/crds/elasticache.services.k8s.aws_cachesubnetgroups.yaml
+++ b/helm/crds/elasticache.services.k8s.aws_cachesubnetgroups.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.2
+    controller-gen.kubebuilder.io/version: v0.19.0
   name: cachesubnetgroups.elasticache.services.k8s.aws
 spec:
   group: elasticache.services.k8s.aws
diff --git a/helm/crds/elasticache.services.k8s.aws_replicationgroups.yaml b/helm/crds/elasticache.services.k8s.aws_replicationgroups.yaml
index 8c553cd..ad2fab0 100644
--- a/helm/crds/elasticache.services.k8s.aws_replicationgroups.yaml
+++ b/helm/crds/elasticache.services.k8s.aws_replicationgroups.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.2
+    controller-gen.kubebuilder.io/version: v0.19.0
   name: replicationgroups.elasticache.services.k8s.aws
 spec:
   group: elasticache.services.k8s.aws
diff --git a/helm/crds/elasticache.services.k8s.aws_serverlesscaches.yaml b/helm/crds/elasticache.services.k8s.aws_serverlesscaches.yaml
index 0024598..d69699c 100644
--- a/helm/crds/elasticache.services.k8s.aws_serverlesscaches.yaml
+++ b/helm/crds/elasticache.services.k8s.aws_serverlesscaches.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.2
+    controller-gen.kubebuilder.io/version: v0.19.0
   name: serverlesscaches.elasticache.services.k8s.aws
 spec:
   group: elasticache.services.k8s.aws
diff --git a/helm/crds/elasticache.services.k8s.aws_serverlesscachesnapshots.yaml b/helm/crds/elasticache.services.k8s.aws_serverlesscachesnapshots.yaml
index d75a44d..33c4a9f 100644
--- a/helm/crds/elasticache.services.k8s.aws_serverlesscachesnapshots.yaml
+++ b/helm/crds/elasticache.services.k8s.aws_serverlesscachesnapshots.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.2
+    controller-gen.kubebuilder.io/version: v0.19.0
   name: serverlesscachesnapshots.elasticache.services.k8s.aws
 spec:
   group: elasticache.services.k8s.aws
diff --git a/helm/crds/elasticache.services.k8s.aws_snapshots.yaml b/helm/crds/elasticache.services.k8s.aws_snapshots.yaml
index fb2c9b2..21aed4b 100644
--- a/helm/crds/elasticache.services.k8s.aws_snapshots.yaml
+++ b/helm/crds/elasticache.services.k8s.aws_snapshots.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.2
+    controller-gen.kubebuilder.io/version: v0.19.0
   name: snapshots.elasticache.services.k8s.aws
 spec:
   group: elasticache.services.k8s.aws
diff --git a/helm/crds/elasticache.services.k8s.aws_usergroups.yaml b/helm/crds/elasticache.services.k8s.aws_usergroups.yaml
index c3a4cc9..47baed9 100644
--- a/helm/crds/elasticache.services.k8s.aws_usergroups.yaml
+++ b/helm/crds/elasticache.services.k8s.aws_usergroups.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.2
+    controller-gen.kubebuilder.io/version: v0.19.0
   name: usergroups.elasticache.services.k8s.aws
 spec:
   group: elasticache.services.k8s.aws
diff --git a/helm/crds/elasticache.services.k8s.aws_users.yaml b/helm/crds/elasticache.services.k8s.aws_users.yaml
index f07896c..658a5f8 100644
--- a/helm/crds/elasticache.services.k8s.aws_users.yaml
+++ b/helm/crds/elasticache.services.k8s.aws_users.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.2
+    controller-gen.kubebuilder.io/version: v0.19.0
   name: users.elasticache.services.k8s.aws
 spec:
   group: elasticache.services.k8s.aws
diff --git a/helm/crds/services.k8s.aws_adoptedresources.yaml b/helm/crds/services.k8s.aws_adoptedresources.yaml
index b7be322..d6cdd10 100644
--- a/helm/crds/services.k8s.aws_adoptedresources.yaml
+++ b/helm/crds/services.k8s.aws_adoptedresources.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.2
+    controller-gen.kubebuilder.io/version: v0.19.0
   name: adoptedresources.services.k8s.aws
 spec:
   group: services.k8s.aws
diff --git a/helm/crds/services.k8s.aws_fieldexports.yaml b/helm/crds/services.k8s.aws_fieldexports.yaml
index 49b4f38..6e2c61e 100644
--- a/helm/crds/services.k8s.aws_fieldexports.yaml
+++ b/helm/crds/services.k8s.aws_fieldexports.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.2
+    controller-gen.kubebuilder.io/version: v0.19.0
   name: fieldexports.services.k8s.aws
 spec:
   group: services.k8s.aws
diff --git a/pkg/resource/cache_cluster/references.go b/pkg/resource/cache_cluster/references.go
index e2cbe57..ece5062 100644
--- a/pkg/resource/cache_cluster/references.go
+++ b/pkg/resource/cache_cluster/references.go
@@ -25,6 +25,7 @@ import (
 
 	ec2apitypes "github.com/aws-controllers-k8s/ec2-controller/apis/v1alpha1"
 	ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1"
+	ackcondition "github.com/aws-controllers-k8s/runtime/pkg/condition"
 	ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors"
 	acktypes "github.com/aws-controllers-k8s/runtime/pkg/types"
 	snsapitypes "github.com/aws-controllers-k8s/sns-controller/apis/v1alpha1"
@@ -207,8 +208,9 @@ func getReferencedResourceState_CacheParameterGroup(
 	}
 	var refResourceTerminal bool
 	for _, cond := range obj.Status.Conditions {
-		if cond.Type == ackv1alpha1.ConditionTypeTerminal &&
-			cond.Status == corev1.ConditionTrue {
+		if cond.Type == ackv1alpha1.ConditionTypeReady &&
+			cond.Status == corev1.ConditionFalse &&
+			*cond.Reason == ackcondition.TerminalReason {
 			return ackerr.ResourceReferenceTerminalFor(
 				"CacheParameterGroup",
 				namespace, name)
@@ -219,14 +221,14 @@ func getReferencedResourceState_CacheParameterGroup(
 			"CacheParameterGroup",
 			namespace, name)
 	}
-	var refResourceSynced bool
+	var refResourceReady bool
 	for _, cond := range obj.Status.Conditions {
-		if cond.Type == ackv1alpha1.ConditionTypeResourceSynced &&
+		if cond.Type == ackv1alpha1.ConditionTypeReady &&
 			cond.Status == corev1.ConditionTrue {
-			refResourceSynced = true
+			refResourceReady = true
 		}
 	}
-	if !refResourceSynced {
+	if !refResourceReady {
 		return ackerr.ResourceReferenceNotSyncedFor(
 			"CacheParameterGroup",
 			namespace, name)
@@ -290,8 +292,9 @@ func getReferencedResourceState_CacheSubnetGroup(
 	}
 	var refResourceTerminal bool
 	for _, cond := range obj.Status.Conditions {
-		if cond.Type == ackv1alpha1.ConditionTypeTerminal &&
-			cond.Status == corev1.ConditionTrue {
+		if cond.Type == ackv1alpha1.ConditionTypeReady &&
+			cond.Status == corev1.ConditionFalse &&
+			*cond.Reason == ackcondition.TerminalReason {
 			return ackerr.ResourceReferenceTerminalFor(
 				"CacheSubnetGroup",
 				namespace, name)
@@ -302,14 +305,14 @@ func getReferencedResourceState_CacheSubnetGroup(
 			"CacheSubnetGroup",
 			namespace, name)
 	}
-	var refResourceSynced bool
+	var refResourceReady bool
 	for _, cond := range obj.Status.Conditions {
-		if cond.Type == ackv1alpha1.ConditionTypeResourceSynced &&
+		if cond.Type == ackv1alpha1.ConditionTypeReady &&
 			cond.Status == corev1.ConditionTrue {
-			refResourceSynced = true
+			refResourceReady = true
 		}
 	}
-	if !refResourceSynced {
+	if !refResourceReady {
 		return ackerr.ResourceReferenceNotSyncedFor(
 			"CacheSubnetGroup",
 			namespace, name)
@@ -373,8 +376,9 @@ func getReferencedResourceState_Topic(
 	}
 	var refResourceTerminal bool
 	for _, cond := range obj.Status.Conditions {
-		if cond.Type == ackv1alpha1.ConditionTypeTerminal &&
-			cond.Status == corev1.ConditionTrue {
+		if cond.Type == ackv1alpha1.ConditionTypeReady &&
+			cond.Status == corev1.ConditionFalse &&
+			*cond.Reason == ackcondition.TerminalReason {
 			return ackerr.ResourceReferenceTerminalFor(
 				"Topic",
 				namespace, name)
@@ -385,14 +389,14 @@ func getReferencedResourceState_Topic(
 			"Topic",
 			namespace, name)
 	}
-	var refResourceSynced bool
+	var refResourceReady bool
 	for _, cond := range obj.Status.Conditions {
-		if cond.Type == ackv1alpha1.ConditionTypeResourceSynced &&
+		if cond.Type == ackv1alpha1.ConditionTypeReady &&
 			cond.Status == corev1.ConditionTrue {
-			refResourceSynced = true
+			refResourceReady = true
 		}
 	}
-	if !refResourceSynced {
+	if !refResourceReady {
 		return ackerr.ResourceReferenceNotSyncedFor(
 			"Topic",
 			namespace, name)
@@ -456,8 +460,9 @@ func getReferencedResourceState_ReplicationGroup(
 	}
 	var refResourceTerminal bool
 	for _, cond := range obj.Status.Conditions {
-		if cond.Type == ackv1alpha1.ConditionTypeTerminal &&
-			cond.Status == corev1.ConditionTrue {
+		if cond.Type == ackv1alpha1.ConditionTypeReady &&
+			cond.Status == corev1.ConditionFalse &&
+			*cond.Reason == ackcondition.TerminalReason {
 			return ackerr.ResourceReferenceTerminalFor(
 				"ReplicationGroup",
 				namespace, name)
@@ -468,14 +473,14 @@ func getReferencedResourceState_ReplicationGroup(
 			"ReplicationGroup",
 			namespace, name)
 	}
-	var refResourceSynced bool
+	var refResourceReady bool
 	for _, cond := range obj.Status.Conditions {
-		if cond.Type == ackv1alpha1.ConditionTypeResourceSynced &&
+		if cond.Type == ackv1alpha1.ConditionTypeReady &&
 			cond.Status == corev1.ConditionTrue {
-			refResourceSynced = true
+			refResourceReady = true
 		}
 	}
-	if !refResourceSynced {
+	if !refResourceReady {
 		return ackerr.ResourceReferenceNotSyncedFor(
 			"ReplicationGroup",
 			namespace, name)
@@ -544,8 +549,9 @@ func getReferencedResourceState_SecurityGroup(
 	}
 	var refResourceTerminal bool
 	for _, cond := range obj.Status.Conditions {
-		if cond.Type == ackv1alpha1.ConditionTypeTerminal &&
-			cond.Status == corev1.ConditionTrue {
+		if cond.Type == ackv1alpha1.ConditionTypeReady &&
+			cond.Status == corev1.ConditionFalse &&
+			*cond.Reason == ackcondition.TerminalReason {
 			return ackerr.ResourceReferenceTerminalFor(
 				"SecurityGroup",
 				namespace, name)
@@ -556,14 +562,14 @@ func getReferencedResourceState_SecurityGroup(
 			"SecurityGroup",
 			namespace, name)
 	}
-	var refResourceSynced bool
+	var refResourceReady bool
 	for _, cond := range obj.Status.Conditions {
-		if cond.Type == ackv1alpha1.ConditionTypeResourceSynced &&
+		if cond.Type == ackv1alpha1.ConditionTypeReady &&
 			cond.Status == corev1.ConditionTrue {
-			refResourceSynced = true
+			refResourceReady = true
 		}
 	}
-	if !refResourceSynced {
+	if !refResourceReady {
 		return ackerr.ResourceReferenceNotSyncedFor(
 			"SecurityGroup",
 			namespace, name)
@@ -627,8 +633,9 @@ func getReferencedResourceState_Snapshot(
 	}
 	var refResourceTerminal bool
 	for _, cond := range obj.Status.Conditions {
-		if cond.Type == ackv1alpha1.ConditionTypeTerminal &&
-			cond.Status == corev1.ConditionTrue {
+		if cond.Type == ackv1alpha1.ConditionTypeReady &&
+			cond.Status == corev1.ConditionFalse &&
+			*cond.Reason == ackcondition.TerminalReason {
 			return ackerr.ResourceReferenceTerminalFor(
 				"Snapshot",
 				namespace, name)
@@ -639,14 +646,14 @@ func getReferencedResourceState_Snapshot(
 			"Snapshot",
 			namespace, name)
 	}
-	var refResourceSynced bool
+	var refResourceReady bool
 	for _, cond := range obj.Status.Conditions {
-		if cond.Type == ackv1alpha1.ConditionTypeResourceSynced &&
+		if cond.Type == ackv1alpha1.ConditionTypeReady &&
 			cond.Status == corev1.ConditionTrue {
-			refResourceSynced = true
+			refResourceReady = true
 		}
 	}
-	if !refResourceSynced {
+	if !refResourceReady {
 		return ackerr.ResourceReferenceNotSyncedFor(
 			"Snapshot",
 			namespace, name)
diff --git a/pkg/resource/cache_subnet_group/references.go b/pkg/resource/cache_subnet_group/references.go
index 70fa70f..de9af4a 100644
--- a/pkg/resource/cache_subnet_group/references.go
+++ b/pkg/resource/cache_subnet_group/references.go
@@ -25,6 +25,7 @@ import (
 
 	ec2apitypes "github.com/aws-controllers-k8s/ec2-controller/apis/v1alpha1"
 	ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1"
+	ackcondition "github.com/aws-controllers-k8s/runtime/pkg/condition"
 	ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors"
 	acktypes "github.com/aws-controllers-k8s/runtime/pkg/types"
 
@@ -141,8 +142,9 @@ func getReferencedResourceState_Subnet(
 	}
 	var refResourceTerminal bool
 	for _, cond := range obj.Status.Conditions {
-		if cond.Type == ackv1alpha1.ConditionTypeTerminal &&
-			cond.Status == corev1.ConditionTrue {
+		if cond.Type == ackv1alpha1.ConditionTypeReady &&
+			cond.Status == corev1.ConditionFalse &&
+			*cond.Reason == ackcondition.TerminalReason {
 			return ackerr.ResourceReferenceTerminalFor(
 				"Subnet",
 				namespace, name)
@@ -153,14 +155,14 @@ func getReferencedResourceState_Subnet(
 			"Subnet",
 			namespace, name)
 	}
-	var refResourceSynced bool
+	var refResourceReady bool
 	for _, cond := range obj.Status.Conditions {
-		if cond.Type == ackv1alpha1.ConditionTypeResourceSynced &&
+		if cond.Type == ackv1alpha1.ConditionTypeReady &&
 			cond.Status == corev1.ConditionTrue {
-			refResourceSynced = true
+			refResourceReady = true
 		}
 	}
-	if !refResourceSynced {
+	if !refResourceReady {
 		return ackerr.ResourceReferenceNotSyncedFor(
 			"Subnet",
 			namespace, name)
diff --git a/pkg/resource/replication_group/references.go b/pkg/resource/replication_group/references.go
index 8e631e7..d1381cc 100644
--- a/pkg/resource/replication_group/references.go
+++ b/pkg/resource/replication_group/references.go
@@ -25,6 +25,7 @@ import (
 
 	ec2apitypes "github.com/aws-controllers-k8s/ec2-controller/apis/v1alpha1"
 	ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1"
+	ackcondition "github.com/aws-controllers-k8s/runtime/pkg/condition"
 	ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors"
 	acktypes "github.com/aws-controllers-k8s/runtime/pkg/types"
 
@@ -161,8 +162,9 @@ func getReferencedResourceState_CacheParameterGroup(
 	}
 	var refResourceTerminal bool
 	for _, cond := range obj.Status.Conditions {
-		if cond.Type == ackv1alpha1.ConditionTypeTerminal &&
-			cond.Status == corev1.ConditionTrue {
+		if cond.Type == ackv1alpha1.ConditionTypeReady &&
+			cond.Status == corev1.ConditionFalse &&
+			*cond.Reason == ackcondition.TerminalReason {
 			return ackerr.ResourceReferenceTerminalFor(
 				"CacheParameterGroup",
 				namespace, name)
@@ -173,14 +175,14 @@ func getReferencedResourceState_CacheParameterGroup(
 			"CacheParameterGroup",
 			namespace, name)
 	}
-	var refResourceSynced bool
+	var refResourceReady bool
 	for _, cond := range obj.Status.Conditions {
-		if cond.Type == ackv1alpha1.ConditionTypeResourceSynced &&
+		if cond.Type == ackv1alpha1.ConditionTypeReady &&
 			cond.Status == corev1.ConditionTrue {
-			refResourceSynced = true
+			refResourceReady = true
 		}
 	}
-	if !refResourceSynced {
+	if !refResourceReady {
 		return ackerr.ResourceReferenceNotSyncedFor(
 			"CacheParameterGroup",
 			namespace, name)
@@ -244,8 +246,9 @@ func getReferencedResourceState_CacheSubnetGroup(
 	}
 	var refResourceTerminal bool
 	for _, cond := range obj.Status.Conditions {
-		if cond.Type == ackv1alpha1.ConditionTypeTerminal &&
-			cond.Status == corev1.ConditionTrue {
+		if cond.Type == ackv1alpha1.ConditionTypeReady &&
+			cond.Status == corev1.ConditionFalse &&
+			*cond.Reason == ackcondition.TerminalReason {
 			return ackerr.ResourceReferenceTerminalFor(
 				"CacheSubnetGroup",
 				namespace, name)
@@ -256,14 +259,14 @@ func getReferencedResourceState_CacheSubnetGroup(
 			"CacheSubnetGroup",
 			namespace, name)
 	}
-	var refResourceSynced bool
+	var refResourceReady bool
 	for _, cond := range obj.Status.Conditions {
-		if cond.Type == ackv1alpha1.ConditionTypeResourceSynced &&
+		if cond.Type == ackv1alpha1.ConditionTypeReady &&
 			cond.Status == corev1.ConditionTrue {
-			refResourceSynced = true
+			refResourceReady = true
 		}
 	}
-	if !refResourceSynced {
+	if !refResourceReady {
 		return ackerr.ResourceReferenceNotSyncedFor(
 			"CacheSubnetGroup",
 			namespace, name)
@@ -332,8 +335,9 @@ func getReferencedResourceState_SecurityGroup(
 	}
 	var refResourceTerminal bool
 	for _, cond := range obj.Status.Conditions {
-		if cond.Type == ackv1alpha1.ConditionTypeTerminal &&
-			cond.Status == corev1.ConditionTrue {
+		if cond.Type == ackv1alpha1.ConditionTypeReady &&
+			cond.Status == corev1.ConditionFalse &&
+			*cond.Reason == ackcondition.TerminalReason {
 			return ackerr.ResourceReferenceTerminalFor(
 				"SecurityGroup",
 				namespace, name)
@@ -344,14 +348,14 @@ func getReferencedResourceState_SecurityGroup(
 			"SecurityGroup",
 			namespace, name)
 	}
-	var refResourceSynced bool
+	var refResourceReady bool
 	for _, cond := range obj.Status.Conditions {
-		if cond.Type == ackv1alpha1.ConditionTypeResourceSynced &&
+		if cond.Type == ackv1alpha1.ConditionTypeReady &&
 			cond.Status == corev1.ConditionTrue {
-			refResourceSynced = true
+			refResourceReady = true
 		}
 	}
-	if !refResourceSynced {
+	if !refResourceReady {
 		return ackerr.ResourceReferenceNotSyncedFor(
 			"SecurityGroup",
 			namespace, name)
diff --git a/pkg/resource/serverless_cache/references.go b/pkg/resource/serverless_cache/references.go
index 357ec68..056fce2 100644
--- a/pkg/resource/serverless_cache/references.go
+++ b/pkg/resource/serverless_cache/references.go
@@ -25,6 +25,7 @@ import (
 
 	ec2apitypes "github.com/aws-controllers-k8s/ec2-controller/apis/v1alpha1"
 	ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1"
+	ackcondition "github.com/aws-controllers-k8s/runtime/pkg/condition"
 	ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors"
 	acktypes "github.com/aws-controllers-k8s/runtime/pkg/types"
 
@@ -155,8 +156,9 @@ func getReferencedResourceState_SecurityGroup(
 	}
 	var refResourceTerminal bool
 	for _, cond := range obj.Status.Conditions {
-		if cond.Type == ackv1alpha1.ConditionTypeTerminal &&
-			cond.Status == corev1.ConditionTrue {
+		if cond.Type == ackv1alpha1.ConditionTypeReady &&
+			cond.Status == corev1.ConditionFalse &&
+			*cond.Reason == ackcondition.TerminalReason {
 			return ackerr.ResourceReferenceTerminalFor(
 				"SecurityGroup",
 				namespace, name)
@@ -167,14 +169,14 @@ func getReferencedResourceState_SecurityGroup(
 			"SecurityGroup",
 			namespace, name)
 	}
-	var refResourceSynced bool
+	var refResourceReady bool
 	for _, cond := range obj.Status.Conditions {
-		if cond.Type == ackv1alpha1.ConditionTypeResourceSynced &&
+		if cond.Type == ackv1alpha1.ConditionTypeReady &&
 			cond.Status == corev1.ConditionTrue {
-			refResourceSynced = true
+			refResourceReady = true
 		}
 	}
-	if !refResourceSynced {
+	if !refResourceReady {
 		return ackerr.ResourceReferenceNotSyncedFor(
 			"SecurityGroup",
 			namespace, name)
@@ -243,8 +245,9 @@ func getReferencedResourceState_Subnet(
 	}
 	var refResourceTerminal bool
 	for _, cond := range obj.Status.Conditions {
-		if cond.Type == ackv1alpha1.ConditionTypeTerminal &&
-			cond.Status == corev1.ConditionTrue {
+		if cond.Type == ackv1alpha1.ConditionTypeReady &&
+			cond.Status == corev1.ConditionFalse &&
+			*cond.Reason == ackcondition.TerminalReason {
 			return ackerr.ResourceReferenceTerminalFor(
 				"Subnet",
 				namespace, name)
@@ -255,14 +258,14 @@ func getReferencedResourceState_Subnet(
 			"Subnet",
 			namespace, name)
 	}
-	var refResourceSynced bool
+	var refResourceReady bool
 	for _, cond := range obj.Status.Conditions {
-		if cond.Type == ackv1alpha1.ConditionTypeResourceSynced &&
+		if cond.Type == ackv1alpha1.ConditionTypeReady &&
 			cond.Status == corev1.ConditionTrue {
-			refResourceSynced = true
+			refResourceReady = true
 		}
 	}
-	if !refResourceSynced {
+	if !refResourceReady {
 		return ackerr.ResourceReferenceNotSyncedFor(
 			"Subnet",
 			namespace, name)
diff --git a/pkg/resource/serverless_cache_snapshot/references.go b/pkg/resource/serverless_cache_snapshot/references.go
index e144367..1fa4618 100644
--- a/pkg/resource/serverless_cache_snapshot/references.go
+++ b/pkg/resource/serverless_cache_snapshot/references.go
@@ -25,6 +25,7 @@ import (
 
 	kmsapitypes "github.com/aws-controllers-k8s/kms-controller/apis/v1alpha1"
 	ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1"
+	ackcondition "github.com/aws-controllers-k8s/runtime/pkg/condition"
 	ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors"
 	acktypes "github.com/aws-controllers-k8s/runtime/pkg/types"
 
@@ -150,8 +151,9 @@ func getReferencedResourceState_Key(
 	}
 	var refResourceTerminal bool
 	for _, cond := range obj.Status.Conditions {
-		if cond.Type == ackv1alpha1.ConditionTypeTerminal &&
-			cond.Status == corev1.ConditionTrue {
+		if cond.Type == ackv1alpha1.ConditionTypeReady &&
+			cond.Status == corev1.ConditionFalse &&
+			*cond.Reason == ackcondition.TerminalReason {
 			return ackerr.ResourceReferenceTerminalFor(
 				"Key",
 				namespace, name)
@@ -162,14 +164,14 @@ func getReferencedResourceState_Key(
 			"Key",
 			namespace, name)
 	}
-	var refResourceSynced bool
+	var refResourceReady bool
 	for _, cond := range obj.Status.Conditions {
-		if cond.Type == ackv1alpha1.ConditionTypeResourceSynced &&
+		if cond.Type == ackv1alpha1.ConditionTypeReady &&
 			cond.Status == corev1.ConditionTrue {
-			refResourceSynced = true
+			refResourceReady = true
 		}
 	}
-	if !refResourceSynced {
+	if !refResourceReady {
 		return ackerr.ResourceReferenceNotSyncedFor(
 			"Key",
 			namespace, name)
@@ -233,8 +235,9 @@ func getReferencedResourceState_ServerlessCache(
 	}
 	var refResourceTerminal bool
 	for _, cond := range obj.Status.Conditions {
-		if cond.Type == ackv1alpha1.ConditionTypeTerminal &&
-			cond.Status == corev1.ConditionTrue {
+		if cond.Type == ackv1alpha1.ConditionTypeReady &&
+			cond.Status == corev1.ConditionFalse &&
+			*cond.Reason == ackcondition.TerminalReason {
 			return ackerr.ResourceReferenceTerminalFor(
 				"ServerlessCache",
 				namespace, name)
@@ -245,14 +248,14 @@ func getReferencedResourceState_ServerlessCache(
 			"ServerlessCache",
 			namespace, name)
 	}
-	var refResourceSynced bool
+	var refResourceReady bool
 	for _, cond := range obj.Status.Conditions {
-		if cond.Type == ackv1alpha1.ConditionTypeResourceSynced &&
+		if cond.Type == ackv1alpha1.ConditionTypeReady &&
 			cond.Status == corev1.ConditionTrue {
-			refResourceSynced = true
+			refResourceReady = true
 		}
 	}
-	if !refResourceSynced {
+	if !refResourceReady {
 		return ackerr.ResourceReferenceNotSyncedFor(
 			"ServerlessCache",
 			namespace, name)
diff --git a/test/e2e/requirements.txt b/test/e2e/requirements.txt
index e469fd1..be54b36 100644
--- a/test/e2e/requirements.txt
+++ b/test/e2e/requirements.txt
@@ -1 +1 @@
-acktest @ git+https://github.com/aws-controllers-k8s/test-infra.git@5a09bbdb961ea14a65b15b63769134125023ac61
\ No newline at end of file
+acktest @ git+https://github.com/gustavodiaz7722/ack-test-infra.git@1adb046336bb1876db55d5a40d317e73b74251d8
diff --git a/test/e2e/tests/test_cache_cluster.py b/test/e2e/tests/test_cache_cluster.py
index 7057da1..639e92d 100644
--- a/test/e2e/tests/test_cache_cluster.py
+++ b/test/e2e/tests/test_cache_cluster.py
@@ -70,9 +70,9 @@ def get_and_assert_status(ref: k8s.CustomResourceReference, expected_status: str
     assert cr['status']['cacheClusterStatus'] == expected_status
 
     if expected_synced:
-        condition.assert_synced(ref)
+        condition.assert_ready(ref)
     else:
-        condition.assert_not_synced(ref)
+        condition.assert_not_ready(ref)
 
 
 @pytest.fixture(scope="module")
diff --git a/test/e2e/tests/test_replicationgroup.py b/test/e2e/tests/test_replicationgroup.py
index 5667c28..d910b2b 100644
--- a/test/e2e/tests/test_replicationgroup.py
+++ b/test/e2e/tests/test_replicationgroup.py
@@ -20,7 +20,7 @@ from time import sleep
 
 from acktest.resources import random_suffix_name
-from acktest.k8s import resource as k8s
+from acktest.k8s import resource as k8s, condition
 from e2e import service_marker, CRD_GROUP, CRD_VERSION, load_elasticache_resource
 from e2e.bootstrap_resources import get_bootstrap_resources
 from e2e.util import retrieve_cache_cluster, retrieve_replication_group, assert_recoverable_condition_set, retrieve_replication_group_tags
 
@@ -171,7 +171,7 @@ class TestReplicationGroup:
     def test_rg_cmd_fromsnapshot(self, rg_cmd_fromsnapshot):
         (reference, _) = rg_cmd_fromsnapshot
         assert k8s.wait_on_condition(
-            reference, "ACK.ResourceSynced", "True", wait_periods=90)
+            reference, "Ready", "True", wait_periods=90)
 
     def test_rg_invalid_primary(self, make_rg_name, make_replication_group, rg_deletion_waiter):
         input_dict = {
@@ -183,7 +183,7 @@ def test_rg_invalid_primary(self, make_rg_name, make_replication_group, rg_delet
 
         sleep(DEFAULT_WAIT_SECS)
         resource = k8s.get_resource(reference)
-        assert_recoverable_condition_set(resource)
+        condition.assert_recoverable(reference)
 
         # Cleanup
         k8s.delete_custom_resource(reference)
@@ -196,7 +196,7 @@ def test_rg_invalid_primary(self, make_rg_name, make_replication_group, rg_delet
     def test_rg_update(self, rg_update_input, rg_update):
         (reference, _) = rg_update
         assert k8s.wait_on_condition(
-            reference, "ACK.ResourceSynced", "True", wait_periods=90)
+            reference, "Ready", "True", wait_periods=90)
 
         # desired initial state
         cr = k8s.get_resource(reference)
@@ -246,7 +246,7 @@ def test_rg_update(self, rg_update_input, rg_update):
         _ = k8s.patch_custom_resource(reference, patch)
         sleep(DEFAULT_WAIT_SECS)
         assert k8s.wait_on_condition(
-            reference, "ACK.ResourceSynced", "True", wait_periods=90)
+            reference, "Ready", "True", wait_periods=90)
 
         # Assert new state
         resource = k8s.get_resource(reference)
@@ -266,71 +266,11 @@ def test_rg_update(self, rg_update_input, rg_update):
         LONG_WAIT_SECS = 180
         sleep(LONG_WAIT_SECS)
         assert k8s.wait_on_condition(
-            reference, "ACK.ResourceSynced", "True", wait_periods=90)
+            reference, "Ready", "True", wait_periods=90)
 
         # assert new tags
         assert_spec_tags(rg_id, new_tags)
 
-    # test modifying properties related to tolerance: replica promotion, multi AZ, automatic failover
-    def test_rg_fault_tolerance(self, rg_fault_tolerance):
-        (reference, _) = rg_fault_tolerance
-        assert k8s.wait_on_condition(
-            reference, "ACK.ResourceSynced", "True", wait_periods=90)
-
-        # assert initial state
-        resource = k8s.get_resource(reference)
-        assert resource['status']['automaticFailover'] == "enabled"
-        assert resource['status']['multiAZ'] == "enabled"
-
-        # retrieve current names of primary (currently node1) and replica (currently node2)
-        members = resource['status']['nodeGroups'][0]['nodeGroupMembers']
-        assert len(members) == 2
-        node1 = None
-        node2 = None
-        for node in members:
-            if node['currentRole'] == 'primary':
-                node1 = node['cacheClusterID']
-            elif node['currentRole'] == 'replica':
-                node2 = node['cacheClusterID']
-        assert node1 is not None and node2 is not None
-
-        # disable both fields, wait for resource to sync
-        patch = {"spec": {"automaticFailoverEnabled": False,
-                          "multiAZEnabled": False}}
-        _ = k8s.patch_custom_resource(reference, patch)
-        sleep(DEFAULT_WAIT_SECS)
-        assert k8s.wait_on_condition(
-            reference, "ACK.ResourceSynced", "True", wait_periods=90)
-
-        # assert new state
-        resource = k8s.get_resource(reference)
-        assert resource['status']['automaticFailover'] == "disabled"
-        assert resource['status']['multiAZ'] == "disabled"
-
-        # promote replica to primary, re-enable both multi AZ and AF
-        patch = {"spec": {"primaryClusterID": node2,
-                          "automaticFailoverEnabled": True, "multiAZEnabled": True}}
-        _ = k8s.patch_custom_resource(reference, patch)
-        sleep(DEFAULT_WAIT_SECS)
-        assert k8s.wait_on_condition(
-            reference, "ACK.ResourceSynced", "True", wait_periods=90)
-
-        # assert roles
-        resource = k8s.get_resource(reference)
-        members = resource['status']['nodeGroups'][0]['nodeGroupMembers']
-        assert len(members) == 2
-        for node in members:
-            if node['cacheClusterID'] == node1:
-                assert node['currentRole'] == 'replica'
-            elif node['cacheClusterID'] == node2:
-                assert node['currentRole'] == 'primary'
-            else:
-                raise AssertionError(f"Unknown node {node['cacheClusterID']}")
-
-        # assert AF and multi AZ
-        assert resource['status']['automaticFailover'] == "enabled"
-        assert resource['status']['multiAZ'] == "enabled"
-
     def test_rg_creation_deletion(self, make_rg_name, make_replication_group, rg_deletion_waiter):
         input_dict = {
             "RG_ID": make_rg_name("rg-delete"),
@@ -343,7 +283,7 @@ def test_rg_creation_deletion(self, make_rg_name, make_replication_group, rg_del
             "replicationgroup_create_delete", input_dict, input_dict["RG_ID"])
 
         assert k8s.wait_on_condition(
-            reference, "ACK.ResourceSynced", "True", wait_periods=90)
+            reference, "Ready", "True", wait_periods=90)
 
         # assertions after initial creation
         resource = k8s.get_resource(reference)
diff --git a/test/e2e/tests/test_serverless_cache.py b/test/e2e/tests/test_serverless_cache.py
index e25078f..31d7539 100644
--- a/test/e2e/tests/test_serverless_cache.py
+++ b/test/e2e/tests/test_serverless_cache.py
@@ -58,9 +58,9 @@ def get_and_assert_status(ref: k8s.CustomResourceReference, expected_status: str
     assert cr['status']['status'] == expected_status
 
     if expected_synced:
-        condition.assert_synced(ref)
+        condition.assert_ready(ref)
     else:
-        condition.assert_not_synced(ref)
+        condition.assert_not_ready(ref)
 
 
 @pytest.fixture(scope="module")
@@ -129,7 +129,7 @@ def test_create_update_delete_serverless_cache(self, simple_serverless_cache, el
         (ref, _) = simple_serverless_cache
 
         assert k8s.wait_on_condition(
-            ref, "ACK.ResourceSynced", "True", wait_periods=90
+            ref, "Ready", "True", wait_periods=90
         )
 
         get_and_assert_status(ref, "available", True)
@@ -162,7 +162,7 @@ def test_create_update_delete_serverless_cache(self, simple_serverless_cache, el
 
         # Wait for update to be synced
         assert k8s.wait_on_condition(
-            ref, "ACK.ResourceSynced", "True", wait_periods=90
+            ref, "Ready", "True", wait_periods=90
         )
 
         # Verify the update was applied
@@ -178,7 +178,7 @@ def test_upgrade_redis_to_valkey(self, upgrade_serverless_cache, elasticache_cli
 
         # Wait for the serverless cache to be created and become available
        assert k8s.wait_on_condition(
-            ref, "ACK.ResourceSynced", "True", wait_periods=90
+            ref, "Ready", "True", wait_periods=90
         )
 
         get_and_assert_status(ref, "available", True)
@@ -206,7 +206,7 @@ def test_upgrade_redis_to_valkey(self, upgrade_serverless_cache, elasticache_cli
 
         # Wait for upgrade to be synced
         assert k8s.wait_on_condition(
-            ref, "ACK.ResourceSynced", "True", wait_periods=90
+            ref, "Ready", "True", wait_periods=90
         )
 
         # Wait for it to be available again after upgrade
diff --git a/test/e2e/tests/test_serverless_cache_snapshot.py b/test/e2e/tests/test_serverless_cache_snapshot.py
index c645d34..e2dd9eb 100644
--- a/test/e2e/tests/test_serverless_cache_snapshot.py
+++ b/test/e2e/tests/test_serverless_cache_snapshot.py
@@ -64,7 +64,7 @@ def serverless_cache_for_snapshot(elasticache_client):
 
     # Wait for serverless cache to be available
     assert k8s.wait_on_condition(
-        ref, "ACK.ResourceSynced", "True", wait_periods=90
+        ref, "Ready", "True", wait_periods=90
     )
 
     yield ref, cr
@@ -112,7 +112,7 @@ def test_create_delete_serverless_cache_snapshot(self, simple_serverless_cache_s
         (ref, _) = simple_serverless_cache_snapshot
 
         assert k8s.wait_on_condition(
-            ref, "ACK.ResourceSynced", "True", wait_periods=120
+            ref, "Ready", "True", wait_periods=120
         )
 
         tag_updates = {
diff --git a/test/e2e/tests/test_snapshot.py b/test/e2e/tests/test_snapshot.py
index 340733b..9210024 100644
--- a/test/e2e/tests/test_snapshot.py
+++ b/test/e2e/tests/test_snapshot.py
@@ -82,4 +82,4 @@ class TestSnapshot:
     # test create of snapshot while providing KMS key
     def test_snapshot_kms(self, snapshot_kms):
         (reference, _) = snapshot_kms
-        assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=15)
+        assert k8s.wait_on_condition(reference, "Ready", "True", wait_periods=15)
diff --git a/test/e2e/tests/test_user.py b/test/e2e/tests/test_user.py
index b0f5a9e..7af48db 100644
--- a/test/e2e/tests/test_user.py
+++ b/test/e2e/tests/test_user.py
@@ -115,7 +115,7 @@ def test_user_nopass(self, user_nopass, user_nopass_input):
         (reference, resource) = user_nopass
         assert k8s.get_resource_exists(reference)
 
-        assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=5)
+        assert k8s.wait_on_condition(reference, "Ready", "True", wait_periods=5)
         resource = k8s.get_resource(reference)
         assert resource["status"]["lastRequestedAccessString"] == user_nopass_input["ACCESS_STRING"]
 
@@ -124,7 +124,7 @@ def test_user_nopass(self, user_nopass, user_nopass_input):
         _ = k8s.patch_custom_resource(reference, user_patch)
         sleep(DEFAULT_WAIT_SECS)
 
-        assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=5)
+        assert k8s.wait_on_condition(reference, "Ready", "True", wait_periods=5)
         resource = k8s.get_resource(reference)
         assert resource["status"]["lastRequestedAccessString"] == new_access_string
 
@@ -133,7 +133,7 @@ def test_user_password(self, user_password, user_password_input):
         (reference, resource) = user_password
         assert k8s.get_resource_exists(reference)
 
-        assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=5)
+        assert k8s.wait_on_condition(reference, "Ready", "True", wait_periods=5)
         resource = k8s.get_resource(reference)
         assert resource["status"]["authentication"] is not None
         assert resource["status"]["authentication"]["type_"] == "password"
diff --git a/test/e2e/tests/test_usergroup.py b/test/e2e/tests/test_usergroup.py
index 9c5bebe..3d4e8aa 100644
--- a/test/e2e/tests/test_usergroup.py
+++ b/test/e2e/tests/test_usergroup.py
@@ -66,7 +66,7 @@ def user_group_create(get_user_group_yaml):
 class TestUserGroup:
     def test_user_group_create_update(self, user_group_create, get_user_group_yaml, bootstrap_resources):
         (reference, resource) = user_group_create
-        assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=15)
+        assert k8s.wait_on_condition(reference, "Ready", "True", wait_periods=15)
 
         # Update the usergroup to include one more user
         updated_user_group = get_user_group_yaml(reference.name)
@@ -74,7 +74,7 @@ def test_user_group_create_update(self, user_group_create, get_user_group_yaml,
 
         k8s.patch_custom_resource(reference, updated_user_group)
 
-        assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=15)
+        assert k8s.wait_on_condition(reference, "Ready", "True", wait_periods=15)
         resource = k8s.get_resource(reference)
         assert len(resource["spec"]["userIDs"]) == 2
         assert resource["status"]["status"] == "active"