diff --git a/pkg/resource/replication_group/annotations.go b/pkg/resource/replication_group/annotations.go index dd984231..8fb03abf 100644 --- a/pkg/resource/replication_group/annotations.go +++ b/pkg/resource/replication_group/annotations.go @@ -22,4 +22,14 @@ const ( // LogDeliveryConfigurationRequest structs passed in as input to either the create or modify API called most // recently AnnotationLastRequestedLDCs = svcapitypes.AnnotationPrefix + "last-requested-log-delivery-configurations" + // AnnotationLastRequestedCNT is an annotation whose value is the CacheNodeType passed in as input to either the create or modify API + // called most recently + AnnotationLastRequestedCNT = svcapitypes.AnnotationPrefix + "last-requested-cache-node-type" + // AnnotationLastRequestedNNG is an annotation whose value is the NumNodeGroups passed in as input to either the create or modify API + // called most recently + AnnotationLastRequestedNNG = svcapitypes.AnnotationPrefix + "last-requested-num-node-groups" + // AnnotationLastRequestedNGC is an annotation whose value is the marshaled list of pointers to + // NodeGroupConfiguration structs passed in as input to either the create or modify API called most + // recently + AnnotationLastRequestedNGC = svcapitypes.AnnotationPrefix + "last-requested-node-group-configuration" ) diff --git a/pkg/resource/replication_group/custom_set_output.go b/pkg/resource/replication_group/custom_set_output.go index 8ad6b752..9aa8b320 100644 --- a/pkg/resource/replication_group/custom_set_output.go +++ b/pkg/resource/replication_group/custom_set_output.go @@ -16,6 +16,7 @@ package replication_group import ( "context" "encoding/json" + "strconv" svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" @@ -57,6 +58,8 @@ func (rm *resourceManager) CustomCreateReplicationGroupSetOutput( ) (*svcapitypes.ReplicationGroup, error) { rm.customSetOutput(resp.ReplicationGroup, ko) rm.setAnnotationsFields(r, ko) + rm.setLastRequestedNodeGroupConfiguration(r, ko) + rm.setLastRequestedNumNodeGroups(r, ko) return ko, nil } @@ -76,6 +79,9 @@ func (rm *resourceManager) CustomModifyReplicationGroupSetOutput( } ko.Spec.LogDeliveryConfigurations = logDeliveryConfig + // Keep the desired value for CacheNodeType. + ko.Spec.CacheNodeType = r.ko.Spec.CacheNodeType + rm.setAnnotationsFields(r, ko) return ko, nil } @@ -267,20 +273,36 @@ func (rm *resourceManager) provideEvents( // setAnnotationsFields copies the desired object's annotations, populates any // relevant fields, and sets the latest object's annotations to this newly populated map. +// Fields that are handled by the custom modify implementation are not set here. // This should only be called upon a successful create or modify call.
func (rm *resourceManager) setAnnotationsFields( r *resource, ko *svcapitypes.ReplicationGroup, ) { + annotations := getAnnotationsFields(r, ko) + + rm.setLastRequestedLogDeliveryConfigurations(r, annotations) + rm.setLastRequestedCacheNodeType(r, annotations) + ko.ObjectMeta.Annotations = annotations +} + +// getAnnotationsFields returns the annotations map that will be used to set the fields +func getAnnotationsFields( + r *resource, + ko *svcapitypes.ReplicationGroup) map[string]string { + + if ko.ObjectMeta.Annotations != nil { + return ko.ObjectMeta.Annotations + } + desiredAnnotations := r.ko.ObjectMeta.GetAnnotations() annotations := make(map[string]string) for k, v := range desiredAnnotations { annotations[k] = v } - rm.setLastRequestedLogDeliveryConfigurations(r, annotations) - ko.ObjectMeta.Annotations = annotations + return annotations } // setLastRequestedLogDeliveryConfigurations copies desired.Spec.LogDeliveryConfigurations @@ -297,3 +319,43 @@ func (rm *resourceManager) setLastRequestedLogDeliveryConfigurations( annotations[AnnotationLastRequestedLDCs] = string(lastRequestedConfigs) } } + +// setLastRequestedCacheNodeType copies desired.Spec.CacheNodeType into the annotation +// of the object. +func (rm *resourceManager) setLastRequestedCacheNodeType( + r *resource, + annotations map[string]string, +) { + if r.ko.Spec.CacheNodeType != nil { + annotations[AnnotationLastRequestedCNT] = *r.ko.Spec.CacheNodeType + } +} + +// setLastRequestedNodeGroupConfiguration copies desired.Spec.NodeGroupConfiguration into the +// annotation of the object. +func (rm *resourceManager) setLastRequestedNodeGroupConfiguration( + r *resource, + ko *svcapitypes.ReplicationGroup, +) { + annotations := getAnnotationsFields(r, ko) + lastRequestedConfigs, err := json.Marshal(r.ko.Spec.NodeGroupConfiguration) + if err != nil { + annotations[AnnotationLastRequestedNGC] = "null" + } else { + annotations[AnnotationLastRequestedNGC] = string(lastRequestedConfigs) + } +} + +// setLastRequestedNumNodeGroups copies desired.Spec.NumNodeGroups into the +// annotation of the object. +func (rm *resourceManager) setLastRequestedNumNodeGroups( + r *resource, + ko *svcapitypes.ReplicationGroup, +) { + annotations := getAnnotationsFields(r, ko) + if r.ko.Spec.NumNodeGroups != nil { + annotations[AnnotationLastRequestedNNG] = strconv.Itoa(int(*r.ko.Spec.NumNodeGroups)) + } else { + annotations[AnnotationLastRequestedNNG] = "null" + } +} diff --git a/pkg/resource/replication_group/custom_update_api.go b/pkg/resource/replication_group/custom_update_api.go index 207d97ac..e4c92896 100644 --- a/pkg/resource/replication_group/custom_update_api.go +++ b/pkg/resource/replication_group/custom_update_api.go @@ -15,11 +15,14 @@ package replication_group import ( "context" + "encoding/json" "fmt" "github.com/aws-controllers-k8s/runtime/pkg/requeue" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/pkg/errors" + "reflect" "sort" + "strconv" svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" @@ -70,6 +73,28 @@ func (rm *resourceManager) CustomModifyReplicationGroup( requeue.DefaultRequeueAfterDuration) } + // Handle the asynchronous rollback case while scaling down. + // This means that we have already attempted to apply the CacheNodeType once and + // were not successful, hence we set a terminal condition.
+ if !cacheNodeTypeRequiresUpdate(desired) && delta.DifferentAt("Spec.CacheNodeType") { + return nil, awserr.New("InvalidParameterCombination", "Cannot update CacheNodeType, "+ + "Please refer to Events for more details", nil) + + } + + // Handle the asynchronous rollback for Resharding. + if !nodeGroupRequiresUpdate(desired) && rm.shardConfigurationsDiffer(desired, latest) { + + return nil, awserr.New("InvalidParameterCombination", "Cannot update NodeGroups, "+ + "Please refer to Events for more details", nil) + } + + // Handle NodeGroupConfiguration asynchronous rollback situations other than Resharding. + if !nodeGroupRequiresUpdate(desired) && (rm.replicaCountDifference(desired, latest) != 0 && !delta.DifferentAt("Spec.ReplicasPerNodeGroup")) { + return nil, awserr.New("InvalidParameterCombination", "Cannot update NodeGroupConfiguration, "+ + "Please refer to Events for more details", nil) + } + // Order of operations when diffs map to multiple updates APIs: // 1. When automaticFailoverEnabled differs: // if automaticFailoverEnabled == false; do nothing in this custom logic, let the modify execute first. @@ -316,7 +341,18 @@ func (rm *resourceManager) updateShardConfiguration( rm.log.V(1).Info("Error during ModifyReplicationGroupShardConfiguration", "error", respErr) return nil, respErr } - return rm.setReplicationGroupOutput(desired, resp.ReplicationGroup) + + r, err := rm.setReplicationGroupOutput(desired, resp.ReplicationGroup) + + if err != nil { + return r, err + } + + ko := r.ko.DeepCopy() + // Update the annotations since the API call was successful + rm.setLastRequestedNodeGroupConfiguration(desired, ko) + rm.setLastRequestedNumNodeGroups(desired, ko) + return &resource{ko}, nil } // newIncreaseReplicaCountRequestPayload returns an SDK-specific struct for the HTTP request @@ -691,3 +727,45 @@ func (rm *resourceManager) newModifyReplicationGroupRequestPayload( return input } + +// cacheNodeTypeRequiresUpdate retrieves the last requested cacheNodeType saved in annotations and compares it +// to the current desired cacheNodeType +func cacheNodeTypeRequiresUpdate(desired *resource) bool { + annotations := desired.ko.ObjectMeta.GetAnnotations() + if val, ok := annotations[AnnotationLastRequestedCNT]; ok && desired.ko.Spec.CacheNodeType != nil { + return val != *desired.ko.Spec.CacheNodeType + } + + // This means there is a delta but no value in the annotation or in the Spec + return true +} + +// nodeGroupRequiresUpdate retrieves the last applied NumNodeGroups and NodeGroupConfiguration and compares them +// to the current desired NumNodeGroups and NodeGroupConfiguration +func nodeGroupRequiresUpdate(desired *resource) bool { + annotations := desired.ko.ObjectMeta.GetAnnotations() + + if val, ok := annotations[AnnotationLastRequestedNNG]; ok && val != "null" { + numNodes, err := strconv.ParseInt(val, 10, 64) + + if err != nil { + return false + } + + if numNodes != *desired.ko.Spec.NumNodeGroups { + return true + } + + return false + } + + desiredNodeGroupConfig := desired.ko.Spec.NodeGroupConfiguration + if val, ok := annotations[AnnotationLastRequestedNGC]; ok && val != "null" { + var lastRequestedNodeGroupConfig []*svcapitypes.NodeGroupConfiguration + _ = json.Unmarshal([]byte(val), &lastRequestedNodeGroupConfig) + return !reflect.DeepEqual(desiredNodeGroupConfig, lastRequestedNodeGroupConfig) + } + + // This means there is a delta but no value in the annotation or in the Spec + return true +} diff --git a/pkg/resource/replication_group/custom_update_api_test.go
b/pkg/resource/replication_group/custom_update_api_test.go index 2e88fda2..ad0d2fb0 100644 --- a/pkg/resource/replication_group/custom_update_api_test.go +++ b/pkg/resource/replication_group/custom_update_api_test.go @@ -25,6 +25,7 @@ import ( "go.uber.org/zap/zapcore" "path/filepath" ctrlrtzap "sigs.k8s.io/controller-runtime/pkg/log/zap" + "strconv" "testing" "github.com/stretchr/testify/assert" @@ -290,8 +291,7 @@ func TestCustomModifyReplicationGroup(t *testing.T) { desired := provideResource() latest := provideResource() var delta ackcompare.Delta - var ctx context.Context - res, err := rm.CustomModifyReplicationGroup(ctx, desired, latest, &delta) + res, err := rm.CustomModifyReplicationGroup(context.TODO(), desired, latest, &delta) assert.Nil(res) assert.Nil(err) }) @@ -306,8 +306,7 @@ func TestCustomModifyReplicationGroup_Unavailable(t *testing.T) { desired := provideResource() latest := provideResourceWithStatus("modifying") var delta ackcompare.Delta - var ctx context.Context - res, err := rm.CustomModifyReplicationGroup(ctx, desired, latest, &delta) + res, err := rm.CustomModifyReplicationGroup(context.TODO(), desired, latest, &delta) assert.Nil(res) assert.NotNil(err) var requeueNeededAfter *requeue.RequeueNeededAfter @@ -329,8 +328,7 @@ func TestCustomModifyReplicationGroup_NodeGroup_Unvailable(t *testing.T) { nodeGroup.Status = &unavailableStatus } var delta ackcompare.Delta - var ctx context.Context - res, err := rm.CustomModifyReplicationGroup(ctx, desired, latest, &delta) + res, err := rm.CustomModifyReplicationGroup(context.TODO(), desired, latest, &delta) assert.Nil(res) assert.NotNil(err) var requeueNeededAfter *requeue.RequeueNeededAfter @@ -357,9 +355,8 @@ func TestCustomModifyReplicationGroup_NodeGroup_MemberClusters_mismatch(t *testi nodeGroup.Status = &availableStatus } var delta ackcompare.Delta - var ctx context.Context require.NotNil(latest.ko.Status.MemberClusters) - res, err := rm.CustomModifyReplicationGroup(ctx, desired, latest, &delta) + res, err := rm.CustomModifyReplicationGroup(context.TODO(), desired, latest, &delta) assert.Nil(res) assert.NotNil(err) // due to surplus member cluster var requeueNeededAfter *requeue.RequeueNeededAfter @@ -384,14 +381,61 @@ func TestCustomModifyReplicationGroup_NodeGroup_available(t *testing.T) { nodeGroup.Status = &availableStatus } var delta ackcompare.Delta - var ctx context.Context require.NotNil(latest.ko.Status.MemberClusters) - res, err := rm.CustomModifyReplicationGroup(ctx, desired, latest, &delta) + res, err := rm.CustomModifyReplicationGroup(context.TODO(), desired, latest, &delta) assert.Nil(res) assert.Nil(err) }) } +func TestCustomModifyReplicationGroup_Scaling_Async_Rollback(t *testing.T) { + assert := assert.New(t) + t.Run("ScaleDownRollback=Diff", func(t *testing.T) { + desired := provideResource() + latest := provideResource() + rgId := "RGID" + desired.ko.Spec.ReplicationGroupID = &rgId + latest.ko.Spec.ReplicationGroupID = &rgId + desired.ko.ObjectMeta.Annotations = make(map[string]string) + desiredCacheNodeType := "cache.t3.micro" + currentCacheNodeType := "cache.m5.large" + desired.ko.Annotations[AnnotationLastRequestedCNT] = desiredCacheNodeType + desired.ko.Spec.CacheNodeType = &desiredCacheNodeType + + rm := provideResourceManager() + + var delta ackcompare.Delta + delta.Add("Spec.CacheNodeType", currentCacheNodeType, desiredCacheNodeType) + + res, err := rm.CustomModifyReplicationGroup(context.TODO(), desired, latest, &delta) + assert.Nil(res) + assert.NotNil(err) + 
assert.Equal("InvalidParameterCombination: Cannot update CacheNodeType, Please refer to Events for more details", err.Error()) + }) + + t.Run("ScaleInRollback=Diff", func(t *testing.T) { + desired := provideResource() + latest := provideResource() + rgId := "RGID" + desired.ko.Spec.ReplicationGroupID = &rgId + latest.ko.Spec.ReplicationGroupID = &rgId + desired.ko.ObjectMeta.Annotations = make(map[string]string) + + desiredNodeGroup := int64(4) + currentNodeGroup := int64(3) + desired.ko.Annotations[AnnotationLastRequestedNNG] = strconv.Itoa(int(desiredNodeGroup)) + desired.ko.Spec.NumNodeGroups = &desiredNodeGroup + rm := provideResourceManager() + + var delta ackcompare.Delta + delta.Add("Spec.NumNodeGroups", currentNodeGroup, desiredNodeGroup) + + res, err := rm.CustomModifyReplicationGroup(context.TODO(), desired, latest, &delta) + assert.Nil(res) + assert.NotNil(err) + assert.Equal("InvalidParameterCombination: Cannot update NodeGroups, Please refer to Events for more details", err.Error()) + }) +} func TestCustomModifyReplicationGroup_ScaleUpAndDown_And_Resharding(t *testing.T) { assert := assert.New(t) @@ -402,16 +446,26 @@ func TestCustomModifyReplicationGroup_ScaleUpAndDown_And_Resharding(t *testing.T rgId := "RGID" desired.ko.Spec.ReplicationGroupID = &rgId latest.ko.Spec.ReplicationGroupID = &rgId + desired.ko.ObjectMeta.Annotations = make(map[string]string) + desiredCacheNodeType := "cache.m5.large" + currentCacheNodeType := "cache.t3.small" + desired.ko.Annotations[AnnotationLastRequestedCNT] = currentCacheNodeType + desired.ko.Spec.CacheNodeType = &desiredCacheNodeType + desiredNodeGroup := int64(4) + currentNodeGroup := int64(3) + desired.ko.Annotations[AnnotationLastRequestedNNG] = strconv.Itoa(int(currentNodeGroup)) + desired.ko.Spec.NumNodeGroups = &desiredNodeGroup + allowedNodeModifications := []*string{&desiredCacheNodeType} + desired.ko.Status.AllowedScaleUpModifications = allowedNodeModifications mocksdkapi := &mocksvcsdkapi.ElastiCacheAPI{} rm := provideResourceManagerWithMockSDKAPI(mocksdkapi) var delta ackcompare.Delta - delta.Add("Spec.CacheNodeType", "cache.t3.small", "cache.m5.large") - delta.Add("Spec.NumNodeGroups", 3, 4) + delta.Add("Spec.CacheNodeType", currentCacheNodeType, desiredCacheNodeType) + delta.Add("Spec.NumNodeGroups", currentNodeGroup, desiredNodeGroup) - var ctx context.Context - res, err := rm.CustomModifyReplicationGroup(ctx, desired, latest, &delta) + res, err := rm.CustomModifyReplicationGroup(context.TODO(), desired, latest, &delta) assert.Nil(res) assert.Nil(err) assert.Empty(mocksdkapi.Calls) @@ -429,15 +483,19 @@ func TestCustomModifyReplicationGroup_ScaleUpAndDown_And_Resharding(t *testing.T var delta ackcompare.Delta delta.Add("Spec.CacheNodeType", "cache.t3.small", "cache.t3.micro") + desired.ko.ObjectMeta.Annotations = make(map[string]string) + cacheNodeType := "cache.t3.small" + desired.ko.Annotations[AnnotationLastRequestedCNT] = "cache.t3.micro" + desired.ko.Spec.CacheNodeType = &cacheNodeType oldshardCount := int64(4) newShardCount := int64(10) delta.Add("Spec.NumNodeGroups", oldshardCount, newShardCount) desired.ko.Spec.NumNodeGroups = &newShardCount latest.ko.Spec.NumNodeGroups = &oldshardCount + desired.ko.Annotations[AnnotationLastRequestedNNG] = strconv.Itoa(int(oldshardCount)) mocksdkapi.On("ModifyReplicationGroupShardConfigurationWithContext", mock.Anything, mock.Anything).Return(nil, awserr.New("Invalid", "Invalid error", nil)) - var ctx context.Context - res, err := rm.CustomModifyReplicationGroup(ctx, desired, latest, 
&delta) + res, err := rm.CustomModifyReplicationGroup(context.TODO(), desired, latest, &delta) assert.Nil(res) assert.NotNil(err) assert.NotEmpty(mocksdkapi.Calls) diff --git a/pkg/resource/replication_group/post_set_output.go b/pkg/resource/replication_group/post_set_output.go index c87d525b..3ba18bf0 100644 --- a/pkg/resource/replication_group/post_set_output.go +++ b/pkg/resource/replication_group/post_set_output.go @@ -15,6 +15,7 @@ package replication_group import ( "context" + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" svcsdk "github.com/aws/aws-sdk-go/service/elasticache" ) @@ -35,8 +36,7 @@ func (rm *resourceManager) updateSpecFields( } // populate relevant ko.Spec fields with observed state of respRG.NodeGroups setReplicasPerNodeGroup(respRG, resource) - - //TODO: set Spec NodeGroupConfiguration + setNodeGroupConfiguration(respRG, resource) // updating some Spec fields requires a DescribeCacheClusters call latestCacheCluster, err := rm.describeCacheCluster(ctx, resource) @@ -47,6 +47,50 @@ func (rm *resourceManager) updateSpecFields( } } +// if NodeGroupConfiguration was given in the desired.Spec, update ko.Spec with the latest observed value +func setNodeGroupConfiguration( + respRG *svcsdk.ReplicationGroup, + resource *resource, +) { + ko := resource.ko + if respRG.NodeGroups != nil && ko.Spec.NodeGroupConfiguration != nil { + nodeGroupConfigurations := []*svcapitypes.NodeGroupConfiguration{} + for _, nodeGroup := range respRG.NodeGroups { + nodeGroupConfiguration := &svcapitypes.NodeGroupConfiguration{} + + if nodeGroup.NodeGroupId != nil { + nodeGroupConfiguration.NodeGroupID = nodeGroup.NodeGroupId + } + replicaAZs := []*string{} + + for _, nodeGroupMember := range nodeGroup.NodeGroupMembers { + if nodeGroupMember.CurrentRole != nil && *nodeGroupMember.CurrentRole == "primary" { + nodeGroupConfiguration.PrimaryAvailabilityZone = nodeGroupMember.PreferredAvailabilityZone + } + + // If the member's role is unknown, we cannot tell which AZ is the primary and which are the replicas.
+ if nodeGroupMember.CurrentRole == nil && nodeGroupConfiguration.PrimaryAvailabilityZone == nil { + // We cannot determine the correct AZ, so we use the first node group member's AZ as the primary + nodeGroupConfiguration.PrimaryAvailabilityZone = nodeGroupMember.PreferredAvailabilityZone + } + + if nodeGroupConfiguration.PrimaryAvailabilityZone != nil || *nodeGroupMember.CurrentRole == "replica" { + replicaAZs = append(replicaAZs, nodeGroupMember.PreferredAvailabilityZone) + } + } + + if len(replicaAZs) > 0 { + nodeGroupConfiguration.ReplicaAvailabilityZones = replicaAZs + } + + replicaCount := int64(len(replicaAZs)) + nodeGroupConfiguration.ReplicaCount = &replicaCount + nodeGroupConfigurations = append(nodeGroupConfigurations, nodeGroupConfiguration) + } + + ko.Spec.NodeGroupConfiguration = nodeGroupConfigurations + } +} + //TODO: for all the fields here, reevaluate if the latest observed state should always be populated, // even if the corresponding field was not specified in desired diff --git a/test/e2e/declarative_test_fwk/loader.py b/test/e2e/declarative_test_fwk/loader.py index e6fd3cd2..cfac760d 100644 --- a/test/e2e/declarative_test_fwk/loader.py +++ b/test/e2e/declarative_test_fwk/loader.py @@ -17,9 +17,10 @@ from e2e.declarative_test_fwk import model import pytest import os +import glob from typing import Iterable, List from pathlib import Path -from os.path import isfile, join +from os.path import isfile, join, isdir from acktest.resources import load_resource_file, random_suffix_name @@ -34,11 +35,11 @@ def list_scenarios(scenarios_directory: Path) -> Iterable: """ scenarios_list = [] - for scenario_file in sorted(os.listdir(scenarios_directory)): - scenario_file_full_path = join(scenarios_directory, scenario_file) - if not isfile(scenario_file_full_path) or not scenario_file.endswith(".yaml"): - continue - scenarios_list.append(pytest.param(Path(scenario_file_full_path), marks=marks(Path(scenario_file_full_path)))) + scenario_files = glob.glob(str(scenarios_directory) + "/**/*.yaml", recursive=True) + + for scenario_file in scenario_files: + scenarios_list.append(pytest.param(Path(scenario_file), marks=marks(Path(scenario_file)))) + return scenarios_list diff --git a/test/e2e/scenarios/Resharding/cme_scale_in_config_rollback.yaml b/test/e2e/scenarios/Resharding/cme_scale_in_config_rollback.yaml new file mode 100644 index 00000000..e365ac67 --- /dev/null +++ b/test/e2e/scenarios/Resharding/cme_scale_in_config_rollback.yaml @@ -0,0 +1,52 @@ +id: "RG_CME_SCALE_IN_CONFIG_ROLLBACK" +description: "In this test we execute a scale-in which will roll back" +#marks: +# - slow +# - blocked +resource: + apiVersion: $CRD_GROUP/$CRD_VERSION + kind: ReplicationGroup + metadata: + name: reshard$RANDOM_SUFFIX +steps: + - id: "create_CME_replication_group" + description: "Initial config" + create: + spec: + engine: redis + replicationGroupID: reshard$RANDOM_SUFFIX + replicationGroupDescription: Scaling in rollback + cacheNodeType: cache.m5.large + snapshotName: test-scale-in-config-rollback + cacheParameterGroupName: default.redis6.x.cluster.on + nodeGroupConfiguration: + - nodeGroupID: "0001" + replicaCount: 0 + - nodeGroupID: "0002" + replicaCount: 0 + wait: + status: + conditions: + ACK.ResourceSynced: + status: "True" + timeout: 2800 + expect: + status: + status: "available" + - id: "scale_in" + description: "Scaling in to 1 shard" + patch: + spec: + nodeGroupConfiguration: + - nodeGroupID: "0001" + replicaCount: 0 + wait: 300 + expect: + status: + conditions: + ACK.Terminal: "True" + ACK.ResourceSynced: + status: "False" + - id: "delete_CME_RG" + description: "Delete cluster
mode enabled replication group" + delete: reshard$RANDOM_SUFFIX diff --git a/test/e2e/scenarios/Resharding/cme_scale_in_rollback.yaml b/test/e2e/scenarios/Resharding/cme_scale_in_rollback.yaml new file mode 100644 index 00000000..4738ae68 --- /dev/null +++ b/test/e2e/scenarios/Resharding/cme_scale_in_rollback.yaml @@ -0,0 +1,45 @@ +id: "RG_CME_SCALE_IN_ROLLBACK" +description: "In this test we execute a scale-in which will roll back" +#marks: +# - slow +# - blocked +resource: + apiVersion: $CRD_GROUP/$CRD_VERSION + kind: ReplicationGroup + metadata: + name: reshard$RANDOM_SUFFIX +steps: + - id: "create_CME_replication_group" + description: "Initial config" + create: + spec: + engine: redis + replicationGroupID: reshard$RANDOM_SUFFIX + replicationGroupDescription: Scaling in rollback + cacheNodeType: cache.m5.large + numNodeGroups: 2 + snapshotName: test-scale-in-rollback + wait: + status: + conditions: + ACK.ResourceSynced: + status: "True" + timeout: 2800 + expect: + status: + status: "available" + - id: "scale_in" + description: "Scaling in to 1 shard" + patch: + spec: + numNodeGroups: 1 + wait: 300 + expect: + status: + conditions: + ACK.Terminal: "True" + ACK.ResourceSynced: + status: "False" + - id: "delete_CME_RG" + description: "Delete cluster mode enabled replication group" + delete: reshard$RANDOM_SUFFIX diff --git a/test/e2e/scenarios/cmd_basic_create_update.yaml b/test/e2e/scenarios/ScaleUpAndDown/cmd_basic_create_update.yaml similarity index 89% rename from test/e2e/scenarios/cmd_basic_create_update.yaml rename to test/e2e/scenarios/ScaleUpAndDown/cmd_basic_create_update.yaml index aed144c9..626841e0 100644 --- a/test/e2e/scenarios/cmd_basic_create_update.yaml +++ b/test/e2e/scenarios/ScaleUpAndDown/cmd_basic_create_update.yaml @@ -7,14 +7,14 @@ resource: apiVersion: $CRD_GROUP/$CRD_VERSION kind: ReplicationGroup metadata: - name: test$RANDOM_SUFFIX + name: scaling$RANDOM_SUFFIX steps: - id: "create_CMD_replication_group" description: "Initial config" create: spec: engine: redis - replicationGroupID: test$RANDOM_SUFFIX + replicationGroupID: scaling$RANDOM_SUFFIX replicationGroupDescription: Basic create and update of CMD RG cacheNodeType: cache.t3.micro numNodeGroups: 1 @@ -23,7 +23,7 @@ steps: conditions: ACK.ResourceSynced: status: "True" - timeout: 1800 + timeout: 2800 expect: status: status: "available" @@ -42,4 +42,4 @@ steps: # message: "Expected message ..."
- id: "delete_CMD_RG" description: "Delete cluster mode disabled replication group" - delete: test$RANDOM_SUFFIX + delete: scaling$RANDOM_SUFFIX diff --git a/test/e2e/scenarios/ScaleUpAndDown/cmd_scale_down.yaml b/test/e2e/scenarios/ScaleUpAndDown/cmd_scale_down.yaml new file mode 100644 index 00000000..0b8ab1f4 --- /dev/null +++ b/test/e2e/scenarios/ScaleUpAndDown/cmd_scale_down.yaml @@ -0,0 +1,43 @@ +id: "CMD_SCALE_DOWN" +description: "Scale down CMD" +resource: + apiVersion: $CRD_GROUP/$CRD_VERSION + kind: ReplicationGroup + metadata: + name: scaling$RANDOM_SUFFIX +steps: + - id: "create_CMD_replication_group" + description: "Initial config" + create: + spec: + engine: redis + replicationGroupID: scaling$RANDOM_SUFFIX + replicationGroupDescription: Scale down for CMD + cacheNodeType: cache.t3.medium + numNodeGroups: 1 + wait: + status: + conditions: + ACK.ResourceSynced: + status: "True" + timeout: 2800 + expect: + status: + status: "available" + - id: "scale_down" + description: "Scale down to t3.micro" + patch: + spec: + cacheNodeType: cache.t3.micro + wait: + status: + conditions: + ACK.ResourceSynced: + status: "True" + timeout: 2800 + expect: + status: + status: "available" + - id: "delete_CMD_RG" + description: "Delete cluster mode disabled replication group" + delete: scaling$RANDOM_SUFFIX diff --git a/test/e2e/scenarios/ScaleUpAndDown/cmd_scale_up.yaml b/test/e2e/scenarios/ScaleUpAndDown/cmd_scale_up.yaml new file mode 100644 index 00000000..59ec6454 --- /dev/null +++ b/test/e2e/scenarios/ScaleUpAndDown/cmd_scale_up.yaml @@ -0,0 +1,43 @@ +id: "CMD_SCALE_UP" +description: "Scale up CMD" +resource: + apiVersion: $CRD_GROUP/$CRD_VERSION + kind: ReplicationGroup + metadata: + name: scaling$RANDOM_SUFFIX +steps: + - id: "create_CMD_replication_group" + description: "Initial config" + create: + spec: + engine: redis + replicationGroupID: scaling$RANDOM_SUFFIX + replicationGroupDescription: Scale up for CMD + cacheNodeType: cache.t3.micro + numNodeGroups: 1 + wait: + status: + conditions: + ACK.ResourceSynced: + status: "True" + timeout: 2800 + expect: + status: + status: "available" + - id: "scale_up" + description: "Scale up from t3.micro to t3.medium" + patch: + spec: + cacheNodeType: cache.t3.medium + wait: + status: + conditions: + ACK.ResourceSynced: + status: "True" + timeout: 2800 + expect: + status: + status: "available" + - id: "delete_CMD_RG" + description: "Delete cluster mode disabled replication group" + delete: scaling$RANDOM_SUFFIX diff --git a/test/e2e/scenarios/ScaleUpAndDown/cme_scale_down.yaml b/test/e2e/scenarios/ScaleUpAndDown/cme_scale_down.yaml new file mode 100644 index 00000000..f493c3b4 --- /dev/null +++ b/test/e2e/scenarios/ScaleUpAndDown/cme_scale_down.yaml @@ -0,0 +1,43 @@ +id: "CME_SCALE_DOWN" +description: "Scale Down CME" +resource: + apiVersion: $CRD_GROUP/$CRD_VERSION + kind: ReplicationGroup + metadata: + name: scaling$RANDOM_SUFFIX +steps: + - id: "create_CME_replication_group" + description: "Initial config" + create: + spec: + engine: redis + replicationGroupID: scaling$RANDOM_SUFFIX + replicationGroupDescription: Scale down CME + cacheNodeType: cache.t3.medium + numNodeGroups: 2 + wait: + status: + conditions: + ACK.ResourceSynced: + status: "True" + timeout: 2800 + expect: + status: + status: "available" + - id: "scale_down" + description: "Scale down from t3.medium to t3.micro" + patch: + spec: + cacheNodeType: cache.t3.micro + wait: + status: + conditions: + ACK.ResourceSynced: + status: "True" + timeout: 2800 + expect: + status: + status:
"available" + - id: "delete_CME_RG" + description: "Delete cluster mode enabled replication group" + delete: scaling$RANDOM_SUFFIX diff --git a/test/e2e/scenarios/ScaleUpAndDown/cme_scale_down_rollback.yaml b/test/e2e/scenarios/ScaleUpAndDown/cme_scale_down_rollback.yaml new file mode 100644 index 00000000..f572da5c --- /dev/null +++ b/test/e2e/scenarios/ScaleUpAndDown/cme_scale_down_rollback.yaml @@ -0,0 +1,45 @@ +id: "RG_CME_SCALE_DOWN_ROLLBACK" +description: "In this test we execute a scale-down which will roll back" +#marks: +# - slow +# - blocked +resource: + apiVersion: $CRD_GROUP/$CRD_VERSION + kind: ReplicationGroup + metadata: + name: scaling$RANDOM_SUFFIX +steps: + - id: "create_CME_replication_group" + description: "Initial config" + create: + spec: + engine: redis + replicationGroupID: scaling$RANDOM_SUFFIX + replicationGroupDescription: Scaling down rollback + cacheNodeType: cache.m5.large + numNodeGroups: 2 + snapshotName: test-scale-down-rollback + wait: + status: + conditions: + ACK.ResourceSynced: + status: "True" + timeout: 2800 + expect: + status: + status: "available" + - id: "smaller_instance_scale_down" + description: "Scaling down with smaller instance" + patch: + spec: + cacheNodeType: cache.t3.micro + wait: 300 + expect: + status: + conditions: + ACK.Terminal: "True" + ACK.ResourceSynced: + status: "False" + - id: "delete_CME_RG" + description: "Delete cluster mode enabled replication group" + delete: scaling$RANDOM_SUFFIX diff --git a/test/e2e/scenarios/ScaleUpAndDown/cme_scale_up.yaml b/test/e2e/scenarios/ScaleUpAndDown/cme_scale_up.yaml new file mode 100644 index 00000000..ff2e43bb --- /dev/null +++ b/test/e2e/scenarios/ScaleUpAndDown/cme_scale_up.yaml @@ -0,0 +1,43 @@ +id: "CME_SCALE_UP" +description: "Scale Up CME" +resource: + apiVersion: $CRD_GROUP/$CRD_VERSION + kind: ReplicationGroup + metadata: + name: scaling$RANDOM_SUFFIX +steps: + - id: "create_CME_replication_group" + description: "Initial config" + create: + spec: + engine: redis + replicationGroupID: scaling$RANDOM_SUFFIX + replicationGroupDescription: Scale Up CME + cacheNodeType: cache.t3.micro + numNodeGroups: 2 + wait: + status: + conditions: + ACK.ResourceSynced: + status: "True" + timeout: 2800 + expect: + status: + status: "available" + - id: "scale_up" + description: "Scale up from t3.micro to t3.medium" + patch: + spec: + cacheNodeType: cache.t3.medium + wait: + status: + conditions: + ACK.ResourceSynced: + status: "True" + timeout: 2800 + expect: + status: + status: "available" + - id: "delete_CME_RG" + description: "Delete cluster mode enabled replication group" + delete: scaling$RANDOM_SUFFIX diff --git a/test/e2e/tests/test_replicationgroup.py b/test/e2e/tests/test_replicationgroup.py index 4832ae57..c1d4b622 100644 --- a/test/e2e/tests/test_replicationgroup.py +++ b/test/e2e/tests/test_replicationgroup.py @@ -395,11 +395,11 @@ class TestReplicationGroup: def test_rg_input_coverage(self, rg_input_coverage): (reference, _) = rg_input_coverage - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=40) + assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) def test_rg_cmd_fromsnapshot(self, rg_cmd_fromsnapshot): (reference, _) = rg_cmd_fromsnapshot - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=30) + assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) # if primaryClusterID is a nonexistent node, the terminal condition should be set def
test_rg_invalid_primary(self, rg_invalid_primary): @@ -413,7 +413,7 @@ def test_rg_invalid_primary(self, rg_invalid_primary): @pytest.mark.blocked # TODO: remove when passing def test_rg_cme_uneven_shards(self, rg_cme_uneven_shards, rg_cme_uneven_shards_input): (reference, _) = rg_cme_uneven_shards - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=30) + assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) ngid1 = rg_cme_uneven_shards_input['NGID1'][1:-1] # need to strip double quotes off node group ID ngid2 = rg_cme_uneven_shards_input['NGID2'][1:-1] @@ -447,7 +447,7 @@ def test_rg_cme_uneven_shards(self, rg_cme_uneven_shards, rg_cme_uneven_shards_i } _ = k8s.patch_custom_resource(reference, patch) sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=30) + assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) # assert new state resource = k8s.get_resource(reference) @@ -463,7 +463,7 @@ def test_rg_cme_uneven_shards(self, rg_cme_uneven_shards, rg_cme_uneven_shards_i # increase and decrease replica count evenly across all shards in a CME RG def test_rg_cme_even_shards(self, rg_cme_even_shards, rg_cme_even_shards_input): (reference, _) = rg_cme_even_shards - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=30) + assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) nng = int(rg_cme_even_shards_input['NUM_NODE_GROUPS']) rpng = int(rg_cme_even_shards_input['REPLICAS_PER_NODE_GROUP']) @@ -477,7 +477,7 @@ def test_rg_cme_even_shards(self, rg_cme_even_shards, rg_cme_even_shards_input): patch = {"spec": {"replicasPerNodeGroup": rpng}} _ = k8s.patch_custom_resource(reference, patch) sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=30) + assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) # assert replica count has increased resource = k8s.get_resource(reference) @@ -489,7 +489,7 @@ def test_rg_cme_even_shards(self, rg_cme_even_shards, rg_cme_even_shards_input): patch = {"spec": {"replicasPerNodeGroup": rpng}} _ = k8s.patch_custom_resource(reference, patch) sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=30) + assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) # assert replica count has decreased resource = k8s.get_resource(reference) @@ -499,7 +499,7 @@ def test_rg_cme_even_shards(self, rg_cme_even_shards, rg_cme_even_shards_input): # test update behavior of controller (engine version and replica count) def test_rg_upgrade_ev(self, rg_upgrade_ev_input, rg_upgrade_ev): (reference, _) = rg_upgrade_ev - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=30) + assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) # assert initial state cc = retrieve_cache_cluster(rg_upgrade_ev_input['RG_ID']) @@ -511,7 +511,7 @@ def test_rg_upgrade_ev(self, rg_upgrade_ev_input, rg_upgrade_ev): patch = {"spec": {"engineVersion": desired_engine_version}} _ = k8s.patch_custom_resource(reference, patch) sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=30) + assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) # assert new state after upgrading engine 
version resource = k8s.get_resource(reference) @@ -524,7 +524,7 @@ def test_rg_upgrade_ev(self, rg_upgrade_ev_input, rg_upgrade_ev): # test update of fields that can be changed quickly def test_rg_update_misc(self, rg_update_misc_input, rg_update_misc): (reference, _) = rg_update_misc - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=30) + assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) # desired initial state pmw = rg_update_misc_input['PMW'] @@ -549,7 +549,7 @@ def test_rg_update_misc(self, rg_update_misc_input, rg_update_misc): } _ = k8s.patch_custom_resource(reference, patch) sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=5) + assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) # assert new state assert_misc_fields(reference, rg_update_misc_input['RG_ID'], pmw, description, srl, sw) @@ -558,7 +558,7 @@ def test_rg_update_misc(self, rg_update_misc_input, rg_update_misc): @pytest.mark.blocked # TODO: remove when passing def test_rg_fault_tolerance(self, rg_fault_tolerance): (reference, _) = rg_fault_tolerance - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=30) + assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) # assert initial state resource = k8s.get_resource(reference) @@ -581,7 +581,7 @@ def test_rg_fault_tolerance(self, rg_fault_tolerance): patch = {"spec": {"automaticFailoverEnabled": False, "multiAZEnabled": False}} _ = k8s.patch_custom_resource(reference, patch) sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=10) + assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) # assert new state resource = k8s.get_resource(reference) @@ -592,7 +592,7 @@ def test_rg_fault_tolerance(self, rg_fault_tolerance): patch = {"spec": {"primaryClusterID": node2, "automaticFailoverEnabled": True, "multiAZEnabled": True}} _ = k8s.patch_custom_resource(reference, patch) sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=15) + assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) # assert roles resource = k8s.get_resource(reference) @@ -614,7 +614,7 @@ def test_rg_fault_tolerance(self, rg_fault_tolerance): @pytest.mark.blocked # TODO: remove when passing def test_rg_associate_resources(self, rg_associate_resources_input, rg_associate_resources, bootstrap_resources): (reference, _) = rg_associate_resources - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=30) + assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) # associate resources, wait for RG to sync sg_list = [bootstrap_resources.SecurityGroup1, bootstrap_resources.SecurityGroup2] @@ -623,7 +623,7 @@ def test_rg_associate_resources(self, rg_associate_resources_input, rg_associate patch = {"spec": {"securityGroupIDs": sg_list, "notificationTopicARN": sns_topic, "userGroupIDs": ug_list}} _ = k8s.patch_custom_resource(reference, patch) sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=10) + assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) # assert new state assert_associated_resources(rg_associate_resources_input['RG_ID'], sg_list, sns_topic, ug_list) @@ -635,7 
+635,7 @@ def test_rg_associate_resources(self, rg_associate_resources_input, rg_associate patch = {"spec": {"securityGroupIDs": sg_list, "notificationTopicARN": sns_topic, "userGroupIDs": ug_list}} _ = k8s.patch_custom_resource(reference, patch) sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=10) + assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) # assert new state assert_associated_resources(rg_associate_resources_input['RG_ID'], sg_list, sns_topic, ug_list) @@ -643,7 +643,7 @@ def test_rg_associate_resources(self, rg_associate_resources_input, rg_associate def test_rg_update_cpg(self, rg_update_cpg_input, rg_update_cpg, bootstrap_resources): # wait for resource to sync and retrieve initial state (reference, _) = rg_update_cpg - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=30) + assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) # update, wait for resource to sync patch = {"spec": {"cacheParameterGroupName": bootstrap_resources.CPGName}} @@ -658,7 +658,7 @@ def test_rg_update_cpg(self, rg_update_cpg_input, rg_update_cpg, bootstrap_resou @pytest.mark.blocked # TODO: remove when passing def test_rg_scale_vertically(self, rg_scale_vertically_input, rg_scale_vertically): (reference, _) = rg_scale_vertically - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=30) + assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) # assert initial state rg = retrieve_replication_group(rg_scale_vertically_input['RG_ID']) @@ -669,7 +669,7 @@ def test_rg_scale_vertically(self, rg_scale_vertically_input, rg_scale_verticall patch = {"spec": {"cacheNodeType": cnt}} _ = k8s.patch_custom_resource(reference, patch) sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=30) + assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) # assert scale up complete rg = retrieve_replication_group(rg_scale_vertically_input['RG_ID']) @@ -680,7 +680,7 @@ def test_rg_scale_vertically(self, rg_scale_vertically_input, rg_scale_verticall patch = {"spec": {"cacheNodeType": cnt}} _ = k8s.patch_custom_resource(reference, patch) sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=30) + assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) # assert scale down complete rg = retrieve_replication_group(rg_scale_vertically_input['RG_ID']) @@ -689,7 +689,7 @@ def test_rg_scale_vertically(self, rg_scale_vertically_input, rg_scale_verticall @pytest.mark.blocked # TODO: remove when passing def test_rg_scale_horizontally(self, rg_scale_horizontally_input, rg_scale_horizontally): (reference, _) = rg_scale_horizontally - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=30) + assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) # assert initial state rg = retrieve_replication_group(rg_scale_horizontally_input['RG_ID']) @@ -701,7 +701,7 @@ def test_rg_scale_horizontally(self, rg_scale_horizontally_input, rg_scale_horiz patch = {"spec": {"numNodeGroups": nng}} _ = k8s.patch_custom_resource(reference, patch) sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=30) + assert k8s.wait_on_condition(reference, 
"ACK.ResourceSynced", "True", wait_periods=90) # assert scale out complete rg = retrieve_replication_group(rg_scale_horizontally_input['RG_ID']) @@ -712,7 +712,7 @@ def test_rg_scale_horizontally(self, rg_scale_horizontally_input, rg_scale_horiz patch = {"spec": {"numNodeGroups": nng}} _ = k8s.patch_custom_resource(reference, patch) sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=30) + assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) # assert scale in complete rg = retrieve_replication_group(rg_scale_horizontally_input['RG_ID']) @@ -721,7 +721,7 @@ def test_rg_scale_horizontally(self, rg_scale_horizontally_input, rg_scale_horiz # add and modify log delivery configuration to replication group def test_rg_log_delivery(self, rg_log_delivery_input, rg_log_delivery, bootstrap_resources): (reference, _) = rg_log_delivery - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=30) + assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) # add log delivery config config = { @@ -738,7 +738,7 @@ def test_rg_log_delivery(self, rg_log_delivery_input, rg_log_delivery, bootstrap patch = {"spec": {"logDeliveryConfigurations": [config]}} k8s.patch_custom_resource(reference, patch) sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=10) + assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) # assert log delivery added assert_log_delivery_config(reference, config) @@ -749,7 +749,7 @@ def test_rg_log_delivery(self, rg_log_delivery_input, rg_log_delivery, bootstrap patch = {"spec": {"logDeliveryConfigurations": [config]}} k8s.patch_custom_resource(reference, patch) sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=10) + assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) # assert configuration modified assert_log_delivery_config(reference, config) @@ -759,7 +759,7 @@ def test_rg_log_delivery(self, rg_log_delivery_input, rg_log_delivery, bootstrap patch = {"spec": {"logDeliveryConfigurations": [config]}} k8s.patch_custom_resource(reference, patch) sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=10) + assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) # assert error message present resource = k8s.get_resource(reference) @@ -771,23 +771,23 @@ def test_rg_log_delivery(self, rg_log_delivery_input, rg_log_delivery, bootstrap patch = {"spec": {"logDeliveryConfigurations": [config]}} k8s.patch_custom_resource(reference, patch) sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=10) + assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) # assert log delivery disabled assert_log_delivery_config(reference, config) def test_rg_auth_token(self, rg_auth_token, secrets): (reference, _) = rg_auth_token - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=30) + assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) patch = {"spec": {"authToken": {"name": secrets['NAME2'], "key": secrets['KEY2']}}} k8s.patch_custom_resource(reference, patch) sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", 
wait_periods=30) + assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) def test_rg_deletion(self, rg_deletion_input, rg_deletion, rg_deletion_waiter): (reference, _) = rg_deletion - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=30) + assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) # assertions after initial creation resource = k8s.get_resource(reference)