From d7e75a431ba23160427df9a315fdca6495b78e3f Mon Sep 17 00:00:00 2001 From: Arush Sharma Date: Fri, 7 Feb 2025 11:30:18 -0800 Subject: [PATCH 1/2] nuke tests, go v2 --- apis/v1alpha1/ack-generate-metadata.yaml | 12 +- apis/v1alpha1/cache_cluster.go | 75 +- apis/v1alpha1/cache_parameter_group.go | 2 +- apis/v1alpha1/cache_subnet_group.go | 5 + apis/v1alpha1/enums.go | 72 +- apis/v1alpha1/replication_group.go | 226 ++- apis/v1alpha1/snapshot.go | 98 +- apis/v1alpha1/types.go | 146 +- apis/v1alpha1/user.go | 4 +- apis/v1alpha1/user_group.go | 11 +- apis/v1alpha1/zz_generated.deepcopy.go | 417 ++++ cmd/controller/main.go | 16 +- config/controller/deployment.yaml | 4 + config/controller/kustomization.yaml | 2 +- ...icache.services.k8s.aws_cacheclusters.yaml | 179 +- ...services.k8s.aws_cacheparametergroups.yaml | 7 +- ...he.services.k8s.aws_cachesubnetgroups.yaml | 22 +- ...he.services.k8s.aws_replicationgroups.yaml | 323 ++-- ...lasticache.services.k8s.aws_snapshots.yaml | 124 +- ...asticache.services.k8s.aws_usergroups.yaml | 18 +- .../elasticache.services.k8s.aws_users.yaml | 16 +- .../services.k8s.aws_adoptedresources.yaml | 7 +- .../bases/services.k8s.aws_fieldexports.yaml | 2 +- config/rbac/cluster-role-controller.yaml | 174 +- go.mod | 90 +- go.sum | 194 +- helm/Chart.yaml | 4 +- ...icache.services.k8s.aws_cacheclusters.yaml | 189 +- ...services.k8s.aws_cacheparametergroups.yaml | 7 +- ...he.services.k8s.aws_cachesubnetgroups.yaml | 24 +- ...he.services.k8s.aws_replicationgroups.yaml | 329 ++-- ...lasticache.services.k8s.aws_snapshots.yaml | 124 +- ...asticache.services.k8s.aws_usergroups.yaml | 18 +- .../elasticache.services.k8s.aws_users.yaml | 16 +- .../services.k8s.aws_adoptedresources.yaml | 7 +- helm/crds/services.k8s.aws_fieldexports.yaml | 2 +- helm/templates/NOTES.txt | 2 +- helm/templates/_helpers.tpl | 185 +- helm/templates/caches-role-binding.yaml | 6 +- helm/templates/deployment.yaml | 9 + helm/values.schema.json | 7 + helm/values.yaml | 15 +- 
.../cache_cluster/custom_set_output.go | 67 - .../cache_cluster/custom_update_input_test.go | 192 -- pkg/resource/cache_cluster/delta.go | 7 + pkg/resource/cache_cluster/descriptor.go | 10 +- pkg/resource/cache_cluster/hooks.go | 71 +- pkg/resource/cache_cluster/manager.go | 24 +- pkg/resource/cache_cluster/manager_factory.go | 12 +- pkg/resource/cache_cluster/references.go | 101 +- pkg/resource/cache_cluster/resource.go | 11 + pkg/resource/cache_cluster/sdk.go | 440 ++--- .../cache_parameter_group/custom_api.go | 1 + .../custom_set_output.go | 1 + .../custom_update_api.go | 1 + .../cache_parameter_group/descriptor.go | 10 +- pkg/resource/cache_parameter_group/manager.go | 24 +- .../cache_parameter_group/manager_factory.go | 12 +- .../cache_parameter_group/references.go | 1 + .../cache_parameter_group/resource.go | 11 + pkg/resource/cache_parameter_group/sdk.go | 48 +- .../cache_subnet_group/custom_set_output.go | 1 + pkg/resource/cache_subnet_group/descriptor.go | 10 +- pkg/resource/cache_subnet_group/manager.go | 24 +- .../cache_subnet_group/manager_factory.go | 12 +- pkg/resource/cache_subnet_group/references.go | 21 +- pkg/resource/cache_subnet_group/resource.go | 11 + pkg/resource/cache_subnet_group/sdk.go | 128 +- .../replication_group/custom_update_api.go | 7 +- .../custom_update_api_test.go | 1058 ---------- pkg/resource/replication_group/delta.go | 35 + pkg/resource/replication_group/descriptor.go | 10 +- pkg/resource/replication_group/hooks_test.go | 129 -- pkg/resource/replication_group/manager.go | 24 +- .../replication_group/manager_factory.go | 12 +- .../manager_test_suite_test.go | 131 -- .../replication_group/post_set_output.go | 1 + pkg/resource/replication_group/references.go | 61 +- pkg/resource/replication_group/resource.go | 11 + pkg/resource/replication_group/sdk.go | 1717 +++++++++-------- .../testdata/DecreaseReplicaCountOutput.json | 77 - .../DescribeReplicationGroupsOutput.json | 80 - .../read_many/rg_cmd_allowed_node_types.json | 47 - 
.../read_many/rg_cmd_primary_cache_node.json | 32 - .../events/read_many/rg_cmd_events.json | 10 - .../cr/rg_cmd_before_create.yaml | 10 - .../rg_cmd_before_engine_version_upgrade.yaml | 99 - ..._before_engine_version_upgrade_latest.yaml | 99 - .../cr/rg_cmd_before_increase_replica.yaml | 104 - ...rg_cmd_before_increase_replica_latest.yaml | 104 - .../cr/rg_cmd_before_scale_up_desired.yaml | 91 - .../cr/rg_cmd_before_scale_up_latest.yaml | 91 - .../cr/rg_cmd_create_completed.yaml | 105 - .../cr/rg_cmd_create_completed_latest.yaml | 106 - ...g_cmd_create_completed_not_yet_latest.yaml | 107 - .../cr/rg_cmd_create_initiated.yaml | 32 - .../cr/rg_cmd_delete_initiated.yaml | 104 - .../cr/rg_cmd_engine_upgrade_initiated.yaml | 61 - .../cr/rg_cmd_increase_replica_initiated.yaml | 61 - .../cr/rg_cmd_invalid_before_create.yaml | 10 - .../cr/rg_cmd_invalid_create_attempted.yaml | 18 - .../cr/rg_cmd_scale_up_initiated.yaml | 59 - .../cr/rg_cme_before_create.yaml | 24 - .../cr/rg_cme_create_initiated.yaml | 50 - .../rg_cme_invalid_scale_out_attempted.yaml | 118 -- .../cr/rg_cme_shard_mismatch.yaml | 115 -- .../create/rg_cmd_creating.json | 22 - .../create/rg_cme_creating.json | 26 - .../delete/rg_cmd_delete_initiated.json | 16 - .../read_one/rg_cmd_create_completed.json | 61 - .../read_one/rg_cmd_delete_initiated.json | 61 - .../replication_group/read_one/tags.json | 3 - .../rg_cmd_engine_upgrade_initiated.json | 59 - .../rg_cmd_increase_replica_initiated.json | 60 - .../update/rg_cmd_scale_up_initiated.json | 58 - .../testdata/test_suite.yaml | 164 -- .../snapshot/custom_create_api_test.go | 71 - pkg/resource/snapshot/custom_set_output.go | 1 + pkg/resource/snapshot/custom_update_api.go | 1 + pkg/resource/snapshot/descriptor.go | 10 +- pkg/resource/snapshot/manager.go | 24 +- pkg/resource/snapshot/manager_factory.go | 12 +- pkg/resource/snapshot/references.go | 1 + pkg/resource/snapshot/resource.go | 11 + pkg/resource/snapshot/sdk.go | 128 +- 
pkg/resource/user/custom_update.go | 1 + pkg/resource/user/delta.go | 18 + pkg/resource/user/delta_util.go | 6 +- pkg/resource/user/descriptor.go | 10 +- pkg/resource/user/manager.go | 24 +- pkg/resource/user/manager_factory.go | 12 +- pkg/resource/user/references.go | 1 + pkg/resource/user/resource.go | 11 + pkg/resource/user/sdk.go | 158 +- pkg/resource/user_group/custom_set_output.go | 1 + pkg/resource/user_group/custom_update_api.go | 1 + pkg/resource/user_group/descriptor.go | 10 +- pkg/resource/user_group/manager.go | 24 +- pkg/resource/user_group/manager_factory.go | 12 +- pkg/resource/user_group/references.go | 1 + pkg/resource/user_group/resource.go | 11 + pkg/resource/user_group/sdk.go | 128 +- pkg/testutil/test_suite_config.go | 71 - pkg/testutil/test_suite_runner.go | 202 -- pkg/testutil/util.go | 57 - pkg/testutil/util_test.go | 37 - pkg/util/engine_version_test.go | 69 - scripts/install-mockery.sh | 25 - 148 files changed, 3580 insertions(+), 7550 deletions(-) delete mode 100644 pkg/resource/cache_cluster/custom_update_input_test.go delete mode 100644 pkg/resource/replication_group/custom_update_api_test.go delete mode 100644 pkg/resource/replication_group/hooks_test.go delete mode 100644 pkg/resource/replication_group/manager_test_suite_test.go delete mode 100644 pkg/resource/replication_group/testdata/DecreaseReplicaCountOutput.json delete mode 100644 pkg/resource/replication_group/testdata/DescribeReplicationGroupsOutput.json delete mode 100644 pkg/resource/replication_group/testdata/allowed_node_types/read_many/rg_cmd_allowed_node_types.json delete mode 100644 pkg/resource/replication_group/testdata/cache_clusters/read_many/rg_cmd_primary_cache_node.json delete mode 100644 pkg/resource/replication_group/testdata/events/read_many/rg_cmd_events.json delete mode 100644 pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_create.yaml delete mode 100644 
pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_engine_version_upgrade.yaml delete mode 100644 pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_engine_version_upgrade_latest.yaml delete mode 100644 pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_increase_replica.yaml delete mode 100644 pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_increase_replica_latest.yaml delete mode 100644 pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_scale_up_desired.yaml delete mode 100644 pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_scale_up_latest.yaml delete mode 100644 pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_create_completed.yaml delete mode 100644 pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_create_completed_latest.yaml delete mode 100644 pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_create_completed_not_yet_latest.yaml delete mode 100644 pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_create_initiated.yaml delete mode 100644 pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_delete_initiated.yaml delete mode 100644 pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_engine_upgrade_initiated.yaml delete mode 100644 pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_increase_replica_initiated.yaml delete mode 100644 pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_invalid_before_create.yaml delete mode 100644 pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_invalid_create_attempted.yaml delete mode 100644 pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_scale_up_initiated.yaml delete mode 100644 pkg/resource/replication_group/testdata/replication_group/cr/rg_cme_before_create.yaml delete mode 100644 
pkg/resource/replication_group/testdata/replication_group/cr/rg_cme_create_initiated.yaml delete mode 100644 pkg/resource/replication_group/testdata/replication_group/cr/rg_cme_invalid_scale_out_attempted.yaml delete mode 100644 pkg/resource/replication_group/testdata/replication_group/cr/rg_cme_shard_mismatch.yaml delete mode 100644 pkg/resource/replication_group/testdata/replication_group/create/rg_cmd_creating.json delete mode 100644 pkg/resource/replication_group/testdata/replication_group/create/rg_cme_creating.json delete mode 100644 pkg/resource/replication_group/testdata/replication_group/delete/rg_cmd_delete_initiated.json delete mode 100644 pkg/resource/replication_group/testdata/replication_group/read_one/rg_cmd_create_completed.json delete mode 100644 pkg/resource/replication_group/testdata/replication_group/read_one/rg_cmd_delete_initiated.json delete mode 100644 pkg/resource/replication_group/testdata/replication_group/read_one/tags.json delete mode 100644 pkg/resource/replication_group/testdata/replication_group/update/rg_cmd_engine_upgrade_initiated.json delete mode 100644 pkg/resource/replication_group/testdata/replication_group/update/rg_cmd_increase_replica_initiated.json delete mode 100644 pkg/resource/replication_group/testdata/replication_group/update/rg_cmd_scale_up_initiated.json delete mode 100644 pkg/resource/replication_group/testdata/test_suite.yaml delete mode 100644 pkg/resource/snapshot/custom_create_api_test.go delete mode 100644 pkg/testutil/test_suite_config.go delete mode 100644 pkg/testutil/test_suite_runner.go delete mode 100644 pkg/testutil/util.go delete mode 100644 pkg/testutil/util_test.go delete mode 100644 pkg/util/engine_version_test.go delete mode 100755 scripts/install-mockery.sh diff --git a/apis/v1alpha1/ack-generate-metadata.yaml b/apis/v1alpha1/ack-generate-metadata.yaml index ee63afdc..580140a8 100755 --- a/apis/v1alpha1/ack-generate-metadata.yaml +++ b/apis/v1alpha1/ack-generate-metadata.yaml @@ -1,11 +1,11 @@ 
ack_generate_info: - build_date: "2024-06-28T14:33:25Z" - build_hash: 14cef51778d471698018b6c38b604181a6948248 - go_version: go1.22.2 - version: v0.34.0 -api_directory_checksum: 73afd1cf92f1261c45bbb52544adf4da5c6a7cd0 + build_date: "2025-02-07T19:16:11Z" + build_hash: 3d74f13b9de7134b4c76ab7526a9c578c4857602 + go_version: go1.23.4 + version: v0.41.0-18-g3d74f13 +api_directory_checksum: eb643965cba3c68f76c1b45f100874d07d39c935 api_version: v1alpha1 -aws_sdk_go_version: v1.49.0 +aws_sdk_go_version: v1.32.6 generator_config_info: file_checksum: 3c359b3f45716af86c99ab2ea0f2ab50eeae5dc9 original_file_name: generator.yaml diff --git a/apis/v1alpha1/cache_cluster.go b/apis/v1alpha1/cache_cluster.go index aa6d48e2..a37e8400 100644 --- a/apis/v1alpha1/cache_cluster.go +++ b/apis/v1alpha1/cache_cluster.go @@ -41,17 +41,10 @@ type CacheClusterSpec struct { // - Must be only printable ASCII characters. // // - Must be at least 16 characters and no more than 128 characters in length. - // - // - The only permitted printable special characters are !, &, #, $, ^, <, - // >, and -. Other printable special characters cannot be used in the AUTH - // token. - // - // For more information, see AUTH password (http://redis.io/commands/AUTH) at - // http://redis.io/commands/AUTH. AuthToken *ackv1alpha1.SecretKeyReference `json:"authToken,omitempty"` - // If you are running Redis engine version 6.0 or later, set this parameter - // to yes if you want to opt-in to the next auto minor version upgrade campaign. - // This parameter is disabled for previous versions. + // If you are running Valkey 7.2 and above or Redis OSS engine version 6.0 and + // above, set this parameter to yes to opt-in to the next auto minor version + // upgrade campaign. This parameter is disabled for previous versions. AutoMinorVersionUpgrade *bool `json:"autoMinorVersionUpgrade,omitempty"` // The node group (shard) identifier. This parameter is stored as a lowercase // string. 
@@ -75,15 +68,15 @@ type CacheClusterSpec struct { // - General purpose: Current generation: M7g node types: cache.m7g.large, // cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, // cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported - // Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - // M6g node types (available only for Redis engine version 5.0.6 onward and - // for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, + // Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + // M6g node types (available only for Redis OSS engine version 5.0.6 onward + // and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, // cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, // cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, // cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: // cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge - // T4g node types (available only for Redis engine version 5.0.6 onward and - // Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, + // T4g node types (available only for Redis OSS engine version 5.0.6 onward + // and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, // cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium // T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous // generation: (not recommended. 
Existing clusters are still supported but @@ -99,9 +92,9 @@ type CacheClusterSpec struct { // - Memory optimized: Current generation: R7g node types: cache.r7g.large, // cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, // cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported - // Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - // R6g node types (available only for Redis engine version 5.0.6 onward and - // for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, + // Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + // R6g node types (available only for Redis OSS engine version 5.0.6 onward + // and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, // cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, // cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, // cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: @@ -116,12 +109,14 @@ type CacheClusterSpec struct { // // - All current generation instance types are created in Amazon VPC by default. // - // - Redis append-only files (AOF) are not supported for T1 or T2 instances. + // - Valkey or Redis OSS append-only files (AOF) are not supported for T1 + // or T2 instances. // - // - Redis Multi-AZ with automatic failover is not supported on T1 instances. + // - Valkey or Redis OSS Multi-AZ with automatic failover is not supported + // on T1 instances. // - // - Redis configuration variables appendonly and appendfsync are not supported - // on Redis version 2.8.22 and later. + // - The configuration variables appendonly and appendfsync are not supported + // on Valkey, or on Redis OSS version 2.8.22 and later. 
CacheNodeType *string `json:"cacheNodeType,omitempty"` // The name of the parameter group to associate with this cluster. If this argument // is omitted, the default parameter group for the specified engine is used. @@ -141,7 +136,7 @@ type CacheClusterSpec struct { // // If you're going to launch your cluster in an Amazon VPC, you need to create // a subnet group before you start creating a cluster. For more information, - // see Subnets and Subnet Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SubnetGroups.html). + // see Subnets and Subnet Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SubnetGroups.html). CacheSubnetGroupName *string `json:"cacheSubnetGroupName,omitempty"` CacheSubnetGroupRef *ackv1alpha1.AWSResourceReferenceWrapper `json:"cacheSubnetGroupRef,omitempty"` // The name of the cache engine to be used for this cluster. @@ -153,21 +148,22 @@ type CacheClusterSpec struct { // operation. // // Important: You can upgrade to a newer engine version (see Selecting a Cache - // Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement)), + // Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SelectEngine.html#VersionManagement)), // but you cannot downgrade to an earlier engine version. If you want to use // an earlier engine version, you must delete the existing cluster or replication // group and create it anew with the earlier engine version. EngineVersion *string `json:"engineVersion,omitempty"` // The network type you choose when modifying a cluster, either ipv4 | ipv6. - // IPv6 is supported for workloads using Redis engine version 6.2 onward or - // Memcached engine version 1.6.6 on all instances built on the Nitro system - // (http://aws.amazon.com/ec2/nitro/). 
+ // IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine + // version 6.2 and above or Memcached engine version 1.6.6 and above on all + // instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). IPDiscovery *string `json:"ipDiscovery,omitempty"` // Specifies the destination, format and type of the logs. LogDeliveryConfigurations []*LogDeliveryConfigurationRequest `json:"logDeliveryConfigurations,omitempty"` // Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads - // using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on - // all instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). + // using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached + // engine version 1.6.6 and above on all instances built on the Nitro system + // (http://aws.amazon.com/ec2/nitro/). NetworkType *string `json:"networkType,omitempty"` // The Amazon Resource Name (ARN) of the Amazon Simple Notification Service // (SNS) topic to which notifications are sent. @@ -177,8 +173,8 @@ type CacheClusterSpec struct { NotificationTopicRef *ackv1alpha1.AWSResourceReferenceWrapper `json:"notificationTopicRef,omitempty"` // The initial number of cache nodes that the cluster has. // - // For clusters running Redis, this value must be 1. For clusters running Memcached, - // this value must be between 1 and 40. + // For clusters running Valkey or Redis OSS, this value must be 1. For clusters + // running Memcached, this value must be between 1 and 40. // // If you need more than 40 nodes for your Memcached cluster, please fill out // the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/ @@ -239,17 +235,17 @@ type CacheClusterSpec struct { // Private Cloud (Amazon VPC). 
SecurityGroupIDs []*string `json:"securityGroupIDs,omitempty"` // A single-element string list containing an Amazon Resource Name (ARN) that - // uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot - // file is used to populate the node group (shard). The Amazon S3 object name - // in the ARN cannot contain any commas. + // uniquely identifies a Valkey or Redis OSS RDB snapshot file stored in Amazon + // S3. The snapshot file is used to populate the node group (shard). The Amazon + // S3 object name in the ARN cannot contain any commas. // // This parameter is only valid if the Engine parameter is redis. // // Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb SnapshotARNs []*string `json:"snapshotARNs,omitempty"` - // The name of a Redis snapshot from which to restore data into the new node - // group (shard). The snapshot status changes to restoring while the new node - // group (shard) is being created. + // The name of a Valkey or Redis OSS snapshot from which to restore data into + // the new node group (shard). The snapshot status changes to restoring while + // the new node group (shard) is being created. // // This parameter is only valid if the Engine parameter is redis. SnapshotName *string `json:"snapshotName,omitempty"` @@ -298,12 +294,13 @@ type CacheClusterStatus struct { // to true when you create a cluster. // // Required: Only available when creating a replication group in an Amazon VPC - // using redis version 3.2.6, 4.x or later. + // using Redis OSS version 3.2.6, 4.x or later. // // Default: false // +kubebuilder:validation:Optional AtRestEncryptionEnabled *bool `json:"atRestEncryptionEnabled,omitempty"` - // A flag that enables using an AuthToken (password) when issuing Redis commands. + // A flag that enables using an AuthToken (password) when issuing Valkey or + // Redis OSS commands. 
// // Default: false // +kubebuilder:validation:Optional diff --git a/apis/v1alpha1/cache_parameter_group.go b/apis/v1alpha1/cache_parameter_group.go index a74dd72c..71335998 100644 --- a/apis/v1alpha1/cache_parameter_group.go +++ b/apis/v1alpha1/cache_parameter_group.go @@ -29,7 +29,7 @@ type CacheParameterGroupSpec struct { // can be used with. // // Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | - // redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x + // redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x | redis7 // +kubebuilder:validation:Required CacheParameterGroupFamily *string `json:"cacheParameterGroupFamily"` // A user-specified name for the cache parameter group. diff --git a/apis/v1alpha1/cache_subnet_group.go b/apis/v1alpha1/cache_subnet_group.go index 0286553e..150f4ffa 100644 --- a/apis/v1alpha1/cache_subnet_group.go +++ b/apis/v1alpha1/cache_subnet_group.go @@ -67,6 +67,11 @@ type CacheSubnetGroupStatus struct { // A list of subnets associated with the cache subnet group. // +kubebuilder:validation:Optional Subnets []*Subnet `json:"subnets,omitempty"` + // Either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Valkey + // 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine + // version 1.6.6 and above on all instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). + // +kubebuilder:validation:Optional + SupportedNetworkTypes []*string `json:"supportedNetworkTypes,omitempty"` // The Amazon Virtual Private Cloud identifier (VPC ID) of the cache subnet // group. 
// +kubebuilder:validation:Optional diff --git a/apis/v1alpha1/enums.go b/apis/v1alpha1/enums.go index ca62567d..39ec4363 100644 --- a/apis/v1alpha1/enums.go +++ b/apis/v1alpha1/enums.go @@ -18,40 +18,40 @@ package v1alpha1 type AZMode string const ( - AZMode_single_az AZMode = "single-az" AZMode_cross_az AZMode = "cross-az" + AZMode_single_az AZMode = "single-az" ) type AuthTokenUpdateStatus string const ( - AuthTokenUpdateStatus_SETTING AuthTokenUpdateStatus = "SETTING" AuthTokenUpdateStatus_ROTATING AuthTokenUpdateStatus = "ROTATING" + AuthTokenUpdateStatus_SETTING AuthTokenUpdateStatus = "SETTING" ) type AuthTokenUpdateStrategyType string const ( - AuthTokenUpdateStrategyType_SET AuthTokenUpdateStrategyType = "SET" - AuthTokenUpdateStrategyType_ROTATE AuthTokenUpdateStrategyType = "ROTATE" AuthTokenUpdateStrategyType_DELETE AuthTokenUpdateStrategyType = "DELETE" + AuthTokenUpdateStrategyType_ROTATE AuthTokenUpdateStrategyType = "ROTATE" + AuthTokenUpdateStrategyType_SET AuthTokenUpdateStrategyType = "SET" ) type AuthenticationType string const ( - AuthenticationType_password AuthenticationType = "password" - AuthenticationType_no_password AuthenticationType = "no-password" AuthenticationType_iam AuthenticationType = "iam" + AuthenticationType_no_password AuthenticationType = "no-password" + AuthenticationType_password AuthenticationType = "password" ) type AutomaticFailoverStatus string const ( - AutomaticFailoverStatus_enabled AutomaticFailoverStatus = "enabled" AutomaticFailoverStatus_disabled AutomaticFailoverStatus = "disabled" - AutomaticFailoverStatus_enabling AutomaticFailoverStatus = "enabling" AutomaticFailoverStatus_disabling AutomaticFailoverStatus = "disabling" + AutomaticFailoverStatus_enabled AutomaticFailoverStatus = "enabled" + AutomaticFailoverStatus_enabling AutomaticFailoverStatus = "enabling" ) type ChangeType string @@ -64,9 +64,9 @@ const ( type ClusterMode string const ( - ClusterMode_enabled ClusterMode = "enabled" - ClusterMode_disabled 
ClusterMode = "disabled" ClusterMode_compatible ClusterMode = "compatible" + ClusterMode_disabled ClusterMode = "disabled" + ClusterMode_enabled ClusterMode = "enabled" ) type DataStorageUnit string @@ -78,8 +78,8 @@ const ( type DataTieringStatus string const ( - DataTieringStatus_enabled DataTieringStatus = "enabled" DataTieringStatus_disabled DataTieringStatus = "disabled" + DataTieringStatus_enabled DataTieringStatus = "enabled" ) type DestinationType string @@ -99,80 +99,80 @@ const ( type InputAuthenticationType string const ( - InputAuthenticationType_password InputAuthenticationType = "password" - InputAuthenticationType_no_password_required InputAuthenticationType = "no-password-required" InputAuthenticationType_iam InputAuthenticationType = "iam" + InputAuthenticationType_no_password_required InputAuthenticationType = "no-password-required" + InputAuthenticationType_password InputAuthenticationType = "password" ) type LogDeliveryConfigurationStatus string const ( LogDeliveryConfigurationStatus_active LogDeliveryConfigurationStatus = "active" - LogDeliveryConfigurationStatus_enabling LogDeliveryConfigurationStatus = "enabling" - LogDeliveryConfigurationStatus_modifying LogDeliveryConfigurationStatus = "modifying" LogDeliveryConfigurationStatus_disabling LogDeliveryConfigurationStatus = "disabling" + LogDeliveryConfigurationStatus_enabling LogDeliveryConfigurationStatus = "enabling" LogDeliveryConfigurationStatus_error LogDeliveryConfigurationStatus = "error" + LogDeliveryConfigurationStatus_modifying LogDeliveryConfigurationStatus = "modifying" ) type LogFormat string const ( - LogFormat_text LogFormat = "text" LogFormat_json LogFormat = "json" + LogFormat_text LogFormat = "text" ) type LogType string const ( - LogType_slow_log LogType = "slow-log" LogType_engine_log LogType = "engine-log" + LogType_slow_log LogType = "slow-log" ) type MultiAZStatus string const ( - MultiAZStatus_enabled MultiAZStatus = "enabled" MultiAZStatus_disabled MultiAZStatus = 
"disabled" + MultiAZStatus_enabled MultiAZStatus = "enabled" ) type NetworkType string const ( + NetworkType_dual_stack NetworkType = "dual_stack" NetworkType_ipv4 NetworkType = "ipv4" NetworkType_ipv6 NetworkType = "ipv6" - NetworkType_dual_stack NetworkType = "dual_stack" ) type NodeUpdateInitiatedBy string const ( - NodeUpdateInitiatedBy_system NodeUpdateInitiatedBy = "system" NodeUpdateInitiatedBy_customer NodeUpdateInitiatedBy = "customer" + NodeUpdateInitiatedBy_system NodeUpdateInitiatedBy = "system" ) type NodeUpdateStatus string const ( - NodeUpdateStatus_not_applied NodeUpdateStatus = "not-applied" - NodeUpdateStatus_waiting_to_start NodeUpdateStatus = "waiting-to-start" + NodeUpdateStatus_complete NodeUpdateStatus = "complete" NodeUpdateStatus_in_progress NodeUpdateStatus = "in-progress" - NodeUpdateStatus_stopping NodeUpdateStatus = "stopping" + NodeUpdateStatus_not_applied NodeUpdateStatus = "not-applied" NodeUpdateStatus_stopped NodeUpdateStatus = "stopped" - NodeUpdateStatus_complete NodeUpdateStatus = "complete" + NodeUpdateStatus_stopping NodeUpdateStatus = "stopping" + NodeUpdateStatus_waiting_to_start NodeUpdateStatus = "waiting-to-start" ) type OutpostMode string const ( - OutpostMode_single_outpost OutpostMode = "single-outpost" OutpostMode_cross_outpost OutpostMode = "cross-outpost" + OutpostMode_single_outpost OutpostMode = "single-outpost" ) type PendingAutomaticFailoverStatus string const ( - PendingAutomaticFailoverStatus_enabled PendingAutomaticFailoverStatus = "enabled" PendingAutomaticFailoverStatus_disabled PendingAutomaticFailoverStatus = "disabled" + PendingAutomaticFailoverStatus_enabled PendingAutomaticFailoverStatus = "enabled" ) type ServiceUpdateSeverity string @@ -180,8 +180,8 @@ type ServiceUpdateSeverity string const ( ServiceUpdateSeverity_critical ServiceUpdateSeverity = "critical" ServiceUpdateSeverity_important ServiceUpdateSeverity = "important" - ServiceUpdateSeverity_medium ServiceUpdateSeverity = "medium" 
ServiceUpdateSeverity_low ServiceUpdateSeverity = "low" + ServiceUpdateSeverity_medium ServiceUpdateSeverity = "medium" ) type ServiceUpdateStatus string @@ -201,9 +201,9 @@ const ( type SlaMet string const ( - SlaMet_yes SlaMet = "yes" - SlaMet_no SlaMet = "no" SlaMet_n_a SlaMet = "n/a" + SlaMet_no SlaMet = "no" + SlaMet_yes SlaMet = "yes" ) type SourceType string @@ -230,13 +230,13 @@ const ( type UpdateActionStatus string const ( - UpdateActionStatus_not_applied UpdateActionStatus = "not-applied" - UpdateActionStatus_waiting_to_start UpdateActionStatus = "waiting-to-start" - UpdateActionStatus_in_progress UpdateActionStatus = "in-progress" - UpdateActionStatus_stopping UpdateActionStatus = "stopping" - UpdateActionStatus_stopped UpdateActionStatus = "stopped" UpdateActionStatus_complete UpdateActionStatus = "complete" - UpdateActionStatus_scheduling UpdateActionStatus = "scheduling" - UpdateActionStatus_scheduled UpdateActionStatus = "scheduled" + UpdateActionStatus_in_progress UpdateActionStatus = "in-progress" UpdateActionStatus_not_applicable UpdateActionStatus = "not-applicable" + UpdateActionStatus_not_applied UpdateActionStatus = "not-applied" + UpdateActionStatus_scheduled UpdateActionStatus = "scheduled" + UpdateActionStatus_scheduling UpdateActionStatus = "scheduling" + UpdateActionStatus_stopped UpdateActionStatus = "stopped" + UpdateActionStatus_stopping UpdateActionStatus = "stopping" + UpdateActionStatus_waiting_to_start UpdateActionStatus = "waiting-to-start" ) diff --git a/apis/v1alpha1/replication_group.go b/apis/v1alpha1/replication_group.go index 48f9bfbd..c35db1a9 100644 --- a/apis/v1alpha1/replication_group.go +++ b/apis/v1alpha1/replication_group.go @@ -22,7 +22,8 @@ import ( // ReplicationGroupSpec defines the desired state of ReplicationGroup. // -// Contains all of the attributes of a specific Redis replication group. +// Contains all of the attributes of a specific Valkey or Redis OSS replication +// group. 
type ReplicationGroupSpec struct { // A flag that enables encryption at rest when set to true. @@ -33,7 +34,7 @@ type ReplicationGroupSpec struct { // group. // // Required: Only available when creating a replication group in an Amazon VPC - // using redis version 3.2.6, 4.x or later. + // using Redis OSS version 3.2.6, 4.x or later. // // Default: false AtRestEncryptionEnabled *bool `json:"atRestEncryptionEnabled,omitempty"` @@ -50,19 +51,12 @@ type ReplicationGroupSpec struct { // - Must be only printable ASCII characters. // // - Must be at least 16 characters and no more than 128 characters in length. - // - // - The only permitted printable special characters are !, &, #, $, ^, <, - // >, and -. Other printable special characters cannot be used in the AUTH - // token. - // - // For more information, see AUTH password (http://redis.io/commands/AUTH) at - // http://redis.io/commands/AUTH. AuthToken *ackv1alpha1.SecretKeyReference `json:"authToken,omitempty"` // Specifies whether a read-only replica is automatically promoted to read/write // primary if the existing primary fails. // - // AutomaticFailoverEnabled must be enabled for Redis (cluster mode enabled) - // replication groups. + // AutomaticFailoverEnabled must be enabled for Valkey or Redis OSS (cluster + // mode enabled) replication groups. // // Default: false AutomaticFailoverEnabled *bool `json:"automaticFailoverEnabled,omitempty"` @@ -72,67 +66,72 @@ type ReplicationGroupSpec struct { // the current generation types provide more memory and computational power // at lower cost when compared to their equivalent previous generation counterparts. 
// - // - General purpose: Current generation: M6g node types (available only - // for Redis engine version 5.0.6 onward and for Memcached engine version - // 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, - // cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge - // For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - // M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, - // cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, - // cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available - // only for Redis engine version 5.0.6 onward and Memcached engine version - // 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 - // node types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: - // cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not - // recommended. Existing clusters are still supported but creation of new - // clusters is not supported for these types.) 
T1 node types: cache.t1.micro - // M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge - // M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // - General purpose: Current generation: M7g node types: cache.m7g.large, + // cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, + // cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported + // Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + // M6g node types (available only for Redis OSS engine version 5.0.6 onward + // and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, + // cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, + // cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, + // cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: + // cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + // T4g node types (available only for Redis OSS engine version 5.0.6 onward + // and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, + // cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium + // T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous + // generation: (not recommended. Existing clusters are still supported but + // creation of new clusters is not supported for these types.) T1 node types: + // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + // cache.m3.2xlarge // // - Compute optimized: Previous generation: (not recommended. Existing clusters // are still supported but creation of new clusters is not supported for // these types.) 
C1 node types: cache.c1.xlarge // - // - Memory optimized with data tiering: Current generation: R6gd node types - // (available only for Redis engine version 6.2 onward). cache.r6gd.xlarge, - // cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, - // cache.r6gd.16xlarge - // - // - Memory optimized: Current generation: R6g node types (available only - // for Redis engine version 5.0.6 onward and for Memcached engine version - // 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, - // cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge - // For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - // R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, - // cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, - // cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge - // Previous generation: (not recommended. Existing clusters are still supported - // but creation of new clusters is not supported for these types.) 
M2 node - // types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: - // cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge + // - Memory optimized: Current generation: R7g node types: cache.r7g.large, + // cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, + // cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported + // Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + // R6g node types (available only for Redis OSS engine version 5.0.6 onward + // and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, + // cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, + // cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, + // cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + // cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + // cache.r4.16xlarge Previous generation: (not recommended. Existing clusters + // are still supported but creation of new clusters is not supported for + // these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // cache.r3.8xlarge // // Additional node type info // // - All current generation instance types are created in Amazon VPC by default. // - // - Redis append-only files (AOF) are not supported for T1 or T2 instances. + // - Valkey or Redis OSS append-only files (AOF) are not supported for T1 + // or T2 instances. // - // - Redis Multi-AZ with automatic failover is not supported on T1 instances. + // - Valkey or Redis OSS Multi-AZ with automatic failover is not supported + // on T1 instances. 
// - // - Redis configuration variables appendonly and appendfsync are not supported - // on Redis version 2.8.22 and later. + // - The configuration variables appendonly and appendfsync are not supported + // on Valkey, or on Redis OSS version 2.8.22 and later. CacheNodeType *string `json:"cacheNodeType,omitempty"` // The name of the parameter group to associate with this replication group. // If this argument is omitted, the default cache parameter group for the specified // engine is used. // - // If you are running Redis version 3.2.4 or later, only one node group (shard), - // and want to use a default parameter group, we recommend that you specify - // the parameter group by name. + // If you are running Valkey or Redis OSS version 3.2.4 or later, only one node + // group (shard), and want to use a default parameter group, we recommend that + // you specify the parameter group by name. // - // - To create a Redis (cluster mode disabled) replication group, use CacheParameterGroupName=default.redis3.2. + // - To create a Valkey or Redis OSS (cluster mode disabled) replication + // group, use CacheParameterGroupName=default.redis3.2. // - // - To create a Redis (cluster mode enabled) replication group, use CacheParameterGroupName=default.redis3.2.cluster.on. + // - To create a Valkey or Redis OSS (cluster mode enabled) replication group, + // use CacheParameterGroupName=default.redis3.2.cluster.on. CacheParameterGroupName *string `json:"cacheParameterGroupName,omitempty"` CacheParameterGroupRef *ackv1alpha1.AWSResourceReferenceWrapper `json:"cacheParameterGroupRef,omitempty"` // A list of cache security group names to associate with this replication group. @@ -141,47 +140,64 @@ type ReplicationGroupSpec struct { // // If you're going to launch your cluster in an Amazon VPC, you need to create // a subnet group before you start creating a cluster. 
For more information, - // see Subnets and Subnet Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SubnetGroups.html). + // see Subnets and Subnet Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SubnetGroups.html). CacheSubnetGroupName *string `json:"cacheSubnetGroupName,omitempty"` CacheSubnetGroupRef *ackv1alpha1.AWSResourceReferenceWrapper `json:"cacheSubnetGroupRef,omitempty"` + // Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you + // must first set the cluster mode to Compatible. Compatible mode allows your + // Valkey or Redis OSS clients to connect using both cluster mode enabled and + // cluster mode disabled. After you migrate all Valkey or Redis OSS clients + // to use cluster mode enabled, you can then complete cluster mode configuration + // and set the cluster mode to Enabled. + ClusterMode *string `json:"clusterMode,omitempty"` // Enables data tiering. Data tiering is only supported for replication groups // using the r6gd node type. This parameter must be set to true when using r6gd - // nodes. For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/data-tiering.html). + // nodes. For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/data-tiering.html). DataTieringEnabled *bool `json:"dataTieringEnabled,omitempty"` // A user-created description for the replication group. // +kubebuilder:validation:Required Description *string `json:"description"` // The name of the cache engine to be used for the clusters in this replication - // group. Must be Redis. + // group. The value must be set to Redis. Engine *string `json:"engine,omitempty"` // The version number of the cache engine to be used for the clusters in this // replication group. To view the supported cache engine versions, use the DescribeCacheEngineVersions // operation. 
// // Important: You can upgrade to a newer engine version (see Selecting a Cache - // Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement)) + // Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SelectEngine.html#VersionManagement)) // in the ElastiCache User Guide, but you cannot downgrade to an earlier engine // version. If you want to use an earlier engine version, you must delete the // existing cluster or replication group and create it anew with the earlier // engine version. EngineVersion *string `json:"engineVersion,omitempty"` + // The network type you choose when creating a replication group, either ipv4 + // | ipv6. IPv6 is supported for workloads using Valkey 7.2 and above, Redis + // OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above + // on all instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). + IPDiscovery *string `json:"ipDiscovery,omitempty"` // The ID of the KMS key used to encrypt the disk in the cluster. KMSKeyID *string `json:"kmsKeyID,omitempty"` // Specifies the destination, format and type of the logs. LogDeliveryConfigurations []*LogDeliveryConfigurationRequest `json:"logDeliveryConfigurations,omitempty"` // A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. - // For more information, see Minimizing Downtime: Multi-AZ (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/AutoFailover.html). + // For more information, see Minimizing Downtime: Multi-AZ (http://docs.aws.amazon.com/AmazonElastiCache/latest/dg/AutoFailover.html). MultiAZEnabled *bool `json:"multiAZEnabled,omitempty"` + // Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads + // using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached + // engine version 1.6.6 and above on all instances built on the Nitro system + // (http://aws.amazon.com/ec2/nitro/). 
+ NetworkType *string `json:"networkType,omitempty"` // A list of node group (shard) configuration options. Each node group (shard) // configuration has the following members: PrimaryAvailabilityZone, ReplicaAvailabilityZones, // ReplicaCount, and Slots. // - // If you're creating a Redis (cluster mode disabled) or a Redis (cluster mode - // enabled) replication group, you can use this parameter to individually configure - // each node group (shard), or you can omit this parameter. However, it is required - // when seeding a Redis (cluster mode enabled) cluster from a S3 rdb file. You - // must configure each node group (shard) using this parameter because you must - // specify the slots for each node group. + // If you're creating a Valkey or Redis OSS (cluster mode disabled) or a Valkey + // or Redis OSS (cluster mode enabled) replication group, you can use this parameter + // to individually configure each node group (shard), or you can omit this parameter. + // However, it is required when seeding a Valkey or Redis OSS (cluster mode + // enabled) cluster from a S3 rdb file. You must configure each node group (shard) + // using this parameter because you must specify the slots for each node group. NodeGroupConfiguration []*NodeGroupConfiguration `json:"nodeGroupConfiguration,omitempty"` // The Amazon Resource Name (ARN) of the Amazon Simple Notification Service // (SNS) topic to which notifications are sent. @@ -189,8 +205,9 @@ type ReplicationGroupSpec struct { // The Amazon SNS topic owner must be the same as the cluster owner. NotificationTopicARN *string `json:"notificationTopicARN,omitempty"` // An optional parameter that specifies the number of node groups (shards) for - // this Redis (cluster mode enabled) replication group. For Redis (cluster mode - // disabled) either omit this parameter or set it to 1. + // this Valkey or Redis OSS (cluster mode enabled) replication group. 
For Valkey + // or Redis OSS (cluster mode disabled) either omit this parameter or set it + // to 1. // // Default: 1 NumNodeGroups *int64 `json:"numNodeGroups,omitempty"` @@ -214,11 +231,6 @@ type ReplicationGroupSpec struct { PreferredCacheClusterAZs []*string `json:"preferredCacheClusterAZs,omitempty"` // Specifies the weekly time range during which maintenance on the cluster is // performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi - // (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid - // values for ddd are: - // - // Specifies the weekly time range during which maintenance on the cluster is - // performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi // (24H Clock UTC). The minimum maintenance window is a 60 minute period. // // Valid values for ddd are: @@ -267,13 +279,16 @@ type ReplicationGroupSpec struct { // Virtual Private Cloud (Amazon VPC). SecurityGroupIDs []*string `json:"securityGroupIDs,omitempty"` SecurityGroupRefs []*ackv1alpha1.AWSResourceReferenceWrapper `json:"securityGroupRefs,omitempty"` - // A list of Amazon Resource Names (ARN) that uniquely identify the Redis RDB - // snapshot files stored in Amazon S3. The snapshot files are used to populate - // the new replication group. The Amazon S3 object name in the ARN cannot contain - // any commas. The new replication group will have the number of node groups - // (console: shards) specified by the parameter NumNodeGroups or the number - // of node groups configured by NodeGroupConfiguration regardless of the number - // of ARNs specified here. + // The name of the snapshot used to create a replication group. Available for + // Valkey, Redis OSS only. + ServerlessCacheSnapshotName *string `json:"serverlessCacheSnapshotName,omitempty"` + // A list of Amazon Resource Names (ARN) that uniquely identify the Valkey or + // Redis OSS RDB snapshot files stored in Amazon S3. 
The snapshot files are + // used to populate the new replication group. The Amazon S3 object name in + // the ARN cannot contain any commas. The new replication group will have the + // number of node groups (console: shards) specified by the parameter NumNodeGroups + // or the number of node groups configured by NodeGroupConfiguration regardless + // of the number of ARNs specified here. // // Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb SnapshotARNs []*string `json:"snapshotARNs,omitempty"` @@ -302,10 +317,6 @@ type ReplicationGroupSpec struct { Tags []*Tag `json:"tags,omitempty"` // A flag that enables in-transit encryption when set to true. // - // You cannot modify the value of TransitEncryptionEnabled after the cluster - // is created. To enable in-transit encryption on a cluster you must set TransitEncryptionEnabled - // to true when you create a cluster. - // // This parameter is valid only if the Engine parameter is redis, the EngineVersion // parameter is 3.2.6, 4.x or later, and the cluster is being created in an // Amazon VPC. @@ -313,13 +324,28 @@ type ReplicationGroupSpec struct { // If you enable in-transit encryption, you must also specify a value for CacheSubnetGroup. // // Required: Only available when creating a replication group in an Amazon VPC - // using redis version 3.2.6, 4.x or later. + // using Redis OSS version 3.2.6, 4.x or later. // // Default: false // // For HIPAA compliance, you must specify TransitEncryptionEnabled as true, // an AuthToken, and a CacheSubnetGroup. TransitEncryptionEnabled *bool `json:"transitEncryptionEnabled,omitempty"` + // A setting that allows you to migrate your clients to use in-transit encryption, + // with no downtime. + // + // When setting TransitEncryptionEnabled to true, you can set your TransitEncryptionMode + // to preferred in the same request, to allow both encrypted and unencrypted + // connections at the same time. 
Once you migrate all your Valkey or Redis OSS + // clients to use encrypted connections you can modify the value to required + // to allow encrypted connections only. + // + // Setting TransitEncryptionMode to required is a two-step process that requires + // you to first set the TransitEncryptionMode to preferred, after that you can + // set TransitEncryptionMode to required. + // + // This process will not trigger the replacement of the replication group. + TransitEncryptionMode *string `json:"transitEncryptionMode,omitempty"` // The user group to associate with the replication group. UserGroupIDs []*string `json:"userGroupIDs,omitempty"` } @@ -338,20 +364,21 @@ type ReplicationGroupStatus struct { // +kubebuilder:validation:Optional Conditions []*ackv1alpha1.Condition `json:"conditions"` // A string list, each element of which specifies a cache node type which you - // can use to scale your cluster or replication group. When scaling down a Redis - // cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, + // can use to scale your cluster or replication group. When scaling down a Valkey + // or Redis OSS cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, // use a value from this list for the CacheNodeType parameter. // +kubebuilder:validation:Optional AllowedScaleDownModifications []*string `json:"allowedScaleDownModifications,omitempty"` // A string list, each element of which specifies a cache node type which you // can use to scale your cluster or replication group. // - // When scaling up a Redis cluster or replication group using ModifyCacheCluster - // or ModifyReplicationGroup, use a value from this list for the CacheNodeType - // parameter. + // When scaling up a Valkey or Redis OSS cluster or replication group using + // ModifyCacheCluster or ModifyReplicationGroup, use a value from this list + // for the CacheNodeType parameter. 
// +kubebuilder:validation:Optional AllowedScaleUpModifications []*string `json:"allowedScaleUpModifications,omitempty"` - // A flag that enables using an AuthToken (password) when issuing Redis commands. + // A flag that enables using an AuthToken (password) when issuing Valkey or + // Redis OSS commands. // // Default: false // +kubebuilder:validation:Optional @@ -359,12 +386,13 @@ type ReplicationGroupStatus struct { // The date the auth token was last modified // +kubebuilder:validation:Optional AuthTokenLastModifiedDate *metav1.Time `json:"authTokenLastModifiedDate,omitempty"` - // If you are running Redis engine version 6.0 or later, set this parameter - // to yes if you want to opt-in to the next auto minor version upgrade campaign. - // This parameter is disabled for previous versions. + // If you are running Valkey 7.2 and above, or Redis OSS engine version 6.0 + // and above, set this parameter to yes if you want to opt-in to the next auto + // minor version upgrade campaign. This parameter is disabled for previous versions. // +kubebuilder:validation:Optional AutoMinorVersionUpgrade *bool `json:"autoMinorVersionUpgrade,omitempty"` - // Indicates the status of automatic failover for this Redis replication group. + // Indicates the status of automatic failover for this Valkey or Redis OSS replication + // group. // +kubebuilder:validation:Optional AutomaticFailover *string `json:"automaticFailover,omitempty"` // A flag indicating whether or not this replication group is cluster enabled; @@ -380,7 +408,7 @@ type ReplicationGroupStatus struct { ConfigurationEndpoint *Endpoint `json:"configurationEndpoint,omitempty"` // Enables data tiering. Data tiering is only supported for replication groups // using the r6gd node type. This parameter must be set to true when using r6gd - // nodes. For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/data-tiering.html). + // nodes. 
For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/data-tiering.html). // +kubebuilder:validation:Optional DataTiering *string `json:"dataTiering,omitempty"` // A list of events. Each element in the list contains detailed information @@ -401,13 +429,13 @@ type ReplicationGroupStatus struct { // +kubebuilder:validation:Optional MemberClustersOutpostARNs []*string `json:"memberClustersOutpostARNs,omitempty"` // A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. - // For more information, see Minimizing Downtime: Multi-AZ (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/AutoFailover.html) + // For more information, see Minimizing Downtime: Multi-AZ (http://docs.aws.amazon.com/AmazonElastiCache/latest/dg/AutoFailover.html) // +kubebuilder:validation:Optional MultiAZ *string `json:"multiAZ,omitempty"` - // A list of node groups in this replication group. For Redis (cluster mode - // disabled) replication groups, this is a single-element list. For Redis (cluster - // mode enabled) replication groups, the list contains an entry for each node - // group (shard). + // A list of node groups in this replication group. For Valkey or Redis OSS + // (cluster mode disabled) replication groups, this is a single-element list. + // For Valkey or Redis OSS (cluster mode enabled) replication groups, the list + // contains an entry for each node group (shard). // +kubebuilder:validation:Optional NodeGroups []*NodeGroup `json:"nodeGroups,omitempty"` // A group of settings to be applied to the replication group, either immediately diff --git a/apis/v1alpha1/snapshot.go b/apis/v1alpha1/snapshot.go index 8b29231e..34ff6cd4 100644 --- a/apis/v1alpha1/snapshot.go +++ b/apis/v1alpha1/snapshot.go @@ -22,8 +22,8 @@ import ( // SnapshotSpec defines the desired state of Snapshot. // -// Represents a copy of an entire Redis cluster as of the time when the snapshot -// was taken. 
+// Represents a copy of an entire Valkey or Redis OSS cluster as of the time +// when the snapshot was taken. type SnapshotSpec struct { // The identifier of an existing cluster. The snapshot is created from this @@ -57,13 +57,13 @@ type SnapshotStatus struct { // resource // +kubebuilder:validation:Optional Conditions []*ackv1alpha1.Condition `json:"conditions"` - // If you are running Redis engine version 6.0 or later, set this parameter - // to yes if you want to opt-in to the next auto minor version upgrade campaign. - // This parameter is disabled for previous versions. + // If you are running Valkey 7.2 and above or Redis OSS engine version 6.0 and + // above, set this parameter to yes if you want to opt-in to the next auto minor + // version upgrade campaign. This parameter is disabled for previous versions. // +kubebuilder:validation:Optional AutoMinorVersionUpgrade *bool `json:"autoMinorVersionUpgrade,omitempty"` - // Indicates the status of automatic failover for the source Redis replication - // group. + // Indicates the status of automatic failover for the source Valkey or Redis + // OSS replication group. // +kubebuilder:validation:Optional AutomaticFailover *string `json:"automaticFailover,omitempty"` // The date and time when the source cluster was created. @@ -75,56 +75,58 @@ type SnapshotStatus struct { // the current generation types provide more memory and computational power // at lower cost when compared to their equivalent previous generation counterparts. // - // * General purpose: Current generation: M6g node types (available only - // for Redis engine version 5.0.6 onward and for Memcached engine version - // 1.5.16 onward). 
cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, - // cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge - // For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - // M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, - // cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, - // cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available - // only for Redis engine version 5.0.6 onward and Memcached engine version - // 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 - // node types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: - // cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not - // recommended. Existing clusters are still supported but creation of new - // clusters is not supported for these types.) 
T1 node types: cache.t1.micro - // M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge - // M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // * General purpose: Current generation: M7g node types: cache.m7g.large, + // cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, + // cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported + // Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + // M6g node types (available only for Redis OSS engine version 5.0.6 onward + // and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, + // cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, + // cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, + // cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: + // cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + // T4g node types (available only for Redis OSS engine version 5.0.6 onward + // and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, + // cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium + // T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous + // generation: (not recommended. Existing clusters are still supported but + // creation of new clusters is not supported for these types.) T1 node types: + // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + // cache.m3.2xlarge // // * Compute optimized: Previous generation: (not recommended. Existing clusters // are still supported but creation of new clusters is not supported for // these types.) 
C1 node types: cache.c1.xlarge // - // * Memory optimized with data tiering: Current generation: R6gd node types - // (available only for Redis engine version 6.2 onward). cache.r6gd.xlarge, - // cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, - // cache.r6gd.16xlarge - // - // * Memory optimized: Current generation: R6g node types (available only - // for Redis engine version 5.0.6 onward and for Memcached engine version - // 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, - // cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge - // For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - // For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - // R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, - // cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, - // cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge - // Previous generation: (not recommended. Existing clusters are still supported - // but creation of new clusters is not supported for these types.) 
M2 node - // types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: - // cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge + // * Memory optimized: Current generation: R7g node types: cache.r7g.large, + // cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, + // cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported + // Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + // R6g node types (available only for Redis OSS engine version 5.0.6 onward + // and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, + // cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, + // cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, + // cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + // cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + // cache.r4.16xlarge Previous generation: (not recommended. Existing clusters + // are still supported but creation of new clusters is not supported for + // these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // cache.r3.8xlarge // // Additional node type info // // * All current generation instance types are created in Amazon VPC by default. // - // * Redis append-only files (AOF) are not supported for T1 or T2 instances. + // * Valkey or Redis OSS append-only files (AOF) are not supported for T1 + // or T2 instances. // - // * Redis Multi-AZ with automatic failover is not supported on T1 instances. + // * Valkey or Redis OSS Multi-AZ with automatic failover is not supported + // on T1 instances. 
// - // * Redis configuration variables appendonly and appendfsync are not supported - // on Redis version 2.8.22 and later. + // * The configuration variables appendonly and appendfsync are not supported + // on Valkey, or on Redis OSS version 2.8.22 and later. // +kubebuilder:validation:Optional CacheNodeType *string `json:"cacheNodeType,omitempty"` // The cache parameter group that is associated with the source cluster. @@ -135,7 +137,7 @@ type SnapshotStatus struct { CacheSubnetGroupName *string `json:"cacheSubnetGroupName,omitempty"` // Enables data tiering. Data tiering is only supported for replication groups // using the r6gd node type. This parameter must be set to true when using r6gd - // nodes. For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/data-tiering.html). + // nodes. For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/data-tiering.html). // +kubebuilder:validation:Optional DataTiering *string `json:"dataTiering,omitempty"` // The name of the cache engine (memcached or redis) used by the source cluster. @@ -149,8 +151,8 @@ type SnapshotStatus struct { NodeSnapshots []*NodeSnapshot `json:"nodeSnapshots,omitempty"` // The number of cache nodes in the source cluster. // - // For clusters running Redis, this value must be 1. For clusters running Memcached, - // this value must be between 1 and 40. + // For clusters running Valkey or Redis OSS, this value must be 1. For clusters + // running Memcached, this value must be between 1 and 40. // +kubebuilder:validation:Optional NumCacheNodes *int64 `json:"numCacheNodes,omitempty"` // The number of node groups (shards) in this snapshot. 
When restoring from diff --git a/apis/v1alpha1/types.go b/apis/v1alpha1/types.go index a3ef42e4..bb7254ef 100644 --- a/apis/v1alpha1/types.go +++ b/apis/v1alpha1/types.go @@ -34,6 +34,12 @@ type Authentication struct { Type *string `json:"type_,omitempty"` } +// Specifies the authentication mode to use. +type AuthenticationMode struct { + Passwords []*string `json:"passwords,omitempty"` + Type *string `json:"type_,omitempty"` +} + // Describes an Availability Zone in which the cluster is launched. type AvailabilityZone struct { Name *string `json:"name,omitempty"` @@ -57,7 +63,7 @@ type CacheCluster_SDK struct { CacheSubnetGroupName *string `json:"cacheSubnetGroupName,omitempty"` ClientDownloadLandingPage *string `json:"clientDownloadLandingPage,omitempty"` // Represents the information required for client programs to connect to a cache - // node. + // node. This value is read-only. ConfigurationEndpoint *Endpoint `json:"configurationEndpoint,omitempty"` Engine *string `json:"engine,omitempty"` EngineVersion *string `json:"engineVersion,omitempty"` @@ -94,7 +100,7 @@ type CacheEngineVersion struct { // Represents an individual cache node within a cluster. Each cache node runs // its own instance of the cluster's protocol-compliant caching software - either -// Memcached or Redis. +// Memcached, Valkey or Redis OSS. // // The following node types are supported by ElastiCache. 
Generally speaking, // the current generation types provide more memory and computational power @@ -103,15 +109,15 @@ type CacheEngineVersion struct { // - General purpose: Current generation: M7g node types: cache.m7g.large, // cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, // cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported -// Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) -// M6g node types (available only for Redis engine version 5.0.6 onward and -// for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, +// Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) +// M6g node types (available only for Redis OSS engine version 5.0.6 onward +// and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, // cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, // cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, // cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: // cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge -// T4g node types (available only for Redis engine version 5.0.6 onward and -// Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, +// T4g node types (available only for Redis OSS engine version 5.0.6 onward +// and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, // cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium // T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous // generation: (not recommended. 
Existing clusters are still supported but @@ -127,9 +133,9 @@ type CacheEngineVersion struct { // - Memory optimized: Current generation: R7g node types: cache.r7g.large, // cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, // cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported -// Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) -// R6g node types (available only for Redis engine version 5.0.6 onward and -// for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, +// Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) +// R6g node types (available only for Redis OSS engine version 5.0.6 onward +// and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, // cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, // cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, // cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: @@ -144,12 +150,14 @@ type CacheEngineVersion struct { // // - All current generation instance types are created in Amazon VPC by default. // -// - Redis append-only files (AOF) are not supported for T1 or T2 instances. +// - Valkey or Redis OSS append-only files (AOF) are not supported for T1 +// or T2 instances. // -// - Redis Multi-AZ with automatic failover is not supported on T1 instances. +// - Valkey or Redis OSS Multi-AZ with automatic failover is not supported +// on T1 instances. // -// - Redis configuration variables appendonly and appendfsync are not supported -// on Redis version 2.8.22 and later. +// - The configuration variables appendonly and appendfsync are not supported +// on Valkey, or on Redis OSS version 2.8.22 and later. 
type CacheNode struct { CacheNodeCreateTime *metav1.Time `json:"cacheNodeCreateTime,omitempty"` CacheNodeID *string `json:"cacheNodeID,omitempty"` @@ -157,15 +165,15 @@ type CacheNode struct { CustomerAvailabilityZone *string `json:"customerAvailabilityZone,omitempty"` CustomerOutpostARN *string `json:"customerOutpostARN,omitempty"` // Represents the information required for client programs to connect to a cache - // node. + // node. This value is read-only. Endpoint *Endpoint `json:"endpoint,omitempty"` ParameterGroupStatus *string `json:"parameterGroupStatus,omitempty"` SourceCacheNodeID *string `json:"sourceCacheNodeID,omitempty"` } // A parameter that has a different value for each cache node type it is applied -// to. For example, in a Redis cluster, a cache.m1.large cache node type would -// have a larger maxmemory value than a cache.m1.small type. +// to. For example, in a Valkey or Redis OSS cluster, a cache.m1.large cache +// node type would have a larger maxmemory value than a cache.m1.small type. type CacheNodeTypeSpecificParameter struct { AllowedValues *string `json:"allowedValues,omitempty"` ChangeType *string `json:"changeType,omitempty"` @@ -239,6 +247,7 @@ type CacheSubnetGroup_SDK struct { CacheSubnetGroupDescription *string `json:"cacheSubnetGroupDescription,omitempty"` CacheSubnetGroupName *string `json:"cacheSubnetGroupName,omitempty"` Subnets []*Subnet `json:"subnets,omitempty"` + SupportedNetworkTypes []*string `json:"supportedNetworkTypes,omitempty"` VPCID *string `json:"vpcID,omitempty"` } @@ -251,8 +260,10 @@ type CloudWatchLogsDestinationDetails struct { // Each node group (shard) configuration has the following members: NodeGroupId, // NewReplicaCount, and PreferredAvailabilityZones. 
type ConfigureShard struct { - NewReplicaCount *int64 `json:"newReplicaCount,omitempty"` - NodeGroupID *string `json:"nodeGroupID,omitempty"` + NewReplicaCount *int64 `json:"newReplicaCount,omitempty"` + NodeGroupID *string `json:"nodeGroupID,omitempty"` + PreferredAvailabilityZones []*string `json:"preferredAvailabilityZones,omitempty"` + PreferredOutpostARNs []*string `json:"preferredOutpostARNs,omitempty"` } // The endpoint from which data should be migrated. @@ -261,6 +272,12 @@ type CustomerNodeEndpoint struct { Port *int64 `json:"port,omitempty"` } +// The data storage limit. +type DataStorage struct { + Maximum *int64 `json:"maximum,omitempty"` + Minimum *int64 `json:"minimum,omitempty"` +} + // Configuration details of either a CloudWatch Logs destination or Kinesis // Data Firehose destination. type DestinationDetails struct { @@ -277,8 +294,15 @@ type EC2SecurityGroup struct { Status *string `json:"status,omitempty"` } +// The configuration for the number of ElastiCache Processing Units (ECPU) the +// cache can consume per second. +type ECPUPerSecond struct { + Maximum *int64 `json:"maximum,omitempty"` + Minimum *int64 `json:"minimum,omitempty"` +} + // Represents the information required for client programs to connect to a cache -// node. +// node. This value is read-only. type Endpoint struct { Address *string `json:"address,omitempty"` Port *int64 `json:"port,omitempty"` @@ -386,10 +410,10 @@ type NodeGroup struct { NodeGroupID *string `json:"nodeGroupID,omitempty"` NodeGroupMembers []*NodeGroupMember `json:"nodeGroupMembers,omitempty"` // Represents the information required for client programs to connect to a cache - // node. + // node. This value is read-only. PrimaryEndpoint *Endpoint `json:"primaryEndpoint,omitempty"` // Represents the information required for client programs to connect to a cache - // node. + // node. This value is read-only. 
ReaderEndpoint *Endpoint `json:"readerEndpoint,omitempty"` Slots *string `json:"slots,omitempty"` Status *string `json:"status,omitempty"` @@ -416,7 +440,7 @@ type NodeGroupMember struct { PreferredAvailabilityZone *string `json:"preferredAvailabilityZone,omitempty"` PreferredOutpostARN *string `json:"preferredOutpostARN,omitempty"` // Represents the information required for client programs to connect to a cache - // node. + // node. This value is read-only. ReadEndpoint *Endpoint `json:"readEndpoint,omitempty"` } @@ -520,20 +544,24 @@ type RegionalConfiguration struct { ReplicationGroupRegion *string `json:"replicationGroupRegion,omitempty"` } -// The settings to be applied to the Redis replication group, either immediately -// or during the next maintenance window. +// The settings to be applied to the Valkey or Redis OSS replication group, +// either immediately or during the next maintenance window. type ReplicationGroupPendingModifiedValues struct { AuthTokenStatus *string `json:"authTokenStatus,omitempty"` AutomaticFailoverStatus *string `json:"automaticFailoverStatus,omitempty"` + ClusterMode *string `json:"clusterMode,omitempty"` LogDeliveryConfigurations []*PendingLogDeliveryConfiguration `json:"logDeliveryConfigurations,omitempty"` PrimaryClusterID *string `json:"primaryClusterID,omitempty"` // The status of an online resharding operation. - Resharding *ReshardingStatus `json:"resharding,omitempty"` + Resharding *ReshardingStatus `json:"resharding,omitempty"` + TransitEncryptionEnabled *bool `json:"transitEncryptionEnabled,omitempty"` + TransitEncryptionMode *string `json:"transitEncryptionMode,omitempty"` // The status of the user group update. UserGroups *UserGroupsUpdateStatus `json:"userGroups,omitempty"` } -// Contains all of the attributes of a specific Redis replication group. +// Contains all of the attributes of a specific Valkey or Redis OSS replication +// group. 
type ReplicationGroup_SDK struct { ARN *string `json:"arn,omitempty"` AtRestEncryptionEnabled *bool `json:"atRestEncryptionEnabled,omitempty"` @@ -543,22 +571,26 @@ type ReplicationGroup_SDK struct { AutomaticFailover *string `json:"automaticFailover,omitempty"` CacheNodeType *string `json:"cacheNodeType,omitempty"` ClusterEnabled *bool `json:"clusterEnabled,omitempty"` + ClusterMode *string `json:"clusterMode,omitempty"` // Represents the information required for client programs to connect to a cache - // node. + // node. This value is read-only. ConfigurationEndpoint *Endpoint `json:"configurationEndpoint,omitempty"` DataTiering *string `json:"dataTiering,omitempty"` Description *string `json:"description,omitempty"` + Engine *string `json:"engine,omitempty"` // The name of the Global datastore and role of this replication group in the // Global datastore. GlobalReplicationGroupInfo *GlobalReplicationGroupInfo `json:"globalReplicationGroupInfo,omitempty"` + IPDiscovery *string `json:"ipDiscovery,omitempty"` KMSKeyID *string `json:"kmsKeyID,omitempty"` LogDeliveryConfigurations []*LogDeliveryConfiguration `json:"logDeliveryConfigurations,omitempty"` MemberClusters []*string `json:"memberClusters,omitempty"` MemberClustersOutpostARNs []*string `json:"memberClustersOutpostARNs,omitempty"` MultiAZ *string `json:"multiAZ,omitempty"` + NetworkType *string `json:"networkType,omitempty"` NodeGroups []*NodeGroup `json:"nodeGroups,omitempty"` - // The settings to be applied to the Redis replication group, either immediately - // or during the next maintenance window. + // The settings to be applied to the Valkey or Redis OSS replication group, + // either immediately or during the next maintenance window. 
PendingModifiedValues *ReplicationGroupPendingModifiedValues `json:"pendingModifiedValues,omitempty"` ReplicationGroupCreateTime *metav1.Time `json:"replicationGroupCreateTime,omitempty"` ReplicationGroupID *string `json:"replicationGroupID,omitempty"` @@ -567,6 +599,7 @@ type ReplicationGroup_SDK struct { SnapshottingClusterID *string `json:"snapshottingClusterID,omitempty"` Status *string `json:"status,omitempty"` TransitEncryptionEnabled *bool `json:"transitEncryptionEnabled,omitempty"` + TransitEncryptionMode *string `json:"transitEncryptionMode,omitempty"` UserGroupIDs []*string `json:"userGroupIDs,omitempty"` } @@ -616,7 +649,50 @@ type SecurityGroupMembership struct { Status *string `json:"status,omitempty"` } -// An update that you can apply to your Redis clusters. +// The resource representing a serverless cache. +type ServerlessCache struct { + ARN *string `json:"arn,omitempty"` + CreateTime *metav1.Time `json:"createTime,omitempty"` + DailySnapshotTime *string `json:"dailySnapshotTime,omitempty"` + Description *string `json:"description,omitempty"` + // Represents the information required for client programs to connect to a cache + // node. This value is read-only. + Endpoint *Endpoint `json:"endpoint,omitempty"` + Engine *string `json:"engine,omitempty"` + FullEngineVersion *string `json:"fullEngineVersion,omitempty"` + KMSKeyID *string `json:"kmsKeyID,omitempty"` + MajorEngineVersion *string `json:"majorEngineVersion,omitempty"` + // Represents the information required for client programs to connect to a cache + // node. This value is read-only. 
+ ReaderEndpoint *Endpoint `json:"readerEndpoint,omitempty"` + SecurityGroupIDs []*string `json:"securityGroupIDs,omitempty"` + ServerlessCacheName *string `json:"serverlessCacheName,omitempty"` + SnapshotRetentionLimit *int64 `json:"snapshotRetentionLimit,omitempty"` + Status *string `json:"status,omitempty"` + UserGroupID *string `json:"userGroupID,omitempty"` +} + +// The configuration settings for a specific serverless cache. +type ServerlessCacheConfiguration struct { + Engine *string `json:"engine,omitempty"` + MajorEngineVersion *string `json:"majorEngineVersion,omitempty"` + ServerlessCacheName *string `json:"serverlessCacheName,omitempty"` +} + +// The resource representing a serverless cache snapshot. Available for Valkey, +// Redis OSS and Serverless Memcached only. +type ServerlessCacheSnapshot struct { + ARN *string `json:"arn,omitempty"` + BytesUsedForCache *string `json:"bytesUsedForCache,omitempty"` + CreateTime *metav1.Time `json:"createTime,omitempty"` + ExpiryTime *metav1.Time `json:"expiryTime,omitempty"` + KMSKeyID *string `json:"kmsKeyID,omitempty"` + ServerlessCacheSnapshotName *string `json:"serverlessCacheSnapshotName,omitempty"` + SnapshotType *string `json:"snapshotType,omitempty"` + Status *string `json:"status,omitempty"` +} + +// An update that you can apply to your Valkey or Redis OSS clusters. type ServiceUpdate struct { AutoUpdateAfterRecommendedApplyByDate *bool `json:"autoUpdateAfterRecommendedApplyByDate,omitempty"` Engine *string `json:"engine,omitempty"` @@ -634,8 +710,8 @@ type SlotMigration struct { ProgressPercentage *float64 `json:"progressPercentage,omitempty"` } -// Represents a copy of an entire Redis cluster as of the time when the snapshot -// was taken. +// Represents a copy of an entire Valkey or Redis OSS cluster as of the time +// when the snapshot was taken. 
type Snapshot_SDK struct { ARN *string `json:"arn,omitempty"` AutoMinorVersionUpgrade *bool `json:"autoMinorVersionUpgrade,omitempty"` @@ -675,7 +751,8 @@ type Subnet struct { SubnetAvailabilityZone *AvailabilityZone `json:"subnetAvailabilityZone,omitempty"` SubnetIdentifier *string `json:"subnetIdentifier,omitempty"` // The ID of the outpost subnet. - SubnetOutpost *SubnetOutpost `json:"subnetOutpost,omitempty"` + SubnetOutpost *SubnetOutpost `json:"subnetOutpost,omitempty"` + SupportedNetworkTypes []*string `json:"supportedNetworkTypes,omitempty"` } // The ID of the outpost subnet. @@ -738,6 +815,7 @@ type UserGroup_SDK struct { // Returns the updates being applied to the user group. PendingChanges *UserGroupPendingChanges `json:"pendingChanges,omitempty"` ReplicationGroups []*string `json:"replicationGroups,omitempty"` + ServerlessCaches []*string `json:"serverlessCaches,omitempty"` Status *string `json:"status,omitempty"` UserGroupID *string `json:"userGroupID,omitempty"` UserIDs []*string `json:"userIDs,omitempty"` diff --git a/apis/v1alpha1/user.go b/apis/v1alpha1/user.go index 1be62ed7..899bbb25 100644 --- a/apis/v1alpha1/user.go +++ b/apis/v1alpha1/user.go @@ -28,6 +28,8 @@ type UserSpec struct { // Access permissions string used for this user. // +kubebuilder:validation:Required AccessString *string `json:"accessString"` + // Specifies how to authenticate the user. + AuthenticationMode *AuthenticationMode `json:"authenticationMode,omitempty"` // The current supported value is Redis. // +kubebuilder:validation:Required Engine *string `json:"engine"` @@ -69,7 +71,7 @@ type UserStatus struct { // Access permissions string used for this user. 
// +kubebuilder:validation:Optional LastRequestedAccessString *string `json:"lastRequestedAccessString,omitempty"` - // The minimum engine version required, which is Redis 6.0 + // The minimum engine version required, which is Redis OSS 6.0 // +kubebuilder:validation:Optional MinimumEngineVersion *string `json:"minimumEngineVersion,omitempty"` // Indicates the user status. Can be "active", "modifying" or "deleting". diff --git a/apis/v1alpha1/user_group.go b/apis/v1alpha1/user_group.go index 1cd3b17f..af23842e 100644 --- a/apis/v1alpha1/user_group.go +++ b/apis/v1alpha1/user_group.go @@ -25,11 +25,12 @@ import ( type UserGroupSpec struct { - // The current supported value is Redis. + // The current supported value is Redis user. // +kubebuilder:validation:Required Engine *string `json:"engine"` // A list of tags to be added to this resource. A tag is a key-value pair. A - // tag key must be accompanied by a tag value, although null is accepted. + // tag key must be accompanied by a tag value, although null is accepted. Available + // for Valkey and Redis OSS only. Tags []*Tag `json:"tags,omitempty"` // The ID of the user group. // +kubebuilder:validation:Required @@ -51,7 +52,7 @@ type UserGroupStatus struct { // resource // +kubebuilder:validation:Optional Conditions []*ackv1alpha1.Condition `json:"conditions"` - // The minimum engine version required, which is Redis 6.0 + // The minimum engine version required, which is Redis OSS 6.0 // +kubebuilder:validation:Optional MinimumEngineVersion *string `json:"minimumEngineVersion,omitempty"` // A list of updates being applied to the user group. @@ -60,6 +61,10 @@ type UserGroupStatus struct { // A list of replication groups that the user group can access. // +kubebuilder:validation:Optional ReplicationGroups []*string `json:"replicationGroups,omitempty"` + // Indicates which serverless caches the specified user group is associated + // with. Available for Valkey, Redis OSS and Serverless Memcached only. 
+ // +kubebuilder:validation:Optional + ServerlessCaches []*string `json:"serverlessCaches,omitempty"` // Indicates user group status. Can be "creating", "active", "modifying", "deleting". // +kubebuilder:validation:Optional Status *string `json:"status,omitempty"` diff --git a/apis/v1alpha1/zz_generated.deepcopy.go b/apis/v1alpha1/zz_generated.deepcopy.go index 013abe4c..e2afe033 100644 --- a/apis/v1alpha1/zz_generated.deepcopy.go +++ b/apis/v1alpha1/zz_generated.deepcopy.go @@ -49,6 +49,37 @@ func (in *Authentication) DeepCopy() *Authentication { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationMode) DeepCopyInto(out *AuthenticationMode) { + *out = *in + if in.Passwords != nil { + in, out := &in.Passwords, &out.Passwords + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationMode. +func (in *AuthenticationMode) DeepCopy() *AuthenticationMode { + if in == nil { + return nil + } + out := new(AuthenticationMode) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AvailabilityZone) DeepCopyInto(out *AvailabilityZone) { *out = *in @@ -1059,6 +1090,17 @@ func (in *CacheParameterGroupStatus) DeepCopy() *CacheParameterGroupStatus { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CacheParameterGroupStatus_SDK) DeepCopyInto(out *CacheParameterGroupStatus_SDK) { *out = *in + if in.CacheNodeIDsToReboot != nil { + in, out := &in.CacheNodeIDsToReboot, &out.CacheNodeIDsToReboot + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.CacheParameterGroupName != nil { in, out := &in.CacheParameterGroupName, &out.CacheParameterGroupName *out = new(string) @@ -1339,6 +1381,17 @@ func (in *CacheSubnetGroupStatus) DeepCopyInto(out *CacheSubnetGroupStatus) { } } } + if in.SupportedNetworkTypes != nil { + in, out := &in.SupportedNetworkTypes, &out.SupportedNetworkTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.VPCID != nil { in, out := &in.VPCID, &out.VPCID *out = new(string) @@ -1385,6 +1438,17 @@ func (in *CacheSubnetGroup_SDK) DeepCopyInto(out *CacheSubnetGroup_SDK) { } } } + if in.SupportedNetworkTypes != nil { + in, out := &in.SupportedNetworkTypes, &out.SupportedNetworkTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.VPCID != nil { in, out := &in.VPCID, &out.VPCID *out = new(string) @@ -1435,6 +1499,28 @@ func (in *ConfigureShard) DeepCopyInto(out *ConfigureShard) { *out = new(string) **out = **in } + if in.PreferredAvailabilityZones != nil { + in, out := &in.PreferredAvailabilityZones, &out.PreferredAvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PreferredOutpostARNs != nil { + in, out := &in.PreferredOutpostARNs, &out.PreferredOutpostARNs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := 
&(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigureShard. @@ -1472,6 +1558,31 @@ func (in *CustomerNodeEndpoint) DeepCopy() *CustomerNodeEndpoint { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataStorage) DeepCopyInto(out *DataStorage) { + *out = *in + if in.Maximum != nil { + in, out := &in.Maximum, &out.Maximum + *out = new(int64) + **out = **in + } + if in.Minimum != nil { + in, out := &in.Minimum, &out.Minimum + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataStorage. +func (in *DataStorage) DeepCopy() *DataStorage { + if in == nil { + return nil + } + out := new(DataStorage) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DestinationDetails) DeepCopyInto(out *DestinationDetails) { *out = *in @@ -1527,6 +1638,31 @@ func (in *EC2SecurityGroup) DeepCopy() *EC2SecurityGroup { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ECPUPerSecond) DeepCopyInto(out *ECPUPerSecond) { + *out = *in + if in.Maximum != nil { + in, out := &in.Maximum, &out.Maximum + *out = new(int64) + **out = **in + } + if in.Minimum != nil { + in, out := &in.Minimum, &out.Minimum + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ECPUPerSecond. +func (in *ECPUPerSecond) DeepCopy() *ECPUPerSecond { + if in == nil { + return nil + } + out := new(ECPUPerSecond) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Endpoint) DeepCopyInto(out *Endpoint) { *out = *in @@ -2542,6 +2678,11 @@ func (in *ReplicationGroupPendingModifiedValues) DeepCopyInto(out *ReplicationGr *out = new(string) **out = **in } + if in.ClusterMode != nil { + in, out := &in.ClusterMode, &out.ClusterMode + *out = new(string) + **out = **in + } if in.LogDeliveryConfigurations != nil { in, out := &in.LogDeliveryConfigurations, &out.LogDeliveryConfigurations *out = make([]*PendingLogDeliveryConfiguration, len(*in)) @@ -2563,6 +2704,16 @@ func (in *ReplicationGroupPendingModifiedValues) DeepCopyInto(out *ReplicationGr *out = new(ReshardingStatus) (*in).DeepCopyInto(*out) } + if in.TransitEncryptionEnabled != nil { + in, out := &in.TransitEncryptionEnabled, &out.TransitEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.TransitEncryptionMode != nil { + in, out := &in.TransitEncryptionMode, &out.TransitEncryptionMode + *out = new(string) + **out = **in + } if in.UserGroups != nil { in, out := &in.UserGroups, &out.UserGroups *out = new(UserGroupsUpdateStatus) @@ -2634,6 +2785,11 @@ func (in *ReplicationGroupSpec) DeepCopyInto(out *ReplicationGroupSpec) { *out = new(corev1alpha1.AWSResourceReferenceWrapper) (*in).DeepCopyInto(*out) } + if in.ClusterMode != nil { + in, out := &in.ClusterMode, &out.ClusterMode + *out = new(string) + **out = **in + } if in.DataTieringEnabled != nil { in, out := &in.DataTieringEnabled, &out.DataTieringEnabled *out = new(bool) @@ -2654,6 +2810,11 @@ func (in *ReplicationGroupSpec) DeepCopyInto(out *ReplicationGroupSpec) { *out = new(string) **out = **in } + if in.IPDiscovery != nil { + in, out := &in.IPDiscovery, &out.IPDiscovery + *out = new(string) + **out = **in + } if in.KMSKeyID != nil { in, out := &in.KMSKeyID, &out.KMSKeyID *out = new(string) @@ -2675,6 +2836,11 @@ func (in *ReplicationGroupSpec) DeepCopyInto(out *ReplicationGroupSpec) { *out = new(bool) **out = **in } + if in.NetworkType != nil { + in, out := &in.NetworkType, &out.NetworkType + *out = 
new(string) + **out = **in + } if in.NodeGroupConfiguration != nil { in, out := &in.NodeGroupConfiguration, &out.NodeGroupConfiguration *out = make([]*NodeGroupConfiguration, len(*in)) @@ -2754,6 +2920,11 @@ func (in *ReplicationGroupSpec) DeepCopyInto(out *ReplicationGroupSpec) { } } } + if in.ServerlessCacheSnapshotName != nil { + in, out := &in.ServerlessCacheSnapshotName, &out.ServerlessCacheSnapshotName + *out = new(string) + **out = **in + } if in.SnapshotARNs != nil { in, out := &in.SnapshotARNs, &out.SnapshotARNs *out = make([]*string, len(*in)) @@ -2796,6 +2967,11 @@ func (in *ReplicationGroupSpec) DeepCopyInto(out *ReplicationGroupSpec) { *out = new(bool) **out = **in } + if in.TransitEncryptionMode != nil { + in, out := &in.TransitEncryptionMode, &out.TransitEncryptionMode + *out = new(string) + **out = **in + } if in.UserGroupIDs != nil { in, out := &in.UserGroupIDs, &out.UserGroupIDs *out = make([]*string, len(*in)) @@ -3032,6 +3208,11 @@ func (in *ReplicationGroup_SDK) DeepCopyInto(out *ReplicationGroup_SDK) { *out = new(bool) **out = **in } + if in.ClusterMode != nil { + in, out := &in.ClusterMode, &out.ClusterMode + *out = new(string) + **out = **in + } if in.ConfigurationEndpoint != nil { in, out := &in.ConfigurationEndpoint, &out.ConfigurationEndpoint *out = new(Endpoint) @@ -3047,11 +3228,21 @@ func (in *ReplicationGroup_SDK) DeepCopyInto(out *ReplicationGroup_SDK) { *out = new(string) **out = **in } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } if in.GlobalReplicationGroupInfo != nil { in, out := &in.GlobalReplicationGroupInfo, &out.GlobalReplicationGroupInfo *out = new(GlobalReplicationGroupInfo) (*in).DeepCopyInto(*out) } + if in.IPDiscovery != nil { + in, out := &in.IPDiscovery, &out.IPDiscovery + *out = new(string) + **out = **in + } if in.KMSKeyID != nil { in, out := &in.KMSKeyID, &out.KMSKeyID *out = new(string) @@ -3095,6 +3286,11 @@ func (in *ReplicationGroup_SDK) DeepCopyInto(out 
*ReplicationGroup_SDK) { *out = new(string) **out = **in } + if in.NetworkType != nil { + in, out := &in.NetworkType, &out.NetworkType + *out = new(string) + **out = **in + } if in.NodeGroups != nil { in, out := &in.NodeGroups, &out.NodeGroups *out = make([]*NodeGroup, len(*in)) @@ -3145,6 +3341,11 @@ func (in *ReplicationGroup_SDK) DeepCopyInto(out *ReplicationGroup_SDK) { *out = new(bool) **out = **in } + if in.TransitEncryptionMode != nil { + in, out := &in.TransitEncryptionMode, &out.TransitEncryptionMode + *out = new(string) + **out = **in + } if in.UserGroupIDs != nil { in, out := &in.UserGroupIDs, &out.UserGroupIDs *out = make([]*string, len(*in)) @@ -3368,6 +3569,184 @@ func (in *SecurityGroupMembership) DeepCopy() *SecurityGroupMembership { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerlessCache) DeepCopyInto(out *ServerlessCache) { + *out = *in + if in.ARN != nil { + in, out := &in.ARN, &out.ARN + *out = new(string) + **out = **in + } + if in.CreateTime != nil { + in, out := &in.CreateTime, &out.CreateTime + *out = (*in).DeepCopy() + } + if in.DailySnapshotTime != nil { + in, out := &in.DailySnapshotTime, &out.DailySnapshotTime + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(Endpoint) + (*in).DeepCopyInto(*out) + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.FullEngineVersion != nil { + in, out := &in.FullEngineVersion, &out.FullEngineVersion + *out = new(string) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.MajorEngineVersion != nil { + in, out := &in.MajorEngineVersion, &out.MajorEngineVersion + *out = new(string) + 
**out = **in + } + if in.ReaderEndpoint != nil { + in, out := &in.ReaderEndpoint, &out.ReaderEndpoint + *out = new(Endpoint) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIDs != nil { + in, out := &in.SecurityGroupIDs, &out.SecurityGroupIDs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServerlessCacheName != nil { + in, out := &in.ServerlessCacheName, &out.ServerlessCacheName + *out = new(string) + **out = **in + } + if in.SnapshotRetentionLimit != nil { + in, out := &in.SnapshotRetentionLimit, &out.SnapshotRetentionLimit + *out = new(int64) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.UserGroupID != nil { + in, out := &in.UserGroupID, &out.UserGroupID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessCache. +func (in *ServerlessCache) DeepCopy() *ServerlessCache { + if in == nil { + return nil + } + out := new(ServerlessCache) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerlessCacheConfiguration) DeepCopyInto(out *ServerlessCacheConfiguration) { + *out = *in + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.MajorEngineVersion != nil { + in, out := &in.MajorEngineVersion, &out.MajorEngineVersion + *out = new(string) + **out = **in + } + if in.ServerlessCacheName != nil { + in, out := &in.ServerlessCacheName, &out.ServerlessCacheName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessCacheConfiguration. 
+func (in *ServerlessCacheConfiguration) DeepCopy() *ServerlessCacheConfiguration { + if in == nil { + return nil + } + out := new(ServerlessCacheConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerlessCacheSnapshot) DeepCopyInto(out *ServerlessCacheSnapshot) { + *out = *in + if in.ARN != nil { + in, out := &in.ARN, &out.ARN + *out = new(string) + **out = **in + } + if in.BytesUsedForCache != nil { + in, out := &in.BytesUsedForCache, &out.BytesUsedForCache + *out = new(string) + **out = **in + } + if in.CreateTime != nil { + in, out := &in.CreateTime, &out.CreateTime + *out = (*in).DeepCopy() + } + if in.ExpiryTime != nil { + in, out := &in.ExpiryTime, &out.ExpiryTime + *out = (*in).DeepCopy() + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.ServerlessCacheSnapshotName != nil { + in, out := &in.ServerlessCacheSnapshotName, &out.ServerlessCacheSnapshotName + *out = new(string) + **out = **in + } + if in.SnapshotType != nil { + in, out := &in.SnapshotType, &out.SnapshotType + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessCacheSnapshot. +func (in *ServerlessCacheSnapshot) DeepCopy() *ServerlessCacheSnapshot { + if in == nil { + return nil + } + out := new(ServerlessCacheSnapshot) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ServiceUpdate) DeepCopyInto(out *ServiceUpdate) { *out = *in @@ -3884,6 +4263,17 @@ func (in *Subnet) DeepCopyInto(out *Subnet) { *out = new(SubnetOutpost) (*in).DeepCopyInto(*out) } + if in.SupportedNetworkTypes != nil { + in, out := &in.SupportedNetworkTypes, &out.SupportedNetworkTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subnet. @@ -4275,6 +4665,17 @@ func (in *UserGroupStatus) DeepCopyInto(out *UserGroupStatus) { } } } + if in.ServerlessCaches != nil { + in, out := &in.ServerlessCaches, &out.ServerlessCaches + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.Status != nil { in, out := &in.Status, &out.Status *out = new(string) @@ -4326,6 +4727,17 @@ func (in *UserGroup_SDK) DeepCopyInto(out *UserGroup_SDK) { } } } + if in.ServerlessCaches != nil { + in, out := &in.ServerlessCaches, &out.ServerlessCaches + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.Status != nil { in, out := &in.Status, &out.Status *out = new(string) @@ -4436,6 +4848,11 @@ func (in *UserSpec) DeepCopyInto(out *UserSpec) { *out = new(string) **out = **in } + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(AuthenticationMode) + (*in).DeepCopyInto(*out) + } if in.Engine != nil { in, out := &in.Engine, &out.Engine *out = new(string) diff --git a/cmd/controller/main.go b/cmd/controller/main.go index efd665cc..2abf2554 100644 --- a/cmd/controller/main.go +++ b/cmd/controller/main.go @@ -16,6 +16,7 @@ package main import ( + "context" "os" ec2apitypes 
"github.com/aws-controllers-k8s/ec2-controller/apis/v1alpha1" @@ -39,7 +40,6 @@ import ( svctypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" svcresource "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" _ "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource/cache_cluster" _ "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource/cache_parameter_group" @@ -53,11 +53,10 @@ import ( ) var ( - awsServiceAPIGroup = "elasticache.services.k8s.aws" - awsServiceAlias = "elasticache" - awsServiceEndpointsID = svcsdk.EndpointsID - scheme = runtime.NewScheme() - setupLog = ctrlrt.Log.WithName("setup") + awsServiceAPIGroup = "elasticache.services.k8s.aws" + awsServiceAlias = "elasticache" + scheme = runtime.NewScheme() + setupLog = ctrlrt.Log.WithName("setup") ) func init() { @@ -81,7 +80,8 @@ func main() { resourceGVKs = append(resourceGVKs, mf.ResourceDescriptor().GroupVersionKind()) } - if err := ackCfg.Validate(ackcfg.WithGVKs(resourceGVKs)); err != nil { + ctx := context.Background() + if err := ackCfg.Validate(ctx, ackcfg.WithGVKs(resourceGVKs)); err != nil { setupLog.Error( err, "Unable to create controller manager", "aws.service", awsServiceAlias, @@ -146,7 +146,7 @@ func main() { "aws.service", awsServiceAlias, ) sc := ackrt.NewServiceController( - awsServiceAlias, awsServiceAPIGroup, awsServiceEndpointsID, + awsServiceAlias, awsServiceAPIGroup, acktypes.VersionInfo{ version.GitCommit, version.GitVersion, diff --git a/config/controller/deployment.yaml b/config/controller/deployment.yaml index 5efef318..8380edc4 100644 --- a/config/controller/deployment.yaml +++ b/config/controller/deployment.yaml @@ -41,6 +41,8 @@ spec: - "$(LEADER_ELECTION_NAMESPACE)" - --reconcile-default-max-concurrent-syncs - "$(RECONCILE_DEFAULT_MAX_CONCURRENT_SYNCS)" + - --feature-gates + - "$(FEATURE_GATES)" image: controller:latest name: controller ports: @@ -76,6 
+78,8 @@ spec: value: "ack-system" - name: "RECONCILE_DEFAULT_MAX_CONCURRENT_SYNCS" value: "1" + - name: "FEATURE_GATES" + value: "" securityContext: allowPrivilegeEscalation: false privileged: false diff --git a/config/controller/kustomization.yaml b/config/controller/kustomization.yaml index 38a413ef..5c69fbb5 100644 --- a/config/controller/kustomization.yaml +++ b/config/controller/kustomization.yaml @@ -6,4 +6,4 @@ kind: Kustomization images: - name: controller newName: public.ecr.aws/aws-controllers-k8s/elasticache-controller - newTag: 0.1.0 + newTag: 1.6.0 diff --git a/config/crd/bases/elasticache.services.k8s.aws_cacheclusters.yaml b/config/crd/bases/elasticache.services.k8s.aws_cacheclusters.yaml index c51ef39e..ece8c670 100644 --- a/config/crd/bases/elasticache.services.k8s.aws_cacheclusters.yaml +++ b/config/crd/bases/elasticache.services.k8s.aws_cacheclusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.2 name: cacheclusters.elasticache.services.k8s.aws spec: group: elasticache.services.k8s.aws @@ -57,30 +57,17 @@ spec: description: |- CacheClusterSpec defines the desired state of CacheCluster. - Contains all of the attributes of a specific cluster. properties: authToken: description: |- Reserved parameter. The password used to access a password protected server. - Password constraints: - * Must be only printable ASCII characters. - * Must be at least 16 characters and no more than 128 characters in length. - - - * The only permitted printable special characters are !, &, #, $, ^, <, - >, and -. Other printable special characters cannot be used in the AUTH - token. - - - For more information, see AUTH password (http://redis.io/commands/AUTH) at - http://redis.io/commands/AUTH. 
properties: key: description: Key is the key within the secret @@ -99,9 +86,9 @@ spec: x-kubernetes-map-type: atomic autoMinorVersionUpgrade: description: |- - If you are running Redis engine version 6.0 or later, set this parameter - to yes if you want to opt-in to the next auto minor version upgrade campaign. - This parameter is disabled for previous versions. + If you are running Valkey 7.2 and above or Redis OSS engine version 6.0 and + above, set this parameter to yes to opt-in to the next auto minor version + upgrade campaign. This parameter is disabled for previous versions. type: boolean azMode: description: |- @@ -109,10 +96,8 @@ spec: Availability Zone or created across multiple Availability Zones in the cluster's region. - This parameter is only supported for Memcached clusters. - If the AZMode and PreferredAvailabilityZones are not specified, ElastiCache assumes single-az mode. type: string @@ -121,40 +106,34 @@ spec: The node group (shard) identifier. This parameter is stored as a lowercase string. - Constraints: - * A name must contain from 1 to 50 alphanumeric characters or hyphens. - * The first character must be a letter. - * A name cannot end with a hyphen or contain two consecutive hyphens. type: string cacheNodeType: description: |- The compute and memory capacity of the nodes in the node group (shard). - The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. 
- * General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported - Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - M6g node types (available only for Redis engine version 5.0.6 onward and - for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + M6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge - T4g node types (available only for Redis engine version 5.0.6 onward and - Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, + T4g node types (available only for Redis OSS engine version 5.0.6 onward + and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not recommended. Existing clusters are still supported but @@ -163,18 +142,16 @@ spec: cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge - * Compute optimized: Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) 
C1 node types: cache.c1.xlarge - * Memory optimized: Current generation: R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported - Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - R6g node types (available only for Redis engine version 5.0.6 onward and - for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + R6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: @@ -185,21 +162,18 @@ spec: R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge - Additional node type info - * All current generation instance types are created in Amazon VPC by default. + * Valkey or Redis OSS append-only files (AOF) are not supported for T1 + or T2 instances. - * Redis append-only files (AOF) are not supported for T1 or T2 instances. - + * Valkey or Redis OSS Multi-AZ with automatic failover is not supported + on T1 instances. - * Redis Multi-AZ with automatic failover is not supported on T1 instances. - - - * Redis configuration variables appendonly and appendfsync are not supported - on Redis version 2.8.22 and later. + * The configuration variables appendonly and appendfsync are not supported + on Valkey, or on Redis OSS version 2.8.22 and later. 
type: string cacheParameterGroupName: description: |- @@ -211,7 +185,7 @@ spec: cacheParameterGroupRef: description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax - for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t \ name: my-api" properties: from: @@ -227,7 +201,6 @@ spec: description: |- A list of security group names to associate with this cluster. - Use this parameter only when you are creating a cluster outside of an Amazon Virtual Private Cloud (Amazon VPC). items: @@ -237,19 +210,17 @@ spec: description: |- The name of the subnet group to be used for the cluster. - Use this parameter only when you are creating a cluster in an Amazon Virtual Private Cloud (Amazon VPC). - If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. For more information, - see Subnets and Subnet Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SubnetGroups.html). + see Subnets and Subnet Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SubnetGroups.html). type: string cacheSubnetGroupRef: description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax - for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t \ name: my-api" properties: from: @@ -265,7 +236,6 @@ spec: description: |- The name of the cache engine to be used for this cluster. - Valid values for this parameter are: memcached | redis type: string engineVersion: @@ -274,9 +244,8 @@ spec: the supported cache engine versions, use the DescribeCacheEngineVersions operation. 
- Important: You can upgrade to a newer engine version (see Selecting a Cache - Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement)), + Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SelectEngine.html#VersionManagement)), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version. @@ -284,9 +253,9 @@ spec: ipDiscovery: description: |- The network type you choose when modifying a cluster, either ipv4 | ipv6. - IPv6 is supported for workloads using Redis engine version 6.2 onward or - Memcached engine version 1.6.6 on all instances built on the Nitro system - (http://aws.amazon.com/ec2/nitro/). + IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine + version 6.2 and above or Memcached engine version 1.6.6 and above on all + instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). type: string logDeliveryConfigurations: description: Specifies the destination, format and type of the logs. @@ -326,21 +295,21 @@ spec: networkType: description: |- Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads - using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on - all instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). + using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached + engine version 1.6.6 and above on all instances built on the Nitro system + (http://aws.amazon.com/ec2/nitro/). type: string notificationTopicARN: description: |- The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent. - The Amazon SNS topic owner must be the same as the cluster owner. 
type: string notificationTopicRef: description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax - for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t \ name: my-api" properties: from: @@ -356,10 +325,8 @@ spec: description: |- The initial number of cache nodes that the cluster has. - - For clusters running Redis, this value must be 1. For clusters running Memcached, - this value must be between 1 and 40. - + For clusters running Valkey or Redis OSS, this value must be 1. For clusters + running Memcached, this value must be between 1 and 40. If you need more than 40 nodes for your Memcached cluster, please fill out the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/ @@ -380,12 +347,10 @@ spec: description: |- The EC2 Availability Zone in which the cluster is created. - All nodes belonging to this cluster are placed in the preferred Availability Zone. If you want to create your nodes across multiple Availability Zones, use PreferredAvailabilityZones. - Default: System chosen Availability Zone. type: string preferredAvailabilityZones: @@ -393,22 +358,17 @@ spec: A list of the Availability Zones in which cache nodes are created. The order of the zones in the list is not important. - This option is only supported on Memcached. - If you are creating your cluster in an Amazon VPC (recommended) you can only locate nodes in Availability Zones that are associated with the subnets in the selected subnet group. - The number of Availability Zones listed must equal the value of NumCacheNodes. - If you want all the nodes in the same Availability Zone, use PreferredAvailabilityZone instead, or repeat the Availability Zone multiple times in the list. - Default: System chosen Availability Zones. 
items: type: string @@ -434,18 +394,16 @@ spec: group as a read replica; otherwise, the cluster is a standalone primary that is not part of any replication group. - If the specified replication group is Multi-AZ enabled and the Availability Zone is not specified, the cluster is created in Availability Zones that provide the best spread of read replicas across Availability Zones. - This parameter is only valid if the Engine parameter is redis. type: string replicationGroupRef: description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax - for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t \ name: my-api" properties: from: @@ -461,7 +419,6 @@ spec: description: |- One or more VPC security groups associated with the cluster. - Use this parameter only when you are creating a cluster in an Amazon Virtual Private Cloud (Amazon VPC). items: @@ -470,31 +427,28 @@ spec: snapshotARNs: description: |- A single-element string list containing an Amazon Resource Name (ARN) that - uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot - file is used to populate the node group (shard). The Amazon S3 object name - in the ARN cannot contain any commas. - + uniquely identifies a Valkey or Redis OSS RDB snapshot file stored in Amazon + S3. The snapshot file is used to populate the node group (shard). The Amazon + S3 object name in the ARN cannot contain any commas. This parameter is only valid if the Engine parameter is redis. - Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb items: type: string type: array snapshotName: description: |- - The name of a Redis snapshot from which to restore data into the new node - group (shard). The snapshot status changes to restoring while the new node - group (shard) is being created. 
- + The name of a Valkey or Redis OSS snapshot from which to restore data into + the new node group (shard). The snapshot status changes to restoring while + the new node group (shard) is being created. This parameter is only valid if the Engine parameter is redis. type: string snapshotRef: description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax - for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t \ name: my-api" properties: from: @@ -512,10 +466,8 @@ spec: deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot taken today is retained for 5 days before being deleted. - This parameter is only valid if the Engine parameter is redis. - Default: 0 (i.e., automatic backups are disabled for this cache cluster). format: int64 type: integer @@ -524,14 +476,11 @@ spec: The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your node group (shard). - Example: 05:00-09:00 - If you do not specify this parameter, ElastiCache automatically chooses an appropriate time range. - This parameter is only valid if the Engine parameter is redis. type: string tags: @@ -575,7 +524,6 @@ spec: when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. - TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 type: string ownerAccountID: @@ -595,22 +543,19 @@ spec: description: |- A flag that enables encryption at-rest when set to true. - You cannot modify the value of AtRestEncryptionEnabled after the cluster is created. To enable at-rest encryption on a cluster you must set AtRestEncryptionEnabled to true when you create a cluster. 
- Required: Only available when creating a replication group in an Amazon VPC - using redis version 3.2.6, 4.x or later. - + using Redis OSS version 3.2.6, 4.x or later. Default: false type: boolean authTokenEnabled: description: |- - A flag that enables using an AuthToken (password) when issuing Redis commands. - + A flag that enables using an AuthToken (password) when issuing Valkey or + Redis OSS commands. Default: false type: boolean @@ -634,26 +579,24 @@ spec: description: |- Represents an individual cache node within a cluster. Each cache node runs its own instance of the cluster's protocol-compliant caching software - either - Memcached or Redis. - + Memcached, Valkey or Redis OSS. The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. - * General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported - Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - M6g node types (available only for Redis engine version 5.0.6 onward and - for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + M6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, 
cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge - T4g node types (available only for Redis engine version 5.0.6 onward and - Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, + T4g node types (available only for Redis OSS engine version 5.0.6 onward + and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not recommended. Existing clusters are still supported but @@ -662,18 +605,16 @@ spec: cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge - * Compute optimized: Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) C1 node types: cache.c1.xlarge - * Memory optimized: Current generation: R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported - Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - R6g node types (available only for Redis engine version 5.0.6 onward and - for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + R6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: @@ -684,21 +625,18 @@ 
spec: R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge - Additional node type info - * All current generation instance types are created in Amazon VPC by default. + * Valkey or Redis OSS append-only files (AOF) are not supported for T1 + or T2 instances. - * Redis append-only files (AOF) are not supported for T1 or T2 instances. + * Valkey or Redis OSS Multi-AZ with automatic failover is not supported + on T1 instances. - - * Redis Multi-AZ with automatic failover is not supported on T1 instances. - - - * Redis configuration variables appendonly and appendfsync are not supported - on Redis version 2.8.22 and later. + * The configuration variables appendonly and appendfsync are not supported + on Valkey, or on Redis OSS version 2.8.22 and later. properties: cacheNodeCreateTime: format: date-time @@ -714,7 +652,7 @@ spec: endpoint: description: |- Represents the information required for client programs to connect to a cache - node. + node. This value is read-only. properties: address: type: string @@ -799,7 +737,6 @@ spec: to connect to any node in the cluster. The configuration endpoint will always have .cfg in it. 
- Example: mem-3.9dvc4r.cfg.usw2.cache.amazonaws.com:11211 properties: address: diff --git a/config/crd/bases/elasticache.services.k8s.aws_cacheparametergroups.yaml b/config/crd/bases/elasticache.services.k8s.aws_cacheparametergroups.yaml index a873d1bf..18c1fd37 100644 --- a/config/crd/bases/elasticache.services.k8s.aws_cacheparametergroups.yaml +++ b/config/crd/bases/elasticache.services.k8s.aws_cacheparametergroups.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.2 name: cacheparametergroups.elasticache.services.k8s.aws spec: group: elasticache.services.k8s.aws @@ -41,7 +41,6 @@ spec: description: |- CacheParameterGroupSpec defines the desired state of CacheParameterGroup. - Represents the output of a CreateCacheParameterGroup operation. properties: cacheParameterGroupFamily: @@ -49,9 +48,8 @@ spec: The name of the cache parameter group family that the cache parameter group can be used with. - Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | - redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x + redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x | redis7 type: string cacheParameterGroupName: description: A user-specified name for the cache parameter group. @@ -116,7 +114,6 @@ spec: when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. 
- TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 type: string ownerAccountID: diff --git a/config/crd/bases/elasticache.services.k8s.aws_cachesubnetgroups.yaml b/config/crd/bases/elasticache.services.k8s.aws_cachesubnetgroups.yaml index a87a3bc5..93f825a9 100644 --- a/config/crd/bases/elasticache.services.k8s.aws_cachesubnetgroups.yaml +++ b/config/crd/bases/elasticache.services.k8s.aws_cachesubnetgroups.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.2 name: cachesubnetgroups.elasticache.services.k8s.aws spec: group: elasticache.services.k8s.aws @@ -40,13 +40,10 @@ spec: description: |- CacheSubnetGroupSpec defines the desired state of CacheSubnetGroup. - Represents the output of one of the following operations: - * CreateCacheSubnetGroup - * ModifyCacheSubnetGroup properties: cacheSubnetGroupDescription: @@ -56,10 +53,8 @@ spec: description: |- A name for the cache subnet group. This value is stored as a lowercase string. - Constraints: Must contain no more than 255 alphanumeric characters or hyphens. - Example: mysubnetgroup type: string subnetIDs: @@ -71,7 +66,7 @@ spec: items: description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax - for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t \ name: my-api" properties: from: @@ -124,7 +119,6 @@ spec: when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. 
- TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 type: string ownerAccountID: @@ -219,8 +213,20 @@ spec: subnetOutpostARN: type: string type: object + supportedNetworkTypes: + items: + type: string + type: array type: object type: array + supportedNetworkTypes: + description: |- + Either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Valkey + 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine + version 1.6.6 and above on all instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). + items: + type: string + type: array vpcID: description: |- The Amazon Virtual Private Cloud identifier (VPC ID) of the cache subnet diff --git a/config/crd/bases/elasticache.services.k8s.aws_replicationgroups.yaml b/config/crd/bases/elasticache.services.k8s.aws_replicationgroups.yaml index 69cbe8fa..de3642c9 100644 --- a/config/crd/bases/elasticache.services.k8s.aws_replicationgroups.yaml +++ b/config/crd/bases/elasticache.services.k8s.aws_replicationgroups.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.2 name: replicationgroups.elasticache.services.k8s.aws spec: group: elasticache.services.k8s.aws @@ -40,23 +40,20 @@ spec: description: |- ReplicationGroupSpec defines the desired state of ReplicationGroup. - - Contains all of the attributes of a specific Redis replication group. + Contains all of the attributes of a specific Valkey or Redis OSS replication + group. properties: atRestEncryptionEnabled: description: |- A flag that enables encryption at rest when set to true. - You cannot modify the value of AtRestEncryptionEnabled after the replication group is created. 
To enable encryption at rest on a replication group you must set AtRestEncryptionEnabled to true when you create the replication group. - Required: Only available when creating a replication group in an Amazon VPC - using redis version 3.2.6, 4.x or later. - + using Redis OSS version 3.2.6, 4.x or later. Default: false type: boolean @@ -64,31 +61,17 @@ spec: description: |- Reserved parameter. The password used to access a password protected server. - AuthToken can be specified only on replication groups where TransitEncryptionEnabled is true. - For HIPAA compliance, you must specify TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup. - Password constraints: - * Must be only printable ASCII characters. - * Must be at least 16 characters and no more than 128 characters in length. - - - * The only permitted printable special characters are !, &, #, $, ^, <, - >, and -. Other printable special characters cannot be used in the AUTH - token. - - - For more information, see AUTH password (http://redis.io/commands/AUTH) at - http://redis.io/commands/AUTH. properties: key: description: Key is the key within the secret @@ -110,10 +93,8 @@ spec: Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails. - - AutomaticFailoverEnabled must be enabled for Redis (cluster mode enabled) - replication groups. - + AutomaticFailoverEnabled must be enabled for Valkey or Redis OSS (cluster + mode enabled) replication groups. Default: false type: boolean @@ -121,69 +102,62 @@ spec: description: |- The compute and memory capacity of the nodes in the node group (shard). - The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. 
- - * General purpose: Current generation: M6g node types (available only - for Redis engine version 5.0.6 onward and for Memcached engine version - 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, - cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge - For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, - cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, - cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available - only for Redis engine version 5.0.6 onward and Memcached engine version - 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 - node types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: - cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not - recommended. Existing clusters are still supported but creation of new - clusters is not supported for these types.) 
T1 node types: cache.t1.micro - M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge - M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge - + * General purpose: Current generation: M7g node types: cache.m7g.large, + cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, + cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + M6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, + cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, + cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, + cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: + cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + T4g node types (available only for Redis OSS engine version 5.0.6 onward + and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, + cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium + T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous + generation: (not recommended. Existing clusters are still supported but + creation of new clusters is not supported for these types.) T1 node types: + cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + cache.m3.2xlarge * Compute optimized: Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) 
C1 node types: cache.c1.xlarge - - * Memory optimized with data tiering: Current generation: R6gd node types - (available only for Redis engine version 6.2 onward). cache.r6gd.xlarge, - cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, - cache.r6gd.16xlarge - - - * Memory optimized: Current generation: R6g node types (available only - for Redis engine version 5.0.6 onward and for Memcached engine version - 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, - cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge - For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, - cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, - cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge - Previous generation: (not recommended. Existing clusters are still supported - but creation of new clusters is not supported for these types.) 
M2 node - types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: - cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge - + * Memory optimized: Current generation: R7g node types: cache.r7g.large, + cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, + cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + R6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, + cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, + cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, + cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + cache.r4.16xlarge Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for + these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + cache.r3.8xlarge Additional node type info - * All current generation instance types are created in Amazon VPC by default. + * Valkey or Redis OSS append-only files (AOF) are not supported for T1 + or T2 instances. - * Redis append-only files (AOF) are not supported for T1 or T2 instances. - + * Valkey or Redis OSS Multi-AZ with automatic failover is not supported + on T1 instances. - * Redis Multi-AZ with automatic failover is not supported on T1 instances. - - - * Redis configuration variables appendonly and appendfsync are not supported - on Redis version 2.8.22 and later. 
+ * The configuration variables appendonly and appendfsync are not supported + on Valkey, or on Redis OSS version 2.8.22 and later. type: string cacheParameterGroupName: description: |- @@ -191,21 +165,20 @@ spec: If this argument is omitted, the default cache parameter group for the specified engine is used. + If you are running Valkey or Redis OSS version 3.2.4 or later, only one node + group (shard), and want to use a default parameter group, we recommend that + you specify the parameter group by name. - If you are running Redis version 3.2.4 or later, only one node group (shard), - and want to use a default parameter group, we recommend that you specify - the parameter group by name. - + * To create a Valkey or Redis OSS (cluster mode disabled) replication + group, use CacheParameterGroupName=default.redis3.2. - * To create a Redis (cluster mode disabled) replication group, use CacheParameterGroupName=default.redis3.2. - - - * To create a Redis (cluster mode enabled) replication group, use CacheParameterGroupName=default.redis3.2.cluster.on. + * To create a Valkey or Redis OSS (cluster mode enabled) replication group, + use CacheParameterGroupName=default.redis3.2.cluster.on. type: string cacheParameterGroupRef: description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax - for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t \ name: my-api" properties: from: @@ -227,15 +200,14 @@ spec: description: |- The name of the cache subnet group to be used for the replication group. - If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. For more information, - see Subnets and Subnet Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SubnetGroups.html). 
+ see Subnets and Subnet Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SubnetGroups.html). type: string cacheSubnetGroupRef: description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax - for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t \ name: my-api" properties: from: @@ -247,11 +219,20 @@ spec: type: string type: object type: object + clusterMode: + description: |- + Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you + must first set the cluster mode to Compatible. Compatible mode allows your + Valkey or Redis OSS clients to connect using both cluster mode enabled and + cluster mode disabled. After you migrate all Valkey or Redis OSS clients + to use cluster mode enabled, you can then complete cluster mode configuration + and set the cluster mode to Enabled. + type: string dataTieringEnabled: description: |- Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd - nodes. For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/data-tiering.html). + nodes. For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/data-tiering.html). type: boolean description: description: A user-created description for the replication group. @@ -259,7 +240,7 @@ spec: engine: description: |- The name of the cache engine to be used for the clusters in this replication - group. Must be Redis. + group. The value must be set to Redis. type: string engineVersion: description: |- @@ -267,14 +248,20 @@ spec: replication group. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation. 
- Important: You can upgrade to a newer engine version (see Selecting a Cache - Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement)) + Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SelectEngine.html#VersionManagement)) in the ElastiCache User Guide, but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version. type: string + ipDiscovery: + description: |- + The network type you choose when creating a replication group, either ipv4 + | ipv6. IPv6 is supported for workloads using Valkey 7.2 and above, Redis + OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above + on all instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). + type: string kmsKeyID: description: The ID of the KMS key used to encrypt the disk in the cluster. @@ -317,21 +304,27 @@ spec: multiAZEnabled: description: |- A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. - For more information, see Minimizing Downtime: Multi-AZ (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/AutoFailover.html). + For more information, see Minimizing Downtime: Multi-AZ (http://docs.aws.amazon.com/AmazonElastiCache/latest/dg/AutoFailover.html). type: boolean + networkType: + description: |- + Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads + using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached + engine version 1.6.6 and above on all instances built on the Nitro system + (http://aws.amazon.com/ec2/nitro/). + type: string nodeGroupConfiguration: description: |- A list of node group (shard) configuration options. Each node group (shard) configuration has the following members: PrimaryAvailabilityZone, ReplicaAvailabilityZones, ReplicaCount, and Slots. 
- - If you're creating a Redis (cluster mode disabled) or a Redis (cluster mode - enabled) replication group, you can use this parameter to individually configure - each node group (shard), or you can omit this parameter. However, it is required - when seeding a Redis (cluster mode enabled) cluster from a S3 rdb file. You - must configure each node group (shard) using this parameter because you must - specify the slots for each node group. + If you're creating a Valkey or Redis OSS (cluster mode disabled) or a Valkey + or Redis OSS (cluster mode enabled) replication group, you can use this parameter + to individually configure each node group (shard), or you can omit this parameter. + However, it is required when seeding a Valkey or Redis OSS (cluster mode + enabled) cluster from a S3 rdb file. You must configure each node group (shard) + using this parameter because you must specify the slots for each node group. items: description: |- Node group (shard) configuration options. Each node group (shard) configuration @@ -364,15 +357,14 @@ spec: The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent. - The Amazon SNS topic owner must be the same as the cluster owner. type: string numNodeGroups: description: |- An optional parameter that specifies the number of node groups (shards) for - this Redis (cluster mode enabled) replication group. For Redis (cluster mode - disabled) either omit this parameter or set it to 1. - + this Valkey or Redis OSS (cluster mode enabled) replication group. For Valkey + or Redis OSS (cluster mode disabled) either omit this parameter or set it + to 1. Default: 1 format: int64 @@ -389,60 +381,41 @@ spec: in which clusters are allocated. The primary cluster is created in the first AZ in the list. - This parameter is not used if there is more than one node group (shard). You should use NodeGroupConfiguration instead. 
- If you are creating your replication group in an Amazon VPC (recommended), you can only locate clusters in Availability Zones associated with the subnets in the selected subnet group. - The number of Availability Zones listed must equal the value of NumCacheClusters. - Default: system chosen Availability Zones. items: type: string type: array preferredMaintenanceWindow: description: |- - Specifies the weekly time range during which maintenance on the cluster is - performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi - (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid - values for ddd are: - - Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. - Valid values for ddd are: - * sun - * mon - * tue - * wed - * thu - * fri - * sat - Example: sun:23:00-mon:01:30 type: string primaryClusterID: @@ -450,7 +423,6 @@ spec: The identifier of the cluster that serves as the primary for this replication group. This cluster must already exist and have a status of available. - This parameter is not required if NumCacheClusters, NumNodeGroups, or ReplicasPerNodeGroup is specified. type: string @@ -465,23 +437,18 @@ spec: The replication group identifier. This parameter is stored as a lowercase string. - Constraints: - * A name must contain from 1 to 40 alphanumeric characters or hyphens. - * The first character must be a letter. - * A name cannot end with a hyphen or contain two consecutive hyphens. type: string securityGroupIDs: description: |- One or more Amazon VPC security groups associated with this replication group. - Use this parameter only when you are creating a replication group in an Amazon Virtual Private Cloud (Amazon VPC). 
items: @@ -491,7 +458,7 @@ spec: items: description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax - for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t \ name: my-api" properties: from: @@ -504,16 +471,20 @@ spec: type: object type: object type: array + serverlessCacheSnapshotName: + description: |- + The name of the snapshot used to create a replication group. Available for + Valkey, Redis OSS only. + type: string snapshotARNs: description: |- - A list of Amazon Resource Names (ARN) that uniquely identify the Redis RDB - snapshot files stored in Amazon S3. The snapshot files are used to populate - the new replication group. The Amazon S3 object name in the ARN cannot contain - any commas. The new replication group will have the number of node groups - (console: shards) specified by the parameter NumNodeGroups or the number - of node groups configured by NodeGroupConfiguration regardless of the number - of ARNs specified here. - + A list of Amazon Resource Names (ARN) that uniquely identify the Valkey or + Redis OSS RDB snapshot files stored in Amazon S3. The snapshot files are + used to populate the new replication group. The Amazon S3 object name in + the ARN cannot contain any commas. The new replication group will have the + number of node groups (console: shards) specified by the parameter NumNodeGroups + or the number of node groups configured by NodeGroupConfiguration regardless + of the number of ARNs specified here. Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb items: @@ -531,7 +502,6 @@ spec: deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot that was taken today is retained for 5 days before being deleted. - Default: 0 (i.e., automatic backups are disabled for this cluster). 
format: int64 type: integer @@ -540,10 +510,8 @@ spec: The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your node group (shard). - Example: 05:00-09:00 - If you do not specify this parameter, ElastiCache automatically chooses an appropriate time range. type: string @@ -572,30 +540,37 @@ spec: description: |- A flag that enables in-transit encryption when set to true. - - You cannot modify the value of TransitEncryptionEnabled after the cluster - is created. To enable in-transit encryption on a cluster you must set TransitEncryptionEnabled - to true when you create a cluster. - - This parameter is valid only if the Engine parameter is redis, the EngineVersion parameter is 3.2.6, 4.x or later, and the cluster is being created in an Amazon VPC. - If you enable in-transit encryption, you must also specify a value for CacheSubnetGroup. - Required: Only available when creating a replication group in an Amazon VPC - using redis version 3.2.6, 4.x or later. - + using Redis OSS version 3.2.6, 4.x or later. Default: false - For HIPAA compliance, you must specify TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup. type: boolean + transitEncryptionMode: + description: |- + A setting that allows you to migrate your clients to use in-transit encryption, + with no downtime. + + When setting TransitEncryptionEnabled to true, you can set your TransitEncryptionMode + to preferred in the same request, to allow both encrypted and unencrypted + connections at the same time. Once you migrate all your Valkey or Redis OSS + clients to use encrypted connections you can modify the value to required + to allow encrypted connections only. + + Setting TransitEncryptionMode to required is a two-step process that requires + you to first set the TransitEncryptionMode to preferred, after that you can + set TransitEncryptionMode to required. + + This process will not trigger the replacement of the replication group. 
+ type: string userGroupIDs: description: The user group to associate with the replication group. items: @@ -622,7 +597,6 @@ spec: when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. - TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 type: string ownerAccountID: @@ -641,8 +615,8 @@ spec: allowedScaleDownModifications: description: |- A string list, each element of which specifies a cache node type which you - can use to scale your cluster or replication group. When scaling down a Redis - cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, + can use to scale your cluster or replication group. When scaling down a Valkey + or Redis OSS cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, use a value from this list for the CacheNodeType parameter. items: type: string @@ -652,17 +626,16 @@ spec: A string list, each element of which specifies a cache node type which you can use to scale your cluster or replication group. - - When scaling up a Redis cluster or replication group using ModifyCacheCluster - or ModifyReplicationGroup, use a value from this list for the CacheNodeType - parameter. + When scaling up a Valkey or Redis OSS cluster or replication group using + ModifyCacheCluster or ModifyReplicationGroup, use a value from this list + for the CacheNodeType parameter. items: type: string type: array authTokenEnabled: description: |- - A flag that enables using an AuthToken (password) when issuing Redis commands. - + A flag that enables using an AuthToken (password) when issuing Valkey or + Redis OSS commands. 
Default: false type: boolean @@ -672,13 +645,14 @@ spec: type: string autoMinorVersionUpgrade: description: |- - If you are running Redis engine version 6.0 or later, set this parameter - to yes if you want to opt-in to the next auto minor version upgrade campaign. - This parameter is disabled for previous versions. + If you are running Valkey 7.2 and above, or Redis OSS engine version 6.0 + and above, set this parameter to yes if you want to opt-in to the next auto + minor version upgrade campaign. This parameter is disabled for previous versions. type: boolean automaticFailover: - description: Indicates the status of automatic failover for this Redis - replication group. + description: |- + Indicates the status of automatic failover for this Valkey or Redis OSS replication + group. type: string clusterEnabled: description: |- @@ -686,7 +660,6 @@ spec: i.e., whether its data can be partitioned across multiple shards (API/CLI: node groups). - Valid values: true | false type: boolean conditions: @@ -739,7 +712,7 @@ spec: description: |- Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd - nodes. For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/data-tiering.html). + nodes. For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/data-tiering.html). type: string events: description: |- @@ -823,14 +796,14 @@ spec: multiAZ: description: |- A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. - For more information, see Minimizing Downtime: Multi-AZ (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/AutoFailover.html) + For more information, see Minimizing Downtime: Multi-AZ (http://docs.aws.amazon.com/AmazonElastiCache/latest/dg/AutoFailover.html) type: string nodeGroups: description: |- - A list of node groups in this replication group. 
For Redis (cluster mode - disabled) replication groups, this is a single-element list. For Redis (cluster - mode enabled) replication groups, the list contains an entry for each node - group (shard). + A list of node groups in this replication group. For Valkey or Redis OSS + (cluster mode disabled) replication groups, this is a single-element list. + For Valkey or Redis OSS (cluster mode enabled) replication groups, the list + contains an entry for each node group (shard). items: description: |- Represents a collection of cache nodes in a replication group. One node in @@ -857,7 +830,7 @@ spec: readEndpoint: description: |- Represents the information required for client programs to connect to a cache - node. + node. This value is read-only. properties: address: type: string @@ -870,7 +843,7 @@ spec: primaryEndpoint: description: |- Represents the information required for client programs to connect to a cache - node. + node. This value is read-only. properties: address: type: string @@ -881,7 +854,7 @@ spec: readerEndpoint: description: |- Represents the information required for client programs to connect to a cache - node. + node. This value is read-only. properties: address: type: string @@ -904,6 +877,8 @@ spec: type: string automaticFailoverStatus: type: string + clusterMode: + type: string logDeliveryConfigurations: items: description: The log delivery configurations being modified @@ -949,6 +924,10 @@ spec: type: number type: object type: object + transitEncryptionEnabled: + type: boolean + transitEncryptionMode: + type: string userGroups: description: The status of the user group update. 
properties: diff --git a/config/crd/bases/elasticache.services.k8s.aws_snapshots.yaml b/config/crd/bases/elasticache.services.k8s.aws_snapshots.yaml index 09c6ca24..8424b2ff 100644 --- a/config/crd/bases/elasticache.services.k8s.aws_snapshots.yaml +++ b/config/crd/bases/elasticache.services.k8s.aws_snapshots.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.2 name: snapshots.elasticache.services.k8s.aws spec: group: elasticache.services.k8s.aws @@ -40,9 +40,8 @@ spec: description: |- SnapshotSpec defines the desired state of Snapshot. - - Represents a copy of an entire Redis cluster as of the time when the snapshot - was taken. + Represents a copy of an entire Valkey or Redis OSS cluster as of the time + when the snapshot was taken. properties: cacheClusterID: description: |- @@ -103,7 +102,6 @@ spec: when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. - TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 type: string ownerAccountID: @@ -121,14 +119,14 @@ spec: type: object autoMinorVersionUpgrade: description: |- - If you are running Redis engine version 6.0 or later, set this parameter - to yes if you want to opt-in to the next auto minor version upgrade campaign. - This parameter is disabled for previous versions. + If you are running Valkey 7.2 and above or Redis OSS engine version 6.0 and + above, set this parameter to yes if you want to opt-in to the next auto minor + version upgrade campaign. This parameter is disabled for previous versions. type: boolean automaticFailover: description: |- - Indicates the status of automatic failover for the source Redis replication - group. 
+ Indicates the status of automatic failover for the source Valkey or Redis + OSS replication group. type: string cacheClusterCreateTime: description: The date and time when the source cluster was created. @@ -138,70 +136,62 @@ spec: description: |- The name of the compute and memory capacity node type for the source cluster. - The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. - - * General purpose: Current generation: M6g node types (available only - for Redis engine version 5.0.6 onward and for Memcached engine version - 1.5.16 onward). cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, - cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge - For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, - cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, - cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available - only for Redis engine version 5.0.6 onward and Memcached engine version - 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 - node types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: - cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not - recommended. Existing clusters are still supported but creation of new - clusters is not supported for these types.) 
T1 node types: cache.t1.micro - M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge - M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge - + * General purpose: Current generation: M7g node types: cache.m7g.large, + cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, + cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + M6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, + cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, + cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, + cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: + cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + T4g node types (available only for Redis OSS engine version 5.0.6 onward + and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, + cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium + T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous + generation: (not recommended. Existing clusters are still supported but + creation of new clusters is not supported for these types.) T1 node types: + cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + cache.m3.2xlarge * Compute optimized: Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) 
C1 node types: cache.c1.xlarge - - * Memory optimized with data tiering: Current generation: R6gd node types - (available only for Redis engine version 6.2 onward). cache.r6gd.xlarge, - cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, - cache.r6gd.16xlarge - - - * Memory optimized: Current generation: R6g node types (available only - for Redis engine version 5.0.6 onward and for Memcached engine version - 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, - cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge - For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, - cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, - cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge - Previous generation: (not recommended. Existing clusters are still supported - but creation of new clusters is not supported for these types.) 
M2 node - types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: - cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge - + * Memory optimized: Current generation: R7g node types: cache.r7g.large, + cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, + cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + R6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, + cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, + cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, + cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + cache.r4.16xlarge Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for + these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + cache.r3.8xlarge Additional node type info - * All current generation instance types are created in Amazon VPC by default. + * Valkey or Redis OSS append-only files (AOF) are not supported for T1 + or T2 instances. - * Redis append-only files (AOF) are not supported for T1 or T2 instances. - - - * Redis Multi-AZ with automatic failover is not supported on T1 instances. - + * Valkey or Redis OSS Multi-AZ with automatic failover is not supported + on T1 instances. - * Redis configuration variables appendonly and appendfsync are not supported - on Redis version 2.8.22 and later. 
+ * The configuration variables appendonly and appendfsync are not supported + on Valkey, or on Redis OSS version 2.8.22 and later. type: string cacheParameterGroupName: description: The cache parameter group that is associated with the @@ -250,7 +240,7 @@ spec: description: |- Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd - nodes. For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/data-tiering.html). + nodes. For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/data-tiering.html). type: string engine: description: The name of the cache engine (memcached or redis) used @@ -312,9 +302,8 @@ spec: description: |- The number of cache nodes in the source cluster. - - For clusters running Redis, this value must be 1. For clusters running Memcached, - this value must be between 1 and 40. + For clusters running Valkey or Redis OSS, this value must be 1. For clusters + running Memcached, this value must be between 1 and 40. format: int64 type: integer numNodeGroups: @@ -339,31 +328,22 @@ spec: performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. - Valid values for ddd are: - * sun - * mon - * tue - * wed - * thu - * fri - * sat - Example: sun:23:00-mon:01:30 type: string preferredOutpostARN: @@ -377,13 +357,11 @@ spec: For an automatic snapshot, the number of days for which ElastiCache retains the snapshot before deleting it. - For manual snapshots, this field reflects the SnapshotRetentionLimit for the source cluster when the snapshot was created. This field is otherwise ignored: Manual snapshots do not expire, and can only be deleted using the DeleteSnapshot operation. - Important If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off. 
format: int64 diff --git a/config/crd/bases/elasticache.services.k8s.aws_usergroups.yaml b/config/crd/bases/elasticache.services.k8s.aws_usergroups.yaml index fb0c481a..4761393d 100644 --- a/config/crd/bases/elasticache.services.k8s.aws_usergroups.yaml +++ b/config/crd/bases/elasticache.services.k8s.aws_usergroups.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.2 name: usergroups.elasticache.services.k8s.aws spec: group: elasticache.services.k8s.aws @@ -39,12 +39,13 @@ spec: spec: properties: engine: - description: The current supported value is Redis. + description: The current supported value is Redis user. type: string tags: description: |- A list of tags to be added to this resource. A tag is a key-value pair. A - tag key must be accompanied by a tag value, although null is accepted. + tag key must be accompanied by a tag value, although null is accepted. Available + for Valkey and Redis OSS only. items: description: |- A tag that can be added to an ElastiCache cluster or replication group. Tags @@ -89,7 +90,6 @@ spec: when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. - TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 type: string ownerAccountID: @@ -141,7 +141,8 @@ spec: type: object type: array minimumEngineVersion: - description: The minimum engine version required, which is Redis 6.0 + description: The minimum engine version required, which is Redis OSS + 6.0 type: string pendingChanges: description: A list of updates being applied to the user group. 
@@ -161,6 +162,13 @@ spec: items: type: string type: array + serverlessCaches: + description: |- + Indicates which serverless caches the specified user group is associated + with. Available for Valkey, Redis OSS and Serverless Memcached only. + items: + type: string + type: array status: description: Indicates user group status. Can be "creating", "active", "modifying", "deleting". diff --git a/config/crd/bases/elasticache.services.k8s.aws_users.yaml b/config/crd/bases/elasticache.services.k8s.aws_users.yaml index 1507f204..20322f8c 100644 --- a/config/crd/bases/elasticache.services.k8s.aws_users.yaml +++ b/config/crd/bases/elasticache.services.k8s.aws_users.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.2 name: users.elasticache.services.k8s.aws spec: group: elasticache.services.k8s.aws @@ -41,6 +41,16 @@ spec: accessString: description: Access permissions string used for this user. type: string + authenticationMode: + description: Specifies how to authenticate the user. + properties: + passwords: + items: + type: string + type: array + type_: + type: string + type: object engine: description: The current supported value is Redis. type: string @@ -120,7 +130,6 @@ spec: when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. - TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 type: string ownerAccountID: @@ -187,7 +196,8 @@ spec: description: Access permissions string used for this user. 
type: string minimumEngineVersion: - description: The minimum engine version required, which is Redis 6.0 + description: The minimum engine version required, which is Redis OSS + 6.0 type: string status: description: Indicates the user status. Can be "active", "modifying" diff --git a/config/crd/common/bases/services.k8s.aws_adoptedresources.yaml b/config/crd/common/bases/services.k8s.aws_adoptedresources.yaml index 65eff735..b7be3224 100644 --- a/config/crd/common/bases/services.k8s.aws_adoptedresources.yaml +++ b/config/crd/common/bases/services.k8s.aws_adoptedresources.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.2 name: adoptedresources.services.k8s.aws spec: group: services.k8s.aws @@ -78,11 +78,9 @@ spec: automatically converts this to an arbitrary string-string map. https://github.com/kubernetes-sigs/controller-tools/issues/385 - Active discussion about inclusion of this field in the spec is happening in this PR: https://github.com/kubernetes-sigs/controller-tools/pull/395 - Until this is allowed, or if it never is, we will produce a subset of the object meta that contains only the fields which the user is allowed to modify in the metadata. properties: @@ -105,13 +103,11 @@ spec: and may be truncated by the length of the suffix required to make the value unique on the server. - If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). - Applied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency type: string @@ -140,7 +136,6 @@ spec: Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. - Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces diff --git a/config/crd/common/bases/services.k8s.aws_fieldexports.yaml b/config/crd/common/bases/services.k8s.aws_fieldexports.yaml index 4d3a8f1d..49b4f383 100644 --- a/config/crd/common/bases/services.k8s.aws_fieldexports.yaml +++ b/config/crd/common/bases/services.k8s.aws_fieldexports.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.2 name: fieldexports.services.k8s.aws spec: group: services.k8s.aws diff --git a/config/rbac/cluster-role-controller.yaml b/config/rbac/cluster-role-controller.yaml index bb1c76a1..0fc7a8d8 100644 --- a/config/rbac/cluster-role-controller.yaml +++ b/config/rbac/cluster-role-controller.yaml @@ -8,6 +8,7 @@ rules: - "" resources: - configmaps + - secrets verbs: - get - list @@ -21,39 +22,12 @@ rules: - get - list - watch -- apiGroups: - - "" - resources: - - secrets - verbs: - - get - - list - - patch - - watch - apiGroups: - ec2.services.k8s.aws resources: - securitygroups - verbs: - - get - - list -- apiGroups: - - ec2.services.k8s.aws - resources: - securitygroups/status - verbs: - - get - - list -- apiGroups: - - ec2.services.k8s.aws - resources: - subnets - verbs: - - get - - list -- apiGroups: - - ec2.services.k8s.aws - resources: - subnets/status verbs: - get @@ -62,125 +36,11 @@ rules: - elasticache.services.k8s.aws resources: - cacheclusters - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - elasticache.services.k8s.aws - resources: - - cacheclusters/status - verbs: - - 
get - - patch - - update -- apiGroups: - - elasticache.services.k8s.aws - resources: - cacheparametergroups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - elasticache.services.k8s.aws - resources: - - cacheparametergroups/status - verbs: - - get - - patch - - update -- apiGroups: - - elasticache.services.k8s.aws - resources: - cachesubnetgroups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - elasticache.services.k8s.aws - resources: - - cachesubnetgroups/status - verbs: - - get - - patch - - update -- apiGroups: - - elasticache.services.k8s.aws - resources: - replicationgroups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - elasticache.services.k8s.aws - resources: - - replicationgroups/status - verbs: - - get - - patch - - update -- apiGroups: - - elasticache.services.k8s.aws - resources: - snapshots - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - elasticache.services.k8s.aws - resources: - - snapshots/status - verbs: - - get - - patch - - update -- apiGroups: - - elasticache.services.k8s.aws - resources: - usergroups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - elasticache.services.k8s.aws - resources: - - usergroups/status - verbs: - - get - - patch - - update -- apiGroups: - - elasticache.services.k8s.aws - resources: - users verbs: - create @@ -193,6 +53,12 @@ rules: - apiGroups: - elasticache.services.k8s.aws resources: + - cacheclusters/status + - cacheparametergroups/status + - cachesubnetgroups/status + - replicationgroups/status + - snapshots/status + - usergroups/status - users/status verbs: - get @@ -202,25 +68,6 @@ rules: - services.k8s.aws resources: - adoptedresources - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - services.k8s.aws - resources: - - 
adoptedresources/status - verbs: - - get - - patch - - update -- apiGroups: - - services.k8s.aws - resources: - fieldexports verbs: - create @@ -233,6 +80,7 @@ rules: - apiGroups: - services.k8s.aws resources: + - adoptedresources/status - fieldexports/status verbs: - get @@ -242,12 +90,6 @@ rules: - sns.services.k8s.aws resources: - topics - verbs: - - get - - list -- apiGroups: - - sns.services.k8s.aws - resources: - topics/status verbs: - get diff --git a/go.mod b/go.mod index 75c4bdcb..ea17f515 100644 --- a/go.mod +++ b/go.mod @@ -1,45 +1,58 @@ module github.com/aws-controllers-k8s/elasticache-controller -go 1.21 +go 1.22.0 -toolchain go1.21.5 +toolchain go1.23.4 require ( github.com/aws-controllers-k8s/ec2-controller v1.0.7 - github.com/aws-controllers-k8s/runtime v0.34.0 + github.com/aws-controllers-k8s/runtime v0.42.0 github.com/aws-controllers-k8s/sns-controller v1.0.11 github.com/aws/aws-sdk-go v1.49.0 - github.com/ghodss/yaml v1.0.0 - github.com/go-logr/logr v1.4.1 - github.com/google/go-cmp v0.6.0 + github.com/aws/aws-sdk-go-v2 v1.36.1 + github.com/aws/aws-sdk-go-v2/service/elasticache v1.44.12 + github.com/aws/smithy-go v1.22.2 + github.com/go-logr/logr v1.4.2 github.com/pkg/errors v0.9.1 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.4 - go.uber.org/zap v1.26.0 - k8s.io/api v0.29.0 - k8s.io/apimachinery v0.29.0 - k8s.io/client-go v0.29.0 - sigs.k8s.io/controller-runtime v0.17.2 + github.com/stretchr/testify v1.9.0 + k8s.io/api v0.31.0 + k8s.io/apimachinery v0.31.0 + k8s.io/client-go v0.31.0 + sigs.k8s.io/controller-runtime v0.19.0 ) require ( + github.com/aws/aws-sdk-go-v2/config v1.28.6 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.47 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // 
indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cenkalti/backoff/v4 v4.2.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/evanphx/json-patch/v5 v5.8.0 // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-openapi/swag v0.22.4 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/itchyny/gojq v0.12.6 // indirect github.com/itchyny/timefmt-go v0.1.3 // indirect @@ -48,36 +61,35 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.7 // 
indirect - github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.18.0 // indirect - github.com/prometheus/client_model v0.5.0 // indirect - github.com/prometheus/common v0.45.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/samber/lo v1.37.0 // indirect - github.com/stretchr/objx v0.5.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/x448/float16 v0.8.4 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect - golang.org/x/net v0.23.0 // indirect - golang.org/x/oauth2 v0.12.0 // indirect - golang.org/x/sys v0.18.0 // indirect - golang.org/x/term v0.18.0 // indirect - golang.org/x/text v0.14.0 // indirect + go.uber.org/zap v1.26.0 // indirect + golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect + golang.org/x/net v0.33.0 // indirect + golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/term v0.27.0 // indirect + golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.3.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.33.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - 
k8s.io/apiextensions-apiserver v0.29.0 // indirect - k8s.io/component-base v0.29.0 // indirect - k8s.io/klog/v2 v2.110.1 // indirect - k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect - k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + k8s.io/apiextensions-apiserver v0.31.0 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/yaml v1.4.0 // indirect diff --git a/go.sum b/go.sum index c02376b5..69a2275a 100644 --- a/go.sum +++ b/go.sum @@ -1,66 +1,92 @@ github.com/aws-controllers-k8s/ec2-controller v1.0.7 h1:7MDu2bq8NFKbgzzgHYPFRT7bf+SmTchgWuq8ixKK0Jc= github.com/aws-controllers-k8s/ec2-controller v1.0.7/go.mod h1:PvsQehgncHgcu9FiY13M45+GkVsKI98g7G83SrgH7vY= -github.com/aws-controllers-k8s/runtime v0.34.0 h1:pz8MTzz8bY9JMTSMjvWx9SAJ6bJQIEx5ZrXw6wS74mc= -github.com/aws-controllers-k8s/runtime v0.34.0/go.mod h1:aCud9ahYydZ22JhBStUOW2hnzyE1lWPhGAfxW5AW1YU= +github.com/aws-controllers-k8s/runtime v0.42.0 h1:fVb3cOwUtn0ZwTSedapES+Rspb97S8BTxMqXJt6R5uM= +github.com/aws-controllers-k8s/runtime v0.42.0/go.mod h1:Oy0JKvDxZMZ+SVupm4NZVqP00KLIIAMfk93KnOwlt5c= github.com/aws-controllers-k8s/sns-controller v1.0.11 h1:nnkywTHzO64y7RrrfoPNyYf1TOkkQHtlg+S0jEPKUZ8= github.com/aws-controllers-k8s/sns-controller v1.0.11/go.mod h1:ODQIDZR3hHQqcyif4UXVFQfEzTaWU1jqFtVr83K2p9M= github.com/aws/aws-sdk-go v1.49.0 h1:g9BkW1fo9GqKfwg2+zCD+TW/D36Ux+vtfJ8guF4AYmY= github.com/aws/aws-sdk-go v1.49.0/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go-v2 v1.36.1 h1:iTDl5U6oAhkNPba0e1t1hrwAo02ZMqbrGq4k5JBWM5E= +github.com/aws/aws-sdk-go-v2 v1.36.1/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM= +github.com/aws/aws-sdk-go-v2/config v1.28.6 
h1:D89IKtGrs/I3QXOLNTH93NJYtDhm8SYa9Q5CsPShmyo= +github.com/aws/aws-sdk-go-v2/config v1.28.6/go.mod h1:GDzxJ5wyyFSCoLkS+UhGB0dArhb9mI+Co4dHtoTxbko= +github.com/aws/aws-sdk-go-v2/credentials v1.17.47 h1:48bA+3/fCdi2yAwVt+3COvmatZ6jUDNkDTIsqDiMUdw= +github.com/aws/aws-sdk-go-v2/credentials v1.17.47/go.mod h1:+KdckOejLW3Ks3b0E3b5rHsr2f9yuORBum0WPnE5o5w= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 h1:AmoU1pziydclFT/xRV+xXE/Vb8fttJCLRPv8oAkprc0= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21/go.mod h1:AjUdLYe4Tgs6kpH4Bv7uMZo7pottoyHMn4eTcIcneaY= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32 h1:BjUcr3X3K0wZPGFg2bxOWW3VPN8rkE3/61zhP+IHviA= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32/go.mod h1:80+OGC/bgzzFFTUmcuwD0lb4YutwQeKLFpmt6hoWapU= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32 h1:m1GeXHVMJsRsUAqG6HjZWx9dj7F5TR+cF1bjyfYyBd4= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32/go.mod h1:IitoQxGfaKdVLNg0hD8/DXmAqNy0H4K2H2Sf91ti8sI= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= +github.com/aws/aws-sdk-go-v2/service/elasticache v1.44.12 h1:jOcCDjNCWNdJmkXyKiIP/HGorjcdmeOmGLZmU4XiydM= +github.com/aws/aws-sdk-go-v2/service/elasticache v1.44.12/go.mod h1:AwS8/VfBl4lEHfbhvKcP2v8DyMx9olcVvz2Y0ygiWxA= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 h1:50+XsN70RS7dwJ2CkVNXzj7U2L1HKP8nqTd3XWEXBN4= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6/go.mod h1:WqgLmwY7so32kG01zD8CPTJWVWM+TzJoOVHwTg4aPug= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 
h1:rLnYAfXQ3YAccocshIH5mzNNwZBkBo+bP6EhIxak6Hw= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.7/go.mod h1:ZHtuQJ6t9A/+YDuxOLnbryAmITtr8UysSny3qcyvJTc= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 h1:JnhTZR3PiYDNKlXy50/pNeix9aGMo6lLpXwJ1mw8MD4= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6/go.mod h1:URronUEGfXZN1VpdktPSD1EkAL9mfrV+2F4sjH38qOY= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 h1:s4074ZO1Hk8qv65GqNXqDjmkf4HSQqJukaLuuW0TpDA= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.2/go.mod h1:mVggCnIWoM09jP71Wh+ea7+5gAp53q+49wDFs1SW5z8= +github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= +github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew 
v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro= -github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= 
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= 
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM= +github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= 
github.com/itchyny/gojq v0.12.6 h1:VjaFn59Em2wTxDNGcrRkDK9ZHMNa8IksOgL13sLL4d0= @@ -90,8 +116,6 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -99,38 +123,42 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/onsi/ginkgo/v2 v2.14.0 h1:vSmGj2Z5YPb9JwCWT6z6ihcUvDhuXLc3sJiqd3jMKAY= -github.com/onsi/ginkgo/v2 v2.14.0/go.mod h1:JkUdW7JkN0V6rFvsHcJ478egV3XH9NxpD27Hal/PhZw= -github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= -github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= +github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= +github.com/onsi/gomega 
v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= +github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= -github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= -github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= -github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod 
h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/samber/lo v1.37.0 h1:XjVcB8g6tgUp8rsPsJ2CvhClfImrpL04YpQHXeHPhRw= github.com/samber/lo v1.37.0/go.mod h1:9vaz2O4o8oOnK23pd2TrXufcbdbJIa3b6cstBWKpopA= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 
h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -142,19 +170,18 @@ go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU= +golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= -golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -163,38 +190,35 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.27.0 
h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= -golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -205,24 +229,22 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 
h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.29.0 h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A= -k8s.io/api v0.29.0/go.mod h1:sdVmXoz2Bo/cb77Pxi71IPTSErEW32xa4aXwKH7gfBA= -k8s.io/apiextensions-apiserver v0.29.0 h1:0VuspFG7Hj+SxyF/Z/2T0uFbI5gb5LRgEyUVE3Q4lV0= -k8s.io/apiextensions-apiserver v0.29.0/go.mod h1:TKmpy3bTS0mr9pylH0nOt/QzQRrW7/h7yLdRForMZwc= -k8s.io/apimachinery v0.29.0 h1:+ACVktwyicPz0oc6MTMLwa2Pw3ouLAfAon1wPLtG48o= -k8s.io/apimachinery v0.29.0/go.mod h1:eVBxQ/cwiJxH58eK/jd/vAk4mrxmVlnpBH5J2GbMeis= -k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8= -k8s.io/client-go v0.29.0/go.mod h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38= -k8s.io/component-base v0.29.0 h1:T7rjd5wvLnPBV1vC4zWd/iWRbV8Mdxs+nGaoaFzGw3s= -k8s.io/component-base v0.29.0/go.mod h1:sADonFTQ9Zc9yFLghpDpmNXEdHyQmFIGbiuZbqAXQ1M= -k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= -k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.17.2 h1:FwHwD1CTUemg0pW2otk7/U5/i5m2ymzvOXdbeGOUvw0= -sigs.k8s.io/controller-runtime v0.17.2/go.mod h1:+MngTvIQQQhfXtwfdGw/UOQ/aIaqsYywfCINOtwMO/s= +k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo= +k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE= +k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk= +k8s.io/apiextensions-apiserver v0.31.0/go.mod 
h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk= +k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc= +k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8= +k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q= +sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= diff --git a/helm/Chart.yaml b/helm/Chart.yaml index d776d053..ef5f5eb2 100644 --- a/helm/Chart.yaml +++ b/helm/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v1 name: elasticache-chart description: A Helm chart for the ACK service controller for Amazon ElastiCache (ElastiCache) -version: 0.1.0 -appVersion: 0.1.0 +version: 1.6.0 +appVersion: 1.6.0 home: https://github.com/aws-controllers-k8s/elasticache-controller icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png sources: diff --git a/helm/crds/elasticache.services.k8s.aws_cacheclusters.yaml 
b/helm/crds/elasticache.services.k8s.aws_cacheclusters.yaml index bc042534..46abb5da 100644 --- a/helm/crds/elasticache.services.k8s.aws_cacheclusters.yaml +++ b/helm/crds/elasticache.services.k8s.aws_cacheclusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.2 name: cacheclusters.elasticache.services.k8s.aws spec: group: elasticache.services.k8s.aws @@ -57,30 +57,17 @@ spec: description: |- CacheClusterSpec defines the desired state of CacheCluster. - Contains all of the attributes of a specific cluster. properties: authToken: description: |- Reserved parameter. The password used to access a password protected server. - Password constraints: - - Must be only printable ASCII characters. - - Must be at least 16 characters and no more than 128 characters in length. - - - - The only permitted printable special characters are !, &, #, $, ^, <, - >, and -. Other printable special characters cannot be used in the AUTH - token. - - - For more information, see AUTH password (http://redis.io/commands/AUTH) at - http://redis.io/commands/AUTH. properties: key: description: Key is the key within the secret @@ -99,9 +86,9 @@ spec: x-kubernetes-map-type: atomic autoMinorVersionUpgrade: description: |- - If you are running Redis engine version 6.0 or later, set this parameter - to yes if you want to opt-in to the next auto minor version upgrade campaign. - This parameter is disabled for previous versions. + If you are running Valkey 7.2 and above or Redis OSS engine version 6.0 and + above, set this parameter to yes to opt-in to the next auto minor version + upgrade campaign. This parameter is disabled for previous versions. type: boolean azMode: description: |- @@ -109,10 +96,8 @@ spec: Availability Zone or created across multiple Availability Zones in the cluster's region. 
- This parameter is only supported for Memcached clusters. - If the AZMode and PreferredAvailabilityZones are not specified, ElastiCache assumes single-az mode. type: string @@ -121,40 +106,34 @@ spec: The node group (shard) identifier. This parameter is stored as a lowercase string. - Constraints: - - A name must contain from 1 to 50 alphanumeric characters or hyphens. - - The first character must be a letter. - - A name cannot end with a hyphen or contain two consecutive hyphens. type: string cacheNodeType: description: |- The compute and memory capacity of the nodes in the node group (shard). - The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. - - General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported - Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - M6g node types (available only for Redis engine version 5.0.6 onward and - for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + M6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge - T4g node types 
(available only for Redis engine version 5.0.6 onward and - Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, + T4g node types (available only for Redis OSS engine version 5.0.6 onward + and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not recommended. Existing clusters are still supported but @@ -163,18 +142,16 @@ spec: cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge - - Compute optimized: Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) C1 node types: cache.c1.xlarge - - Memory optimized: Current generation: R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported - Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - R6g node types (available only for Redis engine version 5.0.6 onward and - for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + R6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: @@ -185,21 +162,18 @@ spec: R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, 
cache.r3.4xlarge, cache.r3.8xlarge - Additional node type info - - All current generation instance types are created in Amazon VPC by default. + - Valkey or Redis OSS append-only files (AOF) are not supported for T1 + or T2 instances. - - Redis append-only files (AOF) are not supported for T1 or T2 instances. - - - - Redis Multi-AZ with automatic failover is not supported on T1 instances. - + - Valkey or Redis OSS Multi-AZ with automatic failover is not supported + on T1 instances. - - Redis configuration variables appendonly and appendfsync are not supported - on Redis version 2.8.22 and later. + - The configuration variables appendonly and appendfsync are not supported + on Valkey, or on Redis OSS version 2.8.22 and later. type: string cacheParameterGroupName: description: |- @@ -211,7 +185,7 @@ spec: cacheParameterGroupRef: description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax - for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t \ name: my-api" properties: from: @@ -221,13 +195,14 @@ spec: properties: name: type: string + namespace: + type: string type: object type: object cacheSecurityGroupNames: description: |- A list of security group names to associate with this cluster. - Use this parameter only when you are creating a cluster outside of an Amazon Virtual Private Cloud (Amazon VPC). items: @@ -237,19 +212,17 @@ spec: description: |- The name of the subnet group to be used for the cluster. - Use this parameter only when you are creating a cluster in an Amazon Virtual Private Cloud (Amazon VPC). - If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. For more information, - see Subnets and Subnet Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SubnetGroups.html). 
+ see Subnets and Subnet Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SubnetGroups.html). type: string cacheSubnetGroupRef: description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax - for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t \ name: my-api" properties: from: @@ -259,13 +232,14 @@ spec: properties: name: type: string + namespace: + type: string type: object type: object engine: description: |- The name of the cache engine to be used for this cluster. - Valid values for this parameter are: memcached | redis type: string engineVersion: @@ -274,9 +248,8 @@ spec: the supported cache engine versions, use the DescribeCacheEngineVersions operation. - Important: You can upgrade to a newer engine version (see Selecting a Cache - Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement)), + Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SelectEngine.html#VersionManagement)), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version. @@ -284,9 +257,9 @@ spec: ipDiscovery: description: |- The network type you choose when modifying a cluster, either ipv4 | ipv6. - IPv6 is supported for workloads using Redis engine version 6.2 onward or - Memcached engine version 1.6.6 on all instances built on the Nitro system - (http://aws.amazon.com/ec2/nitro/). + IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine + version 6.2 and above or Memcached engine version 1.6.6 and above on all + instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). 
type: string logDeliveryConfigurations: description: Specifies the destination, format and type of the logs. @@ -326,21 +299,21 @@ spec: networkType: description: |- Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads - using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on - all instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). + using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached + engine version 1.6.6 and above on all instances built on the Nitro system + (http://aws.amazon.com/ec2/nitro/). type: string notificationTopicARN: description: |- The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent. - The Amazon SNS topic owner must be the same as the cluster owner. type: string notificationTopicRef: description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax - for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t \ name: my-api" properties: from: @@ -350,16 +323,16 @@ spec: properties: name: type: string + namespace: + type: string type: object type: object numCacheNodes: description: |- The initial number of cache nodes that the cluster has. - - For clusters running Redis, this value must be 1. For clusters running Memcached, - this value must be between 1 and 40. - + For clusters running Valkey or Redis OSS, this value must be 1. For clusters + running Memcached, this value must be between 1 and 40. If you need more than 40 nodes for your Memcached cluster, please fill out the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/ @@ -380,12 +353,10 @@ spec: description: |- The EC2 Availability Zone in which the cluster is created. 
- All nodes belonging to this cluster are placed in the preferred Availability Zone. If you want to create your nodes across multiple Availability Zones, use PreferredAvailabilityZones. - Default: System chosen Availability Zone. type: string preferredAvailabilityZones: @@ -393,22 +364,17 @@ spec: A list of the Availability Zones in which cache nodes are created. The order of the zones in the list is not important. - This option is only supported on Memcached. - If you are creating your cluster in an Amazon VPC (recommended) you can only locate nodes in Availability Zones that are associated with the subnets in the selected subnet group. - The number of Availability Zones listed must equal the value of NumCacheNodes. - If you want all the nodes in the same Availability Zone, use PreferredAvailabilityZone instead, or repeat the Availability Zone multiple times in the list. - Default: System chosen Availability Zones. items: type: string @@ -434,18 +400,16 @@ spec: group as a read replica; otherwise, the cluster is a standalone primary that is not part of any replication group. - If the specified replication group is Multi-AZ enabled and the Availability Zone is not specified, the cluster is created in Availability Zones that provide the best spread of read replicas across Availability Zones. - This parameter is only valid if the Engine parameter is redis. type: string replicationGroupRef: description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax - for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t \ name: my-api" properties: from: @@ -455,13 +419,14 @@ spec: properties: name: type: string + namespace: + type: string type: object type: object securityGroupIDs: description: |- One or more VPC security groups associated with the cluster. 
- Use this parameter only when you are creating a cluster in an Amazon Virtual Private Cloud (Amazon VPC). items: @@ -470,31 +435,28 @@ spec: snapshotARNs: description: |- A single-element string list containing an Amazon Resource Name (ARN) that - uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot - file is used to populate the node group (shard). The Amazon S3 object name - in the ARN cannot contain any commas. - + uniquely identifies a Valkey or Redis OSS RDB snapshot file stored in Amazon + S3. The snapshot file is used to populate the node group (shard). The Amazon + S3 object name in the ARN cannot contain any commas. This parameter is only valid if the Engine parameter is redis. - Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb items: type: string type: array snapshotName: description: |- - The name of a Redis snapshot from which to restore data into the new node - group (shard). The snapshot status changes to restoring while the new node - group (shard) is being created. - + The name of a Valkey or Redis OSS snapshot from which to restore data into + the new node group (shard). The snapshot status changes to restoring while + the new node group (shard) is being created. This parameter is only valid if the Engine parameter is redis. type: string snapshotRef: description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax - for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t \ name: my-api" properties: from: @@ -504,6 +466,8 @@ spec: properties: name: type: string + namespace: + type: string type: object type: object snapshotRetentionLimit: @@ -512,10 +476,8 @@ spec: deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot taken today is retained for 5 days before being deleted. - This parameter is only valid if the Engine parameter is redis. 
- Default: 0 (i.e., automatic backups are disabled for this cache cluster). format: int64 type: integer @@ -524,14 +486,11 @@ spec: The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your node group (shard). - Example: 05:00-09:00 - If you do not specify this parameter, ElastiCache automatically chooses an appropriate time range. - This parameter is only valid if the Engine parameter is redis. type: string tags: @@ -575,7 +534,6 @@ spec: when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. - TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 type: string ownerAccountID: @@ -595,22 +553,19 @@ spec: description: |- A flag that enables encryption at-rest when set to true. - You cannot modify the value of AtRestEncryptionEnabled after the cluster is created. To enable at-rest encryption on a cluster you must set AtRestEncryptionEnabled to true when you create a cluster. - Required: Only available when creating a replication group in an Amazon VPC - using redis version 3.2.6, 4.x or later. - + using Redis OSS version 3.2.6, 4.x or later. Default: false type: boolean authTokenEnabled: description: |- - A flag that enables using an AuthToken (password) when issuing Redis commands. - + A flag that enables using an AuthToken (password) when issuing Valkey or + Redis OSS commands. Default: false type: boolean @@ -634,26 +589,24 @@ spec: description: |- Represents an individual cache node within a cluster. Each cache node runs its own instance of the cluster's protocol-compliant caching software - either - Memcached or Redis. - + Memcached, Valkey or Redis OSS. The following node types are supported by ElastiCache. 
Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. - - General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported - Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - M6g node types (available only for Redis engine version 5.0.6 onward and - for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + M6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge - T4g node types (available only for Redis engine version 5.0.6 onward and - Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, + T4g node types (available only for Redis OSS engine version 5.0.6 onward + and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not recommended. 
Existing clusters are still supported but @@ -662,18 +615,16 @@ spec: cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge - - Compute optimized: Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) C1 node types: cache.c1.xlarge - - Memory optimized: Current generation: R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported - Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - R6g node types (available only for Redis engine version 5.0.6 onward and - for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + R6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: @@ -684,21 +635,18 @@ spec: R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge - Additional node type info - - All current generation instance types are created in Amazon VPC by default. + - Valkey or Redis OSS append-only files (AOF) are not supported for T1 + or T2 instances. - - Redis append-only files (AOF) are not supported for T1 or T2 instances. + - Valkey or Redis OSS Multi-AZ with automatic failover is not supported + on T1 instances. - - - Redis Multi-AZ with automatic failover is not supported on T1 instances. 
- - - - Redis configuration variables appendonly and appendfsync are not supported - on Redis version 2.8.22 and later. + - The configuration variables appendonly and appendfsync are not supported + on Valkey, or on Redis OSS version 2.8.22 and later. properties: cacheNodeCreateTime: format: date-time @@ -714,7 +662,7 @@ spec: endpoint: description: |- Represents the information required for client programs to connect to a cache - node. + node. This value is read-only. properties: address: type: string @@ -799,7 +747,6 @@ spec: to connect to any node in the cluster. The configuration endpoint will always have .cfg in it. - Example: mem-3.9dvc4r.cfg.usw2.cache.amazonaws.com:11211 properties: address: diff --git a/helm/crds/elasticache.services.k8s.aws_cacheparametergroups.yaml b/helm/crds/elasticache.services.k8s.aws_cacheparametergroups.yaml index a873d1bf..18c1fd37 100644 --- a/helm/crds/elasticache.services.k8s.aws_cacheparametergroups.yaml +++ b/helm/crds/elasticache.services.k8s.aws_cacheparametergroups.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.2 name: cacheparametergroups.elasticache.services.k8s.aws spec: group: elasticache.services.k8s.aws @@ -41,7 +41,6 @@ spec: description: |- CacheParameterGroupSpec defines the desired state of CacheParameterGroup. - Represents the output of a CreateCacheParameterGroup operation. properties: cacheParameterGroupFamily: @@ -49,9 +48,8 @@ spec: The name of the cache parameter group family that the cache parameter group can be used with. - Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | - redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x + redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x | redis7 type: string cacheParameterGroupName: description: A user-specified name for the cache parameter group. 
@@ -116,7 +114,6 @@ spec: when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. - TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 type: string ownerAccountID: diff --git a/helm/crds/elasticache.services.k8s.aws_cachesubnetgroups.yaml b/helm/crds/elasticache.services.k8s.aws_cachesubnetgroups.yaml index 8b3f1234..d47ddb62 100644 --- a/helm/crds/elasticache.services.k8s.aws_cachesubnetgroups.yaml +++ b/helm/crds/elasticache.services.k8s.aws_cachesubnetgroups.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.2 name: cachesubnetgroups.elasticache.services.k8s.aws spec: group: elasticache.services.k8s.aws @@ -40,13 +40,10 @@ spec: description: |- CacheSubnetGroupSpec defines the desired state of CacheSubnetGroup. - Represents the output of one of the following operations: - - CreateCacheSubnetGroup - - ModifyCacheSubnetGroup properties: cacheSubnetGroupDescription: @@ -56,10 +53,8 @@ spec: description: |- A name for the cache subnet group. This value is stored as a lowercase string. - Constraints: Must contain no more than 255 alphanumeric characters or hyphens. 
- Example: mysubnetgroup type: string subnetIDs: @@ -71,7 +66,7 @@ spec: items: description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax - for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t \ name: my-api" properties: from: @@ -81,6 +76,8 @@ spec: properties: name: type: string + namespace: + type: string type: object type: object type: array @@ -124,7 +121,6 @@ spec: when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. - TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 type: string ownerAccountID: @@ -219,8 +215,20 @@ spec: subnetOutpostARN: type: string type: object + supportedNetworkTypes: + items: + type: string + type: array type: object type: array + supportedNetworkTypes: + description: |- + Either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Valkey + 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine + version 1.6.6 and above on all instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). 
+ items: + type: string + type: array vpcID: description: |- The Amazon Virtual Private Cloud identifier (VPC ID) of the cache subnet diff --git a/helm/crds/elasticache.services.k8s.aws_replicationgroups.yaml b/helm/crds/elasticache.services.k8s.aws_replicationgroups.yaml index 340168a3..29ec95de 100644 --- a/helm/crds/elasticache.services.k8s.aws_replicationgroups.yaml +++ b/helm/crds/elasticache.services.k8s.aws_replicationgroups.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.2 name: replicationgroups.elasticache.services.k8s.aws spec: group: elasticache.services.k8s.aws @@ -40,23 +40,20 @@ spec: description: |- ReplicationGroupSpec defines the desired state of ReplicationGroup. - - Contains all of the attributes of a specific Redis replication group. + Contains all of the attributes of a specific Valkey or Redis OSS replication + group. properties: atRestEncryptionEnabled: description: |- A flag that enables encryption at rest when set to true. - You cannot modify the value of AtRestEncryptionEnabled after the replication group is created. To enable encryption at rest on a replication group you must set AtRestEncryptionEnabled to true when you create the replication group. - Required: Only available when creating a replication group in an Amazon VPC - using redis version 3.2.6, 4.x or later. - + using Redis OSS version 3.2.6, 4.x or later. Default: false type: boolean @@ -64,31 +61,17 @@ spec: description: |- Reserved parameter. The password used to access a password protected server. - AuthToken can be specified only on replication groups where TransitEncryptionEnabled is true. - For HIPAA compliance, you must specify TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup. - Password constraints: - - Must be only printable ASCII characters. 
- - Must be at least 16 characters and no more than 128 characters in length. - - - - The only permitted printable special characters are !, &, #, $, ^, <, - >, and -. Other printable special characters cannot be used in the AUTH - token. - - - For more information, see AUTH password (http://redis.io/commands/AUTH) at - http://redis.io/commands/AUTH. properties: key: description: Key is the key within the secret @@ -110,10 +93,8 @@ spec: Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails. - - AutomaticFailoverEnabled must be enabled for Redis (cluster mode enabled) - replication groups. - + AutomaticFailoverEnabled must be enabled for Valkey or Redis OSS (cluster + mode enabled) replication groups. Default: false type: boolean @@ -121,69 +102,62 @@ spec: description: |- The compute and memory capacity of the nodes in the node group (shard). - The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. 
- - - General purpose: Current generation: M6g node types (available only - for Redis engine version 5.0.6 onward and for Memcached engine version - 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, - cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge - For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, - cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, - cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available - only for Redis engine version 5.0.6 onward and Memcached engine version - 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 - node types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: - cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not - recommended. Existing clusters are still supported but creation of new - clusters is not supported for these types.) 
T1 node types: cache.t1.micro - M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge - M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge - + - General purpose: Current generation: M7g node types: cache.m7g.large, + cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, + cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + M6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, + cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, + cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, + cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: + cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + T4g node types (available only for Redis OSS engine version 5.0.6 onward + and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, + cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium + T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous + generation: (not recommended. Existing clusters are still supported but + creation of new clusters is not supported for these types.) T1 node types: + cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + cache.m3.2xlarge - Compute optimized: Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) 
C1 node types: cache.c1.xlarge - - - Memory optimized with data tiering: Current generation: R6gd node types - (available only for Redis engine version 6.2 onward). cache.r6gd.xlarge, - cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, - cache.r6gd.16xlarge - - - - Memory optimized: Current generation: R6g node types (available only - for Redis engine version 5.0.6 onward and for Memcached engine version - 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, - cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge - For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, - cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, - cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge - Previous generation: (not recommended. Existing clusters are still supported - but creation of new clusters is not supported for these types.) 
M2 node - types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: - cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge - + - Memory optimized: Current generation: R7g node types: cache.r7g.large, + cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, + cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + R6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, + cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, + cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, + cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + cache.r4.16xlarge Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for + these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + cache.r3.8xlarge Additional node type info - - All current generation instance types are created in Amazon VPC by default. + - Valkey or Redis OSS append-only files (AOF) are not supported for T1 + or T2 instances. - - Redis append-only files (AOF) are not supported for T1 or T2 instances. - - - - Redis Multi-AZ with automatic failover is not supported on T1 instances. + - Valkey or Redis OSS Multi-AZ with automatic failover is not supported + on T1 instances. - - - Redis configuration variables appendonly and appendfsync are not supported - on Redis version 2.8.22 and later. 
+ - The configuration variables appendonly and appendfsync are not supported + on Valkey, or on Redis OSS version 2.8.22 and later. type: string cacheParameterGroupName: description: |- @@ -191,21 +165,20 @@ spec: If this argument is omitted, the default cache parameter group for the specified engine is used. + If you are running Valkey or Redis OSS version 3.2.4 or later, only one node + group (shard), and want to use a default parameter group, we recommend that + you specify the parameter group by name. - If you are running Redis version 3.2.4 or later, only one node group (shard), - and want to use a default parameter group, we recommend that you specify - the parameter group by name. - - - - To create a Redis (cluster mode disabled) replication group, use CacheParameterGroupName=default.redis3.2. + - To create a Valkey or Redis OSS (cluster mode disabled) replication + group, use CacheParameterGroupName=default.redis3.2. - - - To create a Redis (cluster mode enabled) replication group, use CacheParameterGroupName=default.redis3.2.cluster.on. + - To create a Valkey or Redis OSS (cluster mode enabled) replication group, + use CacheParameterGroupName=default.redis3.2.cluster.on. type: string cacheParameterGroupRef: description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax - for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t \ name: my-api" properties: from: @@ -215,6 +188,8 @@ spec: properties: name: type: string + namespace: + type: string type: object type: object cacheSecurityGroupNames: @@ -227,15 +202,14 @@ spec: description: |- The name of the cache subnet group to be used for the replication group. - If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. 
For more information, - see Subnets and Subnet Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SubnetGroups.html). + see Subnets and Subnet Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SubnetGroups.html). type: string cacheSubnetGroupRef: description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax - for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t \ name: my-api" properties: from: @@ -245,13 +219,24 @@ spec: properties: name: type: string + namespace: + type: string type: object type: object + clusterMode: + description: |- + Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you + must first set the cluster mode to Compatible. Compatible mode allows your + Valkey or Redis OSS clients to connect using both cluster mode enabled and + cluster mode disabled. After you migrate all Valkey or Redis OSS clients + to use cluster mode enabled, you can then complete cluster mode configuration + and set the cluster mode to Enabled. + type: string dataTieringEnabled: description: |- Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd - nodes. For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/data-tiering.html). + nodes. For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/data-tiering.html). type: boolean description: description: A user-created description for the replication group. @@ -259,7 +244,7 @@ spec: engine: description: |- The name of the cache engine to be used for the clusters in this replication - group. Must be Redis. + group. The value must be set to Redis. type: string engineVersion: description: |- @@ -267,14 +252,20 @@ spec: replication group. 
To view the supported cache engine versions, use the DescribeCacheEngineVersions operation. - Important: You can upgrade to a newer engine version (see Selecting a Cache - Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement)) + Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SelectEngine.html#VersionManagement)) in the ElastiCache User Guide, but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version. type: string + ipDiscovery: + description: |- + The network type you choose when creating a replication group, either ipv4 + | ipv6. IPv6 is supported for workloads using Valkey 7.2 and above, Redis + OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above + on all instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). + type: string kmsKeyID: description: The ID of the KMS key used to encrypt the disk in the cluster. @@ -317,21 +308,27 @@ spec: multiAZEnabled: description: |- A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. - For more information, see Minimizing Downtime: Multi-AZ (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/AutoFailover.html). + For more information, see Minimizing Downtime: Multi-AZ (http://docs.aws.amazon.com/AmazonElastiCache/latest/dg/AutoFailover.html). type: boolean + networkType: + description: |- + Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads + using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached + engine version 1.6.6 and above on all instances built on the Nitro system + (http://aws.amazon.com/ec2/nitro/). + type: string nodeGroupConfiguration: description: |- A list of node group (shard) configuration options. 
Each node group (shard) configuration has the following members: PrimaryAvailabilityZone, ReplicaAvailabilityZones, ReplicaCount, and Slots. - - If you're creating a Redis (cluster mode disabled) or a Redis (cluster mode - enabled) replication group, you can use this parameter to individually configure - each node group (shard), or you can omit this parameter. However, it is required - when seeding a Redis (cluster mode enabled) cluster from a S3 rdb file. You - must configure each node group (shard) using this parameter because you must - specify the slots for each node group. + If you're creating a Valkey or Redis OSS (cluster mode disabled) or a Valkey + or Redis OSS (cluster mode enabled) replication group, you can use this parameter + to individually configure each node group (shard), or you can omit this parameter. + However, it is required when seeding a Valkey or Redis OSS (cluster mode + enabled) cluster from a S3 rdb file. You must configure each node group (shard) + using this parameter because you must specify the slots for each node group. items: description: |- Node group (shard) configuration options. Each node group (shard) configuration @@ -364,15 +361,14 @@ spec: The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent. - The Amazon SNS topic owner must be the same as the cluster owner. type: string numNodeGroups: description: |- An optional parameter that specifies the number of node groups (shards) for - this Redis (cluster mode enabled) replication group. For Redis (cluster mode - disabled) either omit this parameter or set it to 1. - + this Valkey or Redis OSS (cluster mode enabled) replication group. For Valkey + or Redis OSS (cluster mode disabled) either omit this parameter or set it + to 1. Default: 1 format: int64 @@ -389,60 +385,41 @@ spec: in which clusters are allocated. The primary cluster is created in the first AZ in the list. 
- This parameter is not used if there is more than one node group (shard). You should use NodeGroupConfiguration instead. - If you are creating your replication group in an Amazon VPC (recommended), you can only locate clusters in Availability Zones associated with the subnets in the selected subnet group. - The number of Availability Zones listed must equal the value of NumCacheClusters. - Default: system chosen Availability Zones. items: type: string type: array preferredMaintenanceWindow: description: |- - Specifies the weekly time range during which maintenance on the cluster is - performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi - (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid - values for ddd are: - - Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. - Valid values for ddd are: - - sun - - mon - - tue - - wed - - thu - - fri - - sat - Example: sun:23:00-mon:01:30 type: string primaryClusterID: @@ -450,7 +427,6 @@ spec: The identifier of the cluster that serves as the primary for this replication group. This cluster must already exist and have a status of available. - This parameter is not required if NumCacheClusters, NumNodeGroups, or ReplicasPerNodeGroup is specified. type: string @@ -465,23 +441,18 @@ spec: The replication group identifier. This parameter is stored as a lowercase string. - Constraints: - - A name must contain from 1 to 40 alphanumeric characters or hyphens. - - The first character must be a letter. - - A name cannot end with a hyphen or contain two consecutive hyphens. type: string securityGroupIDs: description: |- One or more Amazon VPC security groups associated with this replication group. - Use this parameter only when you are creating a replication group in an Amazon Virtual Private Cloud (Amazon VPC). 
items: @@ -491,7 +462,7 @@ spec: items: description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax - for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t \ name: my-api" properties: from: @@ -501,19 +472,25 @@ spec: properties: name: type: string + namespace: + type: string type: object type: object type: array + serverlessCacheSnapshotName: + description: |- + The name of the snapshot used to create a replication group. Available for + Valkey, Redis OSS only. + type: string snapshotARNs: description: |- - A list of Amazon Resource Names (ARN) that uniquely identify the Redis RDB - snapshot files stored in Amazon S3. The snapshot files are used to populate - the new replication group. The Amazon S3 object name in the ARN cannot contain - any commas. The new replication group will have the number of node groups - (console: shards) specified by the parameter NumNodeGroups or the number - of node groups configured by NodeGroupConfiguration regardless of the number - of ARNs specified here. - + A list of Amazon Resource Names (ARN) that uniquely identify the Valkey or + Redis OSS RDB snapshot files stored in Amazon S3. The snapshot files are + used to populate the new replication group. The Amazon S3 object name in + the ARN cannot contain any commas. The new replication group will have the + number of node groups (console: shards) specified by the parameter NumNodeGroups + or the number of node groups configured by NodeGroupConfiguration regardless + of the number of ARNs specified here. Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb items: @@ -531,7 +508,6 @@ spec: deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot that was taken today is retained for 5 days before being deleted. - Default: 0 (i.e., automatic backups are disabled for this cluster). 
format: int64 type: integer @@ -540,10 +516,8 @@ spec: The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your node group (shard). - Example: 05:00-09:00 - If you do not specify this parameter, ElastiCache automatically chooses an appropriate time range. type: string @@ -572,30 +546,37 @@ spec: description: |- A flag that enables in-transit encryption when set to true. - - You cannot modify the value of TransitEncryptionEnabled after the cluster - is created. To enable in-transit encryption on a cluster you must set TransitEncryptionEnabled - to true when you create a cluster. - - This parameter is valid only if the Engine parameter is redis, the EngineVersion parameter is 3.2.6, 4.x or later, and the cluster is being created in an Amazon VPC. - If you enable in-transit encryption, you must also specify a value for CacheSubnetGroup. - Required: Only available when creating a replication group in an Amazon VPC - using redis version 3.2.6, 4.x or later. - + using Redis OSS version 3.2.6, 4.x or later. Default: false - For HIPAA compliance, you must specify TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup. type: boolean + transitEncryptionMode: + description: |- + A setting that allows you to migrate your clients to use in-transit encryption, + with no downtime. + + When setting TransitEncryptionEnabled to true, you can set your TransitEncryptionMode + to preferred in the same request, to allow both encrypted and unencrypted + connections at the same time. Once you migrate all your Valkey or Redis OSS + clients to use encrypted connections you can modify the value to required + to allow encrypted connections only. + + Setting TransitEncryptionMode to required is a two-step process that requires + you to first set the TransitEncryptionMode to preferred, after that you can + set TransitEncryptionMode to required. + + This process will not trigger the replacement of the replication group. 
+ type: string userGroupIDs: description: The user group to associate with the replication group. items: @@ -622,7 +603,6 @@ spec: when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. - TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 type: string ownerAccountID: @@ -641,8 +621,8 @@ spec: allowedScaleDownModifications: description: |- A string list, each element of which specifies a cache node type which you - can use to scale your cluster or replication group. When scaling down a Redis - cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, + can use to scale your cluster or replication group. When scaling down a Valkey + or Redis OSS cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, use a value from this list for the CacheNodeType parameter. items: type: string @@ -652,17 +632,16 @@ spec: A string list, each element of which specifies a cache node type which you can use to scale your cluster or replication group. - - When scaling up a Redis cluster or replication group using ModifyCacheCluster - or ModifyReplicationGroup, use a value from this list for the CacheNodeType - parameter. + When scaling up a Valkey or Redis OSS cluster or replication group using + ModifyCacheCluster or ModifyReplicationGroup, use a value from this list + for the CacheNodeType parameter. items: type: string type: array authTokenEnabled: description: |- - A flag that enables using an AuthToken (password) when issuing Redis commands. - + A flag that enables using an AuthToken (password) when issuing Valkey or + Redis OSS commands. 
Default: false type: boolean @@ -672,13 +651,14 @@ spec: type: string autoMinorVersionUpgrade: description: |- - If you are running Redis engine version 6.0 or later, set this parameter - to yes if you want to opt-in to the next auto minor version upgrade campaign. - This parameter is disabled for previous versions. + If you are running Valkey 7.2 and above, or Redis OSS engine version 6.0 + and above, set this parameter to yes if you want to opt-in to the next auto + minor version upgrade campaign. This parameter is disabled for previous versions. type: boolean automaticFailover: - description: Indicates the status of automatic failover for this Redis - replication group. + description: |- + Indicates the status of automatic failover for this Valkey or Redis OSS replication + group. type: string clusterEnabled: description: |- @@ -686,7 +666,6 @@ spec: i.e., whether its data can be partitioned across multiple shards (API/CLI: node groups). - Valid values: true | false type: boolean conditions: @@ -739,7 +718,7 @@ spec: description: |- Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd - nodes. For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/data-tiering.html). + nodes. For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/data-tiering.html). type: string events: description: |- @@ -823,14 +802,14 @@ spec: multiAZ: description: |- A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. - For more information, see Minimizing Downtime: Multi-AZ (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/AutoFailover.html) + For more information, see Minimizing Downtime: Multi-AZ (http://docs.aws.amazon.com/AmazonElastiCache/latest/dg/AutoFailover.html) type: string nodeGroups: description: |- - A list of node groups in this replication group. 
For Redis (cluster mode - disabled) replication groups, this is a single-element list. For Redis (cluster - mode enabled) replication groups, the list contains an entry for each node - group (shard). + A list of node groups in this replication group. For Valkey or Redis OSS + (cluster mode disabled) replication groups, this is a single-element list. + For Valkey or Redis OSS (cluster mode enabled) replication groups, the list + contains an entry for each node group (shard). items: description: |- Represents a collection of cache nodes in a replication group. One node in @@ -857,7 +836,7 @@ spec: readEndpoint: description: |- Represents the information required for client programs to connect to a cache - node. + node. This value is read-only. properties: address: type: string @@ -870,7 +849,7 @@ spec: primaryEndpoint: description: |- Represents the information required for client programs to connect to a cache - node. + node. This value is read-only. properties: address: type: string @@ -881,7 +860,7 @@ spec: readerEndpoint: description: |- Represents the information required for client programs to connect to a cache - node. + node. This value is read-only. properties: address: type: string @@ -904,6 +883,8 @@ spec: type: string automaticFailoverStatus: type: string + clusterMode: + type: string logDeliveryConfigurations: items: description: The log delivery configurations being modified @@ -949,6 +930,10 @@ spec: type: number type: object type: object + transitEncryptionEnabled: + type: boolean + transitEncryptionMode: + type: string userGroups: description: The status of the user group update. 
properties: diff --git a/helm/crds/elasticache.services.k8s.aws_snapshots.yaml b/helm/crds/elasticache.services.k8s.aws_snapshots.yaml index 09c6ca24..8424b2ff 100644 --- a/helm/crds/elasticache.services.k8s.aws_snapshots.yaml +++ b/helm/crds/elasticache.services.k8s.aws_snapshots.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.2 name: snapshots.elasticache.services.k8s.aws spec: group: elasticache.services.k8s.aws @@ -40,9 +40,8 @@ spec: description: |- SnapshotSpec defines the desired state of Snapshot. - - Represents a copy of an entire Redis cluster as of the time when the snapshot - was taken. + Represents a copy of an entire Valkey or Redis OSS cluster as of the time + when the snapshot was taken. properties: cacheClusterID: description: |- @@ -103,7 +102,6 @@ spec: when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. - TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 type: string ownerAccountID: @@ -121,14 +119,14 @@ spec: type: object autoMinorVersionUpgrade: description: |- - If you are running Redis engine version 6.0 or later, set this parameter - to yes if you want to opt-in to the next auto minor version upgrade campaign. - This parameter is disabled for previous versions. + If you are running Valkey 7.2 and above or Redis OSS engine version 6.0 and + above, set this parameter to yes if you want to opt-in to the next auto minor + version upgrade campaign. This parameter is disabled for previous versions. type: boolean automaticFailover: description: |- - Indicates the status of automatic failover for the source Redis replication - group. 
+ Indicates the status of automatic failover for the source Valkey or Redis + OSS replication group. type: string cacheClusterCreateTime: description: The date and time when the source cluster was created. @@ -138,70 +136,62 @@ spec: description: |- The name of the compute and memory capacity node type for the source cluster. - The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. - - * General purpose: Current generation: M6g node types (available only - for Redis engine version 5.0.6 onward and for Memcached engine version - 1.5.16 onward). cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, - cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge - For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, - cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, - cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available - only for Redis engine version 5.0.6 onward and Memcached engine version - 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 - node types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: - cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not - recommended. Existing clusters are still supported but creation of new - clusters is not supported for these types.) 
T1 node types: cache.t1.micro - M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge - M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge - + * General purpose: Current generation: M7g node types: cache.m7g.large, + cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, + cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + M6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, + cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, + cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, + cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: + cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + T4g node types (available only for Redis OSS engine version 5.0.6 onward + and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, + cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium + T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous + generation: (not recommended. Existing clusters are still supported but + creation of new clusters is not supported for these types.) T1 node types: + cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + cache.m3.2xlarge * Compute optimized: Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) 
C1 node types: cache.c1.xlarge - - * Memory optimized with data tiering: Current generation: R6gd node types - (available only for Redis engine version 6.2 onward). cache.r6gd.xlarge, - cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, - cache.r6gd.16xlarge - - - * Memory optimized: Current generation: R6g node types (available only - for Redis engine version 5.0.6 onward and for Memcached engine version - 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, - cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge - For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, - cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, - cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge - Previous generation: (not recommended. Existing clusters are still supported - but creation of new clusters is not supported for these types.) 
M2 node - types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: - cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge - + * Memory optimized: Current generation: R7g node types: cache.r7g.large, + cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, + cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + R6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, + cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, + cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, + cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + cache.r4.16xlarge Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for + these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + cache.r3.8xlarge Additional node type info - * All current generation instance types are created in Amazon VPC by default. + * Valkey or Redis OSS append-only files (AOF) are not supported for T1 + or T2 instances. - * Redis append-only files (AOF) are not supported for T1 or T2 instances. - - - * Redis Multi-AZ with automatic failover is not supported on T1 instances. - + * Valkey or Redis OSS Multi-AZ with automatic failover is not supported + on T1 instances. - * Redis configuration variables appendonly and appendfsync are not supported - on Redis version 2.8.22 and later. 
+ * The configuration variables appendonly and appendfsync are not supported + on Valkey, or on Redis OSS version 2.8.22 and later. type: string cacheParameterGroupName: description: The cache parameter group that is associated with the @@ -250,7 +240,7 @@ spec: description: |- Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd - nodes. For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/data-tiering.html). + nodes. For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/data-tiering.html). type: string engine: description: The name of the cache engine (memcached or redis) used @@ -312,9 +302,8 @@ spec: description: |- The number of cache nodes in the source cluster. - - For clusters running Redis, this value must be 1. For clusters running Memcached, - this value must be between 1 and 40. + For clusters running Valkey or Redis OSS, this value must be 1. For clusters + running Memcached, this value must be between 1 and 40. format: int64 type: integer numNodeGroups: @@ -339,31 +328,22 @@ spec: performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. - Valid values for ddd are: - * sun - * mon - * tue - * wed - * thu - * fri - * sat - Example: sun:23:00-mon:01:30 type: string preferredOutpostARN: @@ -377,13 +357,11 @@ spec: For an automatic snapshot, the number of days for which ElastiCache retains the snapshot before deleting it. - For manual snapshots, this field reflects the SnapshotRetentionLimit for the source cluster when the snapshot was created. This field is otherwise ignored: Manual snapshots do not expire, and can only be deleted using the DeleteSnapshot operation. - Important If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off. 
format: int64 diff --git a/helm/crds/elasticache.services.k8s.aws_usergroups.yaml b/helm/crds/elasticache.services.k8s.aws_usergroups.yaml index fb0c481a..4761393d 100644 --- a/helm/crds/elasticache.services.k8s.aws_usergroups.yaml +++ b/helm/crds/elasticache.services.k8s.aws_usergroups.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.2 name: usergroups.elasticache.services.k8s.aws spec: group: elasticache.services.k8s.aws @@ -39,12 +39,13 @@ spec: spec: properties: engine: - description: The current supported value is Redis. + description: The current supported value is Redis user. type: string tags: description: |- A list of tags to be added to this resource. A tag is a key-value pair. A - tag key must be accompanied by a tag value, although null is accepted. + tag key must be accompanied by a tag value, although null is accepted. Available + for Valkey and Redis OSS only. items: description: |- A tag that can be added to an ElastiCache cluster or replication group. Tags @@ -89,7 +90,6 @@ spec: when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. - TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 type: string ownerAccountID: @@ -141,7 +141,8 @@ spec: type: object type: array minimumEngineVersion: - description: The minimum engine version required, which is Redis 6.0 + description: The minimum engine version required, which is Redis OSS + 6.0 type: string pendingChanges: description: A list of updates being applied to the user group. 
@@ -161,6 +162,13 @@ spec: items: type: string type: array + serverlessCaches: + description: |- + Indicates which serverless caches the specified user group is associated + with. Available for Valkey, Redis OSS and Serverless Memcached only. + items: + type: string + type: array status: description: Indicates user group status. Can be "creating", "active", "modifying", "deleting". diff --git a/helm/crds/elasticache.services.k8s.aws_users.yaml b/helm/crds/elasticache.services.k8s.aws_users.yaml index 1507f204..20322f8c 100644 --- a/helm/crds/elasticache.services.k8s.aws_users.yaml +++ b/helm/crds/elasticache.services.k8s.aws_users.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.2 name: users.elasticache.services.k8s.aws spec: group: elasticache.services.k8s.aws @@ -41,6 +41,16 @@ spec: accessString: description: Access permissions string used for this user. type: string + authenticationMode: + description: Specifies how to authenticate the user. + properties: + passwords: + items: + type: string + type: array + type_: + type: string + type: object engine: description: The current supported value is Redis. type: string @@ -120,7 +130,6 @@ spec: when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. - TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 type: string ownerAccountID: @@ -187,7 +196,8 @@ spec: description: Access permissions string used for this user. 
type: string minimumEngineVersion: - description: The minimum engine version required, which is Redis 6.0 + description: The minimum engine version required, which is Redis OSS + 6.0 type: string status: description: Indicates the user status. Can be "active", "modifying" diff --git a/helm/crds/services.k8s.aws_adoptedresources.yaml b/helm/crds/services.k8s.aws_adoptedresources.yaml index 65eff735..b7be3224 100644 --- a/helm/crds/services.k8s.aws_adoptedresources.yaml +++ b/helm/crds/services.k8s.aws_adoptedresources.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.2 name: adoptedresources.services.k8s.aws spec: group: services.k8s.aws @@ -78,11 +78,9 @@ spec: automatically converts this to an arbitrary string-string map. https://github.com/kubernetes-sigs/controller-tools/issues/385 - Active discussion about inclusion of this field in the spec is happening in this PR: https://github.com/kubernetes-sigs/controller-tools/pull/395 - Until this is allowed, or if it never is, we will produce a subset of the object meta that contains only the fields which the user is allowed to modify in the metadata. properties: @@ -105,13 +103,11 @@ spec: and may be truncated by the length of the suffix required to make the value unique on the server. - If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). - Applied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency type: string @@ -140,7 +136,6 @@ spec: Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. - Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces diff --git a/helm/crds/services.k8s.aws_fieldexports.yaml b/helm/crds/services.k8s.aws_fieldexports.yaml index 4d3a8f1d..49b4f383 100644 --- a/helm/crds/services.k8s.aws_fieldexports.yaml +++ b/helm/crds/services.k8s.aws_fieldexports.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.2 name: fieldexports.services.k8s.aws spec: group: services.k8s.aws diff --git a/helm/templates/NOTES.txt b/helm/templates/NOTES.txt index 752add27..3b3f27dd 100644 --- a/helm/templates/NOTES.txt +++ b/helm/templates/NOTES.txt @@ -1,5 +1,5 @@ {{ .Chart.Name }} has been installed. -This chart deploys "public.ecr.aws/aws-controllers-k8s/elasticache-controller:0.1.0". +This chart deploys "public.ecr.aws/aws-controllers-k8s/elasticache-controller:1.6.0". 
Check its status by running: kubectl --namespace {{ .Release.Namespace }} get pods -l "app.kubernetes.io/instance={{ .Release.Name }}" diff --git a/helm/templates/_helpers.tpl b/helm/templates/_helpers.tpl index cb8aba9a..e62eff6b 100644 --- a/helm/templates/_helpers.tpl +++ b/helm/templates/_helpers.tpl @@ -55,6 +55,7 @@ rules: - "" resources: - configmaps + - secrets verbs: - get - list @@ -68,39 +69,12 @@ rules: - get - list - watch -- apiGroups: - - "" - resources: - - secrets - verbs: - - get - - list - - patch - - watch - apiGroups: - ec2.services.k8s.aws resources: - securitygroups - verbs: - - get - - list -- apiGroups: - - ec2.services.k8s.aws - resources: - securitygroups/status - verbs: - - get - - list -- apiGroups: - - ec2.services.k8s.aws - resources: - subnets - verbs: - - get - - list -- apiGroups: - - ec2.services.k8s.aws - resources: - subnets/status verbs: - get @@ -109,125 +83,11 @@ rules: - elasticache.services.k8s.aws resources: - cacheclusters - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - elasticache.services.k8s.aws - resources: - - cacheclusters/status - verbs: - - get - - patch - - update -- apiGroups: - - elasticache.services.k8s.aws - resources: - cacheparametergroups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - elasticache.services.k8s.aws - resources: - - cacheparametergroups/status - verbs: - - get - - patch - - update -- apiGroups: - - elasticache.services.k8s.aws - resources: - cachesubnetgroups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - elasticache.services.k8s.aws - resources: - - cachesubnetgroups/status - verbs: - - get - - patch - - update -- apiGroups: - - elasticache.services.k8s.aws - resources: - replicationgroups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - elasticache.services.k8s.aws - resources: - - 
replicationgroups/status - verbs: - - get - - patch - - update -- apiGroups: - - elasticache.services.k8s.aws - resources: - snapshots - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - elasticache.services.k8s.aws - resources: - - snapshots/status - verbs: - - get - - patch - - update -- apiGroups: - - elasticache.services.k8s.aws - resources: - usergroups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - elasticache.services.k8s.aws - resources: - - usergroups/status - verbs: - - get - - patch - - update -- apiGroups: - - elasticache.services.k8s.aws - resources: - users verbs: - create @@ -240,6 +100,12 @@ rules: - apiGroups: - elasticache.services.k8s.aws resources: + - cacheclusters/status + - cacheparametergroups/status + - cachesubnetgroups/status + - replicationgroups/status + - snapshots/status + - usergroups/status - users/status verbs: - get @@ -249,25 +115,6 @@ rules: - services.k8s.aws resources: - adoptedresources - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - services.k8s.aws - resources: - - adoptedresources/status - verbs: - - get - - patch - - update -- apiGroups: - - services.k8s.aws - resources: - fieldexports verbs: - create @@ -280,6 +127,7 @@ rules: - apiGroups: - services.k8s.aws resources: + - adoptedresources/status - fieldexports/status verbs: - get @@ -289,14 +137,17 @@ rules: - sns.services.k8s.aws resources: - topics - verbs: - - get - - list -- apiGroups: - - sns.services.k8s.aws - resources: - topics/status verbs: - get - list -{{- end }} \ No newline at end of file +{{- end }} + +{{/* Convert k/v map to string like: "key1=value1,key2=value2,..." 
*/}} +{{- define "ack-elasticache-controller.feature-gates" -}} +{{- $list := list -}} +{{- range $k, $v := .Values.featureGates -}} +{{- $list = append $list (printf "%s=%s" $k ( $v | toString)) -}} +{{- end -}} +{{ join "," $list }} +{{- end -}} diff --git a/helm/templates/caches-role-binding.yaml b/helm/templates/caches-role-binding.yaml index 39da502d..86e86e00 100644 --- a/helm/templates/caches-role-binding.yaml +++ b/helm/templates/caches-role-binding.yaml @@ -8,7 +8,7 @@ roleRef: name: ack-namespaces-cache-elasticache-controller subjects: - kind: ServiceAccount - name: ack-elasticache-controller + name: {{ include "ack-elasticache-controller.service-account.name" . }} namespace: {{ .Release.Namespace }} --- apiVersion: rbac.authorization.k8s.io/v1 @@ -22,5 +22,5 @@ roleRef: name: ack-configmaps-cache-elasticache-controller subjects: - kind: ServiceAccount - name: ack-elasticache-controller - namespace: {{ .Release.Namespace }} \ No newline at end of file + name: {{ include "ack-elasticache-controller.service-account.name" . 
}} + namespace: {{ .Release.Namespace }} diff --git a/helm/templates/deployment.yaml b/helm/templates/deployment.yaml index 95f84407..da135c6f 100644 --- a/helm/templates/deployment.yaml +++ b/helm/templates/deployment.yaml @@ -79,6 +79,10 @@ spec: {{- range $key, $value := .Values.reconcile.resourceMaxConcurrentSyncs }} - --reconcile-resource-max-concurrent-syncs - "$(RECONCILE_RESOURCE_MAX_CONCURRENT_SYNCS_{{ $key | upper }})" +{{- end }} +{{- if .Values.featureGates}} + - --feature-gates + - "$(FEATURE_GATES)" {{- end }} image: {{ .Values.image.repository }}:{{ .Values.image.tag }} imagePullPolicy: {{ .Values.image.pullPolicy }} @@ -122,6 +126,10 @@ spec: {{- range $key, $value := .Values.reconcile.resourceMaxConcurrentSyncs }} - name: RECONCILE_RESOURCE_MAX_CONCURRENT_SYNCS_{{ $key | upper }} value: {{ $key }}={{ $value }} +{{- end }} +{{- if .Values.featureGates}} + - name: FEATURE_GATES + value: {{ include "ack-elasticache-controller.feature-gates" . }} {{- end }} {{- if .Values.aws.credentials.secretName }} - name: AWS_SHARED_CREDENTIALS_FILE @@ -144,6 +152,7 @@ spec: securityContext: allowPrivilegeEscalation: false privileged: false + readOnlyRootFilesystem: true runAsNonRoot: true capabilities: drop: diff --git a/helm/values.schema.json b/helm/values.schema.json index 742163eb..7ccb485d 100644 --- a/helm/values.schema.json +++ b/helm/values.schema.json @@ -268,6 +268,13 @@ "type": "object" } }, + "featureGates": { + "description": "Feature gates settings", + "type": "object", + "additionalProperties": { + "type": "boolean" + } + }, "required": [ "image", "deployment", diff --git a/helm/values.yaml b/helm/values.yaml index 8522d9ad..f1221ed7 100644 --- a/helm/values.yaml +++ b/helm/values.yaml @@ -4,7 +4,7 @@ image: repository: public.ecr.aws/aws-controllers-k8s/elasticache-controller - tag: 0.1.0 + tag: 1.6.0 pullPolicy: IfNotPresent pullSecrets: [] @@ -153,3 +153,16 @@ leaderElection: # will attempt to use the namespace of the service account mounted to 
the Controller # pod. namespace: "" + +# Configuration for feature gates. These are optional controller features that +# can be individually enabled ("true") or disabled ("false") by adding key/value +# pairs below. +featureGates: + # Enables the Service level granularity for CARM. See https://github.com/aws-controllers-k8s/community/issues/2031 + ServiceLevelCARM: false + # Enables the Team level granularity for CARM. See https://github.com/aws-controllers-k8s/community/issues/2031 + TeamLevelCARM: false + # Enable ReadOnlyResources feature/annotation. + ReadOnlyResources: false + # Enable ResourceAdoption feature/annotation. + ResourceAdoption: false \ No newline at end of file diff --git a/pkg/resource/cache_cluster/custom_set_output.go b/pkg/resource/cache_cluster/custom_set_output.go index 61e511c5..312835ea 100644 --- a/pkg/resource/cache_cluster/custom_set_output.go +++ b/pkg/resource/cache_cluster/custom_set_output.go @@ -12,70 +12,3 @@ // permissions and limitations under the License. package cache_cluster - -import ( - "context" - "encoding/json" - - "github.com/aws/aws-sdk-go/service/elasticache" - - svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" -) - -func (rm *resourceManager) customCreateCacheClusterSetOutput( - _ context.Context, - r *resource, - _ *elasticache.CreateCacheClusterOutput, - ko *svcapitypes.CacheCluster, -) (*svcapitypes.CacheCluster, error) { - rm.setAnnotationsFields(r, ko) - return ko, nil -} - -func (rm *resourceManager) customModifyCacheClusterSetOutput( - _ context.Context, - r *resource, - _ *elasticache.ModifyCacheClusterOutput, - ko *svcapitypes.CacheCluster, -) (*svcapitypes.CacheCluster, error) { - rm.setAnnotationsFields(r, ko) - return ko, nil -} - -// setAnnotationsFields copies the desired object's annotations, populates any -// relevant fields, and sets the latest object's annotations to this newly populated map. 
-// Fields that are handled by custom modify implementation are not set here. -// This should only be called upon a successful create or modify call. -func (rm *resourceManager) setAnnotationsFields( - r *resource, - ko *svcapitypes.CacheCluster, -) { - annotations := getAnnotationsFields(r, ko) - annotations[AnnotationLastRequestedPAZs] = marshalAsAnnotation(r.ko.Spec.PreferredAvailabilityZones) - ko.ObjectMeta.Annotations = annotations -} - -// getAnnotationsFields return the annotations map that would be used to set the fields. -func getAnnotationsFields( - r *resource, - ko *svcapitypes.CacheCluster, -) map[string]string { - if ko.ObjectMeta.Annotations != nil { - return ko.ObjectMeta.Annotations - } - desiredAnnotations := r.ko.ObjectMeta.GetAnnotations() - annotations := make(map[string]string) - for k, v := range desiredAnnotations { - annotations[k] = v - } - ko.ObjectMeta.Annotations = annotations - return annotations -} - -func marshalAsAnnotation(val interface{}) string { - data, err := json.Marshal(val) - if err != nil { - return "null" - } - return string(data) -} diff --git a/pkg/resource/cache_cluster/custom_update_input_test.go b/pkg/resource/cache_cluster/custom_update_input_test.go deleted file mode 100644 index a535948f..00000000 --- a/pkg/resource/cache_cluster/custom_update_input_test.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. 
- -package cache_cluster - -import ( - "testing" - - ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" - ackmetrics "github.com/aws-controllers-k8s/runtime/pkg/metrics" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/stretchr/testify/assert" - "go.uber.org/zap/zapcore" - ctrlrtzap "sigs.k8s.io/controller-runtime/pkg/log/zap" - - svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" -) - -func resourceWithSpec(spec svcapitypes.CacheClusterSpec) *resource { - return newResource(spec, svcapitypes.CacheClusterStatus{}) -} - -func newResource(spec svcapitypes.CacheClusterSpec, status svcapitypes.CacheClusterStatus) *resource { - return &resource{ - ko: &svcapitypes.CacheCluster{ - Spec: spec, - Status: status, - }, - } -} - -func provideResourceManager() *resourceManager { - zapOptions := ctrlrtzap.Options{ - Development: true, - Level: zapcore.InfoLevel, - } - fakeLogger := ctrlrtzap.New(ctrlrtzap.UseFlagOptions(&zapOptions)) - return &resourceManager{ - log: fakeLogger, - metrics: ackmetrics.NewMetrics("elasticache"), - } -} - -func TestCustomUpdateInput(t *testing.T) { - tests := []struct { - description string - desired *resource - latest *resource - makeDelta func() *ackcompare.Delta - - expectedPayload *elasticache.ModifyCacheClusterInput - expectedErr string - }{ - { - description: "no changes", - desired: resourceWithSpec(svcapitypes.CacheClusterSpec{ - NumCacheNodes: aws.Int64(1), - }), - latest: resourceWithSpec(svcapitypes.CacheClusterSpec{ - NumCacheNodes: aws.Int64(1), - }), - makeDelta: ackcompare.NewDelta, - - expectedPayload: &elasticache.ModifyCacheClusterInput{}, - }, - { - description: "increase NumCacheNodes with new PreferredAvailabilityZones", - desired: resourceWithSpec(svcapitypes.CacheClusterSpec{ - NumCacheNodes: aws.Int64(3), - PreferredAvailabilityZones: aws.StringSlice([]string{"us-west-2a", "us-west-2b"}), - }), - latest: 
resourceWithSpec(svcapitypes.CacheClusterSpec{ - NumCacheNodes: aws.Int64(1), - }), - makeDelta: func() *ackcompare.Delta { - var delta ackcompare.Delta - delta.Add("Spec.NumCacheNodes", aws.Int64(3), aws.Int64(1)) - delta.Add("Spec.PreferredAvailabilityZones", aws.StringSlice([]string{"us-west-2a", "us-west-2b"}), nil) - return &delta - }, - - expectedPayload: &elasticache.ModifyCacheClusterInput{ - NewAvailabilityZones: aws.StringSlice([]string{"us-west-2a", "us-west-2b"}), - }, - }, - { - description: "increase NumCacheNodes again with new PreferredAvailabilityZones", - desired: resourceWithSpec(svcapitypes.CacheClusterSpec{ - NumCacheNodes: aws.Int64(5), - PreferredAvailabilityZones: aws.StringSlice([]string{"us-west-2a", "us-west-2b", "us-west-2c", "us-west-2b"}), - }), - latest: resourceWithSpec(svcapitypes.CacheClusterSpec{ - NumCacheNodes: aws.Int64(3), - PreferredAvailabilityZones: aws.StringSlice([]string{"us-west-2a", "us-west-2b"}), - }), - makeDelta: func() *ackcompare.Delta { - var delta ackcompare.Delta - delta.Add("Spec.NumCacheNodes", aws.Int64(5), aws.Int64(3)) - delta.Add("Spec.PreferredAvailabilityZones", aws.StringSlice([]string{"us-west-2a", "us-west-2b", "us-west-2c", "us-west-2b"}), - aws.StringSlice([]string{"us-west-2a", "us-west-2b"})) - return &delta - }, - - expectedPayload: &elasticache.ModifyCacheClusterInput{ - NewAvailabilityZones: aws.StringSlice([]string{"us-west-2c", "us-west-2b"}), - }, - }, - { - description: "decrease NumCacheNodes", - desired: resourceWithSpec(svcapitypes.CacheClusterSpec{ - NumCacheNodes: aws.Int64(3), - }), - latest: resourceWithSpec(svcapitypes.CacheClusterSpec{ - NumCacheNodes: aws.Int64(5), - }), - makeDelta: func() *ackcompare.Delta { - var delta ackcompare.Delta - delta.Add("Spec.NumCacheNodes", aws.Int64(3), aws.Int64(5)) - return &delta - }, - expectedPayload: &elasticache.ModifyCacheClusterInput{ - CacheNodeIdsToRemove: aws.StringSlice([]string{"0005", "0004"}), - }, - }, - { - description: 
"PreferredAvailabilityZones changed with no change in NumCacheNodes", - desired: resourceWithSpec(svcapitypes.CacheClusterSpec{ - PreferredAvailabilityZones: aws.StringSlice([]string{"us-west-2c"}), - NumCacheNodes: aws.Int64(3), - }), - latest: resourceWithSpec(svcapitypes.CacheClusterSpec{ - NumCacheNodes: aws.Int64(3), - }), - makeDelta: func() *ackcompare.Delta { - var delta ackcompare.Delta - delta.Add("Spec.PreferredAvailabilityZones", aws.StringSlice([]string{"us-west-2a"}), nil) - return &delta - }, - expectedErr: "spec.preferredAvailabilityZones can only be changed when new nodes are being added via spec.numCacheNodes", - }, - { - description: "decrease NumCacheNodes when a modification is pending", - desired: resourceWithSpec(svcapitypes.CacheClusterSpec{ - NumCacheNodes: aws.Int64(3), - }), - latest: newResource(svcapitypes.CacheClusterSpec{ - NumCacheNodes: aws.Int64(5), - PreferredAvailabilityZones: aws.StringSlice([]string{"us-west-2a", "us-west-2b"}), - }, svcapitypes.CacheClusterStatus{ - PendingModifiedValues: &svcapitypes.PendingModifiedValues{ - NumCacheNodes: aws.Int64(7), - }, - }), - makeDelta: func() *ackcompare.Delta { - var delta ackcompare.Delta - delta.Add("Spec.NumCacheNodes", aws.Int64(3), aws.Int64(5)) - return &delta - }, - - expectedPayload: &elasticache.ModifyCacheClusterInput{ - CacheNodeIdsToRemove: aws.StringSlice([]string{"0007", "0006", "0005", "0004"}), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.description, func(t *testing.T) { - assert := assert.New(t) - rm := provideResourceManager() - var input elasticache.ModifyCacheClusterInput - err := rm.updateCacheClusterPayload(&input, tt.desired, tt.latest, tt.makeDelta()) - if tt.expectedErr != "" { - assert.NotNil(err) - assert.Contains(err.Error(), tt.expectedErr) - return - } - assert.Nil(err) - assert.Equal(tt.expectedPayload, &input) - }) - } -} diff --git a/pkg/resource/cache_cluster/delta.go b/pkg/resource/cache_cluster/delta.go index f2198593..cbf4dd7d 100644 
--- a/pkg/resource/cache_cluster/delta.go +++ b/pkg/resource/cache_cluster/delta.go @@ -126,6 +126,13 @@ func newResourceDelta( delta.Add("Spec.IPDiscovery", a.ko.Spec.IPDiscovery, b.ko.Spec.IPDiscovery) } } + if len(a.ko.Spec.LogDeliveryConfigurations) != len(b.ko.Spec.LogDeliveryConfigurations) { + delta.Add("Spec.LogDeliveryConfigurations", a.ko.Spec.LogDeliveryConfigurations, b.ko.Spec.LogDeliveryConfigurations) + } else if len(a.ko.Spec.LogDeliveryConfigurations) > 0 { + if !reflect.DeepEqual(a.ko.Spec.LogDeliveryConfigurations, b.ko.Spec.LogDeliveryConfigurations) { + delta.Add("Spec.LogDeliveryConfigurations", a.ko.Spec.LogDeliveryConfigurations, b.ko.Spec.LogDeliveryConfigurations) + } + } if ackcompare.HasNilDifference(a.ko.Spec.NetworkType, b.ko.Spec.NetworkType) { delta.Add("Spec.NetworkType", a.ko.Spec.NetworkType, b.ko.Spec.NetworkType) } else if a.ko.Spec.NetworkType != nil && b.ko.Spec.NetworkType != nil { diff --git a/pkg/resource/cache_cluster/descriptor.go b/pkg/resource/cache_cluster/descriptor.go index 0ef12b91..6b048972 100644 --- a/pkg/resource/cache_cluster/descriptor.go +++ b/pkg/resource/cache_cluster/descriptor.go @@ -28,7 +28,7 @@ import ( ) const ( - finalizerString = "finalizers.elasticache.services.k8s.aws/CacheCluster" + FinalizerString = "finalizers.elasticache.services.k8s.aws/CacheCluster" ) var ( @@ -88,8 +88,8 @@ func (d *resourceDescriptor) IsManaged( // https://github.com/kubernetes-sigs/controller-runtime/issues/994 is // fixed. This should be able to be: // - // return k8sctrlutil.ContainsFinalizer(obj, finalizerString) - return containsFinalizer(obj, finalizerString) + // return k8sctrlutil.ContainsFinalizer(obj, FinalizerString) + return containsFinalizer(obj, FinalizerString) } // Remove once https://github.com/kubernetes-sigs/controller-runtime/issues/994 @@ -118,7 +118,7 @@ func (d *resourceDescriptor) MarkManaged( // Should not happen. 
If it does, there is a bug in the code panic("nil RuntimeMetaObject in AWSResource") } - k8sctrlutil.AddFinalizer(obj, finalizerString) + k8sctrlutil.AddFinalizer(obj, FinalizerString) } // MarkUnmanaged removes the supplied resource from management by ACK. What @@ -133,7 +133,7 @@ func (d *resourceDescriptor) MarkUnmanaged( // Should not happen. If it does, there is a bug in the code panic("nil RuntimeMetaObject in AWSResource") } - k8sctrlutil.RemoveFinalizer(obj, finalizerString) + k8sctrlutil.RemoveFinalizer(obj, FinalizerString) } // MarkAdopted places descriptors on the custom resource that indicate the diff --git a/pkg/resource/cache_cluster/hooks.go b/pkg/resource/cache_cluster/hooks.go index b0afced3..7a4b8edf 100644 --- a/pkg/resource/cache_cluster/hooks.go +++ b/pkg/resource/cache_cluster/hooks.go @@ -15,13 +15,14 @@ package cache_cluster import ( "context" + "encoding/json" "errors" "fmt" "slices" ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" "github.com/aws-controllers-k8s/elasticache-controller/pkg/util" @@ -107,7 +108,7 @@ func (rm *resourceManager) updateCacheClusterPayload(input *svcsdk.ModifyCacheCl if nodesDelta > 0 { for i := numNodes; i > numNodes-nodesDelta; i-- { nodeID := fmt.Sprintf("%04d", i) - input.CacheNodeIdsToRemove = append(input.CacheNodeIdsToRemove, &nodeID) + input.CacheNodeIdsToRemove = append(input.CacheNodeIdsToRemove, nodeID) } } } @@ -127,7 +128,71 @@ func (rm *resourceManager) updateCacheClusterPayload(input *svcsdk.ModifyCacheCl if len(desiredSpec.PreferredAvailabilityZones) <= oldAZsLen { return errors.New("newly specified AZs in spec.preferredAvailabilityZones must match the number of cache nodes being added") } - input.NewAvailabilityZones = 
desiredSpec.PreferredAvailabilityZones[oldAZsLen:] + preferredAvailability := make([]string, 0, len(desiredSpec.PreferredAvailabilityZones[oldAZsLen:])) + for _, az := range desiredSpec.PreferredAvailabilityZones[oldAZsLen:] { + if az != nil { + preferredAvailability = append(preferredAvailability, *az) + } + } + input.NewAvailabilityZones = preferredAvailability } return nil } + +func (rm *resourceManager) customCreateCacheClusterSetOutput( + _ context.Context, + r *resource, + _ *svcsdk.CreateCacheClusterOutput, + ko *svcapitypes.CacheCluster, +) (*svcapitypes.CacheCluster, error) { + rm.setAnnotationsFields(r, ko) + return ko, nil +} + +func (rm *resourceManager) customModifyCacheClusterSetOutput( + _ context.Context, + r *resource, + _ *svcsdk.ModifyCacheClusterOutput, + ko *svcapitypes.CacheCluster, +) (*svcapitypes.CacheCluster, error) { + rm.setAnnotationsFields(r, ko) + return ko, nil +} + +// setAnnotationsFields copies the desired object's annotations, populates any +// relevant fields, and sets the latest object's annotations to this newly populated map. +// Fields that are handled by custom modify implementation are not set here. +// This should only be called upon a successful create or modify call. +func (rm *resourceManager) setAnnotationsFields( + r *resource, + ko *svcapitypes.CacheCluster, +) { + annotations := getAnnotationsFields(r, ko) + annotations[AnnotationLastRequestedPAZs] = marshalAsAnnotation(r.ko.Spec.PreferredAvailabilityZones) + ko.ObjectMeta.Annotations = annotations +} + +// getAnnotationsFields return the annotations map that would be used to set the fields. 
+func getAnnotationsFields( + r *resource, + ko *svcapitypes.CacheCluster, +) map[string]string { + if ko.ObjectMeta.Annotations != nil { + return ko.ObjectMeta.Annotations + } + desiredAnnotations := r.ko.ObjectMeta.GetAnnotations() + annotations := make(map[string]string) + for k, v := range desiredAnnotations { + annotations[k] = v + } + ko.ObjectMeta.Annotations = annotations + return annotations +} + +func marshalAsAnnotation(val interface{}) string { + data, err := json.Marshal(val) + if err != nil { + return "null" + } + return string(data) +} diff --git a/pkg/resource/cache_cluster/manager.go b/pkg/resource/cache_cluster/manager.go index 9ce0dd7f..e5949daa 100644 --- a/pkg/resource/cache_cluster/manager.go +++ b/pkg/resource/cache_cluster/manager.go @@ -32,9 +32,8 @@ import ( acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" ackutil "github.com/aws-controllers-k8s/runtime/pkg/util" - "github.com/aws/aws-sdk-go/aws/session" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" - svcsdkapi "github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" @@ -59,6 +58,9 @@ type resourceManager struct { // cfg is a copy of the ackcfg.Config object passed on start of the service // controller cfg ackcfg.Config + // clientcfg is a copy of the client configuration passed on start of the + // service controller + clientcfg aws.Config // log refers to the logr.Logger object handling logging for the service // controller log logr.Logger @@ -73,12 +75,9 @@ type resourceManager struct { awsAccountID ackv1alpha1.AWSAccountID // The AWS Region that this resource manager targets awsRegion ackv1alpha1.AWSRegion - // sess is the AWS SDK Session object used to communicate with the backend - // AWS service API - sess *session.Session - // sdk is a pointer to 
the AWS service API interface exposed by the - // aws-sdk-go/services/{alias}/{alias}iface package. - sdkapi svcsdkapi.ElastiCacheAPI + // sdk is a pointer to the AWS service API client exposed by the + // aws-sdk-go-v2/services/{alias} package. + sdkapi *svcsdk.Client } // concreteResource returns a pointer to a resource from the supplied @@ -308,24 +307,25 @@ func (rm *resourceManager) EnsureTags( // newResourceManager returns a new struct implementing // acktypes.AWSResourceManager +// This is for AWS-SDK-GO-V2 - Created newResourceManager With AWS sdk-Go-ClientV2 func newResourceManager( cfg ackcfg.Config, + clientcfg aws.Config, log logr.Logger, metrics *ackmetrics.Metrics, rr acktypes.Reconciler, - sess *session.Session, id ackv1alpha1.AWSAccountID, region ackv1alpha1.AWSRegion, ) (*resourceManager, error) { return &resourceManager{ cfg: cfg, + clientcfg: clientcfg, log: log, metrics: metrics, rr: rr, awsAccountID: id, awsRegion: region, - sess: sess, - sdkapi: svcsdk.New(sess), + sdkapi: svcsdk.NewFromConfig(clientcfg), }, nil } diff --git a/pkg/resource/cache_cluster/manager_factory.go b/pkg/resource/cache_cluster/manager_factory.go index 1c63020a..3fa2b8dc 100644 --- a/pkg/resource/cache_cluster/manager_factory.go +++ b/pkg/resource/cache_cluster/manager_factory.go @@ -23,7 +23,7 @@ import ( ackcfg "github.com/aws-controllers-k8s/runtime/pkg/config" ackmetrics "github.com/aws-controllers-k8s/runtime/pkg/metrics" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" - "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/go-logr/logr" svcresource "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource" @@ -47,14 +47,18 @@ func (f *resourceManagerFactory) ResourceDescriptor() acktypes.AWSResourceDescri // supplied AWS account func (f *resourceManagerFactory) ManagerFor( cfg ackcfg.Config, + clientcfg aws.Config, log logr.Logger, metrics *ackmetrics.Metrics, rr acktypes.Reconciler, - sess *session.Session, 
id ackv1alpha1.AWSAccountID, region ackv1alpha1.AWSRegion, + roleARN ackv1alpha1.AWSResourceName, ) (acktypes.AWSResourceManager, error) { - rmId := fmt.Sprintf("%s/%s", id, region) + // We use the account ID, region, and role ARN to uniquely identify a + // resource manager. This helps us to avoid creating multiple resource + // managers for the same account/region/roleARN combination. + rmId := fmt.Sprintf("%s/%s/%s", id, region, roleARN) f.RLock() rm, found := f.rmCache[rmId] f.RUnlock() @@ -66,7 +70,7 @@ func (f *resourceManagerFactory) ManagerFor( f.Lock() defer f.Unlock() - rm, err := newResourceManager(cfg, log, metrics, rr, sess, id, region) + rm, err := newResourceManager(cfg, clientcfg, log, metrics, rr, id, region) if err != nil { return nil, err } diff --git a/pkg/resource/cache_cluster/references.go b/pkg/resource/cache_cluster/references.go index 0c0b666b..590cb93b 100644 --- a/pkg/resource/cache_cluster/references.go +++ b/pkg/resource/cache_cluster/references.go @@ -76,36 +76,35 @@ func (rm *resourceManager) ResolveReferences( apiReader client.Reader, res acktypes.AWSResource, ) (acktypes.AWSResource, bool, error) { - namespace := res.MetaObject().GetNamespace() ko := rm.concreteResource(res).ko resourceHasReferences := false err := validateReferenceFields(ko) - if fieldHasReferences, err := rm.resolveReferenceForCacheParameterGroupName(ctx, apiReader, namespace, ko); err != nil { + if fieldHasReferences, err := rm.resolveReferenceForCacheParameterGroupName(ctx, apiReader, ko); err != nil { return &resource{ko}, (resourceHasReferences || fieldHasReferences), err } else { resourceHasReferences = resourceHasReferences || fieldHasReferences } - if fieldHasReferences, err := rm.resolveReferenceForCacheSubnetGroupName(ctx, apiReader, namespace, ko); err != nil { + if fieldHasReferences, err := rm.resolveReferenceForCacheSubnetGroupName(ctx, apiReader, ko); err != nil { return &resource{ko}, (resourceHasReferences || fieldHasReferences), err } else { 
resourceHasReferences = resourceHasReferences || fieldHasReferences } - if fieldHasReferences, err := rm.resolveReferenceForNotificationTopicARN(ctx, apiReader, namespace, ko); err != nil { + if fieldHasReferences, err := rm.resolveReferenceForNotificationTopicARN(ctx, apiReader, ko); err != nil { return &resource{ko}, (resourceHasReferences || fieldHasReferences), err } else { resourceHasReferences = resourceHasReferences || fieldHasReferences } - if fieldHasReferences, err := rm.resolveReferenceForReplicationGroupID(ctx, apiReader, namespace, ko); err != nil { + if fieldHasReferences, err := rm.resolveReferenceForReplicationGroupID(ctx, apiReader, ko); err != nil { return &resource{ko}, (resourceHasReferences || fieldHasReferences), err } else { resourceHasReferences = resourceHasReferences || fieldHasReferences } - if fieldHasReferences, err := rm.resolveReferenceForSnapshotName(ctx, apiReader, namespace, ko); err != nil { + if fieldHasReferences, err := rm.resolveReferenceForSnapshotName(ctx, apiReader, ko); err != nil { return &resource{ko}, (resourceHasReferences || fieldHasReferences), err } else { resourceHasReferences = resourceHasReferences || fieldHasReferences @@ -147,7 +146,6 @@ func validateReferenceFields(ko *svcapitypes.CacheCluster) error { func (rm *resourceManager) resolveReferenceForCacheParameterGroupName( ctx context.Context, apiReader client.Reader, - namespace string, ko *svcapitypes.CacheCluster, ) (hasReferences bool, err error) { if ko.Spec.CacheParameterGroupRef != nil && ko.Spec.CacheParameterGroupRef.From != nil { @@ -156,6 +154,10 @@ func (rm *resourceManager) resolveReferenceForCacheParameterGroupName( if arr.Name == nil || *arr.Name == "" { return hasReferences, fmt.Errorf("provided resource reference is nil or empty: CacheParameterGroupRef") } + namespace := ko.ObjectMeta.GetNamespace() + if arr.Namespace != nil && *arr.Namespace != "" { + namespace = *arr.Namespace + } obj := &svcapitypes.CacheParameterGroup{} if err := 
getReferencedResourceState_CacheParameterGroup(ctx, apiReader, obj, *arr.Name, namespace); err != nil { return hasReferences, err @@ -185,12 +187,8 @@ func getReferencedResourceState_CacheParameterGroup( if err != nil { return err } - var refResourceSynced, refResourceTerminal bool + var refResourceTerminal bool for _, cond := range obj.Status.Conditions { - if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && - cond.Status == corev1.ConditionTrue { - refResourceSynced = true - } if cond.Type == ackv1alpha1.ConditionTypeTerminal && cond.Status == corev1.ConditionTrue { return ackerr.ResourceReferenceTerminalFor( @@ -203,6 +201,13 @@ func getReferencedResourceState_CacheParameterGroup( "CacheParameterGroup", namespace, name) } + var refResourceSynced bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && + cond.Status == corev1.ConditionTrue { + refResourceSynced = true + } + } if !refResourceSynced { return ackerr.ResourceReferenceNotSyncedFor( "CacheParameterGroup", @@ -224,7 +229,6 @@ func getReferencedResourceState_CacheParameterGroup( func (rm *resourceManager) resolveReferenceForCacheSubnetGroupName( ctx context.Context, apiReader client.Reader, - namespace string, ko *svcapitypes.CacheCluster, ) (hasReferences bool, err error) { if ko.Spec.CacheSubnetGroupRef != nil && ko.Spec.CacheSubnetGroupRef.From != nil { @@ -233,6 +237,10 @@ func (rm *resourceManager) resolveReferenceForCacheSubnetGroupName( if arr.Name == nil || *arr.Name == "" { return hasReferences, fmt.Errorf("provided resource reference is nil or empty: CacheSubnetGroupRef") } + namespace := ko.ObjectMeta.GetNamespace() + if arr.Namespace != nil && *arr.Namespace != "" { + namespace = *arr.Namespace + } obj := &svcapitypes.CacheSubnetGroup{} if err := getReferencedResourceState_CacheSubnetGroup(ctx, apiReader, obj, *arr.Name, namespace); err != nil { return hasReferences, err @@ -262,12 +270,8 @@ func 
getReferencedResourceState_CacheSubnetGroup( if err != nil { return err } - var refResourceSynced, refResourceTerminal bool + var refResourceTerminal bool for _, cond := range obj.Status.Conditions { - if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && - cond.Status == corev1.ConditionTrue { - refResourceSynced = true - } if cond.Type == ackv1alpha1.ConditionTypeTerminal && cond.Status == corev1.ConditionTrue { return ackerr.ResourceReferenceTerminalFor( @@ -280,6 +284,13 @@ func getReferencedResourceState_CacheSubnetGroup( "CacheSubnetGroup", namespace, name) } + var refResourceSynced bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && + cond.Status == corev1.ConditionTrue { + refResourceSynced = true + } + } if !refResourceSynced { return ackerr.ResourceReferenceNotSyncedFor( "CacheSubnetGroup", @@ -301,7 +312,6 @@ func getReferencedResourceState_CacheSubnetGroup( func (rm *resourceManager) resolveReferenceForNotificationTopicARN( ctx context.Context, apiReader client.Reader, - namespace string, ko *svcapitypes.CacheCluster, ) (hasReferences bool, err error) { if ko.Spec.NotificationTopicRef != nil && ko.Spec.NotificationTopicRef.From != nil { @@ -310,6 +320,10 @@ func (rm *resourceManager) resolveReferenceForNotificationTopicARN( if arr.Name == nil || *arr.Name == "" { return hasReferences, fmt.Errorf("provided resource reference is nil or empty: NotificationTopicRef") } + namespace := ko.ObjectMeta.GetNamespace() + if arr.Namespace != nil && *arr.Namespace != "" { + namespace = *arr.Namespace + } obj := &snsapitypes.Topic{} if err := getReferencedResourceState_Topic(ctx, apiReader, obj, *arr.Name, namespace); err != nil { return hasReferences, err @@ -339,12 +353,8 @@ func getReferencedResourceState_Topic( if err != nil { return err } - var refResourceSynced, refResourceTerminal bool + var refResourceTerminal bool for _, cond := range obj.Status.Conditions { - if cond.Type == 
ackv1alpha1.ConditionTypeResourceSynced && - cond.Status == corev1.ConditionTrue { - refResourceSynced = true - } if cond.Type == ackv1alpha1.ConditionTypeTerminal && cond.Status == corev1.ConditionTrue { return ackerr.ResourceReferenceTerminalFor( @@ -357,6 +367,13 @@ func getReferencedResourceState_Topic( "Topic", namespace, name) } + var refResourceSynced bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && + cond.Status == corev1.ConditionTrue { + refResourceSynced = true + } + } if !refResourceSynced { return ackerr.ResourceReferenceNotSyncedFor( "Topic", @@ -378,7 +395,6 @@ func getReferencedResourceState_Topic( func (rm *resourceManager) resolveReferenceForReplicationGroupID( ctx context.Context, apiReader client.Reader, - namespace string, ko *svcapitypes.CacheCluster, ) (hasReferences bool, err error) { if ko.Spec.ReplicationGroupRef != nil && ko.Spec.ReplicationGroupRef.From != nil { @@ -387,6 +403,10 @@ func (rm *resourceManager) resolveReferenceForReplicationGroupID( if arr.Name == nil || *arr.Name == "" { return hasReferences, fmt.Errorf("provided resource reference is nil or empty: ReplicationGroupRef") } + namespace := ko.ObjectMeta.GetNamespace() + if arr.Namespace != nil && *arr.Namespace != "" { + namespace = *arr.Namespace + } obj := &svcapitypes.ReplicationGroup{} if err := getReferencedResourceState_ReplicationGroup(ctx, apiReader, obj, *arr.Name, namespace); err != nil { return hasReferences, err @@ -416,12 +436,8 @@ func getReferencedResourceState_ReplicationGroup( if err != nil { return err } - var refResourceSynced, refResourceTerminal bool + var refResourceTerminal bool for _, cond := range obj.Status.Conditions { - if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && - cond.Status == corev1.ConditionTrue { - refResourceSynced = true - } if cond.Type == ackv1alpha1.ConditionTypeTerminal && cond.Status == corev1.ConditionTrue { return ackerr.ResourceReferenceTerminalFor( @@ 
-434,6 +450,13 @@ func getReferencedResourceState_ReplicationGroup( "ReplicationGroup", namespace, name) } + var refResourceSynced bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && + cond.Status == corev1.ConditionTrue { + refResourceSynced = true + } + } if !refResourceSynced { return ackerr.ResourceReferenceNotSyncedFor( "ReplicationGroup", @@ -455,7 +478,6 @@ func getReferencedResourceState_ReplicationGroup( func (rm *resourceManager) resolveReferenceForSnapshotName( ctx context.Context, apiReader client.Reader, - namespace string, ko *svcapitypes.CacheCluster, ) (hasReferences bool, err error) { if ko.Spec.SnapshotRef != nil && ko.Spec.SnapshotRef.From != nil { @@ -464,6 +486,10 @@ func (rm *resourceManager) resolveReferenceForSnapshotName( if arr.Name == nil || *arr.Name == "" { return hasReferences, fmt.Errorf("provided resource reference is nil or empty: SnapshotRef") } + namespace := ko.ObjectMeta.GetNamespace() + if arr.Namespace != nil && *arr.Namespace != "" { + namespace = *arr.Namespace + } obj := &svcapitypes.Snapshot{} if err := getReferencedResourceState_Snapshot(ctx, apiReader, obj, *arr.Name, namespace); err != nil { return hasReferences, err @@ -493,12 +519,8 @@ func getReferencedResourceState_Snapshot( if err != nil { return err } - var refResourceSynced, refResourceTerminal bool + var refResourceTerminal bool for _, cond := range obj.Status.Conditions { - if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && - cond.Status == corev1.ConditionTrue { - refResourceSynced = true - } if cond.Type == ackv1alpha1.ConditionTypeTerminal && cond.Status == corev1.ConditionTrue { return ackerr.ResourceReferenceTerminalFor( @@ -511,6 +533,13 @@ func getReferencedResourceState_Snapshot( "Snapshot", namespace, name) } + var refResourceSynced bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && + cond.Status == corev1.ConditionTrue { + 
refResourceSynced = true + } + } if !refResourceSynced { return ackerr.ResourceReferenceNotSyncedFor( "Snapshot", diff --git a/pkg/resource/cache_cluster/resource.go b/pkg/resource/cache_cluster/resource.go index f3ff87d3..05dcfabc 100644 --- a/pkg/resource/cache_cluster/resource.go +++ b/pkg/resource/cache_cluster/resource.go @@ -93,6 +93,17 @@ func (r *resource) SetIdentifiers(identifier *ackv1alpha1.AWSIdentifiers) error return nil } +// PopulateResourceFromAnnotation populates the fields passed from adoption annotation +func (r *resource) PopulateResourceFromAnnotation(fields map[string]string) error { + tmp, ok := fields["cacheClusterID"] + if !ok { + return ackerrors.MissingNameIdentifier + } + r.ko.Spec.CacheClusterID = &tmp + + return nil +} + // DeepCopy will return a copy of the resource func (r *resource) DeepCopy() acktypes.AWSResource { koCopy := r.ko.DeepCopy() diff --git a/pkg/resource/cache_cluster/sdk.go b/pkg/resource/cache_cluster/sdk.go index edb94fbe..97193456 100644 --- a/pkg/resource/cache_cluster/sdk.go +++ b/pkg/resource/cache_cluster/sdk.go @@ -19,6 +19,7 @@ import ( "context" "errors" "fmt" + "math" "reflect" "strings" @@ -28,8 +29,10 @@ import ( ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" - "github.com/aws/aws-sdk-go/aws" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + svcsdktypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" + smithy "github.com/aws/smithy-go" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -40,8 +43,7 @@ import ( var ( _ = &metav1.Time{} _ = strings.ToLower("") - _ = &aws.JSONValue{} - _ = &svcsdk.ElastiCache{} + _ = &svcsdk.Client{} _ = &svcapitypes.CacheCluster{} _ = ackv1alpha1.AWSAccountID("") _ = &ackerr.NotFound @@ -49,6 
+51,7 @@ var ( _ = &reflect.Value{} _ = fmt.Sprintf("") _ = &ackrequeue.NoRequeue{} + _ = &aws.Config{} ) // sdkFind returns SDK-specific information about a supplied resource @@ -73,10 +76,11 @@ func (rm *resourceManager) sdkFind( return nil, err } var resp *svcsdk.DescribeCacheClustersOutput - resp, err = rm.sdkapi.DescribeCacheClustersWithContext(ctx, input) + resp, err = rm.sdkapi.DescribeCacheClusters(ctx, input) rm.metrics.RecordAPICall("READ_MANY", "DescribeCacheClusters", err) if err != nil { - if awsErr, ok := ackerr.AWSError(err); ok && awsErr.Code() == "CacheClusterNotFound" { + var awsErr smithy.APIError + if errors.As(err, &awsErr) && awsErr.ErrorCode() == "CacheClusterNotFound" { return nil, ackerr.NotFound } return nil, err @@ -160,7 +164,8 @@ func (rm *resourceManager) sdkFind( f9elemf5.Address = f9iter.Endpoint.Address } if f9iter.Endpoint.Port != nil { - f9elemf5.Port = f9iter.Endpoint.Port + portCopy := int64(*f9iter.Endpoint.Port) + f9elemf5.Port = &portCopy } f9elem.Endpoint = f9elemf5 } @@ -179,13 +184,7 @@ func (rm *resourceManager) sdkFind( if elem.CacheParameterGroup != nil { f10 := &svcapitypes.CacheParameterGroupStatus_SDK{} if elem.CacheParameterGroup.CacheNodeIdsToReboot != nil { - f10f0 := []*string{} - for _, f10f0iter := range elem.CacheParameterGroup.CacheNodeIdsToReboot { - var f10f0elem string - f10f0elem = *f10f0iter - f10f0 = append(f10f0, &f10f0elem) - } - f10.CacheNodeIDsToReboot = f10f0 + f10.CacheNodeIDsToReboot = aws.StringSlice(elem.CacheParameterGroup.CacheNodeIdsToReboot) } if elem.CacheParameterGroup.CacheParameterGroupName != nil { f10.CacheParameterGroupName = elem.CacheParameterGroup.CacheParameterGroupName @@ -229,7 +228,8 @@ func (rm *resourceManager) sdkFind( f14.Address = elem.ConfigurationEndpoint.Address } if elem.ConfigurationEndpoint.Port != nil { - f14.Port = elem.ConfigurationEndpoint.Port + portCopy := int64(*elem.ConfigurationEndpoint.Port) + f14.Port = &portCopy } ko.Status.ConfigurationEndpoint = f14 } 
else { @@ -245,13 +245,13 @@ func (rm *resourceManager) sdkFind( } else { ko.Spec.EngineVersion = nil } - if elem.IpDiscovery != nil { - ko.Spec.IPDiscovery = elem.IpDiscovery + if elem.IpDiscovery != "" { + ko.Spec.IPDiscovery = aws.String(string(elem.IpDiscovery)) } else { ko.Spec.IPDiscovery = nil } - if elem.NetworkType != nil { - ko.Spec.NetworkType = elem.NetworkType + if elem.NetworkType != "" { + ko.Spec.NetworkType = aws.String(string(elem.NetworkType)) } else { ko.Spec.NetworkType = nil } @@ -268,23 +268,18 @@ func (rm *resourceManager) sdkFind( ko.Status.NotificationConfiguration = nil } if elem.NumCacheNodes != nil { - ko.Spec.NumCacheNodes = elem.NumCacheNodes + numCacheNodesCopy := int64(*elem.NumCacheNodes) + ko.Spec.NumCacheNodes = &numCacheNodesCopy } else { ko.Spec.NumCacheNodes = nil } if elem.PendingModifiedValues != nil { f21 := &svcapitypes.PendingModifiedValues{} - if elem.PendingModifiedValues.AuthTokenStatus != nil { - f21.AuthTokenStatus = elem.PendingModifiedValues.AuthTokenStatus + if elem.PendingModifiedValues.AuthTokenStatus != "" { + f21.AuthTokenStatus = aws.String(string(elem.PendingModifiedValues.AuthTokenStatus)) } if elem.PendingModifiedValues.CacheNodeIdsToRemove != nil { - f21f1 := []*string{} - for _, f21f1iter := range elem.PendingModifiedValues.CacheNodeIdsToRemove { - var f21f1elem string - f21f1elem = *f21f1iter - f21f1 = append(f21f1, &f21f1elem) - } - f21.CacheNodeIDsToRemove = f21f1 + f21.CacheNodeIDsToRemove = aws.StringSlice(elem.PendingModifiedValues.CacheNodeIdsToRemove) } if elem.PendingModifiedValues.CacheNodeType != nil { f21.CacheNodeType = elem.PendingModifiedValues.CacheNodeType @@ -293,13 +288,14 @@ func (rm *resourceManager) sdkFind( f21.EngineVersion = elem.PendingModifiedValues.EngineVersion } if elem.PendingModifiedValues.NumCacheNodes != nil { - f21.NumCacheNodes = elem.PendingModifiedValues.NumCacheNodes + numCacheNodesCopy := int64(*elem.PendingModifiedValues.NumCacheNodes) + f21.NumCacheNodes = 
&numCacheNodesCopy } if elem.PendingModifiedValues.TransitEncryptionEnabled != nil { f21.TransitEncryptionEnabled = elem.PendingModifiedValues.TransitEncryptionEnabled } - if elem.PendingModifiedValues.TransitEncryptionMode != nil { - f21.TransitEncryptionMode = elem.PendingModifiedValues.TransitEncryptionMode + if elem.PendingModifiedValues.TransitEncryptionMode != "" { + f21.TransitEncryptionMode = aws.String(string(elem.PendingModifiedValues.TransitEncryptionMode)) } ko.Status.PendingModifiedValues = f21 } else { @@ -347,7 +343,8 @@ func (rm *resourceManager) sdkFind( ko.Status.SecurityGroups = nil } if elem.SnapshotRetentionLimit != nil { - ko.Spec.SnapshotRetentionLimit = elem.SnapshotRetentionLimit + snapshotRetentionLimitCopy := int64(*elem.SnapshotRetentionLimit) + ko.Spec.SnapshotRetentionLimit = &snapshotRetentionLimitCopy } else { ko.Spec.SnapshotRetentionLimit = nil } @@ -361,8 +358,8 @@ func (rm *resourceManager) sdkFind( } else { ko.Spec.TransitEncryptionEnabled = nil } - if elem.TransitEncryptionMode != nil { - ko.Status.TransitEncryptionMode = elem.TransitEncryptionMode + if elem.TransitEncryptionMode != "" { + ko.Status.TransitEncryptionMode = aws.String(string(elem.TransitEncryptionMode)) } else { ko.Status.TransitEncryptionMode = nil } @@ -423,7 +420,7 @@ func (rm *resourceManager) newListRequestPayload( res := &svcsdk.DescribeCacheClustersInput{} if r.ko.Spec.CacheClusterID != nil { - res.SetCacheClusterId(*r.ko.Spec.CacheClusterID) + res.CacheClusterId = r.ko.Spec.CacheClusterID } return res, nil @@ -448,7 +445,7 @@ func (rm *resourceManager) sdkCreate( var resp *svcsdk.CreateCacheClusterOutput _ = resp - resp, err = rm.sdkapi.CreateCacheClusterWithContext(ctx, input) + resp, err = rm.sdkapi.CreateCacheCluster(ctx, input) rm.metrics.RecordAPICall("CREATE", "CreateCacheCluster", err) if err != nil { return nil, err @@ -529,7 +526,8 @@ func (rm *resourceManager) sdkCreate( f9elemf5.Address = f9iter.Endpoint.Address } if f9iter.Endpoint.Port != 
nil { - f9elemf5.Port = f9iter.Endpoint.Port + portCopy := int64(*f9iter.Endpoint.Port) + f9elemf5.Port = &portCopy } f9elem.Endpoint = f9elemf5 } @@ -548,13 +546,7 @@ func (rm *resourceManager) sdkCreate( if resp.CacheCluster.CacheParameterGroup != nil { f10 := &svcapitypes.CacheParameterGroupStatus_SDK{} if resp.CacheCluster.CacheParameterGroup.CacheNodeIdsToReboot != nil { - f10f0 := []*string{} - for _, f10f0iter := range resp.CacheCluster.CacheParameterGroup.CacheNodeIdsToReboot { - var f10f0elem string - f10f0elem = *f10f0iter - f10f0 = append(f10f0, &f10f0elem) - } - f10.CacheNodeIDsToReboot = f10f0 + f10.CacheNodeIDsToReboot = aws.StringSlice(resp.CacheCluster.CacheParameterGroup.CacheNodeIdsToReboot) } if resp.CacheCluster.CacheParameterGroup.CacheParameterGroupName != nil { f10.CacheParameterGroupName = resp.CacheCluster.CacheParameterGroup.CacheParameterGroupName @@ -598,7 +590,8 @@ func (rm *resourceManager) sdkCreate( f14.Address = resp.CacheCluster.ConfigurationEndpoint.Address } if resp.CacheCluster.ConfigurationEndpoint.Port != nil { - f14.Port = resp.CacheCluster.ConfigurationEndpoint.Port + portCopy := int64(*resp.CacheCluster.ConfigurationEndpoint.Port) + f14.Port = &portCopy } ko.Status.ConfigurationEndpoint = f14 } else { @@ -614,13 +607,13 @@ func (rm *resourceManager) sdkCreate( } else { ko.Spec.EngineVersion = nil } - if resp.CacheCluster.IpDiscovery != nil { - ko.Spec.IPDiscovery = resp.CacheCluster.IpDiscovery + if resp.CacheCluster.IpDiscovery != "" { + ko.Spec.IPDiscovery = aws.String(string(resp.CacheCluster.IpDiscovery)) } else { ko.Spec.IPDiscovery = nil } - if resp.CacheCluster.NetworkType != nil { - ko.Spec.NetworkType = resp.CacheCluster.NetworkType + if resp.CacheCluster.NetworkType != "" { + ko.Spec.NetworkType = aws.String(string(resp.CacheCluster.NetworkType)) } else { ko.Spec.NetworkType = nil } @@ -637,23 +630,18 @@ func (rm *resourceManager) sdkCreate( ko.Status.NotificationConfiguration = nil } if 
resp.CacheCluster.NumCacheNodes != nil { - ko.Spec.NumCacheNodes = resp.CacheCluster.NumCacheNodes + numCacheNodesCopy := int64(*resp.CacheCluster.NumCacheNodes) + ko.Spec.NumCacheNodes = &numCacheNodesCopy } else { ko.Spec.NumCacheNodes = nil } if resp.CacheCluster.PendingModifiedValues != nil { f21 := &svcapitypes.PendingModifiedValues{} - if resp.CacheCluster.PendingModifiedValues.AuthTokenStatus != nil { - f21.AuthTokenStatus = resp.CacheCluster.PendingModifiedValues.AuthTokenStatus + if resp.CacheCluster.PendingModifiedValues.AuthTokenStatus != "" { + f21.AuthTokenStatus = aws.String(string(resp.CacheCluster.PendingModifiedValues.AuthTokenStatus)) } if resp.CacheCluster.PendingModifiedValues.CacheNodeIdsToRemove != nil { - f21f1 := []*string{} - for _, f21f1iter := range resp.CacheCluster.PendingModifiedValues.CacheNodeIdsToRemove { - var f21f1elem string - f21f1elem = *f21f1iter - f21f1 = append(f21f1, &f21f1elem) - } - f21.CacheNodeIDsToRemove = f21f1 + f21.CacheNodeIDsToRemove = aws.StringSlice(resp.CacheCluster.PendingModifiedValues.CacheNodeIdsToRemove) } if resp.CacheCluster.PendingModifiedValues.CacheNodeType != nil { f21.CacheNodeType = resp.CacheCluster.PendingModifiedValues.CacheNodeType @@ -662,13 +650,14 @@ func (rm *resourceManager) sdkCreate( f21.EngineVersion = resp.CacheCluster.PendingModifiedValues.EngineVersion } if resp.CacheCluster.PendingModifiedValues.NumCacheNodes != nil { - f21.NumCacheNodes = resp.CacheCluster.PendingModifiedValues.NumCacheNodes + numCacheNodesCopy := int64(*resp.CacheCluster.PendingModifiedValues.NumCacheNodes) + f21.NumCacheNodes = &numCacheNodesCopy } if resp.CacheCluster.PendingModifiedValues.TransitEncryptionEnabled != nil { f21.TransitEncryptionEnabled = resp.CacheCluster.PendingModifiedValues.TransitEncryptionEnabled } - if resp.CacheCluster.PendingModifiedValues.TransitEncryptionMode != nil { - f21.TransitEncryptionMode = resp.CacheCluster.PendingModifiedValues.TransitEncryptionMode + if 
resp.CacheCluster.PendingModifiedValues.TransitEncryptionMode != "" { + f21.TransitEncryptionMode = aws.String(string(resp.CacheCluster.PendingModifiedValues.TransitEncryptionMode)) } ko.Status.PendingModifiedValues = f21 } else { @@ -716,7 +705,8 @@ func (rm *resourceManager) sdkCreate( ko.Status.SecurityGroups = nil } if resp.CacheCluster.SnapshotRetentionLimit != nil { - ko.Spec.SnapshotRetentionLimit = resp.CacheCluster.SnapshotRetentionLimit + snapshotRetentionLimitCopy := int64(*resp.CacheCluster.SnapshotRetentionLimit) + ko.Spec.SnapshotRetentionLimit = &snapshotRetentionLimitCopy } else { ko.Spec.SnapshotRetentionLimit = nil } @@ -730,8 +720,8 @@ func (rm *resourceManager) sdkCreate( } else { ko.Spec.TransitEncryptionEnabled = nil } - if resp.CacheCluster.TransitEncryptionMode != nil { - ko.Status.TransitEncryptionMode = resp.CacheCluster.TransitEncryptionMode + if resp.CacheCluster.TransitEncryptionMode != "" { + ko.Status.TransitEncryptionMode = aws.String(string(resp.CacheCluster.TransitEncryptionMode)) } else { ko.Status.TransitEncryptionMode = nil } @@ -761,7 +751,7 @@ func (rm *resourceManager) newCreateRequestPayload( res := &svcsdk.CreateCacheClusterInput{} if r.ko.Spec.AZMode != nil { - res.SetAZMode(*r.ko.Spec.AZMode) + res.AZMode = svcsdktypes.AZMode(*r.ko.Spec.AZMode) } if r.ko.Spec.AuthToken != nil { tmpSecret, err := rm.rr.SecretValueFromReference(ctx, r.ko.Spec.AuthToken) @@ -769,168 +759,153 @@ func (rm *resourceManager) newCreateRequestPayload( return nil, ackrequeue.Needed(err) } if tmpSecret != "" { - res.SetAuthToken(tmpSecret) + res.AuthToken = aws.String(tmpSecret) } } if r.ko.Spec.AutoMinorVersionUpgrade != nil { - res.SetAutoMinorVersionUpgrade(*r.ko.Spec.AutoMinorVersionUpgrade) + res.AutoMinorVersionUpgrade = r.ko.Spec.AutoMinorVersionUpgrade } if r.ko.Spec.CacheClusterID != nil { - res.SetCacheClusterId(*r.ko.Spec.CacheClusterID) + res.CacheClusterId = r.ko.Spec.CacheClusterID } if r.ko.Spec.CacheNodeType != nil { - 
res.SetCacheNodeType(*r.ko.Spec.CacheNodeType) + res.CacheNodeType = r.ko.Spec.CacheNodeType } if r.ko.Spec.CacheParameterGroupName != nil { - res.SetCacheParameterGroupName(*r.ko.Spec.CacheParameterGroupName) + res.CacheParameterGroupName = r.ko.Spec.CacheParameterGroupName } if r.ko.Spec.CacheSecurityGroupNames != nil { - f6 := []*string{} - for _, f6iter := range r.ko.Spec.CacheSecurityGroupNames { - var f6elem string - f6elem = *f6iter - f6 = append(f6, &f6elem) - } - res.SetCacheSecurityGroupNames(f6) + res.CacheSecurityGroupNames = aws.ToStringSlice(r.ko.Spec.CacheSecurityGroupNames) } if r.ko.Spec.CacheSubnetGroupName != nil { - res.SetCacheSubnetGroupName(*r.ko.Spec.CacheSubnetGroupName) + res.CacheSubnetGroupName = r.ko.Spec.CacheSubnetGroupName } if r.ko.Spec.Engine != nil { - res.SetEngine(*r.ko.Spec.Engine) + res.Engine = r.ko.Spec.Engine } if r.ko.Spec.EngineVersion != nil { - res.SetEngineVersion(*r.ko.Spec.EngineVersion) + res.EngineVersion = r.ko.Spec.EngineVersion } if r.ko.Spec.IPDiscovery != nil { - res.SetIpDiscovery(*r.ko.Spec.IPDiscovery) + res.IpDiscovery = svcsdktypes.IpDiscovery(*r.ko.Spec.IPDiscovery) } if r.ko.Spec.LogDeliveryConfigurations != nil { - f11 := []*svcsdk.LogDeliveryConfigurationRequest{} + f11 := []svcsdktypes.LogDeliveryConfigurationRequest{} for _, f11iter := range r.ko.Spec.LogDeliveryConfigurations { - f11elem := &svcsdk.LogDeliveryConfigurationRequest{} + f11elem := &svcsdktypes.LogDeliveryConfigurationRequest{} if f11iter.DestinationDetails != nil { - f11elemf0 := &svcsdk.DestinationDetails{} + f11elemf0 := &svcsdktypes.DestinationDetails{} if f11iter.DestinationDetails.CloudWatchLogsDetails != nil { - f11elemf0f0 := &svcsdk.CloudWatchLogsDestinationDetails{} + f11elemf0f0 := &svcsdktypes.CloudWatchLogsDestinationDetails{} if f11iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f11elemf0f0.SetLogGroup(*f11iter.DestinationDetails.CloudWatchLogsDetails.LogGroup) + f11elemf0f0.LogGroup = 
f11iter.DestinationDetails.CloudWatchLogsDetails.LogGroup } - f11elemf0.SetCloudWatchLogsDetails(f11elemf0f0) + f11elemf0.CloudWatchLogsDetails = f11elemf0f0 } if f11iter.DestinationDetails.KinesisFirehoseDetails != nil { - f11elemf0f1 := &svcsdk.KinesisFirehoseDestinationDetails{} + f11elemf0f1 := &svcsdktypes.KinesisFirehoseDestinationDetails{} if f11iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f11elemf0f1.SetDeliveryStream(*f11iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream) + f11elemf0f1.DeliveryStream = f11iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream } - f11elemf0.SetKinesisFirehoseDetails(f11elemf0f1) + f11elemf0.KinesisFirehoseDetails = f11elemf0f1 } - f11elem.SetDestinationDetails(f11elemf0) + f11elem.DestinationDetails = f11elemf0 } if f11iter.DestinationType != nil { - f11elem.SetDestinationType(*f11iter.DestinationType) + f11elem.DestinationType = svcsdktypes.DestinationType(*f11iter.DestinationType) } if f11iter.Enabled != nil { - f11elem.SetEnabled(*f11iter.Enabled) + f11elem.Enabled = f11iter.Enabled } if f11iter.LogFormat != nil { - f11elem.SetLogFormat(*f11iter.LogFormat) + f11elem.LogFormat = svcsdktypes.LogFormat(*f11iter.LogFormat) } if f11iter.LogType != nil { - f11elem.SetLogType(*f11iter.LogType) + f11elem.LogType = svcsdktypes.LogType(*f11iter.LogType) } - f11 = append(f11, f11elem) + f11 = append(f11, *f11elem) } - res.SetLogDeliveryConfigurations(f11) + res.LogDeliveryConfigurations = f11 } if r.ko.Spec.NetworkType != nil { - res.SetNetworkType(*r.ko.Spec.NetworkType) + res.NetworkType = svcsdktypes.NetworkType(*r.ko.Spec.NetworkType) } if r.ko.Spec.NotificationTopicARN != nil { - res.SetNotificationTopicArn(*r.ko.Spec.NotificationTopicARN) + res.NotificationTopicArn = r.ko.Spec.NotificationTopicARN } if r.ko.Spec.NumCacheNodes != nil { - res.SetNumCacheNodes(*r.ko.Spec.NumCacheNodes) + numCacheNodesCopy0 := *r.ko.Spec.NumCacheNodes + if numCacheNodesCopy0 > math.MaxInt32 || 
numCacheNodesCopy0 < math.MinInt32 { + return nil, fmt.Errorf("error: field NumCacheNodes is of type int32") + } + numCacheNodesCopy := int32(numCacheNodesCopy0) + res.NumCacheNodes = &numCacheNodesCopy } if r.ko.Spec.OutpostMode != nil { - res.SetOutpostMode(*r.ko.Spec.OutpostMode) + res.OutpostMode = svcsdktypes.OutpostMode(*r.ko.Spec.OutpostMode) } if r.ko.Spec.Port != nil { - res.SetPort(*r.ko.Spec.Port) + portCopy0 := *r.ko.Spec.Port + if portCopy0 > math.MaxInt32 || portCopy0 < math.MinInt32 { + return nil, fmt.Errorf("error: field Port is of type int32") + } + portCopy := int32(portCopy0) + res.Port = &portCopy } if r.ko.Spec.PreferredAvailabilityZone != nil { - res.SetPreferredAvailabilityZone(*r.ko.Spec.PreferredAvailabilityZone) + res.PreferredAvailabilityZone = r.ko.Spec.PreferredAvailabilityZone } if r.ko.Spec.PreferredAvailabilityZones != nil { - f18 := []*string{} - for _, f18iter := range r.ko.Spec.PreferredAvailabilityZones { - var f18elem string - f18elem = *f18iter - f18 = append(f18, &f18elem) - } - res.SetPreferredAvailabilityZones(f18) + res.PreferredAvailabilityZones = aws.ToStringSlice(r.ko.Spec.PreferredAvailabilityZones) } if r.ko.Spec.PreferredMaintenanceWindow != nil { - res.SetPreferredMaintenanceWindow(*r.ko.Spec.PreferredMaintenanceWindow) + res.PreferredMaintenanceWindow = r.ko.Spec.PreferredMaintenanceWindow } if r.ko.Spec.PreferredOutpostARN != nil { - res.SetPreferredOutpostArn(*r.ko.Spec.PreferredOutpostARN) + res.PreferredOutpostArn = r.ko.Spec.PreferredOutpostARN } if r.ko.Spec.PreferredOutpostARNs != nil { - f21 := []*string{} - for _, f21iter := range r.ko.Spec.PreferredOutpostARNs { - var f21elem string - f21elem = *f21iter - f21 = append(f21, &f21elem) - } - res.SetPreferredOutpostArns(f21) + res.PreferredOutpostArns = aws.ToStringSlice(r.ko.Spec.PreferredOutpostARNs) } if r.ko.Spec.ReplicationGroupID != nil { - res.SetReplicationGroupId(*r.ko.Spec.ReplicationGroupID) + res.ReplicationGroupId = r.ko.Spec.ReplicationGroupID } 
if r.ko.Spec.SecurityGroupIDs != nil { - f23 := []*string{} - for _, f23iter := range r.ko.Spec.SecurityGroupIDs { - var f23elem string - f23elem = *f23iter - f23 = append(f23, &f23elem) - } - res.SetSecurityGroupIds(f23) + res.SecurityGroupIds = aws.ToStringSlice(r.ko.Spec.SecurityGroupIDs) } if r.ko.Spec.SnapshotARNs != nil { - f24 := []*string{} - for _, f24iter := range r.ko.Spec.SnapshotARNs { - var f24elem string - f24elem = *f24iter - f24 = append(f24, &f24elem) - } - res.SetSnapshotArns(f24) + res.SnapshotArns = aws.ToStringSlice(r.ko.Spec.SnapshotARNs) } if r.ko.Spec.SnapshotName != nil { - res.SetSnapshotName(*r.ko.Spec.SnapshotName) + res.SnapshotName = r.ko.Spec.SnapshotName } if r.ko.Spec.SnapshotRetentionLimit != nil { - res.SetSnapshotRetentionLimit(*r.ko.Spec.SnapshotRetentionLimit) + snapshotRetentionLimitCopy0 := *r.ko.Spec.SnapshotRetentionLimit + if snapshotRetentionLimitCopy0 > math.MaxInt32 || snapshotRetentionLimitCopy0 < math.MinInt32 { + return nil, fmt.Errorf("error: field SnapshotRetentionLimit is of type int32") + } + snapshotRetentionLimitCopy := int32(snapshotRetentionLimitCopy0) + res.SnapshotRetentionLimit = &snapshotRetentionLimitCopy } if r.ko.Spec.SnapshotWindow != nil { - res.SetSnapshotWindow(*r.ko.Spec.SnapshotWindow) + res.SnapshotWindow = r.ko.Spec.SnapshotWindow } if r.ko.Spec.Tags != nil { - f28 := []*svcsdk.Tag{} + f28 := []svcsdktypes.Tag{} for _, f28iter := range r.ko.Spec.Tags { - f28elem := &svcsdk.Tag{} + f28elem := &svcsdktypes.Tag{} if f28iter.Key != nil { - f28elem.SetKey(*f28iter.Key) + f28elem.Key = f28iter.Key } if f28iter.Value != nil { - f28elem.SetValue(*f28iter.Value) + f28elem.Value = f28iter.Value } - f28 = append(f28, f28elem) + f28 = append(f28, *f28elem) } - res.SetTags(f28) + res.Tags = f28 } if r.ko.Spec.TransitEncryptionEnabled != nil { - res.SetTransitEncryptionEnabled(*r.ko.Spec.TransitEncryptionEnabled) + res.TransitEncryptionEnabled = r.ko.Spec.TransitEncryptionEnabled } return res, nil @@ -973,7 
+948,7 @@ func (rm *resourceManager) sdkUpdate( var resp *svcsdk.ModifyCacheClusterOutput _ = resp - resp, err = rm.sdkapi.ModifyCacheClusterWithContext(ctx, input) + resp, err = rm.sdkapi.ModifyCacheCluster(ctx, input) rm.metrics.RecordAPICall("UPDATE", "ModifyCacheCluster", err) if err != nil { return nil, err @@ -1054,7 +1029,8 @@ func (rm *resourceManager) sdkUpdate( f9elemf5.Address = f9iter.Endpoint.Address } if f9iter.Endpoint.Port != nil { - f9elemf5.Port = f9iter.Endpoint.Port + portCopy := int64(*f9iter.Endpoint.Port) + f9elemf5.Port = &portCopy } f9elem.Endpoint = f9elemf5 } @@ -1073,13 +1049,7 @@ func (rm *resourceManager) sdkUpdate( if resp.CacheCluster.CacheParameterGroup != nil { f10 := &svcapitypes.CacheParameterGroupStatus_SDK{} if resp.CacheCluster.CacheParameterGroup.CacheNodeIdsToReboot != nil { - f10f0 := []*string{} - for _, f10f0iter := range resp.CacheCluster.CacheParameterGroup.CacheNodeIdsToReboot { - var f10f0elem string - f10f0elem = *f10f0iter - f10f0 = append(f10f0, &f10f0elem) - } - f10.CacheNodeIDsToReboot = f10f0 + f10.CacheNodeIDsToReboot = aws.StringSlice(resp.CacheCluster.CacheParameterGroup.CacheNodeIdsToReboot) } if resp.CacheCluster.CacheParameterGroup.CacheParameterGroupName != nil { f10.CacheParameterGroupName = resp.CacheCluster.CacheParameterGroup.CacheParameterGroupName @@ -1123,7 +1093,8 @@ func (rm *resourceManager) sdkUpdate( f14.Address = resp.CacheCluster.ConfigurationEndpoint.Address } if resp.CacheCluster.ConfigurationEndpoint.Port != nil { - f14.Port = resp.CacheCluster.ConfigurationEndpoint.Port + portCopy := int64(*resp.CacheCluster.ConfigurationEndpoint.Port) + f14.Port = &portCopy } ko.Status.ConfigurationEndpoint = f14 } else { @@ -1139,13 +1110,13 @@ func (rm *resourceManager) sdkUpdate( } else { ko.Spec.EngineVersion = nil } - if resp.CacheCluster.IpDiscovery != nil { - ko.Spec.IPDiscovery = resp.CacheCluster.IpDiscovery + if resp.CacheCluster.IpDiscovery != "" { + ko.Spec.IPDiscovery = 
aws.String(string(resp.CacheCluster.IpDiscovery)) } else { ko.Spec.IPDiscovery = nil } - if resp.CacheCluster.NetworkType != nil { - ko.Spec.NetworkType = resp.CacheCluster.NetworkType + if resp.CacheCluster.NetworkType != "" { + ko.Spec.NetworkType = aws.String(string(resp.CacheCluster.NetworkType)) } else { ko.Spec.NetworkType = nil } @@ -1162,23 +1133,18 @@ func (rm *resourceManager) sdkUpdate( ko.Status.NotificationConfiguration = nil } if resp.CacheCluster.NumCacheNodes != nil { - ko.Spec.NumCacheNodes = resp.CacheCluster.NumCacheNodes + numCacheNodesCopy := int64(*resp.CacheCluster.NumCacheNodes) + ko.Spec.NumCacheNodes = &numCacheNodesCopy } else { ko.Spec.NumCacheNodes = nil } if resp.CacheCluster.PendingModifiedValues != nil { f21 := &svcapitypes.PendingModifiedValues{} - if resp.CacheCluster.PendingModifiedValues.AuthTokenStatus != nil { - f21.AuthTokenStatus = resp.CacheCluster.PendingModifiedValues.AuthTokenStatus + if resp.CacheCluster.PendingModifiedValues.AuthTokenStatus != "" { + f21.AuthTokenStatus = aws.String(string(resp.CacheCluster.PendingModifiedValues.AuthTokenStatus)) } if resp.CacheCluster.PendingModifiedValues.CacheNodeIdsToRemove != nil { - f21f1 := []*string{} - for _, f21f1iter := range resp.CacheCluster.PendingModifiedValues.CacheNodeIdsToRemove { - var f21f1elem string - f21f1elem = *f21f1iter - f21f1 = append(f21f1, &f21f1elem) - } - f21.CacheNodeIDsToRemove = f21f1 + f21.CacheNodeIDsToRemove = aws.StringSlice(resp.CacheCluster.PendingModifiedValues.CacheNodeIdsToRemove) } if resp.CacheCluster.PendingModifiedValues.CacheNodeType != nil { f21.CacheNodeType = resp.CacheCluster.PendingModifiedValues.CacheNodeType @@ -1187,13 +1153,14 @@ func (rm *resourceManager) sdkUpdate( f21.EngineVersion = resp.CacheCluster.PendingModifiedValues.EngineVersion } if resp.CacheCluster.PendingModifiedValues.NumCacheNodes != nil { - f21.NumCacheNodes = resp.CacheCluster.PendingModifiedValues.NumCacheNodes + numCacheNodesCopy := 
int64(*resp.CacheCluster.PendingModifiedValues.NumCacheNodes) + f21.NumCacheNodes = &numCacheNodesCopy } if resp.CacheCluster.PendingModifiedValues.TransitEncryptionEnabled != nil { f21.TransitEncryptionEnabled = resp.CacheCluster.PendingModifiedValues.TransitEncryptionEnabled } - if resp.CacheCluster.PendingModifiedValues.TransitEncryptionMode != nil { - f21.TransitEncryptionMode = resp.CacheCluster.PendingModifiedValues.TransitEncryptionMode + if resp.CacheCluster.PendingModifiedValues.TransitEncryptionMode != "" { + f21.TransitEncryptionMode = aws.String(string(resp.CacheCluster.PendingModifiedValues.TransitEncryptionMode)) } ko.Status.PendingModifiedValues = f21 } else { @@ -1241,7 +1208,8 @@ func (rm *resourceManager) sdkUpdate( ko.Status.SecurityGroups = nil } if resp.CacheCluster.SnapshotRetentionLimit != nil { - ko.Spec.SnapshotRetentionLimit = resp.CacheCluster.SnapshotRetentionLimit + snapshotRetentionLimitCopy := int64(*resp.CacheCluster.SnapshotRetentionLimit) + ko.Spec.SnapshotRetentionLimit = &snapshotRetentionLimitCopy } else { ko.Spec.SnapshotRetentionLimit = nil } @@ -1255,8 +1223,8 @@ func (rm *resourceManager) sdkUpdate( } else { ko.Spec.TransitEncryptionEnabled = nil } - if resp.CacheCluster.TransitEncryptionMode != nil { - ko.Status.TransitEncryptionMode = resp.CacheCluster.TransitEncryptionMode + if resp.CacheCluster.TransitEncryptionMode != "" { + ko.Status.TransitEncryptionMode = aws.String(string(resp.CacheCluster.TransitEncryptionMode)) } else { ko.Status.TransitEncryptionMode = nil } @@ -1292,106 +1260,107 @@ func (rm *resourceManager) newUpdateRequestPayload( res := &svcsdk.ModifyCacheClusterInput{} if r.ko.Spec.AZMode != nil { - res.SetAZMode(*r.ko.Spec.AZMode) + res.AZMode = svcsdktypes.AZMode(*r.ko.Spec.AZMode) } - res.SetApplyImmediately(true) + res.ApplyImmediately = true if r.ko.Spec.AuthToken != nil { tmpSecret, err := rm.rr.SecretValueFromReference(ctx, r.ko.Spec.AuthToken) if err != nil { return nil, ackrequeue.Needed(err) } if 
tmpSecret != "" { - res.SetAuthToken(tmpSecret) + res.AuthToken = aws.String(tmpSecret) } } if r.ko.Spec.AutoMinorVersionUpgrade != nil { - res.SetAutoMinorVersionUpgrade(*r.ko.Spec.AutoMinorVersionUpgrade) + res.AutoMinorVersionUpgrade = r.ko.Spec.AutoMinorVersionUpgrade } if r.ko.Spec.CacheClusterID != nil { - res.SetCacheClusterId(*r.ko.Spec.CacheClusterID) + res.CacheClusterId = r.ko.Spec.CacheClusterID } if r.ko.Spec.CacheNodeType != nil { - res.SetCacheNodeType(*r.ko.Spec.CacheNodeType) + res.CacheNodeType = r.ko.Spec.CacheNodeType } if r.ko.Spec.CacheParameterGroupName != nil { - res.SetCacheParameterGroupName(*r.ko.Spec.CacheParameterGroupName) + res.CacheParameterGroupName = r.ko.Spec.CacheParameterGroupName } if r.ko.Spec.CacheSecurityGroupNames != nil { - f9 := []*string{} - for _, f9iter := range r.ko.Spec.CacheSecurityGroupNames { - var f9elem string - f9elem = *f9iter - f9 = append(f9, &f9elem) - } - res.SetCacheSecurityGroupNames(f9) + res.CacheSecurityGroupNames = aws.ToStringSlice(r.ko.Spec.CacheSecurityGroupNames) + } + if r.ko.Spec.Engine != nil { + res.Engine = r.ko.Spec.Engine } if r.ko.Spec.EngineVersion != nil { - res.SetEngineVersion(*r.ko.Spec.EngineVersion) + res.EngineVersion = r.ko.Spec.EngineVersion } if r.ko.Spec.IPDiscovery != nil { - res.SetIpDiscovery(*r.ko.Spec.IPDiscovery) + res.IpDiscovery = svcsdktypes.IpDiscovery(*r.ko.Spec.IPDiscovery) } if r.ko.Spec.LogDeliveryConfigurations != nil { - f12 := []*svcsdk.LogDeliveryConfigurationRequest{} - for _, f12iter := range r.ko.Spec.LogDeliveryConfigurations { - f12elem := &svcsdk.LogDeliveryConfigurationRequest{} - if f12iter.DestinationDetails != nil { - f12elemf0 := &svcsdk.DestinationDetails{} - if f12iter.DestinationDetails.CloudWatchLogsDetails != nil { - f12elemf0f0 := &svcsdk.CloudWatchLogsDestinationDetails{} - if f12iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f12elemf0f0.SetLogGroup(*f12iter.DestinationDetails.CloudWatchLogsDetails.LogGroup) + f13 := 
[]svcsdktypes.LogDeliveryConfigurationRequest{} + for _, f13iter := range r.ko.Spec.LogDeliveryConfigurations { + f13elem := &svcsdktypes.LogDeliveryConfigurationRequest{} + if f13iter.DestinationDetails != nil { + f13elemf0 := &svcsdktypes.DestinationDetails{} + if f13iter.DestinationDetails.CloudWatchLogsDetails != nil { + f13elemf0f0 := &svcsdktypes.CloudWatchLogsDestinationDetails{} + if f13iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f13elemf0f0.LogGroup = f13iter.DestinationDetails.CloudWatchLogsDetails.LogGroup } - f12elemf0.SetCloudWatchLogsDetails(f12elemf0f0) + f13elemf0.CloudWatchLogsDetails = f13elemf0f0 } - if f12iter.DestinationDetails.KinesisFirehoseDetails != nil { - f12elemf0f1 := &svcsdk.KinesisFirehoseDestinationDetails{} - if f12iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f12elemf0f1.SetDeliveryStream(*f12iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream) + if f13iter.DestinationDetails.KinesisFirehoseDetails != nil { + f13elemf0f1 := &svcsdktypes.KinesisFirehoseDestinationDetails{} + if f13iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f13elemf0f1.DeliveryStream = f13iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream } - f12elemf0.SetKinesisFirehoseDetails(f12elemf0f1) + f13elemf0.KinesisFirehoseDetails = f13elemf0f1 } - f12elem.SetDestinationDetails(f12elemf0) + f13elem.DestinationDetails = f13elemf0 } - if f12iter.DestinationType != nil { - f12elem.SetDestinationType(*f12iter.DestinationType) + if f13iter.DestinationType != nil { + f13elem.DestinationType = svcsdktypes.DestinationType(*f13iter.DestinationType) } - if f12iter.Enabled != nil { - f12elem.SetEnabled(*f12iter.Enabled) + if f13iter.Enabled != nil { + f13elem.Enabled = f13iter.Enabled } - if f12iter.LogFormat != nil { - f12elem.SetLogFormat(*f12iter.LogFormat) + if f13iter.LogFormat != nil { + f13elem.LogFormat = svcsdktypes.LogFormat(*f13iter.LogFormat) } - if f12iter.LogType != nil { 
- f12elem.SetLogType(*f12iter.LogType) + if f13iter.LogType != nil { + f13elem.LogType = svcsdktypes.LogType(*f13iter.LogType) } - f12 = append(f12, f12elem) + f13 = append(f13, *f13elem) } - res.SetLogDeliveryConfigurations(f12) + res.LogDeliveryConfigurations = f13 } if r.ko.Spec.NotificationTopicARN != nil { - res.SetNotificationTopicArn(*r.ko.Spec.NotificationTopicARN) + res.NotificationTopicArn = r.ko.Spec.NotificationTopicARN } if r.ko.Spec.NumCacheNodes != nil { - res.SetNumCacheNodes(*r.ko.Spec.NumCacheNodes) + numCacheNodesCopy0 := *r.ko.Spec.NumCacheNodes + if numCacheNodesCopy0 > math.MaxInt32 || numCacheNodesCopy0 < math.MinInt32 { + return nil, fmt.Errorf("error: field NumCacheNodes is of type int32") + } + numCacheNodesCopy := int32(numCacheNodesCopy0) + res.NumCacheNodes = &numCacheNodesCopy } if r.ko.Spec.PreferredMaintenanceWindow != nil { - res.SetPreferredMaintenanceWindow(*r.ko.Spec.PreferredMaintenanceWindow) + res.PreferredMaintenanceWindow = r.ko.Spec.PreferredMaintenanceWindow } if r.ko.Spec.SecurityGroupIDs != nil { - f18 := []*string{} - for _, f18iter := range r.ko.Spec.SecurityGroupIDs { - var f18elem string - f18elem = *f18iter - f18 = append(f18, &f18elem) - } - res.SetSecurityGroupIds(f18) + res.SecurityGroupIds = aws.ToStringSlice(r.ko.Spec.SecurityGroupIDs) } if r.ko.Spec.SnapshotRetentionLimit != nil { - res.SetSnapshotRetentionLimit(*r.ko.Spec.SnapshotRetentionLimit) + snapshotRetentionLimitCopy0 := *r.ko.Spec.SnapshotRetentionLimit + if snapshotRetentionLimitCopy0 > math.MaxInt32 || snapshotRetentionLimitCopy0 < math.MinInt32 { + return nil, fmt.Errorf("error: field SnapshotRetentionLimit is of type int32") + } + snapshotRetentionLimitCopy := int32(snapshotRetentionLimitCopy0) + res.SnapshotRetentionLimit = &snapshotRetentionLimitCopy } if r.ko.Spec.SnapshotWindow != nil { - res.SetSnapshotWindow(*r.ko.Spec.SnapshotWindow) + res.SnapshotWindow = r.ko.Spec.SnapshotWindow } return res, nil @@ -1446,7 +1415,7 @@ func (rm 
*resourceManager) sdkDelete( } var resp *svcsdk.DeleteCacheClusterOutput _ = resp - resp, err = rm.sdkapi.DeleteCacheClusterWithContext(ctx, input) + resp, err = rm.sdkapi.DeleteCacheCluster(ctx, input) rm.metrics.RecordAPICall("DELETE", "DeleteCacheCluster", err) return nil, err } @@ -1459,7 +1428,7 @@ func (rm *resourceManager) newDeleteRequestPayload( res := &svcsdk.DeleteCacheClusterInput{} if r.ko.Spec.CacheClusterID != nil { - res.SetCacheClusterId(*r.ko.Spec.CacheClusterID) + res.CacheClusterId = r.ko.Spec.CacheClusterID } return res, nil @@ -1567,11 +1536,12 @@ func (rm *resourceManager) terminalAWSError(err error) bool { if err == nil { return false } - awsErr, ok := ackerr.AWSError(err) - if !ok { + + var terminalErr smithy.APIError + if !errors.As(err, &terminalErr) { return false } - switch awsErr.Code() { + switch terminalErr.ErrorCode() { case "ReplicationGroupNotFoundFault", "InvalidReplicationGroupStateFault", "CacheClusterAlreadyExistsFault", diff --git a/pkg/resource/cache_parameter_group/custom_api.go b/pkg/resource/cache_parameter_group/custom_api.go index 4919560e..3ce6b77e 100644 --- a/pkg/resource/cache_parameter_group/custom_api.go +++ b/pkg/resource/cache_parameter_group/custom_api.go @@ -15,6 +15,7 @@ package cache_parameter_group import ( "context" + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" diff --git a/pkg/resource/cache_parameter_group/custom_set_output.go b/pkg/resource/cache_parameter_group/custom_set_output.go index 76a3bf96..d41e2efd 100644 --- a/pkg/resource/cache_parameter_group/custom_set_output.go +++ b/pkg/resource/cache_parameter_group/custom_set_output.go @@ -15,6 +15,7 @@ package cache_parameter_group import ( "context" + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" ackv1alpha1 
"github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" svcsdk "github.com/aws/aws-sdk-go/service/elasticache" diff --git a/pkg/resource/cache_parameter_group/custom_update_api.go b/pkg/resource/cache_parameter_group/custom_update_api.go index fdb91f14..dbf99b65 100644 --- a/pkg/resource/cache_parameter_group/custom_update_api.go +++ b/pkg/resource/cache_parameter_group/custom_update_api.go @@ -15,6 +15,7 @@ package cache_parameter_group import ( "context" + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" ) diff --git a/pkg/resource/cache_parameter_group/descriptor.go b/pkg/resource/cache_parameter_group/descriptor.go index f2e6c1c5..95307743 100644 --- a/pkg/resource/cache_parameter_group/descriptor.go +++ b/pkg/resource/cache_parameter_group/descriptor.go @@ -28,7 +28,7 @@ import ( ) const ( - finalizerString = "finalizers.elasticache.services.k8s.aws/CacheParameterGroup" + FinalizerString = "finalizers.elasticache.services.k8s.aws/CacheParameterGroup" ) var ( @@ -88,8 +88,8 @@ func (d *resourceDescriptor) IsManaged( // https://github.com/kubernetes-sigs/controller-runtime/issues/994 is // fixed. This should be able to be: // - // return k8sctrlutil.ContainsFinalizer(obj, finalizerString) - return containsFinalizer(obj, finalizerString) + // return k8sctrlutil.ContainsFinalizer(obj, FinalizerString) + return containsFinalizer(obj, FinalizerString) } // Remove once https://github.com/kubernetes-sigs/controller-runtime/issues/994 @@ -118,7 +118,7 @@ func (d *resourceDescriptor) MarkManaged( // Should not happen. If it does, there is a bug in the code panic("nil RuntimeMetaObject in AWSResource") } - k8sctrlutil.AddFinalizer(obj, finalizerString) + k8sctrlutil.AddFinalizer(obj, FinalizerString) } // MarkUnmanaged removes the supplied resource from management by ACK. What @@ -133,7 +133,7 @@ func (d *resourceDescriptor) MarkUnmanaged( // Should not happen. 
If it does, there is a bug in the code panic("nil RuntimeMetaObject in AWSResource") } - k8sctrlutil.RemoveFinalizer(obj, finalizerString) + k8sctrlutil.RemoveFinalizer(obj, FinalizerString) } // MarkAdopted places descriptors on the custom resource that indicate the diff --git a/pkg/resource/cache_parameter_group/manager.go b/pkg/resource/cache_parameter_group/manager.go index 7f96de9e..07da86e0 100644 --- a/pkg/resource/cache_parameter_group/manager.go +++ b/pkg/resource/cache_parameter_group/manager.go @@ -32,9 +32,8 @@ import ( acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" ackutil "github.com/aws-controllers-k8s/runtime/pkg/util" - "github.com/aws/aws-sdk-go/aws/session" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" - svcsdkapi "github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" @@ -59,6 +58,9 @@ type resourceManager struct { // cfg is a copy of the ackcfg.Config object passed on start of the service // controller cfg ackcfg.Config + // clientcfg is a copy of the client configuration passed on start of the + // service controller + clientcfg aws.Config // log refers to the logr.Logger object handling logging for the service // controller log logr.Logger @@ -73,12 +75,9 @@ type resourceManager struct { awsAccountID ackv1alpha1.AWSAccountID // The AWS Region that this resource manager targets awsRegion ackv1alpha1.AWSRegion - // sess is the AWS SDK Session object used to communicate with the backend - // AWS service API - sess *session.Session - // sdk is a pointer to the AWS service API interface exposed by the - // aws-sdk-go/services/{alias}/{alias}iface package. - sdkapi svcsdkapi.ElastiCacheAPI + // sdk is a pointer to the AWS service API client exposed by the + // aws-sdk-go-v2/services/{alias} package. 
+ sdkapi *svcsdk.Client } // concreteResource returns a pointer to a resource from the supplied @@ -299,24 +298,25 @@ func (rm *resourceManager) EnsureTags( // newResourceManager returns a new struct implementing // acktypes.AWSResourceManager +// This is for AWS-SDK-GO-V2 - Created newResourceManager With AWS sdk-Go-ClientV2 func newResourceManager( cfg ackcfg.Config, + clientcfg aws.Config, log logr.Logger, metrics *ackmetrics.Metrics, rr acktypes.Reconciler, - sess *session.Session, id ackv1alpha1.AWSAccountID, region ackv1alpha1.AWSRegion, ) (*resourceManager, error) { return &resourceManager{ cfg: cfg, + clientcfg: clientcfg, log: log, metrics: metrics, rr: rr, awsAccountID: id, awsRegion: region, - sess: sess, - sdkapi: svcsdk.New(sess), + sdkapi: svcsdk.NewFromConfig(clientcfg), }, nil } diff --git a/pkg/resource/cache_parameter_group/manager_factory.go b/pkg/resource/cache_parameter_group/manager_factory.go index 76549b75..2eaca521 100644 --- a/pkg/resource/cache_parameter_group/manager_factory.go +++ b/pkg/resource/cache_parameter_group/manager_factory.go @@ -23,7 +23,7 @@ import ( ackcfg "github.com/aws-controllers-k8s/runtime/pkg/config" ackmetrics "github.com/aws-controllers-k8s/runtime/pkg/metrics" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" - "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/go-logr/logr" svcresource "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource" @@ -47,14 +47,18 @@ func (f *resourceManagerFactory) ResourceDescriptor() acktypes.AWSResourceDescri // supplied AWS account func (f *resourceManagerFactory) ManagerFor( cfg ackcfg.Config, + clientcfg aws.Config, log logr.Logger, metrics *ackmetrics.Metrics, rr acktypes.Reconciler, - sess *session.Session, id ackv1alpha1.AWSAccountID, region ackv1alpha1.AWSRegion, + roleARN ackv1alpha1.AWSResourceName, ) (acktypes.AWSResourceManager, error) { - rmId := fmt.Sprintf("%s/%s", id, region) + // We use the account ID, 
region, and role ARN to uniquely identify a + // resource manager. This helps us to avoid creating multiple resource + // managers for the same account/region/roleARN combination. + rmId := fmt.Sprintf("%s/%s/%s", id, region, roleARN) f.RLock() rm, found := f.rmCache[rmId] f.RUnlock() @@ -66,7 +70,7 @@ func (f *resourceManagerFactory) ManagerFor( f.Lock() defer f.Unlock() - rm, err := newResourceManager(cfg, log, metrics, rr, sess, id, region) + rm, err := newResourceManager(cfg, clientcfg, log, metrics, rr, id, region) if err != nil { return nil, err } diff --git a/pkg/resource/cache_parameter_group/references.go b/pkg/resource/cache_parameter_group/references.go index 49ddd54d..727b5ded 100644 --- a/pkg/resource/cache_parameter_group/references.go +++ b/pkg/resource/cache_parameter_group/references.go @@ -17,6 +17,7 @@ package cache_parameter_group import ( "context" + "sigs.k8s.io/controller-runtime/pkg/client" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" diff --git a/pkg/resource/cache_parameter_group/resource.go b/pkg/resource/cache_parameter_group/resource.go index 326ec41f..3f7b0c8d 100644 --- a/pkg/resource/cache_parameter_group/resource.go +++ b/pkg/resource/cache_parameter_group/resource.go @@ -93,6 +93,17 @@ func (r *resource) SetIdentifiers(identifier *ackv1alpha1.AWSIdentifiers) error return nil } +// PopulateResourceFromAnnotation populates the fields passed from adoption annotation +func (r *resource) PopulateResourceFromAnnotation(fields map[string]string) error { + tmp, ok := fields["cacheParameterGroupName"] + if !ok { + return ackerrors.MissingNameIdentifier + } + r.ko.Spec.CacheParameterGroupName = &tmp + + return nil +} + // DeepCopy will return a copy of the resource func (r *resource) DeepCopy() acktypes.AWSResource { koCopy := r.ko.DeepCopy() diff --git a/pkg/resource/cache_parameter_group/sdk.go b/pkg/resource/cache_parameter_group/sdk.go index d263271a..a76d6ba6 100644 --- a/pkg/resource/cache_parameter_group/sdk.go +++ 
b/pkg/resource/cache_parameter_group/sdk.go @@ -28,8 +28,10 @@ import ( ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" - "github.com/aws/aws-sdk-go/aws" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + svcsdktypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" + smithy "github.com/aws/smithy-go" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -40,8 +42,7 @@ import ( var ( _ = &metav1.Time{} _ = strings.ToLower("") - _ = &aws.JSONValue{} - _ = &svcsdk.ElastiCache{} + _ = &svcsdk.Client{} _ = &svcapitypes.CacheParameterGroup{} _ = ackv1alpha1.AWSAccountID("") _ = &ackerr.NotFound @@ -49,6 +50,7 @@ var ( _ = &reflect.Value{} _ = fmt.Sprintf("") _ = &ackrequeue.NoRequeue{} + _ = &aws.Config{} ) // sdkFind returns SDK-specific information about a supplied resource @@ -73,10 +75,11 @@ func (rm *resourceManager) sdkFind( return nil, err } var resp *svcsdk.DescribeCacheParameterGroupsOutput - resp, err = rm.sdkapi.DescribeCacheParameterGroupsWithContext(ctx, input) + resp, err = rm.sdkapi.DescribeCacheParameterGroups(ctx, input) rm.metrics.RecordAPICall("READ_MANY", "DescribeCacheParameterGroups", err) if err != nil { - if awsErr, ok := ackerr.AWSError(err); ok && awsErr.Code() == "CacheParameterGroupNotFound" { + var awsErr smithy.APIError + if errors.As(err, &awsErr) && awsErr.ErrorCode() == "CacheParameterGroupNotFound" { return nil, ackerr.NotFound } return nil, err @@ -149,7 +152,7 @@ func (rm *resourceManager) newListRequestPayload( res := &svcsdk.DescribeCacheParameterGroupsInput{} if r.ko.Spec.CacheParameterGroupName != nil { - res.SetCacheParameterGroupName(*r.ko.Spec.CacheParameterGroupName) + res.CacheParameterGroupName = r.ko.Spec.CacheParameterGroupName } return res, nil @@ 
-174,7 +177,7 @@ func (rm *resourceManager) sdkCreate( var resp *svcsdk.CreateCacheParameterGroupOutput _ = resp - resp, err = rm.sdkapi.CreateCacheParameterGroupWithContext(ctx, input) + resp, err = rm.sdkapi.CreateCacheParameterGroup(ctx, input) rm.metrics.RecordAPICall("CREATE", "CreateCacheParameterGroup", err) if err != nil { return nil, err @@ -229,27 +232,27 @@ func (rm *resourceManager) newCreateRequestPayload( res := &svcsdk.CreateCacheParameterGroupInput{} if r.ko.Spec.CacheParameterGroupFamily != nil { - res.SetCacheParameterGroupFamily(*r.ko.Spec.CacheParameterGroupFamily) + res.CacheParameterGroupFamily = r.ko.Spec.CacheParameterGroupFamily } if r.ko.Spec.CacheParameterGroupName != nil { - res.SetCacheParameterGroupName(*r.ko.Spec.CacheParameterGroupName) + res.CacheParameterGroupName = r.ko.Spec.CacheParameterGroupName } if r.ko.Spec.Description != nil { - res.SetDescription(*r.ko.Spec.Description) + res.Description = r.ko.Spec.Description } if r.ko.Spec.Tags != nil { - f3 := []*svcsdk.Tag{} + f3 := []svcsdktypes.Tag{} for _, f3iter := range r.ko.Spec.Tags { - f3elem := &svcsdk.Tag{} + f3elem := &svcsdktypes.Tag{} if f3iter.Key != nil { - f3elem.SetKey(*f3iter.Key) + f3elem.Key = f3iter.Key } if f3iter.Value != nil { - f3elem.SetValue(*f3iter.Value) + f3elem.Value = f3iter.Value } - f3 = append(f3, f3elem) + f3 = append(f3, *f3elem) } - res.SetTags(f3) + res.Tags = f3 } return res, nil @@ -282,7 +285,7 @@ func (rm *resourceManager) sdkDelete( } var resp *svcsdk.DeleteCacheParameterGroupOutput _ = resp - resp, err = rm.sdkapi.DeleteCacheParameterGroupWithContext(ctx, input) + resp, err = rm.sdkapi.DeleteCacheParameterGroup(ctx, input) rm.metrics.RecordAPICall("DELETE", "DeleteCacheParameterGroup", err) return nil, err } @@ -295,7 +298,7 @@ func (rm *resourceManager) newDeleteRequestPayload( res := &svcsdk.DeleteCacheParameterGroupInput{} if r.ko.Spec.CacheParameterGroupName != nil { - res.SetCacheParameterGroupName(*r.ko.Spec.CacheParameterGroupName) + 
res.CacheParameterGroupName = r.ko.Spec.CacheParameterGroupName } return res, nil @@ -403,11 +406,12 @@ func (rm *resourceManager) terminalAWSError(err error) bool { if err == nil { return false } - awsErr, ok := ackerr.AWSError(err) - if !ok { + + var terminalErr smithy.APIError + if !errors.As(err, &terminalErr) { return false } - switch awsErr.Code() { + switch terminalErr.ErrorCode() { case "CacheParameterGroupAlreadyExists", "CacheParameterGroupQuotaExceeded", "InvalidCacheParameterGroupState", diff --git a/pkg/resource/cache_subnet_group/custom_set_output.go b/pkg/resource/cache_subnet_group/custom_set_output.go index 96ae1900..79bdbb93 100644 --- a/pkg/resource/cache_subnet_group/custom_set_output.go +++ b/pkg/resource/cache_subnet_group/custom_set_output.go @@ -15,6 +15,7 @@ package cache_subnet_group import ( "context" + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" "github.com/aws/aws-sdk-go/service/elasticache" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/pkg/resource/cache_subnet_group/descriptor.go b/pkg/resource/cache_subnet_group/descriptor.go index 9f059608..7ffe0f4f 100644 --- a/pkg/resource/cache_subnet_group/descriptor.go +++ b/pkg/resource/cache_subnet_group/descriptor.go @@ -28,7 +28,7 @@ import ( ) const ( - finalizerString = "finalizers.elasticache.services.k8s.aws/CacheSubnetGroup" + FinalizerString = "finalizers.elasticache.services.k8s.aws/CacheSubnetGroup" ) var ( @@ -88,8 +88,8 @@ func (d *resourceDescriptor) IsManaged( // https://github.com/kubernetes-sigs/controller-runtime/issues/994 is // fixed. 
This should be able to be: // - // return k8sctrlutil.ContainsFinalizer(obj, finalizerString) - return containsFinalizer(obj, finalizerString) + // return k8sctrlutil.ContainsFinalizer(obj, FinalizerString) + return containsFinalizer(obj, FinalizerString) } // Remove once https://github.com/kubernetes-sigs/controller-runtime/issues/994 @@ -118,7 +118,7 @@ func (d *resourceDescriptor) MarkManaged( // Should not happen. If it does, there is a bug in the code panic("nil RuntimeMetaObject in AWSResource") } - k8sctrlutil.AddFinalizer(obj, finalizerString) + k8sctrlutil.AddFinalizer(obj, FinalizerString) } // MarkUnmanaged removes the supplied resource from management by ACK. What @@ -133,7 +133,7 @@ func (d *resourceDescriptor) MarkUnmanaged( // Should not happen. If it does, there is a bug in the code panic("nil RuntimeMetaObject in AWSResource") } - k8sctrlutil.RemoveFinalizer(obj, finalizerString) + k8sctrlutil.RemoveFinalizer(obj, FinalizerString) } // MarkAdopted places descriptors on the custom resource that indicate the diff --git a/pkg/resource/cache_subnet_group/manager.go b/pkg/resource/cache_subnet_group/manager.go index ae0ce3f2..9100556d 100644 --- a/pkg/resource/cache_subnet_group/manager.go +++ b/pkg/resource/cache_subnet_group/manager.go @@ -32,9 +32,8 @@ import ( acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" ackutil "github.com/aws-controllers-k8s/runtime/pkg/util" - "github.com/aws/aws-sdk-go/aws/session" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" - svcsdkapi "github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" @@ -59,6 +58,9 @@ type resourceManager struct { // cfg is a copy of the ackcfg.Config object passed on start of the service // controller cfg ackcfg.Config + // clientcfg is a copy of the client 
configuration passed on start of the + // service controller + clientcfg aws.Config // log refers to the logr.Logger object handling logging for the service // controller log logr.Logger @@ -73,12 +75,9 @@ type resourceManager struct { awsAccountID ackv1alpha1.AWSAccountID // The AWS Region that this resource manager targets awsRegion ackv1alpha1.AWSRegion - // sess is the AWS SDK Session object used to communicate with the backend - // AWS service API - sess *session.Session - // sdk is a pointer to the AWS service API interface exposed by the - // aws-sdk-go/services/{alias}/{alias}iface package. - sdkapi svcsdkapi.ElastiCacheAPI + // sdk is a pointer to the AWS service API client exposed by the + // aws-sdk-go-v2/services/{alias} package. + sdkapi *svcsdk.Client } // concreteResource returns a pointer to a resource from the supplied @@ -299,24 +298,25 @@ func (rm *resourceManager) EnsureTags( // newResourceManager returns a new struct implementing // acktypes.AWSResourceManager +// This is for AWS-SDK-GO-V2 - Created newResourceManager With AWS sdk-Go-ClientV2 func newResourceManager( cfg ackcfg.Config, + clientcfg aws.Config, log logr.Logger, metrics *ackmetrics.Metrics, rr acktypes.Reconciler, - sess *session.Session, id ackv1alpha1.AWSAccountID, region ackv1alpha1.AWSRegion, ) (*resourceManager, error) { return &resourceManager{ cfg: cfg, + clientcfg: clientcfg, log: log, metrics: metrics, rr: rr, awsAccountID: id, awsRegion: region, - sess: sess, - sdkapi: svcsdk.New(sess), + sdkapi: svcsdk.NewFromConfig(clientcfg), }, nil } diff --git a/pkg/resource/cache_subnet_group/manager_factory.go b/pkg/resource/cache_subnet_group/manager_factory.go index 9296270e..016ed5cb 100644 --- a/pkg/resource/cache_subnet_group/manager_factory.go +++ b/pkg/resource/cache_subnet_group/manager_factory.go @@ -23,7 +23,7 @@ import ( ackcfg "github.com/aws-controllers-k8s/runtime/pkg/config" ackmetrics "github.com/aws-controllers-k8s/runtime/pkg/metrics" acktypes 
"github.com/aws-controllers-k8s/runtime/pkg/types" - "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/go-logr/logr" svcresource "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource" @@ -47,14 +47,18 @@ func (f *resourceManagerFactory) ResourceDescriptor() acktypes.AWSResourceDescri // supplied AWS account func (f *resourceManagerFactory) ManagerFor( cfg ackcfg.Config, + clientcfg aws.Config, log logr.Logger, metrics *ackmetrics.Metrics, rr acktypes.Reconciler, - sess *session.Session, id ackv1alpha1.AWSAccountID, region ackv1alpha1.AWSRegion, + roleARN ackv1alpha1.AWSResourceName, ) (acktypes.AWSResourceManager, error) { - rmId := fmt.Sprintf("%s/%s", id, region) + // We use the account ID, region, and role ARN to uniquely identify a + // resource manager. This helps us to avoid creating multiple resource + // managers for the same account/region/roleARN combination. + rmId := fmt.Sprintf("%s/%s/%s", id, region, roleARN) f.RLock() rm, found := f.rmCache[rmId] f.RUnlock() @@ -66,7 +70,7 @@ func (f *resourceManagerFactory) ManagerFor( f.Lock() defer f.Unlock() - rm, err := newResourceManager(cfg, log, metrics, rr, sess, id, region) + rm, err := newResourceManager(cfg, clientcfg, log, metrics, rr, id, region) if err != nil { return nil, err } diff --git a/pkg/resource/cache_subnet_group/references.go b/pkg/resource/cache_subnet_group/references.go index bc719a34..70fa70ff 100644 --- a/pkg/resource/cache_subnet_group/references.go +++ b/pkg/resource/cache_subnet_group/references.go @@ -60,12 +60,11 @@ func (rm *resourceManager) ResolveReferences( apiReader client.Reader, res acktypes.AWSResource, ) (acktypes.AWSResource, bool, error) { - namespace := res.MetaObject().GetNamespace() ko := rm.concreteResource(res).ko resourceHasReferences := false err := validateReferenceFields(ko) - if fieldHasReferences, err := rm.resolveReferenceForSubnetIDs(ctx, apiReader, namespace, ko); err != nil { + if fieldHasReferences, err 
:= rm.resolveReferenceForSubnetIDs(ctx, apiReader, ko); err != nil { return &resource{ko}, (resourceHasReferences || fieldHasReferences), err } else { resourceHasReferences = resourceHasReferences || fieldHasReferences @@ -94,7 +93,6 @@ func validateReferenceFields(ko *svcapitypes.CacheSubnetGroup) error { func (rm *resourceManager) resolveReferenceForSubnetIDs( ctx context.Context, apiReader client.Reader, - namespace string, ko *svcapitypes.CacheSubnetGroup, ) (hasReferences bool, err error) { for _, f0iter := range ko.Spec.SubnetRefs { @@ -104,6 +102,10 @@ func (rm *resourceManager) resolveReferenceForSubnetIDs( if arr.Name == nil || *arr.Name == "" { return hasReferences, fmt.Errorf("provided resource reference is nil or empty: SubnetRefs") } + namespace := ko.ObjectMeta.GetNamespace() + if arr.Namespace != nil && *arr.Namespace != "" { + namespace = *arr.Namespace + } obj := &ec2apitypes.Subnet{} if err := getReferencedResourceState_Subnet(ctx, apiReader, obj, *arr.Name, namespace); err != nil { return hasReferences, err @@ -137,12 +139,8 @@ func getReferencedResourceState_Subnet( if err != nil { return err } - var refResourceSynced, refResourceTerminal bool + var refResourceTerminal bool for _, cond := range obj.Status.Conditions { - if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && - cond.Status == corev1.ConditionTrue { - refResourceSynced = true - } if cond.Type == ackv1alpha1.ConditionTypeTerminal && cond.Status == corev1.ConditionTrue { return ackerr.ResourceReferenceTerminalFor( @@ -155,6 +153,13 @@ func getReferencedResourceState_Subnet( "Subnet", namespace, name) } + var refResourceSynced bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && + cond.Status == corev1.ConditionTrue { + refResourceSynced = true + } + } if !refResourceSynced { return ackerr.ResourceReferenceNotSyncedFor( "Subnet", diff --git a/pkg/resource/cache_subnet_group/resource.go 
b/pkg/resource/cache_subnet_group/resource.go index 220e95cd..6cd55030 100644 --- a/pkg/resource/cache_subnet_group/resource.go +++ b/pkg/resource/cache_subnet_group/resource.go @@ -93,6 +93,17 @@ func (r *resource) SetIdentifiers(identifier *ackv1alpha1.AWSIdentifiers) error return nil } +// PopulateResourceFromAnnotation populates the fields passed from adoption annotation +func (r *resource) PopulateResourceFromAnnotation(fields map[string]string) error { + tmp, ok := fields["cacheSubnetGroupName"] + if !ok { + return ackerrors.MissingNameIdentifier + } + r.ko.Spec.CacheSubnetGroupName = &tmp + + return nil +} + // DeepCopy will return a copy of the resource func (r *resource) DeepCopy() acktypes.AWSResource { koCopy := r.ko.DeepCopy() diff --git a/pkg/resource/cache_subnet_group/sdk.go b/pkg/resource/cache_subnet_group/sdk.go index 63a914f5..9acdd724 100644 --- a/pkg/resource/cache_subnet_group/sdk.go +++ b/pkg/resource/cache_subnet_group/sdk.go @@ -28,8 +28,10 @@ import ( ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" - "github.com/aws/aws-sdk-go/aws" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + svcsdktypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" + smithy "github.com/aws/smithy-go" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -40,8 +42,7 @@ import ( var ( _ = &metav1.Time{} _ = strings.ToLower("") - _ = &aws.JSONValue{} - _ = &svcsdk.ElastiCache{} + _ = &svcsdk.Client{} _ = &svcapitypes.CacheSubnetGroup{} _ = ackv1alpha1.AWSAccountID("") _ = &ackerr.NotFound @@ -49,6 +50,7 @@ var ( _ = &reflect.Value{} _ = fmt.Sprintf("") _ = &ackrequeue.NoRequeue{} + _ = &aws.Config{} ) // sdkFind returns SDK-specific information about a supplied resource @@ -73,10 +75,11 @@ func 
(rm *resourceManager) sdkFind( return nil, err } var resp *svcsdk.DescribeCacheSubnetGroupsOutput - resp, err = rm.sdkapi.DescribeCacheSubnetGroupsWithContext(ctx, input) + resp, err = rm.sdkapi.DescribeCacheSubnetGroups(ctx, input) rm.metrics.RecordAPICall("READ_MANY", "DescribeCacheSubnetGroups", err) if err != nil { - if awsErr, ok := ackerr.AWSError(err); ok && awsErr.Code() == "CacheSubnetGroupNotFoundFault" { + var awsErr smithy.APIError + if errors.As(err, &awsErr) && awsErr.ErrorCode() == "CacheSubnetGroupNotFoundFault" { return nil, ackerr.NotFound } return nil, err @@ -126,12 +129,32 @@ func (rm *resourceManager) sdkFind( } f3elem.SubnetOutpost = f3elemf2 } + if f3iter.SupportedNetworkTypes != nil { + f3elemf3 := []*string{} + for _, f3elemf3iter := range f3iter.SupportedNetworkTypes { + var f3elemf3elem *string + f3elemf3elem = aws.String(string(f3elemf3iter)) + f3elemf3 = append(f3elemf3, f3elemf3elem) + } + f3elem.SupportedNetworkTypes = f3elemf3 + } f3 = append(f3, f3elem) } ko.Status.Subnets = f3 } else { ko.Status.Subnets = nil } + if elem.SupportedNetworkTypes != nil { + f4 := []*string{} + for _, f4iter := range elem.SupportedNetworkTypes { + var f4elem *string + f4elem = aws.String(string(f4iter)) + f4 = append(f4, f4elem) + } + ko.Status.SupportedNetworkTypes = f4 + } else { + ko.Status.SupportedNetworkTypes = nil + } if elem.VpcId != nil { ko.Status.VPCID = elem.VpcId } else { @@ -180,7 +203,7 @@ func (rm *resourceManager) newListRequestPayload( res := &svcsdk.DescribeCacheSubnetGroupsInput{} if r.ko.Spec.CacheSubnetGroupName != nil { - res.SetCacheSubnetGroupName(*r.ko.Spec.CacheSubnetGroupName) + res.CacheSubnetGroupName = r.ko.Spec.CacheSubnetGroupName } return res, nil @@ -205,7 +228,7 @@ func (rm *resourceManager) sdkCreate( var resp *svcsdk.CreateCacheSubnetGroupOutput _ = resp - resp, err = rm.sdkapi.CreateCacheSubnetGroupWithContext(ctx, input) + resp, err = rm.sdkapi.CreateCacheSubnetGroup(ctx, input) rm.metrics.RecordAPICall("CREATE", 
"CreateCacheSubnetGroup", err) if err != nil { return nil, err @@ -252,12 +275,32 @@ func (rm *resourceManager) sdkCreate( } f3elem.SubnetOutpost = f3elemf2 } + if f3iter.SupportedNetworkTypes != nil { + f3elemf3 := []*string{} + for _, f3elemf3iter := range f3iter.SupportedNetworkTypes { + var f3elemf3elem *string + f3elemf3elem = aws.String(string(f3elemf3iter)) + f3elemf3 = append(f3elemf3, f3elemf3elem) + } + f3elem.SupportedNetworkTypes = f3elemf3 + } f3 = append(f3, f3elem) } ko.Status.Subnets = f3 } else { ko.Status.Subnets = nil } + if resp.CacheSubnetGroup.SupportedNetworkTypes != nil { + f4 := []*string{} + for _, f4iter := range resp.CacheSubnetGroup.SupportedNetworkTypes { + var f4elem *string + f4elem = aws.String(string(f4iter)) + f4 = append(f4, f4elem) + } + ko.Status.SupportedNetworkTypes = f4 + } else { + ko.Status.SupportedNetworkTypes = nil + } if resp.CacheSubnetGroup.VpcId != nil { ko.Status.VPCID = resp.CacheSubnetGroup.VpcId } else { @@ -277,33 +320,27 @@ func (rm *resourceManager) newCreateRequestPayload( res := &svcsdk.CreateCacheSubnetGroupInput{} if r.ko.Spec.CacheSubnetGroupDescription != nil { - res.SetCacheSubnetGroupDescription(*r.ko.Spec.CacheSubnetGroupDescription) + res.CacheSubnetGroupDescription = r.ko.Spec.CacheSubnetGroupDescription } if r.ko.Spec.CacheSubnetGroupName != nil { - res.SetCacheSubnetGroupName(*r.ko.Spec.CacheSubnetGroupName) + res.CacheSubnetGroupName = r.ko.Spec.CacheSubnetGroupName } if r.ko.Spec.SubnetIDs != nil { - f2 := []*string{} - for _, f2iter := range r.ko.Spec.SubnetIDs { - var f2elem string - f2elem = *f2iter - f2 = append(f2, &f2elem) - } - res.SetSubnetIds(f2) + res.SubnetIds = aws.ToStringSlice(r.ko.Spec.SubnetIDs) } if r.ko.Spec.Tags != nil { - f3 := []*svcsdk.Tag{} + f3 := []svcsdktypes.Tag{} for _, f3iter := range r.ko.Spec.Tags { - f3elem := &svcsdk.Tag{} + f3elem := &svcsdktypes.Tag{} if f3iter.Key != nil { - f3elem.SetKey(*f3iter.Key) + f3elem.Key = f3iter.Key } if f3iter.Value != nil { - 
f3elem.SetValue(*f3iter.Value) + f3elem.Value = f3iter.Value } - f3 = append(f3, f3elem) + f3 = append(f3, *f3elem) } - res.SetTags(f3) + res.Tags = f3 } return res, nil @@ -329,7 +366,7 @@ func (rm *resourceManager) sdkUpdate( var resp *svcsdk.ModifyCacheSubnetGroupOutput _ = resp - resp, err = rm.sdkapi.ModifyCacheSubnetGroupWithContext(ctx, input) + resp, err = rm.sdkapi.ModifyCacheSubnetGroup(ctx, input) rm.metrics.RecordAPICall("UPDATE", "ModifyCacheSubnetGroup", err) if err != nil { return nil, err @@ -376,12 +413,32 @@ func (rm *resourceManager) sdkUpdate( } f3elem.SubnetOutpost = f3elemf2 } + if f3iter.SupportedNetworkTypes != nil { + f3elemf3 := []*string{} + for _, f3elemf3iter := range f3iter.SupportedNetworkTypes { + var f3elemf3elem *string + f3elemf3elem = aws.String(string(f3elemf3iter)) + f3elemf3 = append(f3elemf3, f3elemf3elem) + } + f3elem.SupportedNetworkTypes = f3elemf3 + } f3 = append(f3, f3elem) } ko.Status.Subnets = f3 } else { ko.Status.Subnets = nil } + if resp.CacheSubnetGroup.SupportedNetworkTypes != nil { + f4 := []*string{} + for _, f4iter := range resp.CacheSubnetGroup.SupportedNetworkTypes { + var f4elem *string + f4elem = aws.String(string(f4iter)) + f4 = append(f4, f4elem) + } + ko.Status.SupportedNetworkTypes = f4 + } else { + ko.Status.SupportedNetworkTypes = nil + } if resp.CacheSubnetGroup.VpcId != nil { ko.Status.VPCID = resp.CacheSubnetGroup.VpcId } else { @@ -402,19 +459,13 @@ func (rm *resourceManager) newUpdateRequestPayload( res := &svcsdk.ModifyCacheSubnetGroupInput{} if r.ko.Spec.CacheSubnetGroupDescription != nil { - res.SetCacheSubnetGroupDescription(*r.ko.Spec.CacheSubnetGroupDescription) + res.CacheSubnetGroupDescription = r.ko.Spec.CacheSubnetGroupDescription } if r.ko.Spec.CacheSubnetGroupName != nil { - res.SetCacheSubnetGroupName(*r.ko.Spec.CacheSubnetGroupName) + res.CacheSubnetGroupName = r.ko.Spec.CacheSubnetGroupName } if r.ko.Spec.SubnetIDs != nil { - f2 := []*string{} - for _, f2iter := range 
r.ko.Spec.SubnetIDs { - var f2elem string - f2elem = *f2iter - f2 = append(f2, &f2elem) - } - res.SetSubnetIds(f2) + res.SubnetIds = aws.ToStringSlice(r.ko.Spec.SubnetIDs) } return res, nil @@ -436,7 +487,7 @@ func (rm *resourceManager) sdkDelete( } var resp *svcsdk.DeleteCacheSubnetGroupOutput _ = resp - resp, err = rm.sdkapi.DeleteCacheSubnetGroupWithContext(ctx, input) + resp, err = rm.sdkapi.DeleteCacheSubnetGroup(ctx, input) rm.metrics.RecordAPICall("DELETE", "DeleteCacheSubnetGroup", err) return nil, err } @@ -449,7 +500,7 @@ func (rm *resourceManager) newDeleteRequestPayload( res := &svcsdk.DeleteCacheSubnetGroupInput{} if r.ko.Spec.CacheSubnetGroupName != nil { - res.SetCacheSubnetGroupName(*r.ko.Spec.CacheSubnetGroupName) + res.CacheSubnetGroupName = r.ko.Spec.CacheSubnetGroupName } return res, nil @@ -557,11 +608,12 @@ func (rm *resourceManager) terminalAWSError(err error) bool { if err == nil { return false } - awsErr, ok := ackerr.AWSError(err) - if !ok { + + var terminalErr smithy.APIError + if !errors.As(err, &terminalErr) { return false } - switch awsErr.Code() { + switch terminalErr.ErrorCode() { case "CacheSubnetGroupQuotaExceeded", "CacheSubnetQuotaExceededFault", "SubnetInUse", diff --git a/pkg/resource/replication_group/custom_update_api.go b/pkg/resource/replication_group/custom_update_api.go index e4c92896..e07ce2da 100644 --- a/pkg/resource/replication_group/custom_update_api.go +++ b/pkg/resource/replication_group/custom_update_api.go @@ -17,13 +17,14 @@ import ( "context" "encoding/json" "fmt" - "github.com/aws-controllers-k8s/runtime/pkg/requeue" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/pkg/errors" "reflect" "sort" "strconv" + "github.com/aws-controllers-k8s/runtime/pkg/requeue" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/pkg/errors" + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" svcsdk 
"github.com/aws/aws-sdk-go/service/elasticache" diff --git a/pkg/resource/replication_group/custom_update_api_test.go b/pkg/resource/replication_group/custom_update_api_test.go deleted file mode 100644 index 619f3f82..00000000 --- a/pkg/resource/replication_group/custom_update_api_test.go +++ /dev/null @@ -1,1058 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package replication_group - -import ( - "context" - "fmt" - "path/filepath" - "strconv" - "testing" - - "github.com/aws-controllers-k8s/elasticache-controller/pkg/testutil" - ackmetrics "github.com/aws-controllers-k8s/runtime/pkg/metrics" - "github.com/aws-controllers-k8s/runtime/pkg/requeue" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/pkg/errors" - "github.com/stretchr/testify/mock" - "go.uber.org/zap/zapcore" - ctrlrtzap "sigs.k8s.io/controller-runtime/pkg/log/zap" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" - mocksvcsdkapi "github.com/aws-controllers-k8s/elasticache-controller/mocks/aws-sdk-go/elasticache" - ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" -) - -// Helper methods to setup tests -// provideResourceManager returns pointer to resourceManager -func provideResourceManager() *resourceManager { - return 
provideResourceManagerWithMockSDKAPI(&mocksvcsdkapi.ElastiCacheAPI{}) -} - -// provideResourceManagerWithMockSDKAPI accepts MockElastiCacheAPI and returns pointer to resourceManager -// the returned resourceManager is configured to use mockapi api. -func provideResourceManagerWithMockSDKAPI(mockElastiCacheAPI *mocksvcsdkapi.ElastiCacheAPI) *resourceManager { - zapOptions := ctrlrtzap.Options{ - Development: true, - Level: zapcore.InfoLevel, - } - fakeLogger := ctrlrtzap.New(ctrlrtzap.UseFlagOptions(&zapOptions)) - return &resourceManager{ - rr: nil, - awsAccountID: "", - awsRegion: "", - sess: nil, - sdkapi: mockElastiCacheAPI, - log: fakeLogger, - metrics: ackmetrics.NewMetrics("elasticache"), - } -} - -// provideResource returns pointer to resource -func provideResource() *resource { - return provideResourceWithStatus("available") -} - -// provideCacheCluster returns pointer to CacheCluster -func provideCacheCluster() *svcsdk.CacheCluster { - return &svcsdk.CacheCluster{} -} - -// provideResource returns pointer to resource -func provideResourceWithStatus(rgStatus string) *resource { - return &resource{ - ko: &svcapitypes.ReplicationGroup{ - Status: svcapitypes.ReplicationGroupStatus{ - Status: &rgStatus, - }, - }, - } -} - -// provideNodeGroups provides NodeGroups array for given node IDs -func provideNodeGroups(IDs ...string) []*svcapitypes.NodeGroup { - return provideNodeGroupsWithReplicas(3, IDs...) 
-} - -// provideMemberClusters returns the member cluster Ids from given node groups -func provideMemberClusters(nodeGroups []*svcapitypes.NodeGroup) []*string { - if nodeGroups == nil { - return nil - } - memberClusters := []*string{} - for _, nodeGroup := range nodeGroups { - for _, member := range nodeGroup.NodeGroupMembers { - cacheClusterId := *member.CacheClusterID - memberClusters = append(memberClusters, &cacheClusterId) - } - } - return memberClusters -} - -// provideNodeGroupsWithReplicas provides NodeGroups array for given node IDs -// each node group is populated with supplied numbers of replica nodes and a primary node. -func provideNodeGroupsWithReplicas(replicasCount int, IDs ...string) []*svcapitypes.NodeGroup { - nodeGroups := []*svcapitypes.NodeGroup{} - for _, ID := range IDs { - nodeId := ID - nodeGroups = append(nodeGroups, &svcapitypes.NodeGroup{ - NodeGroupID: &nodeId, - NodeGroupMembers: provideNodeGroupMembers(&nodeId, replicasCount+1), // primary node + replicas - PrimaryEndpoint: nil, - ReaderEndpoint: nil, - Slots: nil, - Status: nil, - }) - } - return nodeGroups -} - -// provideNodeGroupMembers returns array of NodeGroupMember (replicas and a primary node) for given shard id -func provideNodeGroupMembers(nodeID *string, membersCount int) []*svcapitypes.NodeGroupMember { - if membersCount <= 0 { - return nil - } - rolePrimary := "primary" - roleReplica := "replica" - availabilityZones := provideAvailabilityZones(*nodeID, membersCount) - - members := []*svcapitypes.NodeGroupMember{} - // primary - primary := &svcapitypes.NodeGroupMember{} - cacheClusterId := fmt.Sprintf("RG-%s-00%d", *nodeID, 1) - primary.CacheClusterID = &cacheClusterId - primary.CurrentRole = &rolePrimary - primary.PreferredAvailabilityZone = availabilityZones[0] - members = append(members, primary) - // replicas - for i := 1; i <= membersCount-1; i++ { - replica := &svcapitypes.NodeGroupMember{} - cacheClusterId := fmt.Sprintf("RG-%s-00%d", *nodeID, i+1) - 
replica.CacheClusterID = &cacheClusterId - replica.CacheNodeID = nodeID - replica.CurrentRole = &roleReplica - replica.PreferredAvailabilityZone = availabilityZones[i] - members = append(members, replica) - } - return members -} - -func provideNodeGroupConfiguration(IDs ...string) []*svcapitypes.NodeGroupConfiguration { - replicasCount := 3 - return provideNodeGroupConfigurationWithReplicas(replicasCount, IDs...) -} - -// provideNodeGroupConfiguration provides NodeGroupConfiguration array for given node IDs and replica count -func provideNodeGroupConfigurationWithReplicas( - replicaCount int, IDs ...string, -) []*svcapitypes.NodeGroupConfiguration { - nodeGroupConfig := []*svcapitypes.NodeGroupConfiguration{} - for _, ID := range IDs { - nodeId := ID - azCount := replicaCount + 1 // replicas + a primary node - numberOfReplicas := int64(replicaCount) - availabilityZones := provideAvailabilityZones(nodeId, azCount) - nodeGroupConfig = append(nodeGroupConfig, &svcapitypes.NodeGroupConfiguration{ - NodeGroupID: &nodeId, - PrimaryAvailabilityZone: availabilityZones[0], - ReplicaAvailabilityZones: availabilityZones[1:], - ReplicaCount: &numberOfReplicas, - Slots: nil, - }) - } - - return nodeGroupConfig -} - -// provideAvailabilityZones returns availability zones array for given nodeId -func provideAvailabilityZones(nodeId string, count int) []*string { - if count <= 0 { - return nil - } - - availabilityZones := []*string{} - for i := 1; i <= count; i++ { - az := fmt.Sprintf("%s_%s%d", nodeId, "az", i) - availabilityZones = append(availabilityZones, &az) - } - return availabilityZones -} - -// validatePayloadReshardingConfig validates given payloadReshardingConfigs against given desiredNodeGroupConfigs -// this is used for tests that are related to shard configuration (scale in/out) -func validatePayloadReshardingConfig( - desiredNodeGroupConfigs []*svcapitypes.NodeGroupConfiguration, - payloadReshardingConfigs []*svcsdk.ReshardingConfiguration, - assert 
*assert.Assertions, - require *require.Assertions, -) { - assert.NotNil(desiredNodeGroupConfigs) - require.NotNil(payloadReshardingConfigs) // built as provided in desired object NodeGroupConfiguration - for _, desiredNodeGroup := range desiredNodeGroupConfigs { - found := false - for _, payloadReshardConfig := range payloadReshardingConfigs { - require.NotNil(payloadReshardConfig.PreferredAvailabilityZones) - if *desiredNodeGroup.NodeGroupID == *payloadReshardConfig.NodeGroupId { - found = true - expectedShardAZs := []*string{desiredNodeGroup.PrimaryAvailabilityZone} - for _, expectedAZ := range desiredNodeGroup.ReplicaAvailabilityZones { - expectedShardAZs = append(expectedShardAZs, expectedAZ) - } - assert.Equal(len(expectedShardAZs), len(payloadReshardConfig.PreferredAvailabilityZones), - "Node group id %s", *desiredNodeGroup.NodeGroupID) - for i := 0; i < len(expectedShardAZs); i++ { - assert.Equal(*expectedShardAZs[i], *payloadReshardConfig.PreferredAvailabilityZones[i], - "Node group id %s", *desiredNodeGroup.NodeGroupID) - } - break - } - } - assert.True(found, "Expected node group id %s not found in payload", *desiredNodeGroup.NodeGroupID) - } - assert.Equal(len(desiredNodeGroupConfigs), len(payloadReshardingConfigs)) -} - -// validatePayloadReplicaConfig validates given payloadReplicaConfigs against given desiredNodeGroupConfigs -// this is used for tests that are related to increase/decrease replica count. 
-func validatePayloadReplicaConfig( - desiredNodeGroupConfigs []*svcapitypes.NodeGroupConfiguration, - payloadReplicaConfigs []*svcsdk.ConfigureShard, - assert *assert.Assertions, - require *require.Assertions, -) { - assert.NotNil(desiredNodeGroupConfigs) - require.NotNil(payloadReplicaConfigs) // built as provided in desired object NodeGroupConfiguration - for _, desiredNodeGroup := range desiredNodeGroupConfigs { - found := false - for _, payloadShard := range payloadReplicaConfigs { - require.NotNil(payloadShard.PreferredAvailabilityZones) - if *desiredNodeGroup.NodeGroupID == *payloadShard.NodeGroupId { - found = true - // validate replica count - assert.Equal(*desiredNodeGroup.ReplicaCount, *payloadShard.NewReplicaCount) - - // validate AZs - expectedShardAZs := []*string{desiredNodeGroup.PrimaryAvailabilityZone} - for _, expectedAZ := range desiredNodeGroup.ReplicaAvailabilityZones { - expectedShardAZs = append(expectedShardAZs, expectedAZ) - } - assert.Equal(len(expectedShardAZs), len(payloadShard.PreferredAvailabilityZones), - "Node group id %s", *desiredNodeGroup.NodeGroupID) - for i := 0; i < len(expectedShardAZs); i++ { - assert.Equal(*expectedShardAZs[i], *payloadShard.PreferredAvailabilityZones[i], - "Node group id %s", *desiredNodeGroup.NodeGroupID) - } - break - } - } - assert.True(found, "Expected node group id %s not found in payload", *desiredNodeGroup.NodeGroupID) - } - assert.Equal(len(desiredNodeGroupConfigs), len(payloadReplicaConfigs)) -} - -func TestDecreaseReplicaCountMock(t *testing.T) { - assert := assert.New(t) - // Setup mock API response - var mockDescription = "mock_replication_group_description" - var mockOutput svcsdk.DecreaseReplicaCountOutput - testutil.LoadFromFixture(filepath.Join("testdata", "DecreaseReplicaCountOutput.json"), &mockOutput) - mocksdkapi := &mocksvcsdkapi.ElastiCacheAPI{} - mocksdkapi.On("DecreaseReplicaCountWithContext", mock.Anything, mock.Anything).Return(&mockOutput, nil) - rm := 
provideResourceManagerWithMockSDKAPI(mocksdkapi) - // Tests - t.Run("MockAPI=DecreaseReplicaCount", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - res, _ := rm.decreaseReplicaCount(context.Background(), desired, latest) - assert.Equal(mockDescription, *res.ko.Spec.Description) - }) -} - -func TestCustomModifyReplicationGroup(t *testing.T) { - assert := assert.New(t) - // Setup - rm := provideResourceManager() - // Tests - t.Run("NoAction=NoDiff", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - var delta ackcompare.Delta - res, err := rm.CustomModifyReplicationGroup(context.TODO(), desired, latest, &delta) - assert.Nil(res) - assert.Nil(err) - }) -} - -func TestCustomModifyReplicationGroup_Unavailable(t *testing.T) { - assert := assert.New(t) - // Setup - rm := provideResourceManager() - // Tests - t.Run("UnavailableRG=Requeue", func(t *testing.T) { - desired := provideResource() - latest := provideResourceWithStatus("modifying") - var delta ackcompare.Delta - res, err := rm.CustomModifyReplicationGroup(context.TODO(), desired, latest, &delta) - assert.Nil(res) - assert.NotNil(err) - var requeueNeededAfter *requeue.RequeueNeededAfter - assert.True(errors.As(err, &requeueNeededAfter)) - }) -} - -func TestCustomModifyReplicationGroup_NodeGroup_Unvailable(t *testing.T) { - assert := assert.New(t) - // Setup - rm := provideResourceManager() - // Tests - t.Run("UnavailableNodeGroup=Requeue", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - latest.ko.Status.NodeGroups = provideNodeGroups("1001") - unavailableStatus := "modifying" - for _, nodeGroup := range latest.ko.Status.NodeGroups { - nodeGroup.Status = &unavailableStatus - } - var delta ackcompare.Delta - res, err := rm.CustomModifyReplicationGroup(context.TODO(), desired, latest, &delta) - assert.Nil(res) - assert.NotNil(err) - var requeueNeededAfter *requeue.RequeueNeededAfter - assert.True(errors.As(err, 
&requeueNeededAfter)) - }) -} - -func TestCustomModifyReplicationGroup_NodeGroup_MemberClusters_mismatch(t *testing.T) { - assert := assert.New(t) - require := require.New(t) - // Setup - rm := provideResourceManager() - // Tests - t.Run("nodeGroup_memberClustersMismatch=Diff", func(t *testing.T) { - desired := provideResource() - desired.ko.Status.NodeGroups = provideNodeGroups("1001", "1002") - latest := provideResource() - latest.ko.Status.NodeGroups = provideNodeGroups("1001", "1002") - latest.ko.Status.MemberClusters = provideMemberClusters(latest.ko.Status.NodeGroups) - surplusMemberCluster := "RG-Surplus-Member-Cluster" - latest.ko.Status.MemberClusters = append(latest.ko.Status.MemberClusters, &surplusMemberCluster) - availableStatus := "available" - for _, nodeGroup := range latest.ko.Status.NodeGroups { - nodeGroup.Status = &availableStatus - } - var delta ackcompare.Delta - require.NotNil(latest.ko.Status.MemberClusters) - res, err := rm.CustomModifyReplicationGroup(context.TODO(), desired, latest, &delta) - assert.Nil(res) - assert.NotNil(err) // due to surplus member cluster - var requeueNeededAfter *requeue.RequeueNeededAfter - assert.True(errors.As(err, &requeueNeededAfter)) - }) -} - -func TestCustomModifyReplicationGroup_NodeGroup_available(t *testing.T) { - assert := assert.New(t) - require := require.New(t) - // Setup - rm := provideResourceManager() - // Tests - t.Run("availableNodeGroup=NoDiff", func(t *testing.T) { - desired := provideResource() - desired.ko.Status.NodeGroups = provideNodeGroups("1001") - latest := provideResource() - latest.ko.Status.NodeGroups = provideNodeGroups("1001") - latest.ko.Status.MemberClusters = provideMemberClusters(latest.ko.Status.NodeGroups) - availableStatus := "available" - for _, nodeGroup := range latest.ko.Status.NodeGroups { - nodeGroup.Status = &availableStatus - } - var delta ackcompare.Delta - require.NotNil(latest.ko.Status.MemberClusters) - res, err := rm.CustomModifyReplicationGroup(context.TODO(), 
desired, latest, &delta) - assert.Nil(res) - assert.Nil(err) - }) -} - -func TestCustomModifyReplicationGroup_Scaling_Async_Rollback(t *testing.T) { - assert := assert.New(t) - t.Run("ScaleDownRollback=Diff", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - rgId := "RGID" - desired.ko.Spec.ReplicationGroupID = &rgId - latest.ko.Spec.ReplicationGroupID = &rgId - desired.ko.ObjectMeta.Annotations = make(map[string]string) - desiredCacheNodeType := "cache.t3.micro" - currentCacheNodeType := "cache.m5.large" - desired.ko.Annotations[AnnotationLastRequestedCNT] = desiredCacheNodeType - desired.ko.Spec.CacheNodeType = &desiredCacheNodeType - - rm := provideResourceManager() - - var delta ackcompare.Delta - delta.Add("Spec.CacheNodeType", currentCacheNodeType, desiredCacheNodeType) - - res, err := rm.CustomModifyReplicationGroup(context.TODO(), desired, latest, &delta) - assert.Nil(res) - assert.NotNil(err) - assert.Equal("InvalidParameterCombination: Cannot update CacheNodeType, Please refer to Events for more details", err.Error()) - }) - - t.Run("ScaleInRollback=Diff", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - rgId := "RGID" - desired.ko.Spec.ReplicationGroupID = &rgId - latest.ko.Spec.ReplicationGroupID = &rgId - desired.ko.ObjectMeta.Annotations = make(map[string]string) - - desiredNodeGroup := int64(4) - currentNodeGroup := int64(3) - desired.ko.Annotations[AnnotationLastRequestedNNG] = strconv.Itoa(int(desiredNodeGroup)) - desired.ko.Spec.NumNodeGroups = &desiredNodeGroup - rm := provideResourceManager() - - var delta ackcompare.Delta - delta.Add("Spec.NumNodeGroups", currentNodeGroup, desiredNodeGroup) - - res, err := rm.CustomModifyReplicationGroup(context.TODO(), desired, latest, &delta) - assert.Nil(res) - assert.NotNil(err) - assert.Equal("InvalidParameterCombination: Cannot update NodeGroups, Please refer to Events for more details", err.Error()) - }) -} -func 
TestCustomModifyReplicationGroup_ScaleUpAndDown_And_Resharding(t *testing.T) { - assert := assert.New(t) - - // Tests - t.Run("ScaleInAndScaleUp=Diff", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - rgId := "RGID" - desired.ko.Spec.ReplicationGroupID = &rgId - latest.ko.Spec.ReplicationGroupID = &rgId - desired.ko.ObjectMeta.Annotations = make(map[string]string) - desiredCacheNodeType := "cache.m5.large" - currentCacheNodeType := "cache.t3.small" - desired.ko.Annotations[AnnotationLastRequestedCNT] = currentCacheNodeType - desired.ko.Spec.CacheNodeType = &desiredCacheNodeType - - desiredNodeGroup := int64(4) - currentNodeGroup := int64(3) - desired.ko.Annotations[AnnotationLastRequestedNNG] = strconv.Itoa(int(currentNodeGroup)) - desired.ko.Spec.NumNodeGroups = &desiredNodeGroup - allowedNodeModifications := []*string{&desiredCacheNodeType} - desired.ko.Status.AllowedScaleUpModifications = allowedNodeModifications - mocksdkapi := &mocksvcsdkapi.ElastiCacheAPI{} - rm := provideResourceManagerWithMockSDKAPI(mocksdkapi) - - var delta ackcompare.Delta - delta.Add("Spec.CacheNodeType", currentCacheNodeType, desiredCacheNodeType) - delta.Add("Spec.NumNodeGroups", currentNodeGroup, desiredNodeGroup) - - res, err := rm.CustomModifyReplicationGroup(context.TODO(), desired, latest, &delta) - assert.Nil(res) - assert.Nil(err) - assert.Empty(mocksdkapi.Calls) - }) - - t.Run("ScaleOutAndScaleDown=Diff", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - rgId := "RGID" - desired.ko.Spec.ReplicationGroupID = &rgId - latest.ko.Spec.ReplicationGroupID = &rgId - - mocksdkapi := &mocksvcsdkapi.ElastiCacheAPI{} - rm := provideResourceManagerWithMockSDKAPI(mocksdkapi) - - var delta ackcompare.Delta - delta.Add("Spec.CacheNodeType", "cache.t3.small", "cache.t3.micro") - desired.ko.ObjectMeta.Annotations = make(map[string]string) - cacheNodeType := "cache.t3.small" - desired.ko.Annotations[AnnotationLastRequestedCNT] = 
"cache.t3.micro" - desired.ko.Spec.CacheNodeType = &cacheNodeType - oldshardCount := int64(4) - newShardCount := int64(10) - delta.Add("Spec.NumNodeGroups", oldshardCount, newShardCount) - desired.ko.Spec.NumNodeGroups = &newShardCount - latest.ko.Spec.NumNodeGroups = &oldshardCount - desired.ko.Annotations[AnnotationLastRequestedNNG] = strconv.Itoa(int(oldshardCount)) - mocksdkapi.On("ModifyReplicationGroupShardConfigurationWithContext", mock.Anything, mock.Anything).Return(nil, - awserr.New("Invalid", "Invalid error", nil)) - res, err := rm.CustomModifyReplicationGroup(context.TODO(), desired, latest, &delta) - assert.Nil(res) - assert.NotNil(err) - assert.NotEmpty(mocksdkapi.Calls) - assert.Equal("ModifyReplicationGroupShardConfigurationWithContext", mocksdkapi.Calls[0].Method) - }) - -} - -// TestReplicaCountDifference tests scenarios to check if desired, latest replica count -// configurations differ -func TestReplicaCountDifference(t *testing.T) { - assert := assert.New(t) - require := require.New(t) - // setup - rm := provideResourceManager() - // Tests - t.Run("NoDiff=DesiredNil_LatestNil", func(t *testing.T) { - // neither desired nor latest have either ReplicasPerNodeGroup nor NodeGroupConfiguration set - desired := provideResource() - latest := provideResource() - diff := rm.replicaCountDifference(desired, latest) - assert.Nil(desired.ko.Spec.ReplicasPerNodeGroup) - assert.Nil(desired.ko.Spec.NodeGroupConfiguration) - assert.Nil(latest.ko.Spec.ReplicasPerNodeGroup) - assert.Nil(latest.ko.Spec.NodeGroupConfiguration) - assert.Equal(0, diff) - }) - t.Run("NoDiff=DesiredNonNil_LatestNonNil", func(t *testing.T) { - // both desired and latest have ReplicasPerNodeGroup set (but not NodeGroupConfiguration) - desired := provideResource() - latest := provideResource() - desiredReplicaCount := int64(2) - latestReplicaCount := int64(2) - desired.ko.Spec.ReplicasPerNodeGroup = &desiredReplicaCount - latest.ko.Spec.ReplicasPerNodeGroup = &latestReplicaCount - diff := 
rm.replicaCountDifference(desired, latest) - assert.Nil(desired.ko.Spec.NodeGroupConfiguration) - assert.Nil(latest.ko.Spec.NodeGroupConfiguration) - assert.NotNil(desired.ko.Spec.ReplicasPerNodeGroup) - assert.NotNil(latest.ko.Spec.ReplicasPerNodeGroup) - assert.Equal(0, diff) - }) - t.Run("NoDiff=Spec.NodeGroupConfiguration_Status.NodeGroups", func(t *testing.T) { - // no 'ReplicasPerNodeGroup' in spec but spec has 'NodeGroupConfiguration' with replicas details - // status has matching number of replicas - desired := provideResource() - latest := provideResource() - replicaCount := 2 - desired.ko.Spec.ReplicasPerNodeGroup = nil - desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfigurationWithReplicas(replicaCount, "1001", "1002") - latest.ko.Status.NodeGroups = provideNodeGroupsWithReplicas(replicaCount, "1001") - diff := rm.replicaCountDifference(desired, latest) - assert.NotNil(desired.ko.Spec.NodeGroupConfiguration) - for _, nodeGroupConfig := range desired.ko.Spec.NodeGroupConfiguration { - require.NotNil(nodeGroupConfig.ReplicaCount) - assert.Equal(replicaCount, int(*nodeGroupConfig.ReplicaCount)) - } - assert.NotNil(latest.ko.Status.NodeGroups) - for _, nodeGroup := range latest.ko.Status.NodeGroups { - require.NotNil(nodeGroup.NodeGroupMembers) - assert.Equal(replicaCount+1, len(nodeGroup.NodeGroupMembers)) // replica + primary node - } - assert.Equal(0, diff) - }) - t.Run("NoDiff=Prefer_Spec.ReplicasPerNodeGroup", func(t *testing.T) { - // prefer 'ReplicasPerNodeGroup over 'NodeGroupConfiguration' in desired configuration: - // 'ReplicasPerNodeGroup' in desired spec as well as 'NodeGroupConfiguration' with different desired replicas details. 
- // latest status has matching number of replicas with desired 'ReplicasPerNodeGroup' - desired := provideResource() - latest := provideResource() - desiredReplicaCount := int64(2) - latestReplicaCount := int64(2) - desired.ko.Spec.ReplicasPerNodeGroup = &desiredReplicaCount - latest.ko.Spec.ReplicasPerNodeGroup = &latestReplicaCount - desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfigurationWithReplicas(int(desiredReplicaCount)+1, "1001", "1002") - latest.ko.Status.NodeGroups = provideNodeGroupsWithReplicas(int(latestReplicaCount), "1001") - - diff := rm.replicaCountDifference(desired, latest) - assert.NotNil(desired.ko.Spec.NodeGroupConfiguration) - for _, nodeGroupConfig := range desired.ko.Spec.NodeGroupConfiguration { - require.NotNil(nodeGroupConfig.ReplicaCount) - assert.Equal(int(desiredReplicaCount)+1, int(*nodeGroupConfig.ReplicaCount)) - } - assert.NotNil(latest.ko.Status.NodeGroups) - for _, nodeGroup := range latest.ko.Status.NodeGroups { - require.NotNil(nodeGroup.NodeGroupMembers) - assert.Equal(int(desiredReplicaCount)+1, len(nodeGroup.NodeGroupMembers)) // replica + primary node - } - assert.Equal(0, diff) - }) - t.Run("DiffIncreaseReplica=Spec.ReplicasPerNodeGroup", func(t *testing.T) { - // desired ReplicasPerNodeGroup is greater than latest.ReplicasPerNodeGroup, NodeGroupConfiguration nil - desired := provideResource() - latest := provideResource() - desiredReplicaCount := int64(2) - latestReplicaCount := int64(1) - desired.ko.Spec.ReplicasPerNodeGroup = &desiredReplicaCount - latest.ko.Spec.ReplicasPerNodeGroup = &latestReplicaCount - diff := rm.replicaCountDifference(desired, latest) - assert.Nil(desired.ko.Spec.NodeGroupConfiguration) - assert.True(diff > 0) // desired replicas > latest replicas - }) - t.Run("DiffIncreaseReplica=Spec.NodeGroupConfiguration_Status.NodeGroups", func(t *testing.T) { - // no 'ReplicasPerNodeGroup' in spec but spec has 'NodeGroupConfiguration' with replicas details - // status has matching number of 
replicas - desired := provideResource() - latest := provideResource() - desiredReplicaCount := 2 - latestReplicaCount := 1 - desired.ko.Spec.ReplicasPerNodeGroup = nil - desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfigurationWithReplicas(desiredReplicaCount, "1001", "1002") - latest.ko.Status.NodeGroups = provideNodeGroupsWithReplicas(latestReplicaCount, "1001") - diff := rm.replicaCountDifference(desired, latest) - assert.NotNil(desired.ko.Spec.NodeGroupConfiguration) - for _, nodeGroupConfig := range desired.ko.Spec.NodeGroupConfiguration { - require.NotNil(nodeGroupConfig.ReplicaCount) - assert.Equal(desiredReplicaCount, int(*nodeGroupConfig.ReplicaCount)) - } - assert.NotNil(latest.ko.Status.NodeGroups) - for _, nodeGroup := range latest.ko.Status.NodeGroups { - require.NotNil(nodeGroup.NodeGroupMembers) - assert.Equal(latestReplicaCount+1, len(nodeGroup.NodeGroupMembers)) // replicas + primary node - } - assert.True(diff > 0) // desired replicas > latest replicas - }) - t.Run("DiffDecreaseReplica=Spec.ReplicasPerNodeGroup", func(t *testing.T) { - // desired ReplicasPerNodeGroup is lesser than latest.ReplicasPerNodeGroup, NodeGroupConfiguration nil - desired := provideResource() - latest := provideResource() - desiredReplicaCount := int64(2) - latestReplicaCount := int64(3) - desired.ko.Spec.ReplicasPerNodeGroup = &desiredReplicaCount - latest.ko.Spec.ReplicasPerNodeGroup = &latestReplicaCount - diff := rm.replicaCountDifference(desired, latest) - assert.Nil(desired.ko.Spec.NodeGroupConfiguration) - assert.True(diff < 0) // desired replicas < latest replicas - }) - t.Run("DiffDecreaseReplica=Spec.NodeGroupConfiguration_Status.NodeGroups", func(t *testing.T) { - // no 'ReplicasPerNodeGroup' in spec but spec has 'NodeGroupConfiguration' with replicas details - // status has matching number of replicas - desired := provideResource() - latest := provideResource() - desiredReplicaCount := 2 - latestReplicaCount := 3 - 
desired.ko.Spec.ReplicasPerNodeGroup = nil - desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfigurationWithReplicas(desiredReplicaCount, "1001", "1002") - latest.ko.Status.NodeGroups = provideNodeGroupsWithReplicas(latestReplicaCount, "1001") - diff := rm.replicaCountDifference(desired, latest) - assert.NotNil(desired.ko.Spec.NodeGroupConfiguration) - for _, nodeGroupConfig := range desired.ko.Spec.NodeGroupConfiguration { - require.NotNil(nodeGroupConfig.ReplicaCount) - assert.Equal(desiredReplicaCount, int(*nodeGroupConfig.ReplicaCount)) - } - assert.NotNil(latest.ko.Status.NodeGroups) - for _, nodeGroup := range latest.ko.Status.NodeGroups { - require.NotNil(nodeGroup.NodeGroupMembers) - assert.Equal(latestReplicaCount+1, len(nodeGroup.NodeGroupMembers)) // replicas + primary node - } - assert.True(diff < 0) // desired replicas < latest replicas - }) -} - -// TestNewIncreaseReplicaCountRequestPayload tests scenarios to -// check request payload by providing desired spec details for increase replica count. 
-func TestNewIncreaseReplicaCountRequestPayload(t *testing.T) { - assert := assert.New(t) - require := require.New(t) - // setup - rm := provideResourceManager() - // Tests - t.Run("EmptyPayload=NoSpec", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - payload, err := rm.newIncreaseReplicaCountRequestPayload(desired, latest) - require.NotNil(payload) - require.NotNil(payload.ApplyImmediately) - assert.True(*payload.ApplyImmediately) - assert.Nil(payload.ReplicationGroupId) - assert.Nil(payload.NewReplicaCount) - assert.Nil(payload.ReplicaConfiguration) - assert.Nil(err) - }) - t.Run("Payload=Spec", func(t *testing.T) { - desired := provideResource() - replicationGroupID := "test-rg" - desired.ko.Spec.ReplicationGroupID = &replicationGroupID - desiredReplicaCount := int64(2) - desired.ko.Spec.ReplicasPerNodeGroup = &desiredReplicaCount - desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfigurationWithReplicas( - int(desiredReplicaCount), "1001", "1002") - // expected: only node groups that are present on server are included in payload. 
- expectedPayloadReplicaConfiguration := provideNodeGroupConfigurationWithReplicas( - int(desiredReplicaCount), "1001") - latest := provideResource() - latestReplicaCount := int64(3) - latest.ko.Status.NodeGroups = provideNodeGroupsWithReplicas( - int(latestReplicaCount), "1001", "1003") - payload, err := rm.newIncreaseReplicaCountRequestPayload(desired, latest) - require.NotNil(payload) - require.NotNil(payload.ApplyImmediately) - assert.True(*payload.ApplyImmediately) - assert.Equal(replicationGroupID, *payload.ReplicationGroupId) - assert.Equal(desiredReplicaCount, *payload.NewReplicaCount) - assert.NotNil(payload.ReplicaConfiguration) - validatePayloadReplicaConfig(expectedPayloadReplicaConfiguration, payload.ReplicaConfiguration, assert, require) - assert.Nil(err) - }) -} - -// TestNewDecreaseReplicaCountRequestPayload tests scenarios to -// check request payload by providing desired spec details for decrease replica count. -func TestNewDecreaseReplicaCountRequestPayload(t *testing.T) { - assert := assert.New(t) - require := require.New(t) - // setup - rm := provideResourceManager() - // Tests - t.Run("EmptyPayload=NoSpec", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - payload, err := rm.newDecreaseReplicaCountRequestPayload(desired, latest) - require.NotNil(payload) - require.NotNil(payload.ApplyImmediately) - assert.True(*payload.ApplyImmediately) - assert.Nil(payload.ReplicationGroupId) - assert.Nil(payload.NewReplicaCount) - assert.Nil(payload.ReplicaConfiguration) - assert.Nil(err) - }) - t.Run("Payload=Spec_Server", func(t *testing.T) { - desired := provideResource() - replicationGroupID := "test-rg" - desired.ko.Spec.ReplicationGroupID = &replicationGroupID - desiredReplicaCount := int64(2) - desired.ko.Spec.ReplicasPerNodeGroup = &desiredReplicaCount - desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfigurationWithReplicas( - int(desiredReplicaCount), "1001", "1002") - // expected: only node groups that 
are present on server are included in payload. - expectedPayloadReplicaConfiguration := provideNodeGroupConfigurationWithReplicas( - int(desiredReplicaCount), "1001") - latest := provideResource() - latestReplicaCount := int64(1) - latest.ko.Status.NodeGroups = provideNodeGroupsWithReplicas( - int(latestReplicaCount), "1001") - payload, err := rm.newDecreaseReplicaCountRequestPayload(desired, latest) - require.NotNil(payload) - require.NotNil(payload.ApplyImmediately) - assert.True(*payload.ApplyImmediately) - assert.Equal(replicationGroupID, *payload.ReplicationGroupId) - assert.Equal(desiredReplicaCount, *payload.NewReplicaCount) - assert.NotNil(payload.ReplicaConfiguration) - validatePayloadReplicaConfig(expectedPayloadReplicaConfiguration, payload.ReplicaConfiguration, assert, require) - assert.Nil(err) - }) -} - -// TestShardConfigurationsDiffer tests scenarios to check if desired, latest shards -// configurations differ. -func TestShardConfigurationsDiffer(t *testing.T) { - assert := assert.New(t) - // setup - rm := provideResourceManager() - // Tests - t.Run("NoDiff=NoSpec_NoStatus", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - differ := rm.shardConfigurationsDiffer(desired, latest) - assert.False(differ) - }) - t.Run("NoDiff=NoSpec_Status.NodeGroups", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - latest.ko.Status.NodeGroups = provideNodeGroups("1001") - differ := rm.shardConfigurationsDiffer(desired, latest) - assert.False(differ) - }) - t.Run("Diff=Spec.NumNodeGroups_NoStatus", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - desiredShards := int64(2) - desired.ko.Spec.NumNodeGroups = &desiredShards - differ := rm.shardConfigurationsDiffer(desired, latest) - assert.True(differ) - }) - t.Run("Diff=Spec.NodeGroupConfiguration_NoStatus", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - 
desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfiguration("1001") - differ := rm.shardConfigurationsDiffer(desired, latest) - assert.True(differ) - }) - t.Run("NoDiff=Spec.NodeGroupConfiguration_Status.NodeGroups", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfiguration("1001") - latest.ko.Status.NodeGroups = provideNodeGroups("1001") - differ := rm.shardConfigurationsDiffer(desired, latest) - assert.False(differ) - }) - t.Run("Diff=ScaleIn_Spec.NodeGroupConfiguration_Status.NodeGroups", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfiguration("1001", "1002") - latest.ko.Status.NodeGroups = provideNodeGroups("1001", "1002", "1003") - differ := rm.shardConfigurationsDiffer(desired, latest) - assert.True(differ) - }) - t.Run("Diff=ScaleOut_Spec.NodeGroupConfiguration_Status.NodeGroups", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfiguration("1001", "1002") - latest.ko.Status.NodeGroups = provideNodeGroups("1001") - differ := rm.shardConfigurationsDiffer(desired, latest) - assert.True(differ) - }) - t.Run("NoDiff=Spec.NumNodeGroups_Status.NodeGroups", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - desiredShards := int64(1) - desired.ko.Spec.NumNodeGroups = &desiredShards - latest.ko.Status.NodeGroups = provideNodeGroups("1001") - differ := rm.shardConfigurationsDiffer(desired, latest) - assert.False(differ) - }) - t.Run("Diff=Spec.NumNodeGroups_Status.NodeGroups", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - desiredShards := int64(2) - desired.ko.Spec.NumNodeGroups = &desiredShards - latest.ko.Status.NodeGroups = provideNodeGroups("1001") - differ := rm.shardConfigurationsDiffer(desired, 
latest) - assert.True(differ) - }) - - t.Run("NoDiff=Prefer_Spec.NumNodeGroups", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - desiredShards := int64(2) - desired.ko.Spec.NumNodeGroups = &desiredShards - desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfiguration("1001", "1002", "1003") - latest.ko.Status.NodeGroups = provideNodeGroups("1001", "1002") - differ := rm.shardConfigurationsDiffer(desired, latest) - assert.False(differ) - }) -} - -// TestNewUpdateShardConfigurationRequestPayload tests scenarios to -// check request payload by providing desired, latest details -func TestNewUpdateShardConfigurationRequestPayload(t *testing.T) { - assert := assert.New(t) - require := require.New(t) - // setup - rm := provideResourceManager() - // Tests - t.Run("EmptyPayload=NoSpec_NoStatus", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - payload, err := rm.newUpdateShardConfigurationRequestPayload(desired, latest) - require.NotNil(payload) - require.NotNil(payload.ApplyImmediately) - assert.True(*payload.ApplyImmediately) - assert.Nil(payload.ReplicationGroupId) - assert.Nil(payload.NodeGroupCount) - assert.Nil(payload.ReshardingConfiguration) - assert.Nil(payload.NodeGroupsToRetain) - assert.Nil(payload.NodeGroupsToRemove) - assert.Nil(err) - }) - t.Run("EmptyPayload=NoSpec_Status.NodeGroups", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - latest.ko.Status.NodeGroups = provideNodeGroups("1001") - payload, err := rm.newUpdateShardConfigurationRequestPayload(desired, latest) - require.NotNil(payload) - require.NotNil(payload.ApplyImmediately) - assert.True(*payload.ApplyImmediately) - assert.Nil(payload.ReplicationGroupId) - assert.Nil(payload.NodeGroupCount) - assert.Nil(payload.ReshardingConfiguration) - assert.Nil(payload.NodeGroupsToRetain) - assert.Nil(payload.NodeGroupsToRemove) - assert.Nil(err) - }) - 
t.Run("ScaleOutPayload=Prefer_Spec.NumNodeGroups_NoStatus", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - desiredShards := int64(2) - desired.ko.Spec.NumNodeGroups = &desiredShards - desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfiguration("1001", "1002", "1003") - payload, err := rm.newUpdateShardConfigurationRequestPayload(desired, latest) - require.NotNil(payload) - require.NotNil(payload.ApplyImmediately) - assert.True(*payload.ApplyImmediately) - require.NotNil(payload.NodeGroupCount) - assert.Equal(*desired.ko.Spec.NumNodeGroups, *payload.NodeGroupCount) // preferred NumNodeGroups over len(NodeGroupConfiguration) - require.NotNil(payload.ReshardingConfiguration) // built as provided in desired object NodeGroupConfiguration - assert.Equal(len(desired.ko.Spec.NodeGroupConfiguration), len(payload.ReshardingConfiguration)) - assert.Nil(payload.NodeGroupsToRetain) - assert.Nil(payload.NodeGroupsToRemove) - assert.Nil(err) - }) - t.Run("ScaleOutPayload=Computed_Spec.NodeGroupConfiguration_NoStatus", func(t *testing.T) { - desired := provideResource() - desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfiguration("1001", "1002", "1003") - latest := provideResource() - payload, err := rm.newUpdateShardConfigurationRequestPayload(desired, latest) - require.NotNil(payload) - require.NotNil(payload.ApplyImmediately) - assert.True(*payload.ApplyImmediately) - require.NotNil(payload.NodeGroupCount) - assert.Equal(int64(len(desired.ko.Spec.NodeGroupConfiguration)), *payload.NodeGroupCount) - require.NotNil(payload.ReshardingConfiguration) // increase scenario as no-status - assert.Equal(len(desired.ko.Spec.NodeGroupConfiguration), len(payload.ReshardingConfiguration)) - assert.Nil(payload.NodeGroupsToRetain) - assert.Nil(payload.NodeGroupsToRemove) - assert.Nil(err) - }) - t.Run("ScaleOutPayload=Prefer_Spec.NumNodeGroups_Status.NodeGroups", func(t *testing.T) { - desired := provideResource() - latest := 
provideResource() - replicationGroupID := "test-rg" - desired.ko.Spec.ReplicationGroupID = &replicationGroupID - desiredShards := int64(2) - desired.ko.Spec.NumNodeGroups = &desiredShards - desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfiguration("1001", "1002", "1003") - latest.ko.Status.NodeGroups = provideNodeGroups("1001") - payload, err := rm.newUpdateShardConfigurationRequestPayload(desired, latest) - require.NotNil(payload) - require.NotNil(payload.ApplyImmediately) - assert.True(*payload.ApplyImmediately) - assert.Equal(*desired.ko.Spec.ReplicationGroupID, *payload.ReplicationGroupId) - require.NotNil(payload.NodeGroupCount) - assert.Equal(*desired.ko.Spec.NumNodeGroups, *payload.NodeGroupCount) - validatePayloadReshardingConfig(desired.ko.Spec.NodeGroupConfiguration, payload.ReshardingConfiguration, assert, require) - assert.Nil(payload.NodeGroupsToRetain) - assert.Nil(payload.NodeGroupsToRemove) - assert.Nil(err) - }) - t.Run("ScaleOutPayload=Spec.NodeGroupConfiguration_Status.NodeGroups", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - replicationGroupID := "test-rg" - desired.ko.Spec.ReplicationGroupID = &replicationGroupID - desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfiguration("1001", "1002", "1003") - latest.ko.Status.NodeGroups = provideNodeGroups("1001") - payload, err := rm.newUpdateShardConfigurationRequestPayload(desired, latest) - require.NotNil(payload) - require.NotNil(payload.ApplyImmediately) - assert.True(*payload.ApplyImmediately) - assert.Equal(*desired.ko.Spec.ReplicationGroupID, *payload.ReplicationGroupId) - require.NotNil(payload.NodeGroupCount) - assert.Equal(int64(len(desired.ko.Spec.NodeGroupConfiguration)), *payload.NodeGroupCount) - require.NotNil(payload.ReshardingConfiguration) - validatePayloadReshardingConfig(desired.ko.Spec.NodeGroupConfiguration, payload.ReshardingConfiguration, assert, require) - assert.Nil(payload.NodeGroupsToRetain) - 
assert.Nil(payload.NodeGroupsToRemove) - assert.Nil(err) - }) - t.Run("ScaleInPayload=Spec.NodeGroupConfiguration_Status.NodeGroups", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - replicationGroupID := "test-rg" - desired.ko.Spec.ReplicationGroupID = &replicationGroupID - desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfiguration("1001") - latest.ko.Status.NodeGroups = provideNodeGroups("1001", "1002", "1003") - payload, err := rm.newUpdateShardConfigurationRequestPayload(desired, latest) - require.NotNil(payload) - require.NotNil(payload.ApplyImmediately) - assert.True(*payload.ApplyImmediately) - assert.Equal(*desired.ko.Spec.ReplicationGroupID, *payload.ReplicationGroupId) - require.NotNil(payload.NodeGroupCount) - assert.Equal(int64(len(desired.ko.Spec.NodeGroupConfiguration)), *payload.NodeGroupCount) - assert.Nil(payload.ReshardingConfiguration) - require.NotNil(payload.NodeGroupsToRetain) - assert.Equal(len(desired.ko.Spec.NodeGroupConfiguration), len(payload.NodeGroupsToRetain)) - for _, desiredNodeGroup := range desired.ko.Spec.NodeGroupConfiguration { - found := false - for _, nodeGroupId := range payload.NodeGroupsToRetain { - if *desiredNodeGroup.NodeGroupID == *nodeGroupId { - found = true - break - } - } - assert.True(found, "Expected node group id %s not found in payload", desiredNodeGroup.NodeGroupID) - } - assert.Nil(payload.NodeGroupsToRemove) - assert.Nil(err) - }) -} - -// TestSecurityGroupIdsDiffer tests scenarios to check if desired, latest (from cache cluster) -// security group ids configuration differs. 
-func TestSecurityGroupIdsDiffer(t *testing.T) { - assert := assert.New(t) - require := require.New(t) - // setup - rm := provideResourceManager() - // Tests - t.Run("NoDiff=NoSpec_NoStatus", func(t *testing.T) { - desiredRG := provideResource() - latestRG := provideResource() - latestCacheCluster := provideCacheCluster() - require.Nil(desiredRG.ko.Spec.SecurityGroupIDs) - require.Nil(latestCacheCluster.SecurityGroups) - differ := rm.securityGroupIdsDiffer(desiredRG, latestRG, latestCacheCluster) - assert.False(differ) - }) - t.Run("NoDiff=NoSpec_HasStatus", func(t *testing.T) { - desiredRG := provideResource() - latestRG := provideResource() - latestCacheCluster := provideCacheCluster() - latestCacheCluster.SecurityGroups = provideCacheClusterSecurityGroups("sg-001, sg-002") - require.Nil(desiredRG.ko.Spec.SecurityGroupIDs) - require.NotNil(latestCacheCluster.SecurityGroups) - differ := rm.securityGroupIdsDiffer(desiredRG, latestRG, latestCacheCluster) - assert.False(differ) - }) - t.Run("NoDiff=Spec_Status_Match", func(t *testing.T) { - desiredRG := provideResource() - sg1 := "sg-001" - sg2 := "sg-002" - desiredRG.ko.Spec.SecurityGroupIDs = []*string{&sg1, &sg2} - latestRG := provideResource() - latestCacheCluster := provideCacheCluster() - latestCacheCluster.SecurityGroups = provideCacheClusterSecurityGroups(sg2, sg1) // same but out of order - require.NotNil(desiredRG.ko.Spec.SecurityGroupIDs) - require.NotNil(latestCacheCluster.SecurityGroups) - differ := rm.securityGroupIdsDiffer(desiredRG, latestRG, latestCacheCluster) - assert.False(differ) - }) - t.Run("Diff=Spec_Status_MisMatch", func(t *testing.T) { - desiredRG := provideResource() - sg1 := "sg-001" - sg2 := "sg-002" - desiredRG.ko.Spec.SecurityGroupIDs = []*string{&sg1} - latestRG := provideResource() - latestCacheCluster := provideCacheCluster() - latestCacheCluster.SecurityGroups = provideCacheClusterSecurityGroups(sg2, sg1) // sg2 is additional - require.NotNil(desiredRG.ko.Spec.SecurityGroupIDs) - 
require.NotNil(latestCacheCluster.SecurityGroups) - differ := rm.securityGroupIdsDiffer(desiredRG, latestRG, latestCacheCluster) - assert.True(differ) - }) -} - -// provideNodeGroupsWithReplicas provides NodeGroups array for given node IDs -// each node group is populated with supplied numbers of replica nodes and a primary node. -func provideCacheClusterSecurityGroups(IDs ...string) []*svcsdk.SecurityGroupMembership { - securityGroups := []*svcsdk.SecurityGroupMembership{} - for _, ID := range IDs { - securityGroupId := ID - status := "available" - securityGroups = append(securityGroups, &svcsdk.SecurityGroupMembership{ - SecurityGroupId: &securityGroupId, - Status: &status, - }) - } - return securityGroups -} diff --git a/pkg/resource/replication_group/delta.go b/pkg/resource/replication_group/delta.go index fe220197..8dfa0aa7 100644 --- a/pkg/resource/replication_group/delta.go +++ b/pkg/resource/replication_group/delta.go @@ -91,6 +91,13 @@ func newResourceDelta( if !reflect.DeepEqual(a.ko.Spec.CacheSubnetGroupRef, b.ko.Spec.CacheSubnetGroupRef) { delta.Add("Spec.CacheSubnetGroupRef", a.ko.Spec.CacheSubnetGroupRef, b.ko.Spec.CacheSubnetGroupRef) } + if ackcompare.HasNilDifference(a.ko.Spec.ClusterMode, b.ko.Spec.ClusterMode) { + delta.Add("Spec.ClusterMode", a.ko.Spec.ClusterMode, b.ko.Spec.ClusterMode) + } else if a.ko.Spec.ClusterMode != nil && b.ko.Spec.ClusterMode != nil { + if *a.ko.Spec.ClusterMode != *b.ko.Spec.ClusterMode { + delta.Add("Spec.ClusterMode", a.ko.Spec.ClusterMode, b.ko.Spec.ClusterMode) + } + } if ackcompare.HasNilDifference(a.ko.Spec.DataTieringEnabled, b.ko.Spec.DataTieringEnabled) { delta.Add("Spec.DataTieringEnabled", a.ko.Spec.DataTieringEnabled, b.ko.Spec.DataTieringEnabled) } else if a.ko.Spec.DataTieringEnabled != nil && b.ko.Spec.DataTieringEnabled != nil { @@ -119,6 +126,13 @@ func newResourceDelta( delta.Add("Spec.EngineVersion", a.ko.Spec.EngineVersion, b.ko.Spec.EngineVersion) } } + if 
ackcompare.HasNilDifference(a.ko.Spec.IPDiscovery, b.ko.Spec.IPDiscovery) { + delta.Add("Spec.IPDiscovery", a.ko.Spec.IPDiscovery, b.ko.Spec.IPDiscovery) + } else if a.ko.Spec.IPDiscovery != nil && b.ko.Spec.IPDiscovery != nil { + if *a.ko.Spec.IPDiscovery != *b.ko.Spec.IPDiscovery { + delta.Add("Spec.IPDiscovery", a.ko.Spec.IPDiscovery, b.ko.Spec.IPDiscovery) + } + } if ackcompare.HasNilDifference(a.ko.Spec.KMSKeyID, b.ko.Spec.KMSKeyID) { delta.Add("Spec.KMSKeyID", a.ko.Spec.KMSKeyID, b.ko.Spec.KMSKeyID) } else if a.ko.Spec.KMSKeyID != nil && b.ko.Spec.KMSKeyID != nil { @@ -126,6 +140,13 @@ func newResourceDelta( delta.Add("Spec.KMSKeyID", a.ko.Spec.KMSKeyID, b.ko.Spec.KMSKeyID) } } + if ackcompare.HasNilDifference(a.ko.Spec.NetworkType, b.ko.Spec.NetworkType) { + delta.Add("Spec.NetworkType", a.ko.Spec.NetworkType, b.ko.Spec.NetworkType) + } else if a.ko.Spec.NetworkType != nil && b.ko.Spec.NetworkType != nil { + if *a.ko.Spec.NetworkType != *b.ko.Spec.NetworkType { + delta.Add("Spec.NetworkType", a.ko.Spec.NetworkType, b.ko.Spec.NetworkType) + } + } if len(a.ko.Spec.NodeGroupConfiguration) != len(b.ko.Spec.NodeGroupConfiguration) { delta.Add("Spec.NodeGroupConfiguration", a.ko.Spec.NodeGroupConfiguration, b.ko.Spec.NodeGroupConfiguration) } else if len(a.ko.Spec.NodeGroupConfiguration) > 0 { @@ -192,6 +213,13 @@ func newResourceDelta( if !reflect.DeepEqual(a.ko.Spec.SecurityGroupRefs, b.ko.Spec.SecurityGroupRefs) { delta.Add("Spec.SecurityGroupRefs", a.ko.Spec.SecurityGroupRefs, b.ko.Spec.SecurityGroupRefs) } + if ackcompare.HasNilDifference(a.ko.Spec.ServerlessCacheSnapshotName, b.ko.Spec.ServerlessCacheSnapshotName) { + delta.Add("Spec.ServerlessCacheSnapshotName", a.ko.Spec.ServerlessCacheSnapshotName, b.ko.Spec.ServerlessCacheSnapshotName) + } else if a.ko.Spec.ServerlessCacheSnapshotName != nil && b.ko.Spec.ServerlessCacheSnapshotName != nil { + if *a.ko.Spec.ServerlessCacheSnapshotName != *b.ko.Spec.ServerlessCacheSnapshotName { + 
delta.Add("Spec.ServerlessCacheSnapshotName", a.ko.Spec.ServerlessCacheSnapshotName, b.ko.Spec.ServerlessCacheSnapshotName) + } + } if len(a.ko.Spec.SnapshotARNs) != len(b.ko.Spec.SnapshotARNs) { delta.Add("Spec.SnapshotARNs", a.ko.Spec.SnapshotARNs, b.ko.Spec.SnapshotARNs) } else if len(a.ko.Spec.SnapshotARNs) > 0 { @@ -230,6 +258,13 @@ func newResourceDelta( delta.Add("Spec.TransitEncryptionEnabled", a.ko.Spec.TransitEncryptionEnabled, b.ko.Spec.TransitEncryptionEnabled) } } + if ackcompare.HasNilDifference(a.ko.Spec.TransitEncryptionMode, b.ko.Spec.TransitEncryptionMode) { + delta.Add("Spec.TransitEncryptionMode", a.ko.Spec.TransitEncryptionMode, b.ko.Spec.TransitEncryptionMode) + } else if a.ko.Spec.TransitEncryptionMode != nil && b.ko.Spec.TransitEncryptionMode != nil { + if *a.ko.Spec.TransitEncryptionMode != *b.ko.Spec.TransitEncryptionMode { + delta.Add("Spec.TransitEncryptionMode", a.ko.Spec.TransitEncryptionMode, b.ko.Spec.TransitEncryptionMode) + } + } if len(a.ko.Spec.UserGroupIDs) != len(b.ko.Spec.UserGroupIDs) { delta.Add("Spec.UserGroupIDs", a.ko.Spec.UserGroupIDs, b.ko.Spec.UserGroupIDs) } else if len(a.ko.Spec.UserGroupIDs) > 0 { diff --git a/pkg/resource/replication_group/descriptor.go b/pkg/resource/replication_group/descriptor.go index 4d336b65..973139ee 100644 --- a/pkg/resource/replication_group/descriptor.go +++ b/pkg/resource/replication_group/descriptor.go @@ -28,7 +28,7 @@ import ( ) const ( - finalizerString = "finalizers.elasticache.services.k8s.aws/ReplicationGroup" + FinalizerString = "finalizers.elasticache.services.k8s.aws/ReplicationGroup" ) var ( @@ -88,8 +88,8 @@ func (d *resourceDescriptor) IsManaged( // https://github.com/kubernetes-sigs/controller-runtime/issues/994 is // fixed. 
This should be able to be: // - // return k8sctrlutil.ContainsFinalizer(obj, finalizerString) - return containsFinalizer(obj, finalizerString) + // return k8sctrlutil.ContainsFinalizer(obj, FinalizerString) + return containsFinalizer(obj, FinalizerString) } // Remove once https://github.com/kubernetes-sigs/controller-runtime/issues/994 @@ -118,7 +118,7 @@ func (d *resourceDescriptor) MarkManaged( // Should not happen. If it does, there is a bug in the code panic("nil RuntimeMetaObject in AWSResource") } - k8sctrlutil.AddFinalizer(obj, finalizerString) + k8sctrlutil.AddFinalizer(obj, FinalizerString) } // MarkUnmanaged removes the supplied resource from management by ACK. What @@ -133,7 +133,7 @@ func (d *resourceDescriptor) MarkUnmanaged( // Should not happen. If it does, there is a bug in the code panic("nil RuntimeMetaObject in AWSResource") } - k8sctrlutil.RemoveFinalizer(obj, finalizerString) + k8sctrlutil.RemoveFinalizer(obj, FinalizerString) } // MarkAdopted places descriptors on the custom resource that indicate the diff --git a/pkg/resource/replication_group/hooks_test.go b/pkg/resource/replication_group/hooks_test.go deleted file mode 100644 index f12a5257..00000000 --- a/pkg/resource/replication_group/hooks_test.go +++ /dev/null @@ -1,129 +0,0 @@ -package replication_group - -import ( - "context" - "testing" - - ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" - "github.com/aws/aws-sdk-go/aws" - "github.com/stretchr/testify/mock" - - svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" - mocksvcsdkapi "github.com/aws-controllers-k8s/elasticache-controller/mocks/aws-sdk-go/elasticache" -) - -func Test_resourceManager_syncTags(t *testing.T) { - testhelper := func() (*resourceManager, *mocksvcsdkapi.ElastiCacheAPI) { - mocksdkapi := &mocksvcsdkapi.ElastiCacheAPI{} - mocksdkapi.On("RemoveTagsFromResourceWithContext", mock.Anything, mock.Anything).Return(nil, nil) - 
mocksdkapi.On("AddTagsToResourceWithContext", mock.Anything, mock.Anything).Return(nil, nil) - rm := provideResourceManagerWithMockSDKAPI(mocksdkapi) - return rm, mocksdkapi - } - t.Run("add and remove tags, only execute add tags", func(t *testing.T) { - rm, mocksdkapi := testhelper() - _ = rm.syncTags(context.Background(), - &resource{ko: &svcapitypes.ReplicationGroup{ - Spec: svcapitypes.ReplicationGroupSpec{ - Tags: []*svcapitypes.Tag{ - { - Key: aws.String("add 1"), - Value: aws.String("to add"), - }, - { - Key: aws.String("add 2"), - Value: aws.String("to add"), - }, - }, - }, - }}, - &resource{ko: &svcapitypes.ReplicationGroup{ - Spec: svcapitypes.ReplicationGroupSpec{ - Tags: []*svcapitypes.Tag{ - { - Key: aws.String("remove"), - Value: aws.String("to remove"), - }, - }, - }, - Status: svcapitypes.ReplicationGroupStatus{ - ACKResourceMetadata: &ackv1alpha1.ResourceMetadata{ - ARN: (*ackv1alpha1.AWSResourceName)(aws.String("testARN")), - }, - }, - }}) - mocksdkapi.AssertNumberOfCalls(t, "RemoveTagsFromResourceWithContext", 0) - mocksdkapi.AssertNumberOfCalls(t, "AddTagsToResourceWithContext", 1) - }) - - t.Run("remove tags", func(t *testing.T) { - rm, mocksdkapi := testhelper() - _ = rm.syncTags(context.Background(), - &resource{ko: &svcapitypes.ReplicationGroup{ - Spec: svcapitypes.ReplicationGroupSpec{ - Tags: []*svcapitypes.Tag{}, - }, - }}, - &resource{ko: &svcapitypes.ReplicationGroup{ - Spec: svcapitypes.ReplicationGroupSpec{ - Tags: []*svcapitypes.Tag{ - { - Key: aws.String("remove 1"), - Value: aws.String("to remove"), - }, - { - Key: aws.String("remove 2"), - Value: aws.String("to remove"), - }, - }, - }, - Status: svcapitypes.ReplicationGroupStatus{ - ACKResourceMetadata: &ackv1alpha1.ResourceMetadata{ - ARN: (*ackv1alpha1.AWSResourceName)(aws.String("testARN")), - }, - }, - }}) - mocksdkapi.AssertNumberOfCalls(t, "RemoveTagsFromResourceWithContext", 1) - mocksdkapi.AssertNumberOfCalls(t, "AddTagsToResourceWithContext", 0) - }) - - t.Run("modify 
existent tags, not remove call", func(t *testing.T) { - rm, mocksdkapi := testhelper() - _ = rm.syncTags(context.Background(), - &resource{ko: &svcapitypes.ReplicationGroup{ - Spec: svcapitypes.ReplicationGroupSpec{ - Tags: []*svcapitypes.Tag{ - { - Key: aws.String("key1"), - Value: aws.String("new value1"), - }, - { - Key: aws.String("key2"), - Value: aws.String("new value2"), - }, - }, - }, - }}, - &resource{ko: &svcapitypes.ReplicationGroup{ - Spec: svcapitypes.ReplicationGroupSpec{ - Tags: []*svcapitypes.Tag{ - { - Key: aws.String("key1"), - Value: aws.String("value1"), - }, - { - Key: aws.String("key2"), - Value: aws.String("value2"), - }, - }, - }, - Status: svcapitypes.ReplicationGroupStatus{ - ACKResourceMetadata: &ackv1alpha1.ResourceMetadata{ - ARN: (*ackv1alpha1.AWSResourceName)(aws.String("testARN")), - }, - }, - }}) - mocksdkapi.AssertNumberOfCalls(t, "RemoveTagsFromResourceWithContext", 0) - mocksdkapi.AssertNumberOfCalls(t, "AddTagsToResourceWithContext", 1) - }) -} diff --git a/pkg/resource/replication_group/manager.go b/pkg/resource/replication_group/manager.go index 6f349203..3d7146e9 100644 --- a/pkg/resource/replication_group/manager.go +++ b/pkg/resource/replication_group/manager.go @@ -32,9 +32,8 @@ import ( acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" ackutil "github.com/aws-controllers-k8s/runtime/pkg/util" - "github.com/aws/aws-sdk-go/aws/session" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" - svcsdkapi "github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" @@ -59,6 +58,9 @@ type resourceManager struct { // cfg is a copy of the ackcfg.Config object passed on start of the service // controller cfg ackcfg.Config + // clientcfg is a copy of the client configuration passed on start of the + // service 
controller + clientcfg aws.Config // log refers to the logr.Logger object handling logging for the service // controller log logr.Logger @@ -73,12 +75,9 @@ type resourceManager struct { awsAccountID ackv1alpha1.AWSAccountID // The AWS Region that this resource manager targets awsRegion ackv1alpha1.AWSRegion - // sess is the AWS SDK Session object used to communicate with the backend - // AWS service API - sess *session.Session - // sdk is a pointer to the AWS service API interface exposed by the - // aws-sdk-go/services/{alias}/{alias}iface package. - sdkapi svcsdkapi.ElastiCacheAPI + // sdk is a pointer to the AWS service API client exposed by the + // aws-sdk-go-v2/services/{alias} package. + sdkapi *svcsdk.Client } // concreteResource returns a pointer to a resource from the supplied @@ -299,24 +298,25 @@ func (rm *resourceManager) EnsureTags( // newResourceManager returns a new struct implementing // acktypes.AWSResourceManager +// This is for AWS-SDK-GO-V2 - Created newResourceManager With AWS sdk-Go-ClientV2 func newResourceManager( cfg ackcfg.Config, + clientcfg aws.Config, log logr.Logger, metrics *ackmetrics.Metrics, rr acktypes.Reconciler, - sess *session.Session, id ackv1alpha1.AWSAccountID, region ackv1alpha1.AWSRegion, ) (*resourceManager, error) { return &resourceManager{ cfg: cfg, + clientcfg: clientcfg, log: log, metrics: metrics, rr: rr, awsAccountID: id, awsRegion: region, - sess: sess, - sdkapi: svcsdk.New(sess), + sdkapi: svcsdk.NewFromConfig(clientcfg), }, nil } diff --git a/pkg/resource/replication_group/manager_factory.go b/pkg/resource/replication_group/manager_factory.go index 279b8c73..2de9f614 100644 --- a/pkg/resource/replication_group/manager_factory.go +++ b/pkg/resource/replication_group/manager_factory.go @@ -23,7 +23,7 @@ import ( ackcfg "github.com/aws-controllers-k8s/runtime/pkg/config" ackmetrics "github.com/aws-controllers-k8s/runtime/pkg/metrics" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" - 
"github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/go-logr/logr" svcresource "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource" @@ -47,14 +47,18 @@ func (f *resourceManagerFactory) ResourceDescriptor() acktypes.AWSResourceDescri // supplied AWS account func (f *resourceManagerFactory) ManagerFor( cfg ackcfg.Config, + clientcfg aws.Config, log logr.Logger, metrics *ackmetrics.Metrics, rr acktypes.Reconciler, - sess *session.Session, id ackv1alpha1.AWSAccountID, region ackv1alpha1.AWSRegion, + roleARN ackv1alpha1.AWSResourceName, ) (acktypes.AWSResourceManager, error) { - rmId := fmt.Sprintf("%s/%s", id, region) + // We use the account ID, region, and role ARN to uniquely identify a + // resource manager. This helps us to avoid creating multiple resource + // managers for the same account/region/roleARN combination. + rmId := fmt.Sprintf("%s/%s/%s", id, region, roleARN) f.RLock() rm, found := f.rmCache[rmId] f.RUnlock() @@ -66,7 +70,7 @@ func (f *resourceManagerFactory) ManagerFor( f.Lock() defer f.Unlock() - rm, err := newResourceManager(cfg, log, metrics, rr, sess, id, region) + rm, err := newResourceManager(cfg, clientcfg, log, metrics, rr, id, region) if err != nil { return nil, err } diff --git a/pkg/resource/replication_group/manager_test_suite_test.go b/pkg/resource/replication_group/manager_test_suite_test.go deleted file mode 100644 index a05403f5..00000000 --- a/pkg/resource/replication_group/manager_test_suite_test.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. 
This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package replication_group - -import ( - "errors" - "fmt" - "path/filepath" - "testing" - - acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - - mocksvcsdkapi "github.com/aws-controllers-k8s/elasticache-controller/mocks/aws-sdk-go/elasticache" - "github.com/aws-controllers-k8s/elasticache-controller/pkg/testutil" -) - -// TestDeclarativeTestSuite runs the test suite for replication group -func TestDeclarativeTestSuite(t *testing.T) { - var ts = testutil.TestSuite{} - testutil.LoadFromFixture(filepath.Join("testdata", "test_suite.yaml"), &ts) - var delegate = testRunnerDelegate{t: t} - var runner = testutil.TestSuiteRunner{TestSuite: &ts, Delegate: &delegate} - runner.RunTests() -} - -// testRunnerDelegate implements testutil.TestRunnerDelegate -type testRunnerDelegate struct { - t *testing.T -} - -func (d *testRunnerDelegate) ResourceDescriptor() acktypes.AWSResourceDescriptor { - return &resourceDescriptor{} -} - -func (d *testRunnerDelegate) ResourceManager(mocksdkapi *mocksvcsdkapi.ElastiCacheAPI) acktypes.AWSResourceManager { - return provideResourceManagerWithMockSDKAPI(mocksdkapi) -} - -func (d *testRunnerDelegate) GoTestRunner() *testing.T { - return d.t -} - -func (d *testRunnerDelegate) EmptyServiceAPIOutput(apiName string) (interface{}, error) { - if apiName == "" { - return nil, errors.New("no API name specified") - } - //TODO: use reflection, template to auto generate this block/method. 
- switch apiName { - case "DescribeReplicationGroupsWithContext": - var output svcsdk.DescribeReplicationGroupsOutput - return &output, nil - case "ListAllowedNodeTypeModifications": - var output svcsdk.ListAllowedNodeTypeModificationsOutput - return &output, nil - case "DescribeEventsWithContext": - var output svcsdk.DescribeEventsOutput - return &output, nil - case "CreateReplicationGroupWithContext": - var output svcsdk.CreateReplicationGroupOutput - return &output, nil - case "DecreaseReplicaCountWithContext": - var output svcsdk.DecreaseReplicaCountOutput - return &output, nil - case "DeleteReplicationGroupWithContext": - var output svcsdk.DeleteReplicationGroupOutput - return &output, nil - case "DescribeCacheClustersWithContext": - var output svcsdk.DescribeCacheClustersOutput - return &output, nil - case "IncreaseReplicaCountWithContext": - var output svcsdk.IncreaseReplicaCountOutput - return &output, nil - case "ModifyReplicationGroupShardConfigurationWithContext": - var output svcsdk.ModifyReplicationGroupShardConfigurationOutput - return &output, nil - case "ModifyReplicationGroupWithContext": - var output svcsdk.ModifyReplicationGroupOutput - return &output, nil - case "ListTagsForResourceWithContext": - var output svcsdk.TagListMessage - return &output, nil - } - return nil, errors.New(fmt.Sprintf("no matching API name found for: %s", apiName)) -} - -func (d *testRunnerDelegate) Equal(a acktypes.AWSResource, b acktypes.AWSResource) bool { - ac := a.(*resource) - bc := b.(*resource) - opts := []cmp.Option{cmpopts.EquateEmpty()} - - for i := range ac.ko.Status.Conditions { - ac.ko.Status.Conditions[i].LastTransitionTime = nil - } - for i := range bc.ko.Status.Conditions { - bc.ko.Status.Conditions[i].LastTransitionTime = nil - } - - var specMatch = false - if cmp.Equal(ac.ko.Spec, bc.ko.Spec, opts...) 
{ - specMatch = true - } else { - fmt.Printf("Difference ko.Spec (-expected +actual):\n\n") - fmt.Println(cmp.Diff(ac.ko.Spec, bc.ko.Spec, opts...)) - specMatch = false - } - - var statusMatch = false - if cmp.Equal(ac.ko.Status, bc.ko.Status, opts...) { - statusMatch = true - } else { - fmt.Printf("Difference ko.Status (-expected +actual):\n\n") - fmt.Println(cmp.Diff(ac.ko.Status, bc.ko.Status, opts...)) - statusMatch = false - } - - return statusMatch && specMatch -} diff --git a/pkg/resource/replication_group/post_set_output.go b/pkg/resource/replication_group/post_set_output.go index 808aca09..29cd6d74 100644 --- a/pkg/resource/replication_group/post_set_output.go +++ b/pkg/resource/replication_group/post_set_output.go @@ -15,6 +15,7 @@ package replication_group import ( "context" + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" svcsdk "github.com/aws/aws-sdk-go/service/elasticache" diff --git a/pkg/resource/replication_group/references.go b/pkg/resource/replication_group/references.go index 10a62617..8e631e7b 100644 --- a/pkg/resource/replication_group/references.go +++ b/pkg/resource/replication_group/references.go @@ -68,24 +68,23 @@ func (rm *resourceManager) ResolveReferences( apiReader client.Reader, res acktypes.AWSResource, ) (acktypes.AWSResource, bool, error) { - namespace := res.MetaObject().GetNamespace() ko := rm.concreteResource(res).ko resourceHasReferences := false err := validateReferenceFields(ko) - if fieldHasReferences, err := rm.resolveReferenceForCacheParameterGroupName(ctx, apiReader, namespace, ko); err != nil { + if fieldHasReferences, err := rm.resolveReferenceForCacheParameterGroupName(ctx, apiReader, ko); err != nil { return &resource{ko}, (resourceHasReferences || fieldHasReferences), err } else { resourceHasReferences = resourceHasReferences || fieldHasReferences } - if fieldHasReferences, err := rm.resolveReferenceForCacheSubnetGroupName(ctx, apiReader, namespace, ko); err != nil { + if 
fieldHasReferences, err := rm.resolveReferenceForCacheSubnetGroupName(ctx, apiReader, ko); err != nil { return &resource{ko}, (resourceHasReferences || fieldHasReferences), err } else { resourceHasReferences = resourceHasReferences || fieldHasReferences } - if fieldHasReferences, err := rm.resolveReferenceForSecurityGroupIDs(ctx, apiReader, namespace, ko); err != nil { + if fieldHasReferences, err := rm.resolveReferenceForSecurityGroupIDs(ctx, apiReader, ko); err != nil { return &resource{ko}, (resourceHasReferences || fieldHasReferences), err } else { resourceHasReferences = resourceHasReferences || fieldHasReferences @@ -119,7 +118,6 @@ func validateReferenceFields(ko *svcapitypes.ReplicationGroup) error { func (rm *resourceManager) resolveReferenceForCacheParameterGroupName( ctx context.Context, apiReader client.Reader, - namespace string, ko *svcapitypes.ReplicationGroup, ) (hasReferences bool, err error) { if ko.Spec.CacheParameterGroupRef != nil && ko.Spec.CacheParameterGroupRef.From != nil { @@ -128,6 +126,10 @@ func (rm *resourceManager) resolveReferenceForCacheParameterGroupName( if arr.Name == nil || *arr.Name == "" { return hasReferences, fmt.Errorf("provided resource reference is nil or empty: CacheParameterGroupRef") } + namespace := ko.ObjectMeta.GetNamespace() + if arr.Namespace != nil && *arr.Namespace != "" { + namespace = *arr.Namespace + } obj := &svcapitypes.CacheParameterGroup{} if err := getReferencedResourceState_CacheParameterGroup(ctx, apiReader, obj, *arr.Name, namespace); err != nil { return hasReferences, err @@ -157,12 +159,8 @@ func getReferencedResourceState_CacheParameterGroup( if err != nil { return err } - var refResourceSynced, refResourceTerminal bool + var refResourceTerminal bool for _, cond := range obj.Status.Conditions { - if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && - cond.Status == corev1.ConditionTrue { - refResourceSynced = true - } if cond.Type == ackv1alpha1.ConditionTypeTerminal && cond.Status == 
corev1.ConditionTrue { return ackerr.ResourceReferenceTerminalFor( @@ -175,6 +173,13 @@ func getReferencedResourceState_CacheParameterGroup( "CacheParameterGroup", namespace, name) } + var refResourceSynced bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && + cond.Status == corev1.ConditionTrue { + refResourceSynced = true + } + } if !refResourceSynced { return ackerr.ResourceReferenceNotSyncedFor( "CacheParameterGroup", @@ -196,7 +201,6 @@ func getReferencedResourceState_CacheParameterGroup( func (rm *resourceManager) resolveReferenceForCacheSubnetGroupName( ctx context.Context, apiReader client.Reader, - namespace string, ko *svcapitypes.ReplicationGroup, ) (hasReferences bool, err error) { if ko.Spec.CacheSubnetGroupRef != nil && ko.Spec.CacheSubnetGroupRef.From != nil { @@ -205,6 +209,10 @@ func (rm *resourceManager) resolveReferenceForCacheSubnetGroupName( if arr.Name == nil || *arr.Name == "" { return hasReferences, fmt.Errorf("provided resource reference is nil or empty: CacheSubnetGroupRef") } + namespace := ko.ObjectMeta.GetNamespace() + if arr.Namespace != nil && *arr.Namespace != "" { + namespace = *arr.Namespace + } obj := &svcapitypes.CacheSubnetGroup{} if err := getReferencedResourceState_CacheSubnetGroup(ctx, apiReader, obj, *arr.Name, namespace); err != nil { return hasReferences, err @@ -234,12 +242,8 @@ func getReferencedResourceState_CacheSubnetGroup( if err != nil { return err } - var refResourceSynced, refResourceTerminal bool + var refResourceTerminal bool for _, cond := range obj.Status.Conditions { - if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && - cond.Status == corev1.ConditionTrue { - refResourceSynced = true - } if cond.Type == ackv1alpha1.ConditionTypeTerminal && cond.Status == corev1.ConditionTrue { return ackerr.ResourceReferenceTerminalFor( @@ -252,6 +256,13 @@ func getReferencedResourceState_CacheSubnetGroup( "CacheSubnetGroup", namespace, name) } + var 
refResourceSynced bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && + cond.Status == corev1.ConditionTrue { + refResourceSynced = true + } + } if !refResourceSynced { return ackerr.ResourceReferenceNotSyncedFor( "CacheSubnetGroup", @@ -273,7 +284,6 @@ func getReferencedResourceState_CacheSubnetGroup( func (rm *resourceManager) resolveReferenceForSecurityGroupIDs( ctx context.Context, apiReader client.Reader, - namespace string, ko *svcapitypes.ReplicationGroup, ) (hasReferences bool, err error) { for _, f0iter := range ko.Spec.SecurityGroupRefs { @@ -283,6 +293,10 @@ func (rm *resourceManager) resolveReferenceForSecurityGroupIDs( if arr.Name == nil || *arr.Name == "" { return hasReferences, fmt.Errorf("provided resource reference is nil or empty: SecurityGroupRefs") } + namespace := ko.ObjectMeta.GetNamespace() + if arr.Namespace != nil && *arr.Namespace != "" { + namespace = *arr.Namespace + } obj := &ec2apitypes.SecurityGroup{} if err := getReferencedResourceState_SecurityGroup(ctx, apiReader, obj, *arr.Name, namespace); err != nil { return hasReferences, err @@ -316,12 +330,8 @@ func getReferencedResourceState_SecurityGroup( if err != nil { return err } - var refResourceSynced, refResourceTerminal bool + var refResourceTerminal bool for _, cond := range obj.Status.Conditions { - if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && - cond.Status == corev1.ConditionTrue { - refResourceSynced = true - } if cond.Type == ackv1alpha1.ConditionTypeTerminal && cond.Status == corev1.ConditionTrue { return ackerr.ResourceReferenceTerminalFor( @@ -334,6 +344,13 @@ func getReferencedResourceState_SecurityGroup( "SecurityGroup", namespace, name) } + var refResourceSynced bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && + cond.Status == corev1.ConditionTrue { + refResourceSynced = true + } + } if !refResourceSynced { return 
ackerr.ResourceReferenceNotSyncedFor( "SecurityGroup", diff --git a/pkg/resource/replication_group/resource.go b/pkg/resource/replication_group/resource.go index f021332d..dd0c7a1f 100644 --- a/pkg/resource/replication_group/resource.go +++ b/pkg/resource/replication_group/resource.go @@ -93,6 +93,17 @@ func (r *resource) SetIdentifiers(identifier *ackv1alpha1.AWSIdentifiers) error return nil } +// PopulateResourceFromAnnotation populates the fields passed from adoption annotation +func (r *resource) PopulateResourceFromAnnotation(fields map[string]string) error { + tmp, ok := fields["replicationGroupID"] + if !ok { + return ackerrors.MissingNameIdentifier + } + r.ko.Spec.ReplicationGroupID = &tmp + + return nil +} + // DeepCopy will return a copy of the resource func (r *resource) DeepCopy() acktypes.AWSResource { koCopy := r.ko.DeepCopy() diff --git a/pkg/resource/replication_group/sdk.go b/pkg/resource/replication_group/sdk.go index f55663f8..9bcd3cdf 100644 --- a/pkg/resource/replication_group/sdk.go +++ b/pkg/resource/replication_group/sdk.go @@ -19,6 +19,7 @@ import ( "context" "errors" "fmt" + "math" "reflect" "strings" @@ -28,8 +29,10 @@ import ( ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" - "github.com/aws/aws-sdk-go/aws" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + svcsdktypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" + smithy "github.com/aws/smithy-go" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -40,8 +43,7 @@ import ( var ( _ = &metav1.Time{} _ = strings.ToLower("") - _ = &aws.JSONValue{} - _ = &svcsdk.ElastiCache{} + _ = &svcsdk.Client{} _ = &svcapitypes.ReplicationGroup{} _ = ackv1alpha1.AWSAccountID("") _ = &ackerr.NotFound @@ -49,6 +51,7 @@ var ( _ = 
&reflect.Value{} _ = fmt.Sprintf("") _ = &ackrequeue.NoRequeue{} + _ = &aws.Config{} ) // sdkFind returns SDK-specific information about a supplied resource @@ -73,10 +76,11 @@ func (rm *resourceManager) sdkFind( return nil, err } var resp *svcsdk.DescribeReplicationGroupsOutput - resp, err = rm.sdkapi.DescribeReplicationGroupsWithContext(ctx, input) + resp, err = rm.sdkapi.DescribeReplicationGroups(ctx, input) rm.metrics.RecordAPICall("READ_MANY", "DescribeReplicationGroups", err) if err != nil { - if awsErr, ok := ackerr.AWSError(err); ok && awsErr.Code() == "ReplicationGroupNotFoundFault" { + var awsErr smithy.APIError + if errors.As(err, &awsErr) && awsErr.ErrorCode() == "ReplicationGroupNotFoundFault" { return nil, ackerr.NotFound } return nil, err @@ -115,8 +119,8 @@ func (rm *resourceManager) sdkFind( } else { ko.Status.AutoMinorVersionUpgrade = nil } - if elem.AutomaticFailover != nil { - ko.Status.AutomaticFailover = elem.AutomaticFailover + if elem.AutomaticFailover != "" { + ko.Status.AutomaticFailover = aws.String(string(elem.AutomaticFailover)) } else { ko.Status.AutomaticFailover = nil } @@ -130,20 +134,26 @@ func (rm *resourceManager) sdkFind( } else { ko.Status.ClusterEnabled = nil } + if elem.ClusterMode != "" { + ko.Spec.ClusterMode = aws.String(string(elem.ClusterMode)) + } else { + ko.Spec.ClusterMode = nil + } if elem.ConfigurationEndpoint != nil { - f8 := &svcapitypes.Endpoint{} + f9 := &svcapitypes.Endpoint{} if elem.ConfigurationEndpoint.Address != nil { - f8.Address = elem.ConfigurationEndpoint.Address + f9.Address = elem.ConfigurationEndpoint.Address } if elem.ConfigurationEndpoint.Port != nil { - f8.Port = elem.ConfigurationEndpoint.Port + portCopy := int64(*elem.ConfigurationEndpoint.Port) + f9.Port = &portCopy } - ko.Status.ConfigurationEndpoint = f8 + ko.Status.ConfigurationEndpoint = f9 } else { ko.Status.ConfigurationEndpoint = nil } - if elem.DataTiering != nil { - ko.Status.DataTiering = elem.DataTiering + if elem.DataTiering != "" 
{ + ko.Status.DataTiering = aws.String(string(elem.DataTiering)) } else { ko.Status.DataTiering = nil } @@ -152,239 +162,242 @@ func (rm *resourceManager) sdkFind( } else { ko.Spec.Description = nil } + if elem.Engine != nil { + ko.Spec.Engine = elem.Engine + } else { + ko.Spec.Engine = nil + } if elem.GlobalReplicationGroupInfo != nil { - f11 := &svcapitypes.GlobalReplicationGroupInfo{} + f13 := &svcapitypes.GlobalReplicationGroupInfo{} if elem.GlobalReplicationGroupInfo.GlobalReplicationGroupId != nil { - f11.GlobalReplicationGroupID = elem.GlobalReplicationGroupInfo.GlobalReplicationGroupId + f13.GlobalReplicationGroupID = elem.GlobalReplicationGroupInfo.GlobalReplicationGroupId } if elem.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole != nil { - f11.GlobalReplicationGroupMemberRole = elem.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole + f13.GlobalReplicationGroupMemberRole = elem.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole } - ko.Status.GlobalReplicationGroupInfo = f11 + ko.Status.GlobalReplicationGroupInfo = f13 } else { ko.Status.GlobalReplicationGroupInfo = nil } + if elem.IpDiscovery != "" { + ko.Spec.IPDiscovery = aws.String(string(elem.IpDiscovery)) + } else { + ko.Spec.IPDiscovery = nil + } if elem.KmsKeyId != nil { ko.Spec.KMSKeyID = elem.KmsKeyId } else { ko.Spec.KMSKeyID = nil } if elem.LogDeliveryConfigurations != nil { - f13 := []*svcapitypes.LogDeliveryConfigurationRequest{} - for _, f13iter := range elem.LogDeliveryConfigurations { - f13elem := &svcapitypes.LogDeliveryConfigurationRequest{} - if f13iter.DestinationDetails != nil { - f13elemf0 := &svcapitypes.DestinationDetails{} - if f13iter.DestinationDetails.CloudWatchLogsDetails != nil { - f13elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} - if f13iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f13elemf0f0.LogGroup = f13iter.DestinationDetails.CloudWatchLogsDetails.LogGroup + f16 := 
[]*svcapitypes.LogDeliveryConfigurationRequest{} + for _, f16iter := range elem.LogDeliveryConfigurations { + f16elem := &svcapitypes.LogDeliveryConfigurationRequest{} + if f16iter.DestinationDetails != nil { + f16elemf0 := &svcapitypes.DestinationDetails{} + if f16iter.DestinationDetails.CloudWatchLogsDetails != nil { + f16elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} + if f16iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f16elemf0f0.LogGroup = f16iter.DestinationDetails.CloudWatchLogsDetails.LogGroup } - f13elemf0.CloudWatchLogsDetails = f13elemf0f0 + f16elemf0.CloudWatchLogsDetails = f16elemf0f0 } - if f13iter.DestinationDetails.KinesisFirehoseDetails != nil { - f13elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} - if f13iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f13elemf0f1.DeliveryStream = f13iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream + if f16iter.DestinationDetails.KinesisFirehoseDetails != nil { + f16elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} + if f16iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f16elemf0f1.DeliveryStream = f16iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream } - f13elemf0.KinesisFirehoseDetails = f13elemf0f1 + f16elemf0.KinesisFirehoseDetails = f16elemf0f1 } - f13elem.DestinationDetails = f13elemf0 + f16elem.DestinationDetails = f16elemf0 } - if f13iter.DestinationType != nil { - f13elem.DestinationType = f13iter.DestinationType + if f16iter.DestinationType != "" { + f16elem.DestinationType = aws.String(string(f16iter.DestinationType)) } - if f13iter.LogFormat != nil { - f13elem.LogFormat = f13iter.LogFormat + if f16iter.LogFormat != "" { + f16elem.LogFormat = aws.String(string(f16iter.LogFormat)) } - if f13iter.LogType != nil { - f13elem.LogType = f13iter.LogType + if f16iter.LogType != "" { + f16elem.LogType = aws.String(string(f16iter.LogType)) } - f13 = append(f13, f13elem) + f16 = 
append(f16, f16elem) } - ko.Spec.LogDeliveryConfigurations = f13 + ko.Spec.LogDeliveryConfigurations = f16 } else { ko.Spec.LogDeliveryConfigurations = nil } if elem.MemberClusters != nil { - f14 := []*string{} - for _, f14iter := range elem.MemberClusters { - var f14elem string - f14elem = *f14iter - f14 = append(f14, &f14elem) - } - ko.Status.MemberClusters = f14 + ko.Status.MemberClusters = aws.StringSlice(elem.MemberClusters) } else { ko.Status.MemberClusters = nil } if elem.MemberClustersOutpostArns != nil { - f15 := []*string{} - for _, f15iter := range elem.MemberClustersOutpostArns { - var f15elem string - f15elem = *f15iter - f15 = append(f15, &f15elem) - } - ko.Status.MemberClustersOutpostARNs = f15 + ko.Status.MemberClustersOutpostARNs = aws.StringSlice(elem.MemberClustersOutpostArns) } else { ko.Status.MemberClustersOutpostARNs = nil } - if elem.MultiAZ != nil { - ko.Status.MultiAZ = elem.MultiAZ + if elem.MultiAZ != "" { + ko.Status.MultiAZ = aws.String(string(elem.MultiAZ)) } else { ko.Status.MultiAZ = nil } + if elem.NetworkType != "" { + ko.Spec.NetworkType = aws.String(string(elem.NetworkType)) + } else { + ko.Spec.NetworkType = nil + } if elem.NodeGroups != nil { - f17 := []*svcapitypes.NodeGroup{} - for _, f17iter := range elem.NodeGroups { - f17elem := &svcapitypes.NodeGroup{} - if f17iter.NodeGroupId != nil { - f17elem.NodeGroupID = f17iter.NodeGroupId - } - if f17iter.NodeGroupMembers != nil { - f17elemf1 := []*svcapitypes.NodeGroupMember{} - for _, f17elemf1iter := range f17iter.NodeGroupMembers { - f17elemf1elem := &svcapitypes.NodeGroupMember{} - if f17elemf1iter.CacheClusterId != nil { - f17elemf1elem.CacheClusterID = f17elemf1iter.CacheClusterId + f21 := []*svcapitypes.NodeGroup{} + for _, f21iter := range elem.NodeGroups { + f21elem := &svcapitypes.NodeGroup{} + if f21iter.NodeGroupId != nil { + f21elem.NodeGroupID = f21iter.NodeGroupId + } + if f21iter.NodeGroupMembers != nil { + f21elemf1 := []*svcapitypes.NodeGroupMember{} + for _, 
f21elemf1iter := range f21iter.NodeGroupMembers { + f21elemf1elem := &svcapitypes.NodeGroupMember{} + if f21elemf1iter.CacheClusterId != nil { + f21elemf1elem.CacheClusterID = f21elemf1iter.CacheClusterId } - if f17elemf1iter.CacheNodeId != nil { - f17elemf1elem.CacheNodeID = f17elemf1iter.CacheNodeId + if f21elemf1iter.CacheNodeId != nil { + f21elemf1elem.CacheNodeID = f21elemf1iter.CacheNodeId } - if f17elemf1iter.CurrentRole != nil { - f17elemf1elem.CurrentRole = f17elemf1iter.CurrentRole + if f21elemf1iter.CurrentRole != nil { + f21elemf1elem.CurrentRole = f21elemf1iter.CurrentRole } - if f17elemf1iter.PreferredAvailabilityZone != nil { - f17elemf1elem.PreferredAvailabilityZone = f17elemf1iter.PreferredAvailabilityZone + if f21elemf1iter.PreferredAvailabilityZone != nil { + f21elemf1elem.PreferredAvailabilityZone = f21elemf1iter.PreferredAvailabilityZone } - if f17elemf1iter.PreferredOutpostArn != nil { - f17elemf1elem.PreferredOutpostARN = f17elemf1iter.PreferredOutpostArn + if f21elemf1iter.PreferredOutpostArn != nil { + f21elemf1elem.PreferredOutpostARN = f21elemf1iter.PreferredOutpostArn } - if f17elemf1iter.ReadEndpoint != nil { - f17elemf1elemf5 := &svcapitypes.Endpoint{} - if f17elemf1iter.ReadEndpoint.Address != nil { - f17elemf1elemf5.Address = f17elemf1iter.ReadEndpoint.Address + if f21elemf1iter.ReadEndpoint != nil { + f21elemf1elemf5 := &svcapitypes.Endpoint{} + if f21elemf1iter.ReadEndpoint.Address != nil { + f21elemf1elemf5.Address = f21elemf1iter.ReadEndpoint.Address } - if f17elemf1iter.ReadEndpoint.Port != nil { - f17elemf1elemf5.Port = f17elemf1iter.ReadEndpoint.Port + if f21elemf1iter.ReadEndpoint.Port != nil { + portCopy := int64(*f21elemf1iter.ReadEndpoint.Port) + f21elemf1elemf5.Port = &portCopy } - f17elemf1elem.ReadEndpoint = f17elemf1elemf5 + f21elemf1elem.ReadEndpoint = f21elemf1elemf5 } - f17elemf1 = append(f17elemf1, f17elemf1elem) + f21elemf1 = append(f21elemf1, f21elemf1elem) } - f17elem.NodeGroupMembers = f17elemf1 + 
f21elem.NodeGroupMembers = f21elemf1 } - if f17iter.PrimaryEndpoint != nil { - f17elemf2 := &svcapitypes.Endpoint{} - if f17iter.PrimaryEndpoint.Address != nil { - f17elemf2.Address = f17iter.PrimaryEndpoint.Address + if f21iter.PrimaryEndpoint != nil { + f21elemf2 := &svcapitypes.Endpoint{} + if f21iter.PrimaryEndpoint.Address != nil { + f21elemf2.Address = f21iter.PrimaryEndpoint.Address } - if f17iter.PrimaryEndpoint.Port != nil { - f17elemf2.Port = f17iter.PrimaryEndpoint.Port + if f21iter.PrimaryEndpoint.Port != nil { + portCopy := int64(*f21iter.PrimaryEndpoint.Port) + f21elemf2.Port = &portCopy } - f17elem.PrimaryEndpoint = f17elemf2 + f21elem.PrimaryEndpoint = f21elemf2 } - if f17iter.ReaderEndpoint != nil { - f17elemf3 := &svcapitypes.Endpoint{} - if f17iter.ReaderEndpoint.Address != nil { - f17elemf3.Address = f17iter.ReaderEndpoint.Address + if f21iter.ReaderEndpoint != nil { + f21elemf3 := &svcapitypes.Endpoint{} + if f21iter.ReaderEndpoint.Address != nil { + f21elemf3.Address = f21iter.ReaderEndpoint.Address } - if f17iter.ReaderEndpoint.Port != nil { - f17elemf3.Port = f17iter.ReaderEndpoint.Port + if f21iter.ReaderEndpoint.Port != nil { + portCopy := int64(*f21iter.ReaderEndpoint.Port) + f21elemf3.Port = &portCopy } - f17elem.ReaderEndpoint = f17elemf3 + f21elem.ReaderEndpoint = f21elemf3 } - if f17iter.Slots != nil { - f17elem.Slots = f17iter.Slots + if f21iter.Slots != nil { + f21elem.Slots = f21iter.Slots } - if f17iter.Status != nil { - f17elem.Status = f17iter.Status + if f21iter.Status != nil { + f21elem.Status = f21iter.Status } - f17 = append(f17, f17elem) + f21 = append(f21, f21elem) } - ko.Status.NodeGroups = f17 + ko.Status.NodeGroups = f21 } else { ko.Status.NodeGroups = nil } if elem.PendingModifiedValues != nil { - f18 := &svcapitypes.ReplicationGroupPendingModifiedValues{} - if elem.PendingModifiedValues.AuthTokenStatus != nil { - f18.AuthTokenStatus = elem.PendingModifiedValues.AuthTokenStatus + f22 := 
&svcapitypes.ReplicationGroupPendingModifiedValues{} + if elem.PendingModifiedValues.AuthTokenStatus != "" { + f22.AuthTokenStatus = aws.String(string(elem.PendingModifiedValues.AuthTokenStatus)) } - if elem.PendingModifiedValues.AutomaticFailoverStatus != nil { - f18.AutomaticFailoverStatus = elem.PendingModifiedValues.AutomaticFailoverStatus + if elem.PendingModifiedValues.AutomaticFailoverStatus != "" { + f22.AutomaticFailoverStatus = aws.String(string(elem.PendingModifiedValues.AutomaticFailoverStatus)) + } + if elem.PendingModifiedValues.ClusterMode != "" { + f22.ClusterMode = aws.String(string(elem.PendingModifiedValues.ClusterMode)) } if elem.PendingModifiedValues.LogDeliveryConfigurations != nil { - f18f2 := []*svcapitypes.PendingLogDeliveryConfiguration{} - for _, f18f2iter := range elem.PendingModifiedValues.LogDeliveryConfigurations { - f18f2elem := &svcapitypes.PendingLogDeliveryConfiguration{} - if f18f2iter.DestinationDetails != nil { - f18f2elemf0 := &svcapitypes.DestinationDetails{} - if f18f2iter.DestinationDetails.CloudWatchLogsDetails != nil { - f18f2elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} - if f18f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f18f2elemf0f0.LogGroup = f18f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup + f22f3 := []*svcapitypes.PendingLogDeliveryConfiguration{} + for _, f22f3iter := range elem.PendingModifiedValues.LogDeliveryConfigurations { + f22f3elem := &svcapitypes.PendingLogDeliveryConfiguration{} + if f22f3iter.DestinationDetails != nil { + f22f3elemf0 := &svcapitypes.DestinationDetails{} + if f22f3iter.DestinationDetails.CloudWatchLogsDetails != nil { + f22f3elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} + if f22f3iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f22f3elemf0f0.LogGroup = f22f3iter.DestinationDetails.CloudWatchLogsDetails.LogGroup } - f18f2elemf0.CloudWatchLogsDetails = f18f2elemf0f0 + f22f3elemf0.CloudWatchLogsDetails = 
f22f3elemf0f0 } - if f18f2iter.DestinationDetails.KinesisFirehoseDetails != nil { - f18f2elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} - if f18f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f18f2elemf0f1.DeliveryStream = f18f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream + if f22f3iter.DestinationDetails.KinesisFirehoseDetails != nil { + f22f3elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} + if f22f3iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f22f3elemf0f1.DeliveryStream = f22f3iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream } - f18f2elemf0.KinesisFirehoseDetails = f18f2elemf0f1 + f22f3elemf0.KinesisFirehoseDetails = f22f3elemf0f1 } - f18f2elem.DestinationDetails = f18f2elemf0 + f22f3elem.DestinationDetails = f22f3elemf0 } - if f18f2iter.DestinationType != nil { - f18f2elem.DestinationType = f18f2iter.DestinationType + if f22f3iter.DestinationType != "" { + f22f3elem.DestinationType = aws.String(string(f22f3iter.DestinationType)) } - if f18f2iter.LogFormat != nil { - f18f2elem.LogFormat = f18f2iter.LogFormat + if f22f3iter.LogFormat != "" { + f22f3elem.LogFormat = aws.String(string(f22f3iter.LogFormat)) } - if f18f2iter.LogType != nil { - f18f2elem.LogType = f18f2iter.LogType + if f22f3iter.LogType != "" { + f22f3elem.LogType = aws.String(string(f22f3iter.LogType)) } - f18f2 = append(f18f2, f18f2elem) + f22f3 = append(f22f3, f22f3elem) } - f18.LogDeliveryConfigurations = f18f2 + f22.LogDeliveryConfigurations = f22f3 } if elem.PendingModifiedValues.PrimaryClusterId != nil { - f18.PrimaryClusterID = elem.PendingModifiedValues.PrimaryClusterId + f22.PrimaryClusterID = elem.PendingModifiedValues.PrimaryClusterId } if elem.PendingModifiedValues.Resharding != nil { - f18f4 := &svcapitypes.ReshardingStatus{} + f22f5 := &svcapitypes.ReshardingStatus{} if elem.PendingModifiedValues.Resharding.SlotMigration != nil { - f18f4f0 := &svcapitypes.SlotMigration{} + 
f22f5f0 := &svcapitypes.SlotMigration{} if elem.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage != nil { - f18f4f0.ProgressPercentage = elem.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage + f22f5f0.ProgressPercentage = elem.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage } - f18f4.SlotMigration = f18f4f0 + f22f5.SlotMigration = f22f5f0 } - f18.Resharding = f18f4 + f22.Resharding = f22f5 + } + if elem.PendingModifiedValues.TransitEncryptionEnabled != nil { + f22.TransitEncryptionEnabled = elem.PendingModifiedValues.TransitEncryptionEnabled + } + if elem.PendingModifiedValues.TransitEncryptionMode != "" { + f22.TransitEncryptionMode = aws.String(string(elem.PendingModifiedValues.TransitEncryptionMode)) } if elem.PendingModifiedValues.UserGroups != nil { - f18f5 := &svcapitypes.UserGroupsUpdateStatus{} + f22f8 := &svcapitypes.UserGroupsUpdateStatus{} if elem.PendingModifiedValues.UserGroups.UserGroupIdsToAdd != nil { - f18f5f0 := []*string{} - for _, f18f5f0iter := range elem.PendingModifiedValues.UserGroups.UserGroupIdsToAdd { - var f18f5f0elem string - f18f5f0elem = *f18f5f0iter - f18f5f0 = append(f18f5f0, &f18f5f0elem) - } - f18f5.UserGroupIDsToAdd = f18f5f0 + f22f8.UserGroupIDsToAdd = aws.StringSlice(elem.PendingModifiedValues.UserGroups.UserGroupIdsToAdd) } if elem.PendingModifiedValues.UserGroups.UserGroupIdsToRemove != nil { - f18f5f1 := []*string{} - for _, f18f5f1iter := range elem.PendingModifiedValues.UserGroups.UserGroupIdsToRemove { - var f18f5f1elem string - f18f5f1elem = *f18f5f1iter - f18f5f1 = append(f18f5f1, &f18f5f1elem) - } - f18f5.UserGroupIDsToRemove = f18f5f1 + f22f8.UserGroupIDsToRemove = aws.StringSlice(elem.PendingModifiedValues.UserGroups.UserGroupIdsToRemove) } - f18.UserGroups = f18f5 + f22.UserGroups = f22f8 } - ko.Status.PendingModifiedValues = f18 + ko.Status.PendingModifiedValues = f22 } else { ko.Status.PendingModifiedValues = nil } @@ -399,7 +412,8 @@ func (rm *resourceManager) 
sdkFind( ko.Spec.ReplicationGroupID = nil } if elem.SnapshotRetentionLimit != nil { - ko.Spec.SnapshotRetentionLimit = elem.SnapshotRetentionLimit + snapshotRetentionLimitCopy := int64(*elem.SnapshotRetentionLimit) + ko.Spec.SnapshotRetentionLimit = &snapshotRetentionLimitCopy } else { ko.Spec.SnapshotRetentionLimit = nil } @@ -423,14 +437,13 @@ func (rm *resourceManager) sdkFind( } else { ko.Spec.TransitEncryptionEnabled = nil } + if elem.TransitEncryptionMode != "" { + ko.Spec.TransitEncryptionMode = aws.String(string(elem.TransitEncryptionMode)) + } else { + ko.Spec.TransitEncryptionMode = nil + } if elem.UserGroupIds != nil { - f26 := []*string{} - for _, f26iter := range elem.UserGroupIds { - var f26elem string - f26elem = *f26iter - f26 = append(f26, &f26elem) - } - ko.Spec.UserGroupIDs = f26 + ko.Spec.UserGroupIDs = aws.StringSlice(elem.UserGroupIds) } else { ko.Spec.UserGroupIDs = nil } @@ -526,7 +539,7 @@ func (rm *resourceManager) newListRequestPayload( res := &svcsdk.DescribeReplicationGroupsInput{} if r.ko.Spec.ReplicationGroupID != nil { - res.SetReplicationGroupId(*r.ko.Spec.ReplicationGroupID) + res.ReplicationGroupId = r.ko.Spec.ReplicationGroupID } return res, nil @@ -551,7 +564,7 @@ func (rm *resourceManager) sdkCreate( var resp *svcsdk.CreateReplicationGroupOutput _ = resp - resp, err = rm.sdkapi.CreateReplicationGroupWithContext(ctx, input) + resp, err = rm.sdkapi.CreateReplicationGroup(ctx, input) rm.metrics.RecordAPICall("CREATE", "CreateReplicationGroup", err) if err != nil { return nil, err @@ -587,8 +600,8 @@ func (rm *resourceManager) sdkCreate( } else { ko.Status.AutoMinorVersionUpgrade = nil } - if resp.ReplicationGroup.AutomaticFailover != nil { - ko.Status.AutomaticFailover = resp.ReplicationGroup.AutomaticFailover + if resp.ReplicationGroup.AutomaticFailover != "" { + ko.Status.AutomaticFailover = aws.String(string(resp.ReplicationGroup.AutomaticFailover)) } else { ko.Status.AutomaticFailover = nil } @@ -602,20 +615,26 @@ func (rm 
*resourceManager) sdkCreate( } else { ko.Status.ClusterEnabled = nil } + if resp.ReplicationGroup.ClusterMode != "" { + ko.Spec.ClusterMode = aws.String(string(resp.ReplicationGroup.ClusterMode)) + } else { + ko.Spec.ClusterMode = nil + } if resp.ReplicationGroup.ConfigurationEndpoint != nil { - f8 := &svcapitypes.Endpoint{} + f9 := &svcapitypes.Endpoint{} if resp.ReplicationGroup.ConfigurationEndpoint.Address != nil { - f8.Address = resp.ReplicationGroup.ConfigurationEndpoint.Address + f9.Address = resp.ReplicationGroup.ConfigurationEndpoint.Address } if resp.ReplicationGroup.ConfigurationEndpoint.Port != nil { - f8.Port = resp.ReplicationGroup.ConfigurationEndpoint.Port + portCopy := int64(*resp.ReplicationGroup.ConfigurationEndpoint.Port) + f9.Port = &portCopy } - ko.Status.ConfigurationEndpoint = f8 + ko.Status.ConfigurationEndpoint = f9 } else { ko.Status.ConfigurationEndpoint = nil } - if resp.ReplicationGroup.DataTiering != nil { - ko.Status.DataTiering = resp.ReplicationGroup.DataTiering + if resp.ReplicationGroup.DataTiering != "" { + ko.Status.DataTiering = aws.String(string(resp.ReplicationGroup.DataTiering)) } else { ko.Status.DataTiering = nil } @@ -624,239 +643,242 @@ func (rm *resourceManager) sdkCreate( } else { ko.Spec.Description = nil } + if resp.ReplicationGroup.Engine != nil { + ko.Spec.Engine = resp.ReplicationGroup.Engine + } else { + ko.Spec.Engine = nil + } if resp.ReplicationGroup.GlobalReplicationGroupInfo != nil { - f11 := &svcapitypes.GlobalReplicationGroupInfo{} + f13 := &svcapitypes.GlobalReplicationGroupInfo{} if resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupId != nil { - f11.GlobalReplicationGroupID = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupId + f13.GlobalReplicationGroupID = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupId } if resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole != nil { - 
f11.GlobalReplicationGroupMemberRole = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole + f13.GlobalReplicationGroupMemberRole = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole } - ko.Status.GlobalReplicationGroupInfo = f11 + ko.Status.GlobalReplicationGroupInfo = f13 } else { ko.Status.GlobalReplicationGroupInfo = nil } + if resp.ReplicationGroup.IpDiscovery != "" { + ko.Spec.IPDiscovery = aws.String(string(resp.ReplicationGroup.IpDiscovery)) + } else { + ko.Spec.IPDiscovery = nil + } if resp.ReplicationGroup.KmsKeyId != nil { ko.Spec.KMSKeyID = resp.ReplicationGroup.KmsKeyId } else { ko.Spec.KMSKeyID = nil } if resp.ReplicationGroup.LogDeliveryConfigurations != nil { - f13 := []*svcapitypes.LogDeliveryConfigurationRequest{} - for _, f13iter := range resp.ReplicationGroup.LogDeliveryConfigurations { - f13elem := &svcapitypes.LogDeliveryConfigurationRequest{} - if f13iter.DestinationDetails != nil { - f13elemf0 := &svcapitypes.DestinationDetails{} - if f13iter.DestinationDetails.CloudWatchLogsDetails != nil { - f13elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} - if f13iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f13elemf0f0.LogGroup = f13iter.DestinationDetails.CloudWatchLogsDetails.LogGroup + f16 := []*svcapitypes.LogDeliveryConfigurationRequest{} + for _, f16iter := range resp.ReplicationGroup.LogDeliveryConfigurations { + f16elem := &svcapitypes.LogDeliveryConfigurationRequest{} + if f16iter.DestinationDetails != nil { + f16elemf0 := &svcapitypes.DestinationDetails{} + if f16iter.DestinationDetails.CloudWatchLogsDetails != nil { + f16elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} + if f16iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f16elemf0f0.LogGroup = f16iter.DestinationDetails.CloudWatchLogsDetails.LogGroup } - f13elemf0.CloudWatchLogsDetails = f13elemf0f0 + f16elemf0.CloudWatchLogsDetails = f16elemf0f0 } - if 
f13iter.DestinationDetails.KinesisFirehoseDetails != nil { - f13elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} - if f13iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f13elemf0f1.DeliveryStream = f13iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream + if f16iter.DestinationDetails.KinesisFirehoseDetails != nil { + f16elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} + if f16iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f16elemf0f1.DeliveryStream = f16iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream } - f13elemf0.KinesisFirehoseDetails = f13elemf0f1 + f16elemf0.KinesisFirehoseDetails = f16elemf0f1 } - f13elem.DestinationDetails = f13elemf0 + f16elem.DestinationDetails = f16elemf0 } - if f13iter.DestinationType != nil { - f13elem.DestinationType = f13iter.DestinationType + if f16iter.DestinationType != "" { + f16elem.DestinationType = aws.String(string(f16iter.DestinationType)) } - if f13iter.LogFormat != nil { - f13elem.LogFormat = f13iter.LogFormat + if f16iter.LogFormat != "" { + f16elem.LogFormat = aws.String(string(f16iter.LogFormat)) } - if f13iter.LogType != nil { - f13elem.LogType = f13iter.LogType + if f16iter.LogType != "" { + f16elem.LogType = aws.String(string(f16iter.LogType)) } - f13 = append(f13, f13elem) + f16 = append(f16, f16elem) } - ko.Spec.LogDeliveryConfigurations = f13 + ko.Spec.LogDeliveryConfigurations = f16 } else { ko.Spec.LogDeliveryConfigurations = nil } if resp.ReplicationGroup.MemberClusters != nil { - f14 := []*string{} - for _, f14iter := range resp.ReplicationGroup.MemberClusters { - var f14elem string - f14elem = *f14iter - f14 = append(f14, &f14elem) - } - ko.Status.MemberClusters = f14 + ko.Status.MemberClusters = aws.StringSlice(resp.ReplicationGroup.MemberClusters) } else { ko.Status.MemberClusters = nil } if resp.ReplicationGroup.MemberClustersOutpostArns != nil { - f15 := []*string{} - for _, f15iter := range 
resp.ReplicationGroup.MemberClustersOutpostArns { - var f15elem string - f15elem = *f15iter - f15 = append(f15, &f15elem) - } - ko.Status.MemberClustersOutpostARNs = f15 + ko.Status.MemberClustersOutpostARNs = aws.StringSlice(resp.ReplicationGroup.MemberClustersOutpostArns) } else { ko.Status.MemberClustersOutpostARNs = nil } - if resp.ReplicationGroup.MultiAZ != nil { - ko.Status.MultiAZ = resp.ReplicationGroup.MultiAZ + if resp.ReplicationGroup.MultiAZ != "" { + ko.Status.MultiAZ = aws.String(string(resp.ReplicationGroup.MultiAZ)) } else { ko.Status.MultiAZ = nil } + if resp.ReplicationGroup.NetworkType != "" { + ko.Spec.NetworkType = aws.String(string(resp.ReplicationGroup.NetworkType)) + } else { + ko.Spec.NetworkType = nil + } if resp.ReplicationGroup.NodeGroups != nil { - f17 := []*svcapitypes.NodeGroup{} - for _, f17iter := range resp.ReplicationGroup.NodeGroups { - f17elem := &svcapitypes.NodeGroup{} - if f17iter.NodeGroupId != nil { - f17elem.NodeGroupID = f17iter.NodeGroupId - } - if f17iter.NodeGroupMembers != nil { - f17elemf1 := []*svcapitypes.NodeGroupMember{} - for _, f17elemf1iter := range f17iter.NodeGroupMembers { - f17elemf1elem := &svcapitypes.NodeGroupMember{} - if f17elemf1iter.CacheClusterId != nil { - f17elemf1elem.CacheClusterID = f17elemf1iter.CacheClusterId + f21 := []*svcapitypes.NodeGroup{} + for _, f21iter := range resp.ReplicationGroup.NodeGroups { + f21elem := &svcapitypes.NodeGroup{} + if f21iter.NodeGroupId != nil { + f21elem.NodeGroupID = f21iter.NodeGroupId + } + if f21iter.NodeGroupMembers != nil { + f21elemf1 := []*svcapitypes.NodeGroupMember{} + for _, f21elemf1iter := range f21iter.NodeGroupMembers { + f21elemf1elem := &svcapitypes.NodeGroupMember{} + if f21elemf1iter.CacheClusterId != nil { + f21elemf1elem.CacheClusterID = f21elemf1iter.CacheClusterId } - if f17elemf1iter.CacheNodeId != nil { - f17elemf1elem.CacheNodeID = f17elemf1iter.CacheNodeId + if f21elemf1iter.CacheNodeId != nil { + f21elemf1elem.CacheNodeID = 
f21elemf1iter.CacheNodeId } - if f17elemf1iter.CurrentRole != nil { - f17elemf1elem.CurrentRole = f17elemf1iter.CurrentRole + if f21elemf1iter.CurrentRole != nil { + f21elemf1elem.CurrentRole = f21elemf1iter.CurrentRole } - if f17elemf1iter.PreferredAvailabilityZone != nil { - f17elemf1elem.PreferredAvailabilityZone = f17elemf1iter.PreferredAvailabilityZone + if f21elemf1iter.PreferredAvailabilityZone != nil { + f21elemf1elem.PreferredAvailabilityZone = f21elemf1iter.PreferredAvailabilityZone } - if f17elemf1iter.PreferredOutpostArn != nil { - f17elemf1elem.PreferredOutpostARN = f17elemf1iter.PreferredOutpostArn + if f21elemf1iter.PreferredOutpostArn != nil { + f21elemf1elem.PreferredOutpostARN = f21elemf1iter.PreferredOutpostArn } - if f17elemf1iter.ReadEndpoint != nil { - f17elemf1elemf5 := &svcapitypes.Endpoint{} - if f17elemf1iter.ReadEndpoint.Address != nil { - f17elemf1elemf5.Address = f17elemf1iter.ReadEndpoint.Address + if f21elemf1iter.ReadEndpoint != nil { + f21elemf1elemf5 := &svcapitypes.Endpoint{} + if f21elemf1iter.ReadEndpoint.Address != nil { + f21elemf1elemf5.Address = f21elemf1iter.ReadEndpoint.Address } - if f17elemf1iter.ReadEndpoint.Port != nil { - f17elemf1elemf5.Port = f17elemf1iter.ReadEndpoint.Port + if f21elemf1iter.ReadEndpoint.Port != nil { + portCopy := int64(*f21elemf1iter.ReadEndpoint.Port) + f21elemf1elemf5.Port = &portCopy } - f17elemf1elem.ReadEndpoint = f17elemf1elemf5 + f21elemf1elem.ReadEndpoint = f21elemf1elemf5 } - f17elemf1 = append(f17elemf1, f17elemf1elem) + f21elemf1 = append(f21elemf1, f21elemf1elem) } - f17elem.NodeGroupMembers = f17elemf1 + f21elem.NodeGroupMembers = f21elemf1 } - if f17iter.PrimaryEndpoint != nil { - f17elemf2 := &svcapitypes.Endpoint{} - if f17iter.PrimaryEndpoint.Address != nil { - f17elemf2.Address = f17iter.PrimaryEndpoint.Address + if f21iter.PrimaryEndpoint != nil { + f21elemf2 := &svcapitypes.Endpoint{} + if f21iter.PrimaryEndpoint.Address != nil { + f21elemf2.Address = 
f21iter.PrimaryEndpoint.Address } - if f17iter.PrimaryEndpoint.Port != nil { - f17elemf2.Port = f17iter.PrimaryEndpoint.Port + if f21iter.PrimaryEndpoint.Port != nil { + portCopy := int64(*f21iter.PrimaryEndpoint.Port) + f21elemf2.Port = &portCopy } - f17elem.PrimaryEndpoint = f17elemf2 + f21elem.PrimaryEndpoint = f21elemf2 } - if f17iter.ReaderEndpoint != nil { - f17elemf3 := &svcapitypes.Endpoint{} - if f17iter.ReaderEndpoint.Address != nil { - f17elemf3.Address = f17iter.ReaderEndpoint.Address + if f21iter.ReaderEndpoint != nil { + f21elemf3 := &svcapitypes.Endpoint{} + if f21iter.ReaderEndpoint.Address != nil { + f21elemf3.Address = f21iter.ReaderEndpoint.Address } - if f17iter.ReaderEndpoint.Port != nil { - f17elemf3.Port = f17iter.ReaderEndpoint.Port + if f21iter.ReaderEndpoint.Port != nil { + portCopy := int64(*f21iter.ReaderEndpoint.Port) + f21elemf3.Port = &portCopy } - f17elem.ReaderEndpoint = f17elemf3 + f21elem.ReaderEndpoint = f21elemf3 } - if f17iter.Slots != nil { - f17elem.Slots = f17iter.Slots + if f21iter.Slots != nil { + f21elem.Slots = f21iter.Slots } - if f17iter.Status != nil { - f17elem.Status = f17iter.Status + if f21iter.Status != nil { + f21elem.Status = f21iter.Status } - f17 = append(f17, f17elem) + f21 = append(f21, f21elem) } - ko.Status.NodeGroups = f17 + ko.Status.NodeGroups = f21 } else { ko.Status.NodeGroups = nil } if resp.ReplicationGroup.PendingModifiedValues != nil { - f18 := &svcapitypes.ReplicationGroupPendingModifiedValues{} - if resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus != nil { - f18.AuthTokenStatus = resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus + f22 := &svcapitypes.ReplicationGroupPendingModifiedValues{} + if resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus != "" { + f22.AuthTokenStatus = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus)) } - if resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus != nil { - 
f18.AutomaticFailoverStatus = resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus + if resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus != "" { + f22.AutomaticFailoverStatus = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus)) + } + if resp.ReplicationGroup.PendingModifiedValues.ClusterMode != "" { + f22.ClusterMode = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.ClusterMode)) } if resp.ReplicationGroup.PendingModifiedValues.LogDeliveryConfigurations != nil { - f18f2 := []*svcapitypes.PendingLogDeliveryConfiguration{} - for _, f18f2iter := range resp.ReplicationGroup.PendingModifiedValues.LogDeliveryConfigurations { - f18f2elem := &svcapitypes.PendingLogDeliveryConfiguration{} - if f18f2iter.DestinationDetails != nil { - f18f2elemf0 := &svcapitypes.DestinationDetails{} - if f18f2iter.DestinationDetails.CloudWatchLogsDetails != nil { - f18f2elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} - if f18f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f18f2elemf0f0.LogGroup = f18f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup + f22f3 := []*svcapitypes.PendingLogDeliveryConfiguration{} + for _, f22f3iter := range resp.ReplicationGroup.PendingModifiedValues.LogDeliveryConfigurations { + f22f3elem := &svcapitypes.PendingLogDeliveryConfiguration{} + if f22f3iter.DestinationDetails != nil { + f22f3elemf0 := &svcapitypes.DestinationDetails{} + if f22f3iter.DestinationDetails.CloudWatchLogsDetails != nil { + f22f3elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} + if f22f3iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f22f3elemf0f0.LogGroup = f22f3iter.DestinationDetails.CloudWatchLogsDetails.LogGroup } - f18f2elemf0.CloudWatchLogsDetails = f18f2elemf0f0 + f22f3elemf0.CloudWatchLogsDetails = f22f3elemf0f0 } - if f18f2iter.DestinationDetails.KinesisFirehoseDetails != nil { - f18f2elemf0f1 := 
&svcapitypes.KinesisFirehoseDestinationDetails{} - if f18f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f18f2elemf0f1.DeliveryStream = f18f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream + if f22f3iter.DestinationDetails.KinesisFirehoseDetails != nil { + f22f3elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} + if f22f3iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f22f3elemf0f1.DeliveryStream = f22f3iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream } - f18f2elemf0.KinesisFirehoseDetails = f18f2elemf0f1 + f22f3elemf0.KinesisFirehoseDetails = f22f3elemf0f1 } - f18f2elem.DestinationDetails = f18f2elemf0 + f22f3elem.DestinationDetails = f22f3elemf0 } - if f18f2iter.DestinationType != nil { - f18f2elem.DestinationType = f18f2iter.DestinationType + if f22f3iter.DestinationType != "" { + f22f3elem.DestinationType = aws.String(string(f22f3iter.DestinationType)) } - if f18f2iter.LogFormat != nil { - f18f2elem.LogFormat = f18f2iter.LogFormat + if f22f3iter.LogFormat != "" { + f22f3elem.LogFormat = aws.String(string(f22f3iter.LogFormat)) } - if f18f2iter.LogType != nil { - f18f2elem.LogType = f18f2iter.LogType + if f22f3iter.LogType != "" { + f22f3elem.LogType = aws.String(string(f22f3iter.LogType)) } - f18f2 = append(f18f2, f18f2elem) + f22f3 = append(f22f3, f22f3elem) } - f18.LogDeliveryConfigurations = f18f2 + f22.LogDeliveryConfigurations = f22f3 } if resp.ReplicationGroup.PendingModifiedValues.PrimaryClusterId != nil { - f18.PrimaryClusterID = resp.ReplicationGroup.PendingModifiedValues.PrimaryClusterId + f22.PrimaryClusterID = resp.ReplicationGroup.PendingModifiedValues.PrimaryClusterId } if resp.ReplicationGroup.PendingModifiedValues.Resharding != nil { - f18f4 := &svcapitypes.ReshardingStatus{} + f22f5 := &svcapitypes.ReshardingStatus{} if resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration != nil { - f18f4f0 := &svcapitypes.SlotMigration{} + f22f5f0 := 
&svcapitypes.SlotMigration{} if resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage != nil { - f18f4f0.ProgressPercentage = resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage + f22f5f0.ProgressPercentage = resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage } - f18f4.SlotMigration = f18f4f0 + f22f5.SlotMigration = f22f5f0 } - f18.Resharding = f18f4 + f22.Resharding = f22f5 + } + if resp.ReplicationGroup.PendingModifiedValues.TransitEncryptionEnabled != nil { + f22.TransitEncryptionEnabled = resp.ReplicationGroup.PendingModifiedValues.TransitEncryptionEnabled + } + if resp.ReplicationGroup.PendingModifiedValues.TransitEncryptionMode != "" { + f22.TransitEncryptionMode = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.TransitEncryptionMode)) } if resp.ReplicationGroup.PendingModifiedValues.UserGroups != nil { - f18f5 := &svcapitypes.UserGroupsUpdateStatus{} + f22f8 := &svcapitypes.UserGroupsUpdateStatus{} if resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToAdd != nil { - f18f5f0 := []*string{} - for _, f18f5f0iter := range resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToAdd { - var f18f5f0elem string - f18f5f0elem = *f18f5f0iter - f18f5f0 = append(f18f5f0, &f18f5f0elem) - } - f18f5.UserGroupIDsToAdd = f18f5f0 + f22f8.UserGroupIDsToAdd = aws.StringSlice(resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToAdd) } if resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToRemove != nil { - f18f5f1 := []*string{} - for _, f18f5f1iter := range resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToRemove { - var f18f5f1elem string - f18f5f1elem = *f18f5f1iter - f18f5f1 = append(f18f5f1, &f18f5f1elem) - } - f18f5.UserGroupIDsToRemove = f18f5f1 + f22f8.UserGroupIDsToRemove = aws.StringSlice(resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToRemove) } - 
f18.UserGroups = f18f5 + f22.UserGroups = f22f8 } - ko.Status.PendingModifiedValues = f18 + ko.Status.PendingModifiedValues = f22 } else { ko.Status.PendingModifiedValues = nil } @@ -871,7 +893,8 @@ func (rm *resourceManager) sdkCreate( ko.Spec.ReplicationGroupID = nil } if resp.ReplicationGroup.SnapshotRetentionLimit != nil { - ko.Spec.SnapshotRetentionLimit = resp.ReplicationGroup.SnapshotRetentionLimit + snapshotRetentionLimitCopy := int64(*resp.ReplicationGroup.SnapshotRetentionLimit) + ko.Spec.SnapshotRetentionLimit = &snapshotRetentionLimitCopy } else { ko.Spec.SnapshotRetentionLimit = nil } @@ -895,14 +918,13 @@ func (rm *resourceManager) sdkCreate( } else { ko.Spec.TransitEncryptionEnabled = nil } + if resp.ReplicationGroup.TransitEncryptionMode != "" { + ko.Spec.TransitEncryptionMode = aws.String(string(resp.ReplicationGroup.TransitEncryptionMode)) + } else { + ko.Spec.TransitEncryptionMode = nil + } if resp.ReplicationGroup.UserGroupIds != nil { - f26 := []*string{} - for _, f26iter := range resp.ReplicationGroup.UserGroupIds { - var f26elem string - f26elem = *f26iter - f26 = append(f26, &f26elem) - } - ko.Spec.UserGroupIDs = f26 + ko.Spec.UserGroupIDs = aws.StringSlice(resp.ReplicationGroup.UserGroupIds) } else { ko.Spec.UserGroupIDs = nil } @@ -925,7 +947,7 @@ func (rm *resourceManager) newCreateRequestPayload( res := &svcsdk.CreateReplicationGroupInput{} if r.ko.Spec.AtRestEncryptionEnabled != nil { - res.SetAtRestEncryptionEnabled(*r.ko.Spec.AtRestEncryptionEnabled) + res.AtRestEncryptionEnabled = r.ko.Spec.AtRestEncryptionEnabled } if r.ko.Spec.AuthToken != nil { tmpSecret, err := rm.rr.SecretValueFromReference(ctx, r.ko.Spec.AuthToken) @@ -933,209 +955,207 @@ func (rm *resourceManager) newCreateRequestPayload( return nil, ackrequeue.Needed(err) } if tmpSecret != "" { - res.SetAuthToken(tmpSecret) + res.AuthToken = aws.String(tmpSecret) } } if r.ko.Spec.AutomaticFailoverEnabled != nil { - 
res.SetAutomaticFailoverEnabled(*r.ko.Spec.AutomaticFailoverEnabled) + res.AutomaticFailoverEnabled = r.ko.Spec.AutomaticFailoverEnabled } if r.ko.Spec.CacheNodeType != nil { - res.SetCacheNodeType(*r.ko.Spec.CacheNodeType) + res.CacheNodeType = r.ko.Spec.CacheNodeType } if r.ko.Spec.CacheParameterGroupName != nil { - res.SetCacheParameterGroupName(*r.ko.Spec.CacheParameterGroupName) + res.CacheParameterGroupName = r.ko.Spec.CacheParameterGroupName } if r.ko.Spec.CacheSecurityGroupNames != nil { - f5 := []*string{} - for _, f5iter := range r.ko.Spec.CacheSecurityGroupNames { - var f5elem string - f5elem = *f5iter - f5 = append(f5, &f5elem) - } - res.SetCacheSecurityGroupNames(f5) + res.CacheSecurityGroupNames = aws.ToStringSlice(r.ko.Spec.CacheSecurityGroupNames) } if r.ko.Spec.CacheSubnetGroupName != nil { - res.SetCacheSubnetGroupName(*r.ko.Spec.CacheSubnetGroupName) + res.CacheSubnetGroupName = r.ko.Spec.CacheSubnetGroupName + } + if r.ko.Spec.ClusterMode != nil { + res.ClusterMode = svcsdktypes.ClusterMode(*r.ko.Spec.ClusterMode) } if r.ko.Spec.DataTieringEnabled != nil { - res.SetDataTieringEnabled(*r.ko.Spec.DataTieringEnabled) + res.DataTieringEnabled = r.ko.Spec.DataTieringEnabled } if r.ko.Spec.Engine != nil { - res.SetEngine(*r.ko.Spec.Engine) + res.Engine = r.ko.Spec.Engine } if r.ko.Spec.EngineVersion != nil { - res.SetEngineVersion(*r.ko.Spec.EngineVersion) + res.EngineVersion = r.ko.Spec.EngineVersion + } + if r.ko.Spec.IPDiscovery != nil { + res.IpDiscovery = svcsdktypes.IpDiscovery(*r.ko.Spec.IPDiscovery) } if r.ko.Spec.KMSKeyID != nil { - res.SetKmsKeyId(*r.ko.Spec.KMSKeyID) + res.KmsKeyId = r.ko.Spec.KMSKeyID } if r.ko.Spec.LogDeliveryConfigurations != nil { - f11 := []*svcsdk.LogDeliveryConfigurationRequest{} - for _, f11iter := range r.ko.Spec.LogDeliveryConfigurations { - f11elem := &svcsdk.LogDeliveryConfigurationRequest{} - if f11iter.DestinationDetails != nil { - f11elemf0 := &svcsdk.DestinationDetails{} - if 
f11iter.DestinationDetails.CloudWatchLogsDetails != nil { - f11elemf0f0 := &svcsdk.CloudWatchLogsDestinationDetails{} - if f11iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f11elemf0f0.SetLogGroup(*f11iter.DestinationDetails.CloudWatchLogsDetails.LogGroup) + f13 := []svcsdktypes.LogDeliveryConfigurationRequest{} + for _, f13iter := range r.ko.Spec.LogDeliveryConfigurations { + f13elem := &svcsdktypes.LogDeliveryConfigurationRequest{} + if f13iter.DestinationDetails != nil { + f13elemf0 := &svcsdktypes.DestinationDetails{} + if f13iter.DestinationDetails.CloudWatchLogsDetails != nil { + f13elemf0f0 := &svcsdktypes.CloudWatchLogsDestinationDetails{} + if f13iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f13elemf0f0.LogGroup = f13iter.DestinationDetails.CloudWatchLogsDetails.LogGroup } - f11elemf0.SetCloudWatchLogsDetails(f11elemf0f0) + f13elemf0.CloudWatchLogsDetails = f13elemf0f0 } - if f11iter.DestinationDetails.KinesisFirehoseDetails != nil { - f11elemf0f1 := &svcsdk.KinesisFirehoseDestinationDetails{} - if f11iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f11elemf0f1.SetDeliveryStream(*f11iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream) + if f13iter.DestinationDetails.KinesisFirehoseDetails != nil { + f13elemf0f1 := &svcsdktypes.KinesisFirehoseDestinationDetails{} + if f13iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f13elemf0f1.DeliveryStream = f13iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream } - f11elemf0.SetKinesisFirehoseDetails(f11elemf0f1) + f13elemf0.KinesisFirehoseDetails = f13elemf0f1 } - f11elem.SetDestinationDetails(f11elemf0) + f13elem.DestinationDetails = f13elemf0 } - if f11iter.DestinationType != nil { - f11elem.SetDestinationType(*f11iter.DestinationType) + if f13iter.DestinationType != nil { + f13elem.DestinationType = svcsdktypes.DestinationType(*f13iter.DestinationType) } - if f11iter.Enabled != nil { - 
f11elem.SetEnabled(*f11iter.Enabled) + if f13iter.Enabled != nil { + f13elem.Enabled = f13iter.Enabled } - if f11iter.LogFormat != nil { - f11elem.SetLogFormat(*f11iter.LogFormat) + if f13iter.LogFormat != nil { + f13elem.LogFormat = svcsdktypes.LogFormat(*f13iter.LogFormat) } - if f11iter.LogType != nil { - f11elem.SetLogType(*f11iter.LogType) + if f13iter.LogType != nil { + f13elem.LogType = svcsdktypes.LogType(*f13iter.LogType) } - f11 = append(f11, f11elem) + f13 = append(f13, *f13elem) } - res.SetLogDeliveryConfigurations(f11) + res.LogDeliveryConfigurations = f13 } if r.ko.Spec.MultiAZEnabled != nil { - res.SetMultiAZEnabled(*r.ko.Spec.MultiAZEnabled) + res.MultiAZEnabled = r.ko.Spec.MultiAZEnabled + } + if r.ko.Spec.NetworkType != nil { + res.NetworkType = svcsdktypes.NetworkType(*r.ko.Spec.NetworkType) } if r.ko.Spec.NodeGroupConfiguration != nil { - f13 := []*svcsdk.NodeGroupConfiguration{} - for _, f13iter := range r.ko.Spec.NodeGroupConfiguration { - f13elem := &svcsdk.NodeGroupConfiguration{} - if f13iter.NodeGroupID != nil { - f13elem.SetNodeGroupId(*f13iter.NodeGroupID) - } - if f13iter.PrimaryAvailabilityZone != nil { - f13elem.SetPrimaryAvailabilityZone(*f13iter.PrimaryAvailabilityZone) - } - if f13iter.PrimaryOutpostARN != nil { - f13elem.SetPrimaryOutpostArn(*f13iter.PrimaryOutpostARN) - } - if f13iter.ReplicaAvailabilityZones != nil { - f13elemf3 := []*string{} - for _, f13elemf3iter := range f13iter.ReplicaAvailabilityZones { - var f13elemf3elem string - f13elemf3elem = *f13elemf3iter - f13elemf3 = append(f13elemf3, &f13elemf3elem) - } - f13elem.SetReplicaAvailabilityZones(f13elemf3) + f16 := []svcsdktypes.NodeGroupConfiguration{} + for _, f16iter := range r.ko.Spec.NodeGroupConfiguration { + f16elem := &svcsdktypes.NodeGroupConfiguration{} + if f16iter.NodeGroupID != nil { + f16elem.NodeGroupId = f16iter.NodeGroupID + } + if f16iter.PrimaryAvailabilityZone != nil { + f16elem.PrimaryAvailabilityZone = f16iter.PrimaryAvailabilityZone + } + if 
f16iter.PrimaryOutpostARN != nil { + f16elem.PrimaryOutpostArn = f16iter.PrimaryOutpostARN } - if f13iter.ReplicaCount != nil { - f13elem.SetReplicaCount(*f13iter.ReplicaCount) + if f16iter.ReplicaAvailabilityZones != nil { + f16elem.ReplicaAvailabilityZones = aws.ToStringSlice(f16iter.ReplicaAvailabilityZones) } - if f13iter.ReplicaOutpostARNs != nil { - f13elemf5 := []*string{} - for _, f13elemf5iter := range f13iter.ReplicaOutpostARNs { - var f13elemf5elem string - f13elemf5elem = *f13elemf5iter - f13elemf5 = append(f13elemf5, &f13elemf5elem) + if f16iter.ReplicaCount != nil { + replicaCountCopy0 := *f16iter.ReplicaCount + if replicaCountCopy0 > math.MaxInt32 || replicaCountCopy0 < math.MinInt32 { + return nil, fmt.Errorf("error: field ReplicaCount is of type int32") } - f13elem.SetReplicaOutpostArns(f13elemf5) + replicaCountCopy := int32(replicaCountCopy0) + f16elem.ReplicaCount = &replicaCountCopy } - if f13iter.Slots != nil { - f13elem.SetSlots(*f13iter.Slots) + if f16iter.ReplicaOutpostARNs != nil { + f16elem.ReplicaOutpostArns = aws.ToStringSlice(f16iter.ReplicaOutpostARNs) } - f13 = append(f13, f13elem) + if f16iter.Slots != nil { + f16elem.Slots = f16iter.Slots + } + f16 = append(f16, *f16elem) } - res.SetNodeGroupConfiguration(f13) + res.NodeGroupConfiguration = f16 } if r.ko.Spec.NotificationTopicARN != nil { - res.SetNotificationTopicArn(*r.ko.Spec.NotificationTopicARN) + res.NotificationTopicArn = r.ko.Spec.NotificationTopicARN } if r.ko.Spec.NumNodeGroups != nil { - res.SetNumNodeGroups(*r.ko.Spec.NumNodeGroups) + numNodeGroupsCopy0 := *r.ko.Spec.NumNodeGroups + if numNodeGroupsCopy0 > math.MaxInt32 || numNodeGroupsCopy0 < math.MinInt32 { + return nil, fmt.Errorf("error: field NumNodeGroups is of type int32") + } + numNodeGroupsCopy := int32(numNodeGroupsCopy0) + res.NumNodeGroups = &numNodeGroupsCopy } if r.ko.Spec.Port != nil { - res.SetPort(*r.ko.Spec.Port) + portCopy0 := *r.ko.Spec.Port + if portCopy0 > math.MaxInt32 || portCopy0 < math.MinInt32 
{ + return nil, fmt.Errorf("error: field Port is of type int32") + } + portCopy := int32(portCopy0) + res.Port = &portCopy } if r.ko.Spec.PreferredCacheClusterAZs != nil { - f17 := []*string{} - for _, f17iter := range r.ko.Spec.PreferredCacheClusterAZs { - var f17elem string - f17elem = *f17iter - f17 = append(f17, &f17elem) - } - res.SetPreferredCacheClusterAZs(f17) + res.PreferredCacheClusterAZs = aws.ToStringSlice(r.ko.Spec.PreferredCacheClusterAZs) } if r.ko.Spec.PreferredMaintenanceWindow != nil { - res.SetPreferredMaintenanceWindow(*r.ko.Spec.PreferredMaintenanceWindow) + res.PreferredMaintenanceWindow = r.ko.Spec.PreferredMaintenanceWindow } if r.ko.Spec.PrimaryClusterID != nil { - res.SetPrimaryClusterId(*r.ko.Spec.PrimaryClusterID) + res.PrimaryClusterId = r.ko.Spec.PrimaryClusterID } if r.ko.Spec.ReplicasPerNodeGroup != nil { - res.SetReplicasPerNodeGroup(*r.ko.Spec.ReplicasPerNodeGroup) + replicasPerNodeGroupCopy0 := *r.ko.Spec.ReplicasPerNodeGroup + if replicasPerNodeGroupCopy0 > math.MaxInt32 || replicasPerNodeGroupCopy0 < math.MinInt32 { + return nil, fmt.Errorf("error: field ReplicasPerNodeGroup is of type int32") + } + replicasPerNodeGroupCopy := int32(replicasPerNodeGroupCopy0) + res.ReplicasPerNodeGroup = &replicasPerNodeGroupCopy } if r.ko.Spec.Description != nil { - res.SetReplicationGroupDescription(*r.ko.Spec.Description) + res.ReplicationGroupDescription = r.ko.Spec.Description } if r.ko.Spec.ReplicationGroupID != nil { - res.SetReplicationGroupId(*r.ko.Spec.ReplicationGroupID) + res.ReplicationGroupId = r.ko.Spec.ReplicationGroupID } if r.ko.Spec.SecurityGroupIDs != nil { - f23 := []*string{} - for _, f23iter := range r.ko.Spec.SecurityGroupIDs { - var f23elem string - f23elem = *f23iter - f23 = append(f23, &f23elem) - } - res.SetSecurityGroupIds(f23) + res.SecurityGroupIds = aws.ToStringSlice(r.ko.Spec.SecurityGroupIDs) + } + if r.ko.Spec.ServerlessCacheSnapshotName != nil { + res.ServerlessCacheSnapshotName = 
r.ko.Spec.ServerlessCacheSnapshotName } if r.ko.Spec.SnapshotARNs != nil { - f24 := []*string{} - for _, f24iter := range r.ko.Spec.SnapshotARNs { - var f24elem string - f24elem = *f24iter - f24 = append(f24, &f24elem) - } - res.SetSnapshotArns(f24) + res.SnapshotArns = aws.ToStringSlice(r.ko.Spec.SnapshotARNs) } if r.ko.Spec.SnapshotName != nil { - res.SetSnapshotName(*r.ko.Spec.SnapshotName) + res.SnapshotName = r.ko.Spec.SnapshotName } if r.ko.Spec.SnapshotRetentionLimit != nil { - res.SetSnapshotRetentionLimit(*r.ko.Spec.SnapshotRetentionLimit) + snapshotRetentionLimitCopy0 := *r.ko.Spec.SnapshotRetentionLimit + if snapshotRetentionLimitCopy0 > math.MaxInt32 || snapshotRetentionLimitCopy0 < math.MinInt32 { + return nil, fmt.Errorf("error: field SnapshotRetentionLimit is of type int32") + } + snapshotRetentionLimitCopy := int32(snapshotRetentionLimitCopy0) + res.SnapshotRetentionLimit = &snapshotRetentionLimitCopy } if r.ko.Spec.SnapshotWindow != nil { - res.SetSnapshotWindow(*r.ko.Spec.SnapshotWindow) + res.SnapshotWindow = r.ko.Spec.SnapshotWindow } if r.ko.Spec.Tags != nil { - f28 := []*svcsdk.Tag{} - for _, f28iter := range r.ko.Spec.Tags { - f28elem := &svcsdk.Tag{} - if f28iter.Key != nil { - f28elem.SetKey(*f28iter.Key) + f32 := []svcsdktypes.Tag{} + for _, f32iter := range r.ko.Spec.Tags { + f32elem := &svcsdktypes.Tag{} + if f32iter.Key != nil { + f32elem.Key = f32iter.Key } - if f28iter.Value != nil { - f28elem.SetValue(*f28iter.Value) + if f32iter.Value != nil { + f32elem.Value = f32iter.Value } - f28 = append(f28, f28elem) + f32 = append(f32, *f32elem) } - res.SetTags(f28) + res.Tags = f32 } if r.ko.Spec.TransitEncryptionEnabled != nil { - res.SetTransitEncryptionEnabled(*r.ko.Spec.TransitEncryptionEnabled) + res.TransitEncryptionEnabled = r.ko.Spec.TransitEncryptionEnabled + } + if r.ko.Spec.TransitEncryptionMode != nil { + res.TransitEncryptionMode = svcsdktypes.TransitEncryptionMode(*r.ko.Spec.TransitEncryptionMode) } if r.ko.Spec.UserGroupIDs != 
nil { - f30 := []*string{} - for _, f30iter := range r.ko.Spec.UserGroupIDs { - var f30elem string - f30elem = *f30iter - f30 = append(f30, &f30elem) - } - res.SetUserGroupIds(f30) + res.UserGroupIds = aws.ToStringSlice(r.ko.Spec.UserGroupIDs) } return res, nil @@ -1224,7 +1244,7 @@ func (rm *resourceManager) sdkUpdate( var resp *svcsdk.ModifyReplicationGroupOutput _ = resp - resp, err = rm.sdkapi.ModifyReplicationGroupWithContext(ctx, input) + resp, err = rm.sdkapi.ModifyReplicationGroup(ctx, input) rm.metrics.RecordAPICall("UPDATE", "ModifyReplicationGroup", err) if err != nil { return nil, err @@ -1260,8 +1280,8 @@ func (rm *resourceManager) sdkUpdate( } else { ko.Status.AutoMinorVersionUpgrade = nil } - if resp.ReplicationGroup.AutomaticFailover != nil { - ko.Status.AutomaticFailover = resp.ReplicationGroup.AutomaticFailover + if resp.ReplicationGroup.AutomaticFailover != "" { + ko.Status.AutomaticFailover = aws.String(string(resp.ReplicationGroup.AutomaticFailover)) } else { ko.Status.AutomaticFailover = nil } @@ -1275,20 +1295,26 @@ func (rm *resourceManager) sdkUpdate( } else { ko.Status.ClusterEnabled = nil } + if resp.ReplicationGroup.ClusterMode != "" { + ko.Spec.ClusterMode = aws.String(string(resp.ReplicationGroup.ClusterMode)) + } else { + ko.Spec.ClusterMode = nil + } if resp.ReplicationGroup.ConfigurationEndpoint != nil { - f8 := &svcapitypes.Endpoint{} + f9 := &svcapitypes.Endpoint{} if resp.ReplicationGroup.ConfigurationEndpoint.Address != nil { - f8.Address = resp.ReplicationGroup.ConfigurationEndpoint.Address + f9.Address = resp.ReplicationGroup.ConfigurationEndpoint.Address } if resp.ReplicationGroup.ConfigurationEndpoint.Port != nil { - f8.Port = resp.ReplicationGroup.ConfigurationEndpoint.Port + portCopy := int64(*resp.ReplicationGroup.ConfigurationEndpoint.Port) + f9.Port = &portCopy } - ko.Status.ConfigurationEndpoint = f8 + ko.Status.ConfigurationEndpoint = f9 } else { ko.Status.ConfigurationEndpoint = nil } - if 
resp.ReplicationGroup.DataTiering != nil { - ko.Status.DataTiering = resp.ReplicationGroup.DataTiering + if resp.ReplicationGroup.DataTiering != "" { + ko.Status.DataTiering = aws.String(string(resp.ReplicationGroup.DataTiering)) } else { ko.Status.DataTiering = nil } @@ -1297,239 +1323,242 @@ func (rm *resourceManager) sdkUpdate( } else { ko.Spec.Description = nil } + if resp.ReplicationGroup.Engine != nil { + ko.Spec.Engine = resp.ReplicationGroup.Engine + } else { + ko.Spec.Engine = nil + } if resp.ReplicationGroup.GlobalReplicationGroupInfo != nil { - f11 := &svcapitypes.GlobalReplicationGroupInfo{} + f13 := &svcapitypes.GlobalReplicationGroupInfo{} if resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupId != nil { - f11.GlobalReplicationGroupID = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupId + f13.GlobalReplicationGroupID = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupId } if resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole != nil { - f11.GlobalReplicationGroupMemberRole = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole + f13.GlobalReplicationGroupMemberRole = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole } - ko.Status.GlobalReplicationGroupInfo = f11 + ko.Status.GlobalReplicationGroupInfo = f13 } else { ko.Status.GlobalReplicationGroupInfo = nil } + if resp.ReplicationGroup.IpDiscovery != "" { + ko.Spec.IPDiscovery = aws.String(string(resp.ReplicationGroup.IpDiscovery)) + } else { + ko.Spec.IPDiscovery = nil + } if resp.ReplicationGroup.KmsKeyId != nil { ko.Spec.KMSKeyID = resp.ReplicationGroup.KmsKeyId } else { ko.Spec.KMSKeyID = nil } if resp.ReplicationGroup.LogDeliveryConfigurations != nil { - f13 := []*svcapitypes.LogDeliveryConfigurationRequest{} - for _, f13iter := range resp.ReplicationGroup.LogDeliveryConfigurations { - f13elem := &svcapitypes.LogDeliveryConfigurationRequest{} 
- if f13iter.DestinationDetails != nil { - f13elemf0 := &svcapitypes.DestinationDetails{} - if f13iter.DestinationDetails.CloudWatchLogsDetails != nil { - f13elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} - if f13iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f13elemf0f0.LogGroup = f13iter.DestinationDetails.CloudWatchLogsDetails.LogGroup + f16 := []*svcapitypes.LogDeliveryConfigurationRequest{} + for _, f16iter := range resp.ReplicationGroup.LogDeliveryConfigurations { + f16elem := &svcapitypes.LogDeliveryConfigurationRequest{} + if f16iter.DestinationDetails != nil { + f16elemf0 := &svcapitypes.DestinationDetails{} + if f16iter.DestinationDetails.CloudWatchLogsDetails != nil { + f16elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} + if f16iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f16elemf0f0.LogGroup = f16iter.DestinationDetails.CloudWatchLogsDetails.LogGroup } - f13elemf0.CloudWatchLogsDetails = f13elemf0f0 + f16elemf0.CloudWatchLogsDetails = f16elemf0f0 } - if f13iter.DestinationDetails.KinesisFirehoseDetails != nil { - f13elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} - if f13iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f13elemf0f1.DeliveryStream = f13iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream + if f16iter.DestinationDetails.KinesisFirehoseDetails != nil { + f16elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} + if f16iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f16elemf0f1.DeliveryStream = f16iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream } - f13elemf0.KinesisFirehoseDetails = f13elemf0f1 + f16elemf0.KinesisFirehoseDetails = f16elemf0f1 } - f13elem.DestinationDetails = f13elemf0 + f16elem.DestinationDetails = f16elemf0 } - if f13iter.DestinationType != nil { - f13elem.DestinationType = f13iter.DestinationType + if f16iter.DestinationType != "" { + f16elem.DestinationType = 
aws.String(string(f16iter.DestinationType)) } - if f13iter.LogFormat != nil { - f13elem.LogFormat = f13iter.LogFormat + if f16iter.LogFormat != "" { + f16elem.LogFormat = aws.String(string(f16iter.LogFormat)) } - if f13iter.LogType != nil { - f13elem.LogType = f13iter.LogType + if f16iter.LogType != "" { + f16elem.LogType = aws.String(string(f16iter.LogType)) } - f13 = append(f13, f13elem) + f16 = append(f16, f16elem) } - ko.Spec.LogDeliveryConfigurations = f13 + ko.Spec.LogDeliveryConfigurations = f16 } else { ko.Spec.LogDeliveryConfigurations = nil } if resp.ReplicationGroup.MemberClusters != nil { - f14 := []*string{} - for _, f14iter := range resp.ReplicationGroup.MemberClusters { - var f14elem string - f14elem = *f14iter - f14 = append(f14, &f14elem) - } - ko.Status.MemberClusters = f14 + ko.Status.MemberClusters = aws.StringSlice(resp.ReplicationGroup.MemberClusters) } else { ko.Status.MemberClusters = nil } if resp.ReplicationGroup.MemberClustersOutpostArns != nil { - f15 := []*string{} - for _, f15iter := range resp.ReplicationGroup.MemberClustersOutpostArns { - var f15elem string - f15elem = *f15iter - f15 = append(f15, &f15elem) - } - ko.Status.MemberClustersOutpostARNs = f15 + ko.Status.MemberClustersOutpostARNs = aws.StringSlice(resp.ReplicationGroup.MemberClustersOutpostArns) } else { ko.Status.MemberClustersOutpostARNs = nil } - if resp.ReplicationGroup.MultiAZ != nil { - ko.Status.MultiAZ = resp.ReplicationGroup.MultiAZ + if resp.ReplicationGroup.MultiAZ != "" { + ko.Status.MultiAZ = aws.String(string(resp.ReplicationGroup.MultiAZ)) } else { ko.Status.MultiAZ = nil } + if resp.ReplicationGroup.NetworkType != "" { + ko.Spec.NetworkType = aws.String(string(resp.ReplicationGroup.NetworkType)) + } else { + ko.Spec.NetworkType = nil + } if resp.ReplicationGroup.NodeGroups != nil { - f17 := []*svcapitypes.NodeGroup{} - for _, f17iter := range resp.ReplicationGroup.NodeGroups { - f17elem := &svcapitypes.NodeGroup{} - if f17iter.NodeGroupId != nil { - 
f17elem.NodeGroupID = f17iter.NodeGroupId - } - if f17iter.NodeGroupMembers != nil { - f17elemf1 := []*svcapitypes.NodeGroupMember{} - for _, f17elemf1iter := range f17iter.NodeGroupMembers { - f17elemf1elem := &svcapitypes.NodeGroupMember{} - if f17elemf1iter.CacheClusterId != nil { - f17elemf1elem.CacheClusterID = f17elemf1iter.CacheClusterId + f21 := []*svcapitypes.NodeGroup{} + for _, f21iter := range resp.ReplicationGroup.NodeGroups { + f21elem := &svcapitypes.NodeGroup{} + if f21iter.NodeGroupId != nil { + f21elem.NodeGroupID = f21iter.NodeGroupId + } + if f21iter.NodeGroupMembers != nil { + f21elemf1 := []*svcapitypes.NodeGroupMember{} + for _, f21elemf1iter := range f21iter.NodeGroupMembers { + f21elemf1elem := &svcapitypes.NodeGroupMember{} + if f21elemf1iter.CacheClusterId != nil { + f21elemf1elem.CacheClusterID = f21elemf1iter.CacheClusterId } - if f17elemf1iter.CacheNodeId != nil { - f17elemf1elem.CacheNodeID = f17elemf1iter.CacheNodeId + if f21elemf1iter.CacheNodeId != nil { + f21elemf1elem.CacheNodeID = f21elemf1iter.CacheNodeId } - if f17elemf1iter.CurrentRole != nil { - f17elemf1elem.CurrentRole = f17elemf1iter.CurrentRole + if f21elemf1iter.CurrentRole != nil { + f21elemf1elem.CurrentRole = f21elemf1iter.CurrentRole } - if f17elemf1iter.PreferredAvailabilityZone != nil { - f17elemf1elem.PreferredAvailabilityZone = f17elemf1iter.PreferredAvailabilityZone + if f21elemf1iter.PreferredAvailabilityZone != nil { + f21elemf1elem.PreferredAvailabilityZone = f21elemf1iter.PreferredAvailabilityZone } - if f17elemf1iter.PreferredOutpostArn != nil { - f17elemf1elem.PreferredOutpostARN = f17elemf1iter.PreferredOutpostArn + if f21elemf1iter.PreferredOutpostArn != nil { + f21elemf1elem.PreferredOutpostARN = f21elemf1iter.PreferredOutpostArn } - if f17elemf1iter.ReadEndpoint != nil { - f17elemf1elemf5 := &svcapitypes.Endpoint{} - if f17elemf1iter.ReadEndpoint.Address != nil { - f17elemf1elemf5.Address = f17elemf1iter.ReadEndpoint.Address + if 
f21elemf1iter.ReadEndpoint != nil { + f21elemf1elemf5 := &svcapitypes.Endpoint{} + if f21elemf1iter.ReadEndpoint.Address != nil { + f21elemf1elemf5.Address = f21elemf1iter.ReadEndpoint.Address } - if f17elemf1iter.ReadEndpoint.Port != nil { - f17elemf1elemf5.Port = f17elemf1iter.ReadEndpoint.Port + if f21elemf1iter.ReadEndpoint.Port != nil { + portCopy := int64(*f21elemf1iter.ReadEndpoint.Port) + f21elemf1elemf5.Port = &portCopy } - f17elemf1elem.ReadEndpoint = f17elemf1elemf5 + f21elemf1elem.ReadEndpoint = f21elemf1elemf5 } - f17elemf1 = append(f17elemf1, f17elemf1elem) + f21elemf1 = append(f21elemf1, f21elemf1elem) } - f17elem.NodeGroupMembers = f17elemf1 + f21elem.NodeGroupMembers = f21elemf1 } - if f17iter.PrimaryEndpoint != nil { - f17elemf2 := &svcapitypes.Endpoint{} - if f17iter.PrimaryEndpoint.Address != nil { - f17elemf2.Address = f17iter.PrimaryEndpoint.Address + if f21iter.PrimaryEndpoint != nil { + f21elemf2 := &svcapitypes.Endpoint{} + if f21iter.PrimaryEndpoint.Address != nil { + f21elemf2.Address = f21iter.PrimaryEndpoint.Address } - if f17iter.PrimaryEndpoint.Port != nil { - f17elemf2.Port = f17iter.PrimaryEndpoint.Port + if f21iter.PrimaryEndpoint.Port != nil { + portCopy := int64(*f21iter.PrimaryEndpoint.Port) + f21elemf2.Port = &portCopy } - f17elem.PrimaryEndpoint = f17elemf2 + f21elem.PrimaryEndpoint = f21elemf2 } - if f17iter.ReaderEndpoint != nil { - f17elemf3 := &svcapitypes.Endpoint{} - if f17iter.ReaderEndpoint.Address != nil { - f17elemf3.Address = f17iter.ReaderEndpoint.Address + if f21iter.ReaderEndpoint != nil { + f21elemf3 := &svcapitypes.Endpoint{} + if f21iter.ReaderEndpoint.Address != nil { + f21elemf3.Address = f21iter.ReaderEndpoint.Address } - if f17iter.ReaderEndpoint.Port != nil { - f17elemf3.Port = f17iter.ReaderEndpoint.Port + if f21iter.ReaderEndpoint.Port != nil { + portCopy := int64(*f21iter.ReaderEndpoint.Port) + f21elemf3.Port = &portCopy } - f17elem.ReaderEndpoint = f17elemf3 + f21elem.ReaderEndpoint = f21elemf3 } - if 
f17iter.Slots != nil { - f17elem.Slots = f17iter.Slots + if f21iter.Slots != nil { + f21elem.Slots = f21iter.Slots } - if f17iter.Status != nil { - f17elem.Status = f17iter.Status + if f21iter.Status != nil { + f21elem.Status = f21iter.Status } - f17 = append(f17, f17elem) + f21 = append(f21, f21elem) } - ko.Status.NodeGroups = f17 + ko.Status.NodeGroups = f21 } else { ko.Status.NodeGroups = nil } if resp.ReplicationGroup.PendingModifiedValues != nil { - f18 := &svcapitypes.ReplicationGroupPendingModifiedValues{} - if resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus != nil { - f18.AuthTokenStatus = resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus + f22 := &svcapitypes.ReplicationGroupPendingModifiedValues{} + if resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus != "" { + f22.AuthTokenStatus = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus)) + } + if resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus != "" { + f22.AutomaticFailoverStatus = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus)) } - if resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus != nil { - f18.AutomaticFailoverStatus = resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus + if resp.ReplicationGroup.PendingModifiedValues.ClusterMode != "" { + f22.ClusterMode = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.ClusterMode)) } if resp.ReplicationGroup.PendingModifiedValues.LogDeliveryConfigurations != nil { - f18f2 := []*svcapitypes.PendingLogDeliveryConfiguration{} - for _, f18f2iter := range resp.ReplicationGroup.PendingModifiedValues.LogDeliveryConfigurations { - f18f2elem := &svcapitypes.PendingLogDeliveryConfiguration{} - if f18f2iter.DestinationDetails != nil { - f18f2elemf0 := &svcapitypes.DestinationDetails{} - if f18f2iter.DestinationDetails.CloudWatchLogsDetails != nil { - f18f2elemf0f0 := 
&svcapitypes.CloudWatchLogsDestinationDetails{} - if f18f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f18f2elemf0f0.LogGroup = f18f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup + f22f3 := []*svcapitypes.PendingLogDeliveryConfiguration{} + for _, f22f3iter := range resp.ReplicationGroup.PendingModifiedValues.LogDeliveryConfigurations { + f22f3elem := &svcapitypes.PendingLogDeliveryConfiguration{} + if f22f3iter.DestinationDetails != nil { + f22f3elemf0 := &svcapitypes.DestinationDetails{} + if f22f3iter.DestinationDetails.CloudWatchLogsDetails != nil { + f22f3elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} + if f22f3iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f22f3elemf0f0.LogGroup = f22f3iter.DestinationDetails.CloudWatchLogsDetails.LogGroup } - f18f2elemf0.CloudWatchLogsDetails = f18f2elemf0f0 + f22f3elemf0.CloudWatchLogsDetails = f22f3elemf0f0 } - if f18f2iter.DestinationDetails.KinesisFirehoseDetails != nil { - f18f2elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} - if f18f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f18f2elemf0f1.DeliveryStream = f18f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream + if f22f3iter.DestinationDetails.KinesisFirehoseDetails != nil { + f22f3elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} + if f22f3iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f22f3elemf0f1.DeliveryStream = f22f3iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream } - f18f2elemf0.KinesisFirehoseDetails = f18f2elemf0f1 + f22f3elemf0.KinesisFirehoseDetails = f22f3elemf0f1 } - f18f2elem.DestinationDetails = f18f2elemf0 + f22f3elem.DestinationDetails = f22f3elemf0 } - if f18f2iter.DestinationType != nil { - f18f2elem.DestinationType = f18f2iter.DestinationType + if f22f3iter.DestinationType != "" { + f22f3elem.DestinationType = aws.String(string(f22f3iter.DestinationType)) } - if f18f2iter.LogFormat != 
nil { - f18f2elem.LogFormat = f18f2iter.LogFormat + if f22f3iter.LogFormat != "" { + f22f3elem.LogFormat = aws.String(string(f22f3iter.LogFormat)) } - if f18f2iter.LogType != nil { - f18f2elem.LogType = f18f2iter.LogType + if f22f3iter.LogType != "" { + f22f3elem.LogType = aws.String(string(f22f3iter.LogType)) } - f18f2 = append(f18f2, f18f2elem) + f22f3 = append(f22f3, f22f3elem) } - f18.LogDeliveryConfigurations = f18f2 + f22.LogDeliveryConfigurations = f22f3 } if resp.ReplicationGroup.PendingModifiedValues.PrimaryClusterId != nil { - f18.PrimaryClusterID = resp.ReplicationGroup.PendingModifiedValues.PrimaryClusterId + f22.PrimaryClusterID = resp.ReplicationGroup.PendingModifiedValues.PrimaryClusterId } if resp.ReplicationGroup.PendingModifiedValues.Resharding != nil { - f18f4 := &svcapitypes.ReshardingStatus{} + f22f5 := &svcapitypes.ReshardingStatus{} if resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration != nil { - f18f4f0 := &svcapitypes.SlotMigration{} + f22f5f0 := &svcapitypes.SlotMigration{} if resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage != nil { - f18f4f0.ProgressPercentage = resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage + f22f5f0.ProgressPercentage = resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage } - f18f4.SlotMigration = f18f4f0 + f22f5.SlotMigration = f22f5f0 } - f18.Resharding = f18f4 + f22.Resharding = f22f5 + } + if resp.ReplicationGroup.PendingModifiedValues.TransitEncryptionEnabled != nil { + f22.TransitEncryptionEnabled = resp.ReplicationGroup.PendingModifiedValues.TransitEncryptionEnabled + } + if resp.ReplicationGroup.PendingModifiedValues.TransitEncryptionMode != "" { + f22.TransitEncryptionMode = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.TransitEncryptionMode)) } if resp.ReplicationGroup.PendingModifiedValues.UserGroups != nil { - f18f5 := &svcapitypes.UserGroupsUpdateStatus{} + 
f22f8 := &svcapitypes.UserGroupsUpdateStatus{} if resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToAdd != nil { - f18f5f0 := []*string{} - for _, f18f5f0iter := range resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToAdd { - var f18f5f0elem string - f18f5f0elem = *f18f5f0iter - f18f5f0 = append(f18f5f0, &f18f5f0elem) - } - f18f5.UserGroupIDsToAdd = f18f5f0 + f22f8.UserGroupIDsToAdd = aws.StringSlice(resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToAdd) } if resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToRemove != nil { - f18f5f1 := []*string{} - for _, f18f5f1iter := range resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToRemove { - var f18f5f1elem string - f18f5f1elem = *f18f5f1iter - f18f5f1 = append(f18f5f1, &f18f5f1elem) - } - f18f5.UserGroupIDsToRemove = f18f5f1 + f22f8.UserGroupIDsToRemove = aws.StringSlice(resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToRemove) } - f18.UserGroups = f18f5 + f22.UserGroups = f22f8 } - ko.Status.PendingModifiedValues = f18 + ko.Status.PendingModifiedValues = f22 } else { ko.Status.PendingModifiedValues = nil } @@ -1544,7 +1573,8 @@ func (rm *resourceManager) sdkUpdate( ko.Spec.ReplicationGroupID = nil } if resp.ReplicationGroup.SnapshotRetentionLimit != nil { - ko.Spec.SnapshotRetentionLimit = resp.ReplicationGroup.SnapshotRetentionLimit + snapshotRetentionLimitCopy := int64(*resp.ReplicationGroup.SnapshotRetentionLimit) + ko.Spec.SnapshotRetentionLimit = &snapshotRetentionLimitCopy } else { ko.Spec.SnapshotRetentionLimit = nil } @@ -1568,14 +1598,13 @@ func (rm *resourceManager) sdkUpdate( } else { ko.Spec.TransitEncryptionEnabled = nil } + if resp.ReplicationGroup.TransitEncryptionMode != "" { + ko.Spec.TransitEncryptionMode = aws.String(string(resp.ReplicationGroup.TransitEncryptionMode)) + } else { + ko.Spec.TransitEncryptionMode = nil + } if resp.ReplicationGroup.UserGroupIds != nil { - f26 := 
[]*string{} - for _, f26iter := range resp.ReplicationGroup.UserGroupIds { - var f26elem string - f26elem = *f26iter - f26 = append(f26, &f26elem) - } - ko.Spec.UserGroupIDs = f26 + ko.Spec.UserGroupIDs = aws.StringSlice(resp.ReplicationGroup.UserGroupIds) } else { ko.Spec.UserGroupIDs = nil } @@ -1598,101 +1627,115 @@ func (rm *resourceManager) newUpdateRequestPayload( ) (*svcsdk.ModifyReplicationGroupInput, error) { res := &svcsdk.ModifyReplicationGroupInput{} - res.SetApplyImmediately(true) + res.ApplyImmediately = true if r.ko.Spec.AuthToken != nil { tmpSecret, err := rm.rr.SecretValueFromReference(ctx, r.ko.Spec.AuthToken) if err != nil { return nil, ackrequeue.Needed(err) } if tmpSecret != "" { - res.SetAuthToken(tmpSecret) + res.AuthToken = aws.String(tmpSecret) } } if r.ko.Status.AutoMinorVersionUpgrade != nil { - res.SetAutoMinorVersionUpgrade(*r.ko.Status.AutoMinorVersionUpgrade) + res.AutoMinorVersionUpgrade = r.ko.Status.AutoMinorVersionUpgrade } if r.ko.Spec.AutomaticFailoverEnabled != nil { - res.SetAutomaticFailoverEnabled(*r.ko.Spec.AutomaticFailoverEnabled) + res.AutomaticFailoverEnabled = r.ko.Spec.AutomaticFailoverEnabled } if r.ko.Spec.CacheNodeType != nil { - res.SetCacheNodeType(*r.ko.Spec.CacheNodeType) + res.CacheNodeType = r.ko.Spec.CacheNodeType } if r.ko.Spec.CacheParameterGroupName != nil { - res.SetCacheParameterGroupName(*r.ko.Spec.CacheParameterGroupName) + res.CacheParameterGroupName = r.ko.Spec.CacheParameterGroupName } if r.ko.Spec.CacheSecurityGroupNames != nil { - f7 := []*string{} - for _, f7iter := range r.ko.Spec.CacheSecurityGroupNames { - var f7elem string - f7elem = *f7iter - f7 = append(f7, &f7elem) - } - res.SetCacheSecurityGroupNames(f7) + res.CacheSecurityGroupNames = aws.ToStringSlice(r.ko.Spec.CacheSecurityGroupNames) + } + if r.ko.Spec.ClusterMode != nil { + res.ClusterMode = svcsdktypes.ClusterMode(*r.ko.Spec.ClusterMode) + } + if r.ko.Spec.Engine != nil { + res.Engine = r.ko.Spec.Engine + } + if 
r.ko.Spec.IPDiscovery != nil { + res.IpDiscovery = svcsdktypes.IpDiscovery(*r.ko.Spec.IPDiscovery) } if r.ko.Spec.LogDeliveryConfigurations != nil { - f8 := []*svcsdk.LogDeliveryConfigurationRequest{} - for _, f8iter := range r.ko.Spec.LogDeliveryConfigurations { - f8elem := &svcsdk.LogDeliveryConfigurationRequest{} - if f8iter.DestinationDetails != nil { - f8elemf0 := &svcsdk.DestinationDetails{} - if f8iter.DestinationDetails.CloudWatchLogsDetails != nil { - f8elemf0f0 := &svcsdk.CloudWatchLogsDestinationDetails{} - if f8iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f8elemf0f0.SetLogGroup(*f8iter.DestinationDetails.CloudWatchLogsDetails.LogGroup) + f11 := []svcsdktypes.LogDeliveryConfigurationRequest{} + for _, f11iter := range r.ko.Spec.LogDeliveryConfigurations { + f11elem := &svcsdktypes.LogDeliveryConfigurationRequest{} + if f11iter.DestinationDetails != nil { + f11elemf0 := &svcsdktypes.DestinationDetails{} + if f11iter.DestinationDetails.CloudWatchLogsDetails != nil { + f11elemf0f0 := &svcsdktypes.CloudWatchLogsDestinationDetails{} + if f11iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f11elemf0f0.LogGroup = f11iter.DestinationDetails.CloudWatchLogsDetails.LogGroup } - f8elemf0.SetCloudWatchLogsDetails(f8elemf0f0) + f11elemf0.CloudWatchLogsDetails = f11elemf0f0 } - if f8iter.DestinationDetails.KinesisFirehoseDetails != nil { - f8elemf0f1 := &svcsdk.KinesisFirehoseDestinationDetails{} - if f8iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f8elemf0f1.SetDeliveryStream(*f8iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream) + if f11iter.DestinationDetails.KinesisFirehoseDetails != nil { + f11elemf0f1 := &svcsdktypes.KinesisFirehoseDestinationDetails{} + if f11iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f11elemf0f1.DeliveryStream = f11iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream } - f8elemf0.SetKinesisFirehoseDetails(f8elemf0f1) + 
f11elemf0.KinesisFirehoseDetails = f11elemf0f1 } - f8elem.SetDestinationDetails(f8elemf0) + f11elem.DestinationDetails = f11elemf0 } - if f8iter.DestinationType != nil { - f8elem.SetDestinationType(*f8iter.DestinationType) + if f11iter.DestinationType != nil { + f11elem.DestinationType = svcsdktypes.DestinationType(*f11iter.DestinationType) } - if f8iter.Enabled != nil { - f8elem.SetEnabled(*f8iter.Enabled) + if f11iter.Enabled != nil { + f11elem.Enabled = f11iter.Enabled } - if f8iter.LogFormat != nil { - f8elem.SetLogFormat(*f8iter.LogFormat) + if f11iter.LogFormat != nil { + f11elem.LogFormat = svcsdktypes.LogFormat(*f11iter.LogFormat) } - if f8iter.LogType != nil { - f8elem.SetLogType(*f8iter.LogType) + if f11iter.LogType != nil { + f11elem.LogType = svcsdktypes.LogType(*f11iter.LogType) } - f8 = append(f8, f8elem) + f11 = append(f11, *f11elem) } - res.SetLogDeliveryConfigurations(f8) + res.LogDeliveryConfigurations = f11 } if r.ko.Spec.MultiAZEnabled != nil { - res.SetMultiAZEnabled(*r.ko.Spec.MultiAZEnabled) + res.MultiAZEnabled = r.ko.Spec.MultiAZEnabled } if r.ko.Spec.NotificationTopicARN != nil { - res.SetNotificationTopicArn(*r.ko.Spec.NotificationTopicARN) + res.NotificationTopicArn = r.ko.Spec.NotificationTopicARN } if r.ko.Spec.PreferredMaintenanceWindow != nil { - res.SetPreferredMaintenanceWindow(*r.ko.Spec.PreferredMaintenanceWindow) + res.PreferredMaintenanceWindow = r.ko.Spec.PreferredMaintenanceWindow } if r.ko.Spec.PrimaryClusterID != nil { - res.SetPrimaryClusterId(*r.ko.Spec.PrimaryClusterID) + res.PrimaryClusterId = r.ko.Spec.PrimaryClusterID } if r.ko.Spec.Description != nil { - res.SetReplicationGroupDescription(*r.ko.Spec.Description) + res.ReplicationGroupDescription = r.ko.Spec.Description } if r.ko.Spec.ReplicationGroupID != nil { - res.SetReplicationGroupId(*r.ko.Spec.ReplicationGroupID) + res.ReplicationGroupId = r.ko.Spec.ReplicationGroupID } if r.ko.Spec.SnapshotRetentionLimit != nil { - 
res.SetSnapshotRetentionLimit(*r.ko.Spec.SnapshotRetentionLimit) + snapshotRetentionLimitCopy0 := *r.ko.Spec.SnapshotRetentionLimit + if snapshotRetentionLimitCopy0 > math.MaxInt32 || snapshotRetentionLimitCopy0 < math.MinInt32 { + return nil, fmt.Errorf("error: field SnapshotRetentionLimit is of type int32") + } + snapshotRetentionLimitCopy := int32(snapshotRetentionLimitCopy0) + res.SnapshotRetentionLimit = &snapshotRetentionLimitCopy } if r.ko.Spec.SnapshotWindow != nil { - res.SetSnapshotWindow(*r.ko.Spec.SnapshotWindow) + res.SnapshotWindow = r.ko.Spec.SnapshotWindow } if r.ko.Status.SnapshottingClusterID != nil { - res.SetSnapshottingClusterId(*r.ko.Status.SnapshottingClusterID) + res.SnapshottingClusterId = r.ko.Status.SnapshottingClusterID + } + if r.ko.Spec.TransitEncryptionEnabled != nil { + res.TransitEncryptionEnabled = r.ko.Spec.TransitEncryptionEnabled + } + if r.ko.Spec.TransitEncryptionMode != nil { + res.TransitEncryptionMode = svcsdktypes.TransitEncryptionMode(*r.ko.Spec.TransitEncryptionMode) } return res, nil @@ -1747,7 +1790,7 @@ func (rm *resourceManager) sdkDelete( } var resp *svcsdk.DeleteReplicationGroupOutput _ = resp - resp, err = rm.sdkapi.DeleteReplicationGroupWithContext(ctx, input) + resp, err = rm.sdkapi.DeleteReplicationGroup(ctx, input) rm.metrics.RecordAPICall("DELETE", "DeleteReplicationGroup", err) // delete call successful if err == nil { @@ -1779,7 +1822,7 @@ func (rm *resourceManager) newDeleteRequestPayload( res := &svcsdk.DeleteReplicationGroupInput{} if r.ko.Spec.ReplicationGroupID != nil { - res.SetReplicationGroupId(*r.ko.Spec.ReplicationGroupID) + res.ReplicationGroupId = r.ko.Spec.ReplicationGroupID } return res, nil @@ -1887,11 +1930,12 @@ func (rm *resourceManager) terminalAWSError(err error) bool { if err == nil { return false } - awsErr, ok := ackerr.AWSError(err) - if !ok { + + var terminalErr smithy.APIError + if !errors.As(err, &terminalErr) { return false } - switch awsErr.Code() { + switch 
terminalErr.ErrorCode() { case "InvalidParameter", "InvalidParameterValue", "InvalidParameterCombination", @@ -1953,8 +1997,8 @@ func (rm *resourceManager) setReplicationGroupOutput( } else { ko.Status.AutoMinorVersionUpgrade = nil } - if resp.ReplicationGroup.AutomaticFailover != nil { - ko.Status.AutomaticFailover = resp.ReplicationGroup.AutomaticFailover + if resp.ReplicationGroup.AutomaticFailover != "" { + ko.Status.AutomaticFailover = aws.String(string(resp.ReplicationGroup.AutomaticFailover)) } else { ko.Status.AutomaticFailover = nil } @@ -1968,20 +2012,26 @@ func (rm *resourceManager) setReplicationGroupOutput( } else { ko.Status.ClusterEnabled = nil } + if resp.ReplicationGroup.ClusterMode != "" { + ko.Spec.ClusterMode = aws.String(string(resp.ReplicationGroup.ClusterMode)) + } else { + ko.Spec.ClusterMode = nil + } if resp.ReplicationGroup.ConfigurationEndpoint != nil { - f8 := &svcapitypes.Endpoint{} + f9 := &svcapitypes.Endpoint{} if resp.ReplicationGroup.ConfigurationEndpoint.Address != nil { - f8.Address = resp.ReplicationGroup.ConfigurationEndpoint.Address + f9.Address = resp.ReplicationGroup.ConfigurationEndpoint.Address } if resp.ReplicationGroup.ConfigurationEndpoint.Port != nil { - f8.Port = resp.ReplicationGroup.ConfigurationEndpoint.Port + portCopy := int64(*resp.ReplicationGroup.ConfigurationEndpoint.Port) + f9.Port = &portCopy } - ko.Status.ConfigurationEndpoint = f8 + ko.Status.ConfigurationEndpoint = f9 } else { ko.Status.ConfigurationEndpoint = nil } - if resp.ReplicationGroup.DataTiering != nil { - ko.Status.DataTiering = resp.ReplicationGroup.DataTiering + if resp.ReplicationGroup.DataTiering != "" { + ko.Status.DataTiering = aws.String(string(resp.ReplicationGroup.DataTiering)) } else { ko.Status.DataTiering = nil } @@ -1990,239 +2040,242 @@ func (rm *resourceManager) setReplicationGroupOutput( } else { ko.Spec.Description = nil } + if resp.ReplicationGroup.Engine != nil { + ko.Spec.Engine = resp.ReplicationGroup.Engine + } else { + 
ko.Spec.Engine = nil + } if resp.ReplicationGroup.GlobalReplicationGroupInfo != nil { - f11 := &svcapitypes.GlobalReplicationGroupInfo{} + f13 := &svcapitypes.GlobalReplicationGroupInfo{} if resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupId != nil { - f11.GlobalReplicationGroupID = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupId + f13.GlobalReplicationGroupID = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupId } if resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole != nil { - f11.GlobalReplicationGroupMemberRole = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole + f13.GlobalReplicationGroupMemberRole = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole } - ko.Status.GlobalReplicationGroupInfo = f11 + ko.Status.GlobalReplicationGroupInfo = f13 } else { ko.Status.GlobalReplicationGroupInfo = nil } + if resp.ReplicationGroup.IpDiscovery != "" { + ko.Spec.IPDiscovery = aws.String(string(resp.ReplicationGroup.IpDiscovery)) + } else { + ko.Spec.IPDiscovery = nil + } if resp.ReplicationGroup.KmsKeyId != nil { ko.Spec.KMSKeyID = resp.ReplicationGroup.KmsKeyId } else { ko.Spec.KMSKeyID = nil } if resp.ReplicationGroup.LogDeliveryConfigurations != nil { - f13 := []*svcapitypes.LogDeliveryConfigurationRequest{} - for _, f13iter := range resp.ReplicationGroup.LogDeliveryConfigurations { - f13elem := &svcapitypes.LogDeliveryConfigurationRequest{} - if f13iter.DestinationDetails != nil { - f13elemf0 := &svcapitypes.DestinationDetails{} - if f13iter.DestinationDetails.CloudWatchLogsDetails != nil { - f13elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} - if f13iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f13elemf0f0.LogGroup = f13iter.DestinationDetails.CloudWatchLogsDetails.LogGroup + f16 := []*svcapitypes.LogDeliveryConfigurationRequest{} + for _, f16iter := range 
resp.ReplicationGroup.LogDeliveryConfigurations { + f16elem := &svcapitypes.LogDeliveryConfigurationRequest{} + if f16iter.DestinationDetails != nil { + f16elemf0 := &svcapitypes.DestinationDetails{} + if f16iter.DestinationDetails.CloudWatchLogsDetails != nil { + f16elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} + if f16iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f16elemf0f0.LogGroup = f16iter.DestinationDetails.CloudWatchLogsDetails.LogGroup } - f13elemf0.CloudWatchLogsDetails = f13elemf0f0 + f16elemf0.CloudWatchLogsDetails = f16elemf0f0 } - if f13iter.DestinationDetails.KinesisFirehoseDetails != nil { - f13elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} - if f13iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f13elemf0f1.DeliveryStream = f13iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream + if f16iter.DestinationDetails.KinesisFirehoseDetails != nil { + f16elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} + if f16iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f16elemf0f1.DeliveryStream = f16iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream } - f13elemf0.KinesisFirehoseDetails = f13elemf0f1 + f16elemf0.KinesisFirehoseDetails = f16elemf0f1 } - f13elem.DestinationDetails = f13elemf0 + f16elem.DestinationDetails = f16elemf0 } - if f13iter.DestinationType != nil { - f13elem.DestinationType = f13iter.DestinationType + if f16iter.DestinationType != "" { + f16elem.DestinationType = aws.String(string(f16iter.DestinationType)) } - if f13iter.LogFormat != nil { - f13elem.LogFormat = f13iter.LogFormat + if f16iter.LogFormat != "" { + f16elem.LogFormat = aws.String(string(f16iter.LogFormat)) } - if f13iter.LogType != nil { - f13elem.LogType = f13iter.LogType + if f16iter.LogType != "" { + f16elem.LogType = aws.String(string(f16iter.LogType)) } - f13 = append(f13, f13elem) + f16 = append(f16, f16elem) } - ko.Spec.LogDeliveryConfigurations = f13 
+ ko.Spec.LogDeliveryConfigurations = f16 } else { ko.Spec.LogDeliveryConfigurations = nil } if resp.ReplicationGroup.MemberClusters != nil { - f14 := []*string{} - for _, f14iter := range resp.ReplicationGroup.MemberClusters { - var f14elem string - f14elem = *f14iter - f14 = append(f14, &f14elem) - } - ko.Status.MemberClusters = f14 + ko.Status.MemberClusters = aws.StringSlice(resp.ReplicationGroup.MemberClusters) } else { ko.Status.MemberClusters = nil } if resp.ReplicationGroup.MemberClustersOutpostArns != nil { - f15 := []*string{} - for _, f15iter := range resp.ReplicationGroup.MemberClustersOutpostArns { - var f15elem string - f15elem = *f15iter - f15 = append(f15, &f15elem) - } - ko.Status.MemberClustersOutpostARNs = f15 + ko.Status.MemberClustersOutpostARNs = aws.StringSlice(resp.ReplicationGroup.MemberClustersOutpostArns) } else { ko.Status.MemberClustersOutpostARNs = nil } - if resp.ReplicationGroup.MultiAZ != nil { - ko.Status.MultiAZ = resp.ReplicationGroup.MultiAZ + if resp.ReplicationGroup.MultiAZ != "" { + ko.Status.MultiAZ = aws.String(string(resp.ReplicationGroup.MultiAZ)) } else { ko.Status.MultiAZ = nil } + if resp.ReplicationGroup.NetworkType != "" { + ko.Spec.NetworkType = aws.String(string(resp.ReplicationGroup.NetworkType)) + } else { + ko.Spec.NetworkType = nil + } if resp.ReplicationGroup.NodeGroups != nil { - f17 := []*svcapitypes.NodeGroup{} - for _, f17iter := range resp.ReplicationGroup.NodeGroups { - f17elem := &svcapitypes.NodeGroup{} - if f17iter.NodeGroupId != nil { - f17elem.NodeGroupID = f17iter.NodeGroupId - } - if f17iter.NodeGroupMembers != nil { - f17elemf1 := []*svcapitypes.NodeGroupMember{} - for _, f17elemf1iter := range f17iter.NodeGroupMembers { - f17elemf1elem := &svcapitypes.NodeGroupMember{} - if f17elemf1iter.CacheClusterId != nil { - f17elemf1elem.CacheClusterID = f17elemf1iter.CacheClusterId + f21 := []*svcapitypes.NodeGroup{} + for _, f21iter := range resp.ReplicationGroup.NodeGroups { + f21elem := 
&svcapitypes.NodeGroup{} + if f21iter.NodeGroupId != nil { + f21elem.NodeGroupID = f21iter.NodeGroupId + } + if f21iter.NodeGroupMembers != nil { + f21elemf1 := []*svcapitypes.NodeGroupMember{} + for _, f21elemf1iter := range f21iter.NodeGroupMembers { + f21elemf1elem := &svcapitypes.NodeGroupMember{} + if f21elemf1iter.CacheClusterId != nil { + f21elemf1elem.CacheClusterID = f21elemf1iter.CacheClusterId } - if f17elemf1iter.CacheNodeId != nil { - f17elemf1elem.CacheNodeID = f17elemf1iter.CacheNodeId + if f21elemf1iter.CacheNodeId != nil { + f21elemf1elem.CacheNodeID = f21elemf1iter.CacheNodeId } - if f17elemf1iter.CurrentRole != nil { - f17elemf1elem.CurrentRole = f17elemf1iter.CurrentRole + if f21elemf1iter.CurrentRole != nil { + f21elemf1elem.CurrentRole = f21elemf1iter.CurrentRole } - if f17elemf1iter.PreferredAvailabilityZone != nil { - f17elemf1elem.PreferredAvailabilityZone = f17elemf1iter.PreferredAvailabilityZone + if f21elemf1iter.PreferredAvailabilityZone != nil { + f21elemf1elem.PreferredAvailabilityZone = f21elemf1iter.PreferredAvailabilityZone } - if f17elemf1iter.PreferredOutpostArn != nil { - f17elemf1elem.PreferredOutpostARN = f17elemf1iter.PreferredOutpostArn + if f21elemf1iter.PreferredOutpostArn != nil { + f21elemf1elem.PreferredOutpostARN = f21elemf1iter.PreferredOutpostArn } - if f17elemf1iter.ReadEndpoint != nil { - f17elemf1elemf5 := &svcapitypes.Endpoint{} - if f17elemf1iter.ReadEndpoint.Address != nil { - f17elemf1elemf5.Address = f17elemf1iter.ReadEndpoint.Address + if f21elemf1iter.ReadEndpoint != nil { + f21elemf1elemf5 := &svcapitypes.Endpoint{} + if f21elemf1iter.ReadEndpoint.Address != nil { + f21elemf1elemf5.Address = f21elemf1iter.ReadEndpoint.Address } - if f17elemf1iter.ReadEndpoint.Port != nil { - f17elemf1elemf5.Port = f17elemf1iter.ReadEndpoint.Port + if f21elemf1iter.ReadEndpoint.Port != nil { + portCopy := int64(*f21elemf1iter.ReadEndpoint.Port) + f21elemf1elemf5.Port = &portCopy } - f17elemf1elem.ReadEndpoint = 
f17elemf1elemf5 + f21elemf1elem.ReadEndpoint = f21elemf1elemf5 } - f17elemf1 = append(f17elemf1, f17elemf1elem) + f21elemf1 = append(f21elemf1, f21elemf1elem) } - f17elem.NodeGroupMembers = f17elemf1 + f21elem.NodeGroupMembers = f21elemf1 } - if f17iter.PrimaryEndpoint != nil { - f17elemf2 := &svcapitypes.Endpoint{} - if f17iter.PrimaryEndpoint.Address != nil { - f17elemf2.Address = f17iter.PrimaryEndpoint.Address + if f21iter.PrimaryEndpoint != nil { + f21elemf2 := &svcapitypes.Endpoint{} + if f21iter.PrimaryEndpoint.Address != nil { + f21elemf2.Address = f21iter.PrimaryEndpoint.Address } - if f17iter.PrimaryEndpoint.Port != nil { - f17elemf2.Port = f17iter.PrimaryEndpoint.Port + if f21iter.PrimaryEndpoint.Port != nil { + portCopy := int64(*f21iter.PrimaryEndpoint.Port) + f21elemf2.Port = &portCopy } - f17elem.PrimaryEndpoint = f17elemf2 + f21elem.PrimaryEndpoint = f21elemf2 } - if f17iter.ReaderEndpoint != nil { - f17elemf3 := &svcapitypes.Endpoint{} - if f17iter.ReaderEndpoint.Address != nil { - f17elemf3.Address = f17iter.ReaderEndpoint.Address + if f21iter.ReaderEndpoint != nil { + f21elemf3 := &svcapitypes.Endpoint{} + if f21iter.ReaderEndpoint.Address != nil { + f21elemf3.Address = f21iter.ReaderEndpoint.Address } - if f17iter.ReaderEndpoint.Port != nil { - f17elemf3.Port = f17iter.ReaderEndpoint.Port + if f21iter.ReaderEndpoint.Port != nil { + portCopy := int64(*f21iter.ReaderEndpoint.Port) + f21elemf3.Port = &portCopy } - f17elem.ReaderEndpoint = f17elemf3 + f21elem.ReaderEndpoint = f21elemf3 } - if f17iter.Slots != nil { - f17elem.Slots = f17iter.Slots + if f21iter.Slots != nil { + f21elem.Slots = f21iter.Slots } - if f17iter.Status != nil { - f17elem.Status = f17iter.Status + if f21iter.Status != nil { + f21elem.Status = f21iter.Status } - f17 = append(f17, f17elem) + f21 = append(f21, f21elem) } - ko.Status.NodeGroups = f17 + ko.Status.NodeGroups = f21 } else { ko.Status.NodeGroups = nil } if resp.ReplicationGroup.PendingModifiedValues != nil { - f18 := 
&svcapitypes.ReplicationGroupPendingModifiedValues{} - if resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus != nil { - f18.AuthTokenStatus = resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus + f22 := &svcapitypes.ReplicationGroupPendingModifiedValues{} + if resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus != "" { + f22.AuthTokenStatus = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus)) + } + if resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus != "" { + f22.AutomaticFailoverStatus = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus)) } - if resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus != nil { - f18.AutomaticFailoverStatus = resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus + if resp.ReplicationGroup.PendingModifiedValues.ClusterMode != "" { + f22.ClusterMode = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.ClusterMode)) } if resp.ReplicationGroup.PendingModifiedValues.LogDeliveryConfigurations != nil { - f18f2 := []*svcapitypes.PendingLogDeliveryConfiguration{} - for _, f18f2iter := range resp.ReplicationGroup.PendingModifiedValues.LogDeliveryConfigurations { - f18f2elem := &svcapitypes.PendingLogDeliveryConfiguration{} - if f18f2iter.DestinationDetails != nil { - f18f2elemf0 := &svcapitypes.DestinationDetails{} - if f18f2iter.DestinationDetails.CloudWatchLogsDetails != nil { - f18f2elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} - if f18f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f18f2elemf0f0.LogGroup = f18f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup + f22f3 := []*svcapitypes.PendingLogDeliveryConfiguration{} + for _, f22f3iter := range resp.ReplicationGroup.PendingModifiedValues.LogDeliveryConfigurations { + f22f3elem := &svcapitypes.PendingLogDeliveryConfiguration{} + if f22f3iter.DestinationDetails != nil { + f22f3elemf0 := 
&svcapitypes.DestinationDetails{} + if f22f3iter.DestinationDetails.CloudWatchLogsDetails != nil { + f22f3elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} + if f22f3iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f22f3elemf0f0.LogGroup = f22f3iter.DestinationDetails.CloudWatchLogsDetails.LogGroup } - f18f2elemf0.CloudWatchLogsDetails = f18f2elemf0f0 + f22f3elemf0.CloudWatchLogsDetails = f22f3elemf0f0 } - if f18f2iter.DestinationDetails.KinesisFirehoseDetails != nil { - f18f2elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} - if f18f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f18f2elemf0f1.DeliveryStream = f18f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream + if f22f3iter.DestinationDetails.KinesisFirehoseDetails != nil { + f22f3elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} + if f22f3iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f22f3elemf0f1.DeliveryStream = f22f3iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream } - f18f2elemf0.KinesisFirehoseDetails = f18f2elemf0f1 + f22f3elemf0.KinesisFirehoseDetails = f22f3elemf0f1 } - f18f2elem.DestinationDetails = f18f2elemf0 + f22f3elem.DestinationDetails = f22f3elemf0 } - if f18f2iter.DestinationType != nil { - f18f2elem.DestinationType = f18f2iter.DestinationType + if f22f3iter.DestinationType != "" { + f22f3elem.DestinationType = aws.String(string(f22f3iter.DestinationType)) } - if f18f2iter.LogFormat != nil { - f18f2elem.LogFormat = f18f2iter.LogFormat + if f22f3iter.LogFormat != "" { + f22f3elem.LogFormat = aws.String(string(f22f3iter.LogFormat)) } - if f18f2iter.LogType != nil { - f18f2elem.LogType = f18f2iter.LogType + if f22f3iter.LogType != "" { + f22f3elem.LogType = aws.String(string(f22f3iter.LogType)) } - f18f2 = append(f18f2, f18f2elem) + f22f3 = append(f22f3, f22f3elem) } - f18.LogDeliveryConfigurations = f18f2 + f22.LogDeliveryConfigurations = f22f3 } if 
resp.ReplicationGroup.PendingModifiedValues.PrimaryClusterId != nil { - f18.PrimaryClusterID = resp.ReplicationGroup.PendingModifiedValues.PrimaryClusterId + f22.PrimaryClusterID = resp.ReplicationGroup.PendingModifiedValues.PrimaryClusterId } if resp.ReplicationGroup.PendingModifiedValues.Resharding != nil { - f18f4 := &svcapitypes.ReshardingStatus{} + f22f5 := &svcapitypes.ReshardingStatus{} if resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration != nil { - f18f4f0 := &svcapitypes.SlotMigration{} + f22f5f0 := &svcapitypes.SlotMigration{} if resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage != nil { - f18f4f0.ProgressPercentage = resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage + f22f5f0.ProgressPercentage = resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage } - f18f4.SlotMigration = f18f4f0 + f22f5.SlotMigration = f22f5f0 } - f18.Resharding = f18f4 + f22.Resharding = f22f5 + } + if resp.ReplicationGroup.PendingModifiedValues.TransitEncryptionEnabled != nil { + f22.TransitEncryptionEnabled = resp.ReplicationGroup.PendingModifiedValues.TransitEncryptionEnabled + } + if resp.ReplicationGroup.PendingModifiedValues.TransitEncryptionMode != "" { + f22.TransitEncryptionMode = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.TransitEncryptionMode)) } if resp.ReplicationGroup.PendingModifiedValues.UserGroups != nil { - f18f5 := &svcapitypes.UserGroupsUpdateStatus{} + f22f8 := &svcapitypes.UserGroupsUpdateStatus{} if resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToAdd != nil { - f18f5f0 := []*string{} - for _, f18f5f0iter := range resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToAdd { - var f18f5f0elem string - f18f5f0elem = *f18f5f0iter - f18f5f0 = append(f18f5f0, &f18f5f0elem) - } - f18f5.UserGroupIDsToAdd = f18f5f0 + f22f8.UserGroupIDsToAdd = 
aws.StringSlice(resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToAdd) } if resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToRemove != nil { - f18f5f1 := []*string{} - for _, f18f5f1iter := range resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToRemove { - var f18f5f1elem string - f18f5f1elem = *f18f5f1iter - f18f5f1 = append(f18f5f1, &f18f5f1elem) - } - f18f5.UserGroupIDsToRemove = f18f5f1 + f22f8.UserGroupIDsToRemove = aws.StringSlice(resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToRemove) } - f18.UserGroups = f18f5 + f22.UserGroups = f22f8 } - ko.Status.PendingModifiedValues = f18 + ko.Status.PendingModifiedValues = f22 } else { ko.Status.PendingModifiedValues = nil } @@ -2237,7 +2290,8 @@ func (rm *resourceManager) setReplicationGroupOutput( ko.Spec.ReplicationGroupID = nil } if resp.ReplicationGroup.SnapshotRetentionLimit != nil { - ko.Spec.SnapshotRetentionLimit = resp.ReplicationGroup.SnapshotRetentionLimit + snapshotRetentionLimitCopy := int64(*resp.ReplicationGroup.SnapshotRetentionLimit) + ko.Spec.SnapshotRetentionLimit = &snapshotRetentionLimitCopy } else { ko.Spec.SnapshotRetentionLimit = nil } @@ -2261,14 +2315,13 @@ func (rm *resourceManager) setReplicationGroupOutput( } else { ko.Spec.TransitEncryptionEnabled = nil } + if resp.ReplicationGroup.TransitEncryptionMode != "" { + ko.Spec.TransitEncryptionMode = aws.String(string(resp.ReplicationGroup.TransitEncryptionMode)) + } else { + ko.Spec.TransitEncryptionMode = nil + } if resp.ReplicationGroup.UserGroupIds != nil { - f26 := []*string{} - for _, f26iter := range resp.ReplicationGroup.UserGroupIds { - var f26elem string - f26elem = *f26iter - f26 = append(f26, &f26elem) - } - ko.Spec.UserGroupIDs = f26 + ko.Spec.UserGroupIDs = aws.StringSlice(resp.ReplicationGroup.UserGroupIds) } else { ko.Spec.UserGroupIDs = nil } diff --git a/pkg/resource/replication_group/testdata/DecreaseReplicaCountOutput.json 
b/pkg/resource/replication_group/testdata/DecreaseReplicaCountOutput.json deleted file mode 100644 index b6cbea8b..00000000 --- a/pkg/resource/replication_group/testdata/DecreaseReplicaCountOutput.json +++ /dev/null @@ -1,77 +0,0 @@ -{ - "ReplicationGroup": { - "ReplicationGroupId": "my-cluster", - "Description": "mock_replication_group_description", - "Status": "modifying", - "PendingModifiedValues": {}, - "MemberClusters": [ - "myrepliace", - "my-cluster-001", - "my-cluster-002", - "my-cluster-003" - ], - "NodeGroups": [ - { - "NodeGroupId": "0001", - "Status": "modifying", - "PrimaryEndpoint": { - "Address": "my-cluster.xxxxx.ng.0001.usw2.cache.amazonaws.com", - "Port": 6379 - }, - "ReaderEndpoint": { - "Address": "my-cluster-ro.xxxxx.ng.0001.usw2.cache.amazonaws.com", - "Port": 6379 - }, - "NodeGroupMembers": [ - { - "CacheClusterId": "myrepliace", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Address": "myrepliace.xxxxx.0001.usw2.cache.amazonaws.com", - "Port": 6379 - }, - "PreferredAvailabilityZone": "us-west-2a", - "CurrentRole": "replica" - }, - { - "CacheClusterId": "my-cluster-001", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Address": "my-cluster-001.xxxxx.0001.usw2.cache.amazonaws.com", - "Port": 6379 - }, - "PreferredAvailabilityZone": "us-west-2a", - "CurrentRole": "primary" - }, - { - "CacheClusterId": "my-cluster-002", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Address": "my-cluster-002.xxxxx.0001.usw2.cache.amazonaws.com", - "Port": 6379 - }, - "PreferredAvailabilityZone": "us-west-2a", - "CurrentRole": "replica" - }, - { - "CacheClusterId": "my-cluster-003", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Address": "my-cluster-003.xxxxx.0001.usw2.cache.amazonaws.com", - "Port": 6379 - }, - "PreferredAvailabilityZone": "us-west-2a", - "CurrentRole": "replica" - } - ] - } - ], - "AutomaticFailover": "disabled", - "SnapshotRetentionLimit": 0, - "SnapshotWindow": "07:30-08:30", - "ClusterEnabled": false, - "CacheNodeType": 
"cache.r5.xlarge", - "TransitEncryptionEnabled": false, - "AtRestEncryptionEnabled": false - } -} diff --git a/pkg/resource/replication_group/testdata/DescribeReplicationGroupsOutput.json b/pkg/resource/replication_group/testdata/DescribeReplicationGroupsOutput.json deleted file mode 100644 index 4367a0ce..00000000 --- a/pkg/resource/replication_group/testdata/DescribeReplicationGroupsOutput.json +++ /dev/null @@ -1,80 +0,0 @@ -{ - "ReplicationGroups": [ - { - "ReplicationGroupId": "my-cluster", - "Description": "mycluster", - "Status": "available", - "PendingModifiedValues": {}, - "MemberClusters": [ - "pat-cluster-001", - "pat-cluster-002", - "pat-cluster-003", - "pat-cluster-004" - ], - "NodeGroups": [ - { - "NodeGroupId": "0001", - "Status": "available", - "PrimaryEndpoint": { - "Address": "my-cluster.xxxxih.ng.0001.usw2.cache.amazonaws.com", - "Port": 6379 - }, - "ReaderEndpoint": { - "Address": "my-cluster-ro.xxxxih.ng.0001.usw2.cache.amazonaws.com", - "Port": 6379 - }, - "NodeGroupMembers": [ - { - "CacheClusterId": "my-cluster-001", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Address": "pat-cluster-001.xxxih.0001.usw2.cache.amazonaws.com", - "Port": 6379 - }, - "PreferredAvailabilityZone": "us-west-2a", - "CurrentRole": "primary" - }, - { - "CacheClusterId": "my-cluster-002", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Address": "pat-cluster-002.xxxxih.0001.usw2.cache.amazonaws.com", - "Port": 6379 - }, - "PreferredAvailabilityZone": "us-west-2a", - "CurrentRole": "replica" - }, - { - "CacheClusterId": "my-cluster-003", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Address": "pat-cluster-003.xxxxih.0001.usw2.cache.amazonaws.com", - "Port": 6379 - }, - "PreferredAvailabilityZone": "us-west-2a", - "CurrentRole": "replica" - }, - { - "CacheClusterId": "my-cluster-004", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Address": "pat-cluster-004.xxxih.0001.usw2.cache.amazonaws.com", - "Port": 6379 - }, - "PreferredAvailabilityZone": "us-west-2a", - 
"CurrentRole": "replica" - } - ] - } - ], - "AutomaticFailover": "disabled", - "SnapshotRetentionLimit": 0, - "SnapshotWindow": "07:30-08:30", - "ClusterEnabled": false, - "CacheNodeType": "cache.r5.xlarge", - "AuthTokenEnabled": false, - "TransitEncryptionEnabled": false, - "AtRestEncryptionEnabled": false - } - ] -} diff --git a/pkg/resource/replication_group/testdata/allowed_node_types/read_many/rg_cmd_allowed_node_types.json b/pkg/resource/replication_group/testdata/allowed_node_types/read_many/rg_cmd_allowed_node_types.json deleted file mode 100644 index 00c54c51..00000000 --- a/pkg/resource/replication_group/testdata/allowed_node_types/read_many/rg_cmd_allowed_node_types.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "ScaleUpModifications": [ - "cache.m3.2xlarge", - "cache.m3.large", - "cache.m3.medium", - "cache.m3.xlarge", - "cache.m4.10xlarge", - "cache.m4.2xlarge", - "cache.m4.4xlarge", - "cache.m4.large", - "cache.m4.xlarge", - "cache.m5.12xlarge", - "cache.m5.24xlarge", - "cache.m5.2xlarge", - "cache.m5.4xlarge", - "cache.m5.large", - "cache.m5.xlarge", - "cache.m6g.large", - "cache.r3.2xlarge", - "cache.r3.4xlarge", - "cache.r3.8xlarge", - "cache.r3.large", - "cache.r3.xlarge", - "cache.r4.16xlarge", - "cache.r4.2xlarge", - "cache.r4.4xlarge", - "cache.r4.8xlarge", - "cache.r4.large", - "cache.r4.xlarge", - "cache.r5.12xlarge", - "cache.r5.24xlarge", - "cache.r5.2xlarge", - "cache.r5.4xlarge", - "cache.r5.large", - "cache.r5.xlarge", - "cache.r6g.2xlarge", - "cache.r6g.4xlarge", - "cache.r6g.8xlarge", - "cache.r6g.large", - "cache.r6g.xlarge", - "cache.t2.medium", - "cache.t2.micro", - "cache.t2.small", - "cache.t3.medium", - "cache.t3.small" - ] -} \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/cache_clusters/read_many/rg_cmd_primary_cache_node.json b/pkg/resource/replication_group/testdata/cache_clusters/read_many/rg_cmd_primary_cache_node.json deleted file mode 100644 index a2e77b85..00000000 --- 
a/pkg/resource/replication_group/testdata/cache_clusters/read_many/rg_cmd_primary_cache_node.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "CacheClusters": [ - { - "CacheClusterId": "rg-cmd-001", - "ReplicationGroupId": "rg-cmd", - "CacheClusterStatus": "available", - "SnapshotRetentionLimit": 0, - "ClientDownloadLandingPage": "https://console.aws.amazon.com/elasticache/home#client-download:", - "CacheNodeType": "cache.t3.micro", - "TransitEncryptionEnabled": false, - "Engine": "redis", - "CacheSecurityGroups": [], - "NumCacheNodes": 1, - "AutoMinorVersionUpgrade": true, - "PendingModifiedValues": {}, - "PreferredMaintenanceWindow": "wed:08:00-wed:09:00", - "CacheSubnetGroupName": "default", - "AuthTokenEnabled": false, - "AtRestEncryptionEnabled": false, - "EngineVersion": "5.0.0", - "CacheClusterCreateTime": "2021-04-13T19:07:04.983Z", - "PreferredAvailabilityZone": "us-east-1b", - "SnapshotWindow": "06:30-07:30", - "ARN": "arn:aws:elasticache:us-east-1:012345678910:cluster:rg-cmd-001", - "CacheParameterGroup": { - "CacheNodeIdsToReboot": [], - "CacheParameterGroupName": "default.redis5.0", - "ParameterApplyStatus": "in-sync" - } - } - ] -} \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/events/read_many/rg_cmd_events.json b/pkg/resource/replication_group/testdata/events/read_many/rg_cmd_events.json deleted file mode 100644 index 67dbfbb5..00000000 --- a/pkg/resource/replication_group/testdata/events/read_many/rg_cmd_events.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "Events": [ - { - "Date": "2021-03-30T20:12:00Z", - "Message": "Replication group rg-cmd created", - "SourceIdentifier": "rg-cmd", - "SourceType": "replication-group" - } - ] -} \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_create.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_create.yaml deleted file mode 100644 index d193e0bc..00000000 --- 
a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_create.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.micro - engine: redis - numNodeGroups: 1 - replicasPerNodeGroup: 1 - description: cluster-mode disabled RG - replicationGroupID: rg-cmd \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_engine_version_upgrade.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_engine_version_upgrade.yaml deleted file mode 100644 index e06a78e8..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_engine_version_upgrade.yaml +++ /dev/null @@ -1,99 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.micro - engine: redis - engineVersion: 5.0.6 # new config has been applied; this is the new engine version but no action has been taken yet - numNodeGroups: 1 - replicasPerNodeGroup: 1 - description: cluster-mode disabled RG - replicationGroupID: rg-cmd - atRestEncryptionEnabled: false - snapshotRetentionLimit: 0 - snapshotWindow: "06:30-07:30" - transitEncryptionEnabled: false -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd - ownerAccountID: "" - region: "" - allowedScaleUpModifications: - - cache.m3.2xlarge - - cache.m3.large - - cache.m3.medium - - cache.m3.xlarge - - cache.m4.10xlarge - - cache.m4.2xlarge - - cache.m4.4xlarge - - cache.m4.large - - cache.m4.xlarge - - cache.m5.12xlarge - - cache.m5.24xlarge - - cache.m5.2xlarge - - cache.m5.4xlarge - - cache.m5.large - - cache.m5.xlarge - - cache.r3.2xlarge - - cache.r3.4xlarge - - cache.r3.8xlarge - - cache.r3.large - - cache.r3.xlarge - - cache.r4.16xlarge - - cache.r4.2xlarge - - cache.r4.4xlarge - - cache.r4.8xlarge - 
- cache.r4.large - - cache.r4.xlarge - - cache.r5.12xlarge - - cache.r5.24xlarge - - cache.r5.2xlarge - - cache.r5.4xlarge - - cache.r5.large - - cache.r5.xlarge - - cache.t2.medium - - cache.t2.micro - - cache.t2.small - - cache.t3.medium - - cache.t3.small - authTokenEnabled: false - automaticFailover: disabled - clusterEnabled: false - conditions: - - status: "True" - type: ACK.ResourceSynced - description: cluster-mode disabled RG - events: - - date: "2021-04-13T19:07:05Z" - message: Replication group rg-cmd created - globalReplicationGroupInfo: {} - memberClusters: - - rg-cmd-001 - - rg-cmd-002 - multiAZ: disabled - nodeGroups: - - nodeGroupID: "0001" - nodeGroupMembers: - - cacheClusterID: rg-cmd-001 - cacheNodeID: "0001" - currentRole: primary - preferredAvailabilityZone: us-east-1b - readEndpoint: - address: rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - - cacheClusterID: rg-cmd-002 - cacheNodeID: "0001" - currentRole: replica - preferredAvailabilityZone: us-east-1d - readEndpoint: - address: rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - primaryEndpoint: - address: rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - readerEndpoint: - address: rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - status: available - pendingModifiedValues: {} - status: available \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_engine_version_upgrade_latest.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_engine_version_upgrade_latest.yaml deleted file mode 100644 index c5d98929..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_engine_version_upgrade_latest.yaml +++ /dev/null @@ -1,99 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.micro - engine: redis - engineVersion: 5.0.0 # this should still be 
the old engine version - numNodeGroups: 1 - replicasPerNodeGroup: 1 - description: cluster-mode disabled RG - replicationGroupID: rg-cmd - atRestEncryptionEnabled: false - snapshotRetentionLimit: 0 - snapshotWindow: "06:30-07:30" - transitEncryptionEnabled: false -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd - ownerAccountID: "" - region: "" - allowedScaleUpModifications: - - cache.m3.2xlarge - - cache.m3.large - - cache.m3.medium - - cache.m3.xlarge - - cache.m4.10xlarge - - cache.m4.2xlarge - - cache.m4.4xlarge - - cache.m4.large - - cache.m4.xlarge - - cache.m5.12xlarge - - cache.m5.24xlarge - - cache.m5.2xlarge - - cache.m5.4xlarge - - cache.m5.large - - cache.m5.xlarge - - cache.r3.2xlarge - - cache.r3.4xlarge - - cache.r3.8xlarge - - cache.r3.large - - cache.r3.xlarge - - cache.r4.16xlarge - - cache.r4.2xlarge - - cache.r4.4xlarge - - cache.r4.8xlarge - - cache.r4.large - - cache.r4.xlarge - - cache.r5.12xlarge - - cache.r5.24xlarge - - cache.r5.2xlarge - - cache.r5.4xlarge - - cache.r5.large - - cache.r5.xlarge - - cache.t2.medium - - cache.t2.micro - - cache.t2.small - - cache.t3.medium - - cache.t3.small - authTokenEnabled: false - automaticFailover: disabled - clusterEnabled: false - conditions: - - status: "True" - type: ACK.ResourceSynced - description: cluster-mode disabled RG - events: - - date: "2021-04-13T19:07:05Z" - message: Replication group rg-cmd created - globalReplicationGroupInfo: {} - memberClusters: - - rg-cmd-001 - - rg-cmd-002 - multiAZ: disabled - nodeGroups: - - nodeGroupID: "0001" - nodeGroupMembers: - - cacheClusterID: rg-cmd-001 - cacheNodeID: "0001" - currentRole: primary - preferredAvailabilityZone: us-east-1b - readEndpoint: - address: rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - - cacheClusterID: rg-cmd-002 - cacheNodeID: "0001" - currentRole: replica - preferredAvailabilityZone: us-east-1d - readEndpoint: - address: 
rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - primaryEndpoint: - address: rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - readerEndpoint: - address: rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - status: available - pendingModifiedValues: {} - status: available \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_increase_replica.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_increase_replica.yaml deleted file mode 100644 index c44f9dca..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_increase_replica.yaml +++ /dev/null @@ -1,104 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.micro - engine: redis - numNodeGroups: 1 - replicasPerNodeGroup: 2 # mismatch between this and member clusters because new config was just applied - description: cluster-mode disabled RG - replicationGroupID: rg-cmd - atRestEncryptionEnabled: false - snapshotRetentionLimit: 0 - snapshotWindow: "10:00-11:00" - transitEncryptionEnabled: false -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd - ownerAccountID: "" - region: "" - allowedScaleUpModifications: - - cache.m3.2xlarge - - cache.m3.large - - cache.m3.medium - - cache.m3.xlarge - - cache.m4.10xlarge - - cache.m4.2xlarge - - cache.m4.4xlarge - - cache.m4.large - - cache.m4.xlarge - - cache.m5.12xlarge - - cache.m5.24xlarge - - cache.m5.2xlarge - - cache.m5.4xlarge - - cache.m5.large - - cache.m5.xlarge - - cache.m6g.large - - cache.r3.2xlarge - - cache.r3.4xlarge - - cache.r3.8xlarge - - cache.r3.large - - cache.r3.xlarge - - cache.r4.16xlarge - - cache.r4.2xlarge - - cache.r4.4xlarge - - cache.r4.8xlarge - - cache.r4.large - - cache.r4.xlarge - - cache.r5.12xlarge - - cache.r5.24xlarge - - cache.r5.2xlarge - - 
cache.r5.4xlarge - - cache.r5.large - - cache.r5.xlarge - - cache.r6g.2xlarge - - cache.r6g.4xlarge - - cache.r6g.8xlarge - - cache.r6g.large - - cache.r6g.xlarge - - cache.t2.medium - - cache.t2.micro - - cache.t2.small - - cache.t3.medium - - cache.t3.small - authTokenEnabled: false - automaticFailover: disabled - clusterEnabled: false - conditions: - - status: "True" - type: ACK.ResourceSynced - description: cluster-mode disabled RG - events: - - date: "2021-03-30T20:12:00Z" - message: Replication group rg-cmd created - globalReplicationGroupInfo: {} - memberClusters: - - rg-cmd-001 - - rg-cmd-002 - multiAZ: disabled - nodeGroups: - - nodeGroupID: "0001" - nodeGroupMembers: - - cacheClusterID: rg-cmd-001 - cacheNodeID: "0001" - currentRole: primary - preferredAvailabilityZone: us-east-1b - readEndpoint: - address: rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - - cacheClusterID: rg-cmd-002 - cacheNodeID: "0001" - currentRole: replica - preferredAvailabilityZone: us-east-1d - readEndpoint: - address: rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - primaryEndpoint: - address: rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - readerEndpoint: - address: rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - status: available - pendingModifiedValues: {} - status: available \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_increase_replica_latest.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_increase_replica_latest.yaml deleted file mode 100644 index be4111d4..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_increase_replica_latest.yaml +++ /dev/null @@ -1,104 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.micro - engine: redis - numNodeGroups: 1 - replicasPerNodeGroup: 1 # as this is the 
latest state, replicasPerNodeGroup should be consistent with memberClusters/nodeGroups - description: cluster-mode disabled RG - replicationGroupID: rg-cmd - atRestEncryptionEnabled: false - snapshotRetentionLimit: 0 - snapshotWindow: "10:00-11:00" - transitEncryptionEnabled: false -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd - ownerAccountID: "" - region: "" - allowedScaleUpModifications: - - cache.m3.2xlarge - - cache.m3.large - - cache.m3.medium - - cache.m3.xlarge - - cache.m4.10xlarge - - cache.m4.2xlarge - - cache.m4.4xlarge - - cache.m4.large - - cache.m4.xlarge - - cache.m5.12xlarge - - cache.m5.24xlarge - - cache.m5.2xlarge - - cache.m5.4xlarge - - cache.m5.large - - cache.m5.xlarge - - cache.m6g.large - - cache.r3.2xlarge - - cache.r3.4xlarge - - cache.r3.8xlarge - - cache.r3.large - - cache.r3.xlarge - - cache.r4.16xlarge - - cache.r4.2xlarge - - cache.r4.4xlarge - - cache.r4.8xlarge - - cache.r4.large - - cache.r4.xlarge - - cache.r5.12xlarge - - cache.r5.24xlarge - - cache.r5.2xlarge - - cache.r5.4xlarge - - cache.r5.large - - cache.r5.xlarge - - cache.r6g.2xlarge - - cache.r6g.4xlarge - - cache.r6g.8xlarge - - cache.r6g.large - - cache.r6g.xlarge - - cache.t2.medium - - cache.t2.micro - - cache.t2.small - - cache.t3.medium - - cache.t3.small - authTokenEnabled: false - automaticFailover: disabled - clusterEnabled: false - conditions: - - status: "True" - type: ACK.ResourceSynced - description: cluster-mode disabled RG - events: - - date: "2021-03-30T20:12:00Z" - message: Replication group rg-cmd created - globalReplicationGroupInfo: {} - memberClusters: - - rg-cmd-001 - - rg-cmd-002 - multiAZ: disabled - nodeGroups: - - nodeGroupID: "0001" - nodeGroupMembers: - - cacheClusterID: rg-cmd-001 - cacheNodeID: "0001" - currentRole: primary - preferredAvailabilityZone: us-east-1b - readEndpoint: - address: rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - - cacheClusterID: rg-cmd-002 - 
cacheNodeID: "0001" - currentRole: replica - preferredAvailabilityZone: us-east-1d - readEndpoint: - address: rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - primaryEndpoint: - address: rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - readerEndpoint: - address: rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - status: available - pendingModifiedValues: {} - status: available \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_scale_up_desired.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_scale_up_desired.yaml deleted file mode 100644 index 42689549..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_scale_up_desired.yaml +++ /dev/null @@ -1,91 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.small - engine: redis - numNodeGroups: 1 - replicasPerNodeGroup: 1 - description: cluster-mode disabled RG - replicationGroupID: rg-cmd -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd - ownerAccountID: "" - region: "" - allowedScaleUpModifications: - - cache.m3.large - - cache.m4.10xlarge - - cache.m4.2xlarge - - cache.m4.4xlarge - - cache.m4.large - - cache.m4.xlarge - - cache.m5.12xlarge - - cache.m5.24xlarge - - cache.m5.2xlarge - - cache.m5.4xlarge - - cache.m5.large - - cache.m5.xlarge - - cache.m6g.large - - cache.r4.2xlarge - - cache.r4.4xlarge - - cache.r4.8xlarge - - cache.r4.large - - cache.r4.xlarge - - cache.r5.12xlarge - - cache.r5.24xlarge - - cache.r5.2xlarge - - cache.r5.4xlarge - - cache.r5.large - - cache.r5.xlarge - - cache.r6g.2xlarge - - cache.r6g.4xlarge - - cache.r6g.8xlarge - - cache.r6g.large - - cache.r6g.xlarge - - cache.t2.medium - - cache.t2.micro - - cache.t2.small - - cache.t3.medium - - cache.t3.small - authTokenEnabled: 
false - automaticFailover: disabled - clusterEnabled: false - conditions: - - status: "True" - type: ACK.ResourceSynced - description: cluster-mode disabled RG - events: - - date: "2021-04-12T23:28:40Z" - message: Replication group rg-cmd created - globalReplicationGroupInfo: {} - memberClusters: - - rg-cmd-001 - - rg-cmd-002 - multiAZ: disabled - nodeGroups: - - nodeGroupID: "0001" - nodeGroupMembers: - - cacheClusterID: rg-cmd-001 - cacheNodeID: "0001" - currentRole: primary - preferredAvailabilityZone: us-east-1d - readEndpoint: - address: rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - - cacheClusterID: rg-cmd-002 - cacheNodeID: "0001" - currentRole: replica - preferredAvailabilityZone: us-east-1c - readEndpoint: - address: rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - primaryEndpoint: - address: rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - readerEndpoint: - address: rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - status: available - pendingModifiedValues: {} - status: available \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_scale_up_latest.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_scale_up_latest.yaml deleted file mode 100644 index 0b810ca8..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_scale_up_latest.yaml +++ /dev/null @@ -1,91 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.micro - engine: redis - numNodeGroups: 1 - replicasPerNodeGroup: 1 - description: cluster-mode disabled RG - replicationGroupID: rg-cmd -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd - ownerAccountID: "" - region: "" - allowedScaleUpModifications: - - cache.m3.large - - cache.m4.10xlarge - - cache.m4.2xlarge - - cache.m4.4xlarge 
- - cache.m4.large - - cache.m4.xlarge - - cache.m5.12xlarge - - cache.m5.24xlarge - - cache.m5.2xlarge - - cache.m5.4xlarge - - cache.m5.large - - cache.m5.xlarge - - cache.m6g.large - - cache.r4.2xlarge - - cache.r4.4xlarge - - cache.r4.8xlarge - - cache.r4.large - - cache.r4.xlarge - - cache.r5.12xlarge - - cache.r5.24xlarge - - cache.r5.2xlarge - - cache.r5.4xlarge - - cache.r5.large - - cache.r5.xlarge - - cache.r6g.2xlarge - - cache.r6g.4xlarge - - cache.r6g.8xlarge - - cache.r6g.large - - cache.r6g.xlarge - - cache.t2.medium - - cache.t2.micro - - cache.t2.small - - cache.t3.medium - - cache.t3.small - authTokenEnabled: false - automaticFailover: disabled - clusterEnabled: false - conditions: - - status: "True" - type: ACK.ResourceSynced - description: cluster-mode disabled RG - events: - - date: "2021-04-12T23:28:40Z" - message: Replication group rg-cmd created - globalReplicationGroupInfo: {} - memberClusters: - - rg-cmd-001 - - rg-cmd-002 - multiAZ: disabled - nodeGroups: - - nodeGroupID: "0001" - nodeGroupMembers: - - cacheClusterID: rg-cmd-001 - cacheNodeID: "0001" - currentRole: primary - preferredAvailabilityZone: us-east-1d - readEndpoint: - address: rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - - cacheClusterID: rg-cmd-002 - cacheNodeID: "0001" - currentRole: replica - preferredAvailabilityZone: us-east-1c - readEndpoint: - address: rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - primaryEndpoint: - address: rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - readerEndpoint: - address: rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - status: available - pendingModifiedValues: {} - status: available \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_create_completed.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_create_completed.yaml deleted file mode 100644 index 1f24bdc7..00000000 --- 
a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_create_completed.yaml +++ /dev/null @@ -1,105 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - atRestEncryptionEnabled: false - cacheNodeType: cache.t3.micro - engine: redis - numNodeGroups: 1 - preferredMaintenanceWindow: "wed:08:00-wed:09:00" - replicasPerNodeGroup: 1 - description: cluster-mode disabled RG - replicationGroupID: rg-cmd - snapshotRetentionLimit: 0 - snapshotWindow: "10:00-11:00" - transitEncryptionEnabled: false -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd - ownerAccountID: "" - region: "" - allowedScaleUpModifications: - - cache.m3.2xlarge - - cache.m3.large - - cache.m3.medium - - cache.m3.xlarge - - cache.m4.10xlarge - - cache.m4.2xlarge - - cache.m4.4xlarge - - cache.m4.large - - cache.m4.xlarge - - cache.m5.12xlarge - - cache.m5.24xlarge - - cache.m5.2xlarge - - cache.m5.4xlarge - - cache.m5.large - - cache.m5.xlarge - - cache.m6g.large - - cache.r3.2xlarge - - cache.r3.4xlarge - - cache.r3.8xlarge - - cache.r3.large - - cache.r3.xlarge - - cache.r4.16xlarge - - cache.r4.2xlarge - - cache.r4.4xlarge - - cache.r4.8xlarge - - cache.r4.large - - cache.r4.xlarge - - cache.r5.12xlarge - - cache.r5.24xlarge - - cache.r5.2xlarge - - cache.r5.4xlarge - - cache.r5.large - - cache.r5.xlarge - - cache.r6g.2xlarge - - cache.r6g.4xlarge - - cache.r6g.8xlarge - - cache.r6g.large - - cache.r6g.xlarge - - cache.t2.medium - - cache.t2.micro - - cache.t2.small - - cache.t3.medium - - cache.t3.small - authTokenEnabled: false - automaticFailover: disabled - clusterEnabled: false - conditions: - - status: "True" - type: ACK.ResourceSynced - description: cluster-mode disabled RG - events: - - date: "2021-03-30T20:12:00Z" - message: Replication group rg-cmd created - globalReplicationGroupInfo: {} - memberClusters: - - rg-cmd-001 - - rg-cmd-002 - multiAZ: disabled - 
nodeGroups: - - nodeGroupID: "0001" - nodeGroupMembers: - - cacheClusterID: rg-cmd-001 - cacheNodeID: "0001" - currentRole: primary - preferredAvailabilityZone: us-east-1b - readEndpoint: - address: rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - - cacheClusterID: rg-cmd-002 - cacheNodeID: "0001" - currentRole: replica - preferredAvailabilityZone: us-east-1d - readEndpoint: - address: rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - primaryEndpoint: - address: rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - readerEndpoint: - address: rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - status: available - pendingModifiedValues: {} - status: available \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_create_completed_latest.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_create_completed_latest.yaml deleted file mode 100644 index dcec1277..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_create_completed_latest.yaml +++ /dev/null @@ -1,106 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - atRestEncryptionEnabled: false - cacheNodeType: cache.t3.micro - cacheParameterGroupName: "default.redis5.0" - engine: redis - numNodeGroups: 1 - preferredMaintenanceWindow: "wed:08:00-wed:09:00" - replicasPerNodeGroup: 1 - description: cluster-mode disabled RG - replicationGroupID: rg-cmd - snapshotRetentionLimit: 0 - snapshotWindow: "10:00-11:00" - transitEncryptionEnabled: false -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd - ownerAccountID: "" - region: "" - allowedScaleUpModifications: - - cache.m3.2xlarge - - cache.m3.large - - cache.m3.medium - - cache.m3.xlarge - - cache.m4.10xlarge - - cache.m4.2xlarge - - cache.m4.4xlarge - - cache.m4.large - - cache.m4.xlarge - - 
cache.m5.12xlarge - - cache.m5.24xlarge - - cache.m5.2xlarge - - cache.m5.4xlarge - - cache.m5.large - - cache.m5.xlarge - - cache.m6g.large - - cache.r3.2xlarge - - cache.r3.4xlarge - - cache.r3.8xlarge - - cache.r3.large - - cache.r3.xlarge - - cache.r4.16xlarge - - cache.r4.2xlarge - - cache.r4.4xlarge - - cache.r4.8xlarge - - cache.r4.large - - cache.r4.xlarge - - cache.r5.12xlarge - - cache.r5.24xlarge - - cache.r5.2xlarge - - cache.r5.4xlarge - - cache.r5.large - - cache.r5.xlarge - - cache.r6g.2xlarge - - cache.r6g.4xlarge - - cache.r6g.8xlarge - - cache.r6g.large - - cache.r6g.xlarge - - cache.t2.medium - - cache.t2.micro - - cache.t2.small - - cache.t3.medium - - cache.t3.small - authTokenEnabled: false - automaticFailover: disabled - clusterEnabled: false - conditions: - - status: "True" - type: ACK.ResourceSynced - description: cluster-mode disabled RG - events: - - date: "2021-03-30T20:12:00Z" - message: Replication group rg-cmd created - globalReplicationGroupInfo: {} - memberClusters: - - rg-cmd-001 - - rg-cmd-002 - multiAZ: disabled - nodeGroups: - - nodeGroupID: "0001" - nodeGroupMembers: - - cacheClusterID: rg-cmd-001 - cacheNodeID: "0001" - currentRole: primary - preferredAvailabilityZone: us-east-1b - readEndpoint: - address: rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - - cacheClusterID: rg-cmd-002 - cacheNodeID: "0001" - currentRole: replica - preferredAvailabilityZone: us-east-1d - readEndpoint: - address: rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - primaryEndpoint: - address: rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - readerEndpoint: - address: rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - status: available - pendingModifiedValues: {} - status: available \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_create_completed_not_yet_latest.yaml 
b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_create_completed_not_yet_latest.yaml deleted file mode 100644 index 5ea2e679..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_create_completed_not_yet_latest.yaml +++ /dev/null @@ -1,107 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - atRestEncryptionEnabled: false - cacheNodeType: cache.t3.micro - cacheParameterGroupName: "default.redis5.0" - engine: redis - numNodeGroups: 1 - preferredMaintenanceWindow: "wed:08:00-wed:09:00" - replicasPerNodeGroup: 1 - description: cluster-mode disabled RG - replicationGroupID: rg-cmd - snapshotRetentionLimit: 0 - snapshotWindow: "10:00-11:00" - transitEncryptionEnabled: false -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd - ownerAccountID: "" - region: "" - allowedScaleUpModifications: - - cache.m3.2xlarge - - cache.m3.large - - cache.m3.medium - - cache.m3.xlarge - - cache.m4.10xlarge - - cache.m4.2xlarge - - cache.m4.4xlarge - - cache.m4.large - - cache.m4.xlarge - - cache.m5.12xlarge - - cache.m5.24xlarge - - cache.m5.2xlarge - - cache.m5.4xlarge - - cache.m5.large - - cache.m5.xlarge - - cache.m6g.large - - cache.r3.2xlarge - - cache.r3.4xlarge - - cache.r3.8xlarge - - cache.r3.large - - cache.r3.xlarge - - cache.r4.16xlarge - - cache.r4.2xlarge - - cache.r4.4xlarge - - cache.r4.8xlarge - - cache.r4.large - - cache.r4.xlarge - - cache.r5.12xlarge - - cache.r5.24xlarge - - cache.r5.2xlarge - - cache.r5.4xlarge - - cache.r5.large - - cache.r5.xlarge - - cache.r6g.2xlarge - - cache.r6g.4xlarge - - cache.r6g.8xlarge - - cache.r6g.large - - cache.r6g.xlarge - - cache.t2.medium - - cache.t2.micro - - cache.t2.small - - cache.t3.medium - - cache.t3.small - authTokenEnabled: false - automaticFailover: disabled - clusterEnabled: false - conditions: - - status: "False" - type: ACK.ResourceSynced - message: 
"replication group currently being created." - description: cluster-mode disabled RG - events: - - date: "2021-03-30T20:12:00Z" - message: Replication group rg-cmd created - globalReplicationGroupInfo: {} - memberClusters: - - rg-cmd-001 - - rg-cmd-002 - multiAZ: disabled - nodeGroups: - - nodeGroupID: "0001" - nodeGroupMembers: - - cacheClusterID: rg-cmd-001 - cacheNodeID: "0001" - currentRole: primary - preferredAvailabilityZone: us-east-1b - readEndpoint: - address: rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - - cacheClusterID: rg-cmd-002 - cacheNodeID: "0001" - currentRole: replica - preferredAvailabilityZone: us-east-1d - readEndpoint: - address: rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - primaryEndpoint: - address: rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - readerEndpoint: - address: rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - status: available - pendingModifiedValues: {} - status: available \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_create_initiated.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_create_initiated.yaml deleted file mode 100644 index bab3f00a..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_create_initiated.yaml +++ /dev/null @@ -1,32 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.micro - engine: redis - numNodeGroups: 1 - replicasPerNodeGroup: 1 - description: cluster-mode disabled RG - replicationGroupID: rg-cmd - atRestEncryptionEnabled: false - SnapshotRetentionLimit: 0 - snapshotWindow: "09:00-10:00" - TransitEncryptionEnabled: false -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd - ownerAccountID: "" # for tests the ownerAccountID of the resource manager is empty (see implementations of 
TestRunnerDelegate's ResourceManager function) - region: "" - automaticFailover: disabled - clusterEnabled: false - conditions: - - status: "False" - type: ACK.ResourceSynced - description: cluster-mode disabled RG - globalReplicationGroupInfo: {} - memberClusters: - - rg-cmd-001 - - rg-cmd-002 - multiAZ: disabled - pendingModifiedValues: {} - status: creating \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_delete_initiated.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_delete_initiated.yaml deleted file mode 100644 index 3619a1e9..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_delete_initiated.yaml +++ /dev/null @@ -1,104 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - atRestEncryptionEnabled: false - cacheNodeType: cache.t3.micro - engine: redis - numNodeGroups: 1 - replicasPerNodeGroup: 1 - description: cluster-mode disabled RG - replicationGroupID: rg-cmd - snapshotRetentionLimit: 0 - snapshotWindow: "10:00-11:00" - transitEncryptionEnabled: false -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd - ownerAccountID: "" - region: "" - allowedScaleUpModifications: - - cache.m3.2xlarge - - cache.m3.large - - cache.m3.medium - - cache.m3.xlarge - - cache.m4.10xlarge - - cache.m4.2xlarge - - cache.m4.4xlarge - - cache.m4.large - - cache.m4.xlarge - - cache.m5.12xlarge - - cache.m5.24xlarge - - cache.m5.2xlarge - - cache.m5.4xlarge - - cache.m5.large - - cache.m5.xlarge - - cache.m6g.large - - cache.r3.2xlarge - - cache.r3.4xlarge - - cache.r3.8xlarge - - cache.r3.large - - cache.r3.xlarge - - cache.r4.16xlarge - - cache.r4.2xlarge - - cache.r4.4xlarge - - cache.r4.8xlarge - - cache.r4.large - - cache.r4.xlarge - - cache.r5.12xlarge - - cache.r5.24xlarge - - cache.r5.2xlarge - - cache.r5.4xlarge - - cache.r5.large - - 
cache.r5.xlarge - - cache.r6g.2xlarge - - cache.r6g.4xlarge - - cache.r6g.8xlarge - - cache.r6g.large - - cache.r6g.xlarge - - cache.t2.medium - - cache.t2.micro - - cache.t2.small - - cache.t3.medium - - cache.t3.small - authTokenEnabled: false - automaticFailover: disabled - clusterEnabled: false - conditions: - - status: "True" - type: ACK.ResourceSynced - description: cluster-mode disabled RG - events: - - date: "2021-03-30T20:12:00Z" - message: Replication group rg-cmd created - globalReplicationGroupInfo: {} - memberClusters: - - rg-cmd-001 - - rg-cmd-002 - multiAZ: disabled - nodeGroups: - - nodeGroupID: "0001" - nodeGroupMembers: - - cacheClusterID: rg-cmd-001 - cacheNodeID: "0001" - currentRole: primary - preferredAvailabilityZone: us-east-1b - readEndpoint: - address: rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - - cacheClusterID: rg-cmd-002 - cacheNodeID: "0001" - currentRole: replica - preferredAvailabilityZone: us-east-1d - readEndpoint: - address: rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - primaryEndpoint: - address: rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - readerEndpoint: - address: rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - status: available - pendingModifiedValues: {} - status: deleting \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_engine_upgrade_initiated.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_engine_upgrade_initiated.yaml deleted file mode 100644 index f01ca8e3..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_engine_upgrade_initiated.yaml +++ /dev/null @@ -1,61 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.micro - engine: redis - engineVersion: 5.0.6 # resource has 5.0.0 but custom modify code copies desired EV to latest and doesn't overwrite it - 
numNodeGroups: 1 - replicasPerNodeGroup: 1 - description: cluster-mode disabled RG - replicationGroupID: rg-cmd - atRestEncryptionEnabled: false - snapshotRetentionLimit: 0 - snapshotWindow: "06:30-07:30" - transitEncryptionEnabled: false -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd - ownerAccountID: "" - region: "" - authTokenEnabled: false - automaticFailover: disabled - clusterEnabled: false - conditions: - - status: "False" - type: ACK.ResourceSynced - description: cluster-mode disabled RG - events: - - date: "2021-04-13T19:07:05Z" - message: Replication group rg-cmd created - globalReplicationGroupInfo: {} - memberClusters: - - rg-cmd-001 - - rg-cmd-002 - multiAZ: disabled - nodeGroups: - - nodeGroupID: "0001" - nodeGroupMembers: - - cacheClusterID: rg-cmd-001 - cacheNodeID: "0001" - currentRole: primary - preferredAvailabilityZone: us-east-1b - readEndpoint: - address: rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - - cacheClusterID: rg-cmd-002 - cacheNodeID: "0001" - currentRole: replica - preferredAvailabilityZone: us-east-1d - readEndpoint: - address: rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - primaryEndpoint: - address: rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - readerEndpoint: - address: rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - status: modifying - pendingModifiedValues: {} - status: modifying \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_increase_replica_initiated.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_increase_replica_initiated.yaml deleted file mode 100644 index f1fbb7f7..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_increase_replica_initiated.yaml +++ /dev/null @@ -1,61 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - 
cacheNodeType: cache.t3.micro - engine: redis - numNodeGroups: 1 - replicasPerNodeGroup: 2 # new replica hasn't been added, but 2 comes from desired and isn't overwritten in custom modify code - description: cluster-mode disabled RG - replicationGroupID: rg-cmd - atRestEncryptionEnabled: false - snapshotRetentionLimit: 0 - snapshotWindow: "10:00-11:00" - transitEncryptionEnabled: false -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd - ownerAccountID: "" - region: "" - authTokenEnabled: false - automaticFailover: disabled - clusterEnabled: false - conditions: - - status: "False" - type: ACK.ResourceSynced - description: cluster-mode disabled RG - events: - - date: "2021-03-30T20:12:00Z" - message: Replication group rg-cmd created - globalReplicationGroupInfo: {} - memberClusters: - - rg-cmd-001 - - rg-cmd-002 - - rg-cmd-003 - multiAZ: disabled - nodeGroups: - - nodeGroupID: "0001" - nodeGroupMembers: - - cacheClusterID: rg-cmd-001 - cacheNodeID: "0001" - currentRole: primary - preferredAvailabilityZone: us-east-1b - readEndpoint: - address: rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - - cacheClusterID: rg-cmd-002 - cacheNodeID: "0001" - currentRole: replica - preferredAvailabilityZone: us-east-1d - readEndpoint: - address: rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - primaryEndpoint: - address: rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - readerEndpoint: - address: rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - status: modifying - pendingModifiedValues: {} - status: modifying \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_invalid_before_create.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_invalid_before_create.yaml deleted file mode 100644 index 49a06ada..00000000 --- 
a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_invalid_before_create.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.micro - engine: redis - numNodeGroups: 1 - replicasPerNodeGroup: 7 - description: cluster-mode disabled RG - replicationGroupID: rg-cmd \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_invalid_create_attempted.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_invalid_create_attempted.yaml deleted file mode 100644 index faa4b7c5..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_invalid_create_attempted.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.micro - engine: redis - numNodeGroups: 1 - replicasPerNodeGroup: 7 - description: cluster-mode disabled RG - replicationGroupID: rg-cmd -status: - ackResourceMetadata: - ownerAccountID: "" - region: "" - conditions: - - message: "InvalidParameterValue: The number of replicas per node group must be within 0 and 5.\n\tstatus code: 0, request id: " - status: "True" - type: ACK.Terminal \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_scale_up_initiated.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_scale_up_initiated.yaml deleted file mode 100644 index 978ec98f..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_scale_up_initiated.yaml +++ /dev/null @@ -1,59 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.small - engine: redis - numNodeGroups: 1 - replicasPerNodeGroup: 1 - description: cluster-mode disabled RG - replicationGroupID: rg-cmd 
- atRestEncryptionEnabled: false - SnapshotRetentionLimit: 0 - snapshotWindow: "09:00-10:00" - TransitEncryptionEnabled: false -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd - ownerAccountID: "" - region: "" - automaticFailover: disabled - clusterEnabled: false - conditions: - - status: "False" - type: ACK.ResourceSynced - description: cluster-mode disabled RG - events: - - date: "2021-04-12T23:28:40Z" - message: Replication group rg-cmd created - globalReplicationGroupInfo: {} - memberClusters: - - rg-cmd-001 - - rg-cmd-002 - multiAZ: disabled - nodeGroups: - - nodeGroupID: "0001" - nodeGroupMembers: - - cacheClusterID: rg-cmd-001 - cacheNodeID: "0001" - currentRole: primary - preferredAvailabilityZone: us-east-1d - readEndpoint: - address: rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - - cacheClusterID: rg-cmd-002 - cacheNodeID: "0001" - currentRole: replica - preferredAvailabilityZone: us-east-1c - readEndpoint: - address: rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - primaryEndpoint: - address: rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - readerEndpoint: - address: rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - status: modifying - pendingModifiedValues: {} - status: modifying \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cme_before_create.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cme_before_create.yaml deleted file mode 100644 index 5e8a60e1..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cme_before_create.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.micro - engine: redis - nodeGroupConfiguration: - - nodeGroupID: "1111" - primaryAvailabilityZone: us-east-1a - replicaAvailabilityZones: - - us-east-1b - 
replicaCount: 1 - slots: 0-5999 - - nodeGroupID: "2222" - primaryAvailabilityZone: us-east-1c - replicaAvailabilityZones: - - us-east-1d - - us-east-1a - - us-east-1b - replicaCount: 3 - slots: 6000-16383 - numNodeGroups: 2 - description: cluster-mode enabled RG - replicationGroupID: rg-cme \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cme_create_initiated.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cme_create_initiated.yaml deleted file mode 100644 index d8b43b93..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cme_create_initiated.yaml +++ /dev/null @@ -1,50 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.micro - engine: redis - nodeGroupConfiguration: - - nodeGroupID: "1111" - primaryAvailabilityZone: us-east-1a - replicaAvailabilityZones: - - us-east-1b - replicaCount: 1 - slots: 0-5999 - - nodeGroupID: "2222" - primaryAvailabilityZone: us-east-1c - replicaAvailabilityZones: - - us-east-1d - - us-east-1a - - us-east-1b - replicaCount: 3 - slots: 6000-16383 - numNodeGroups: 2 - description: cluster-mode enabled RG - replicationGroupID: rg-cme - atRestEncryptionEnabled: false - SnapshotRetentionLimit: 0 - snapshotWindow: "08:00-09:00" - TransitEncryptionEnabled: false -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cme - ownerAccountID: "" - region: "" - automaticFailover: enabled - clusterEnabled: true - conditions: - - status: "False" - type: ACK.ResourceSynced - description: cluster-mode enabled RG - globalReplicationGroupInfo: {} - memberClusters: - - rg-cme-1111-001 - - rg-cme-1111-002 - - rg-cme-2222-001 - - rg-cme-2222-002 - - rg-cme-2222-003 - - rg-cme-2222-004 - multiAZ: disabled - pendingModifiedValues: {} - status: creating \ No newline at end of file diff --git 
a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cme_invalid_scale_out_attempted.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cme_invalid_scale_out_attempted.yaml deleted file mode 100644 index 11ce7a73..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cme_invalid_scale_out_attempted.yaml +++ /dev/null @@ -1,118 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.micro - cacheSubnetGroupName: default - engine: redis - nodeGroupConfiguration: - - nodeGroupID: "1111" - primaryAvailabilityZone: us-east-1a - replicaAvailabilityZones: - - us-east-1b - replicaCount: 1 - slots: 0-5999 - - nodeGroupID: "2222" - primaryAvailabilityZone: us-east-1c - replicaAvailabilityZones: - - us-east-1d - - us-east-1a - - us-east-1b - replicaCount: 3 - slots: 6000-16383 - numNodeGroups: 3 # this is the mismatch; 3 shards indicated but only 2 are specified in above nodeGroupConfiguration - description: cluster-mode enabled RG - replicationGroupID: rg-cme -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cme - ownerAccountID: "" - region: "" - allowedScaleUpModifications: - - cache.m3.large - - cache.m4.10xlarge - - cache.m4.2xlarge - - cache.m4.4xlarge - - cache.m4.large - - cache.m4.xlarge - - cache.m5.24xlarge - - cache.m5.2xlarge - - cache.m5.4xlarge - - cache.m5.large - - cache.m5.xlarge - - cache.m6g.large - - cache.r4.2xlarge - - cache.r4.4xlarge - - cache.r4.8xlarge - - cache.r4.large - - cache.r4.xlarge - - cache.r5.12xlarge - - cache.r5.24xlarge - - cache.r5.2xlarge - - cache.r5.4xlarge - - cache.r5.large - - cache.r5.xlarge - - cache.r6g.2xlarge - - cache.r6g.4xlarge - - cache.r6g.8xlarge - - cache.r6g.large - - cache.r6g.xlarge - - cache.t2.medium - - cache.t2.micro - - cache.t2.small - - cache.t3.medium - - cache.t3.small - authTokenEnabled: false - automaticFailover: 
enabled - clusterEnabled: true - conditions: - - status: "True" #TODO: should synced condition be set false when terminal condition is true? - type: ACK.ResourceSynced - - message: "InvalidParameterValue: Configuration for all the node groups should be provided.\n\tstatus code: 0, request id: " - status: "True" - type: ACK.Terminal - configurationEndpoint: - address: rg-cme.xxxxxx.clustercfg.use1.cache.amazonaws.com - port: 6379 - description: cluster-mode enabled RG - events: - - date: "2021-04-14T19:36:01Z" - message: Replication group rg-cme created - globalReplicationGroupInfo: {} - memberClusters: - - rg-cme-1111-001 - - rg-cme-1111-002 - - rg-cme-2222-001 - - rg-cme-2222-002 - - rg-cme-2222-003 - - rg-cme-2222-004 - multiAZ: disabled - nodeGroups: - - nodeGroupID: "1111" - nodeGroupMembers: - - cacheClusterID: rg-cme-1111-001 - cacheNodeID: "0001" - preferredAvailabilityZone: us-east-1a - - cacheClusterID: rg-cme-1111-002 - cacheNodeID: "0001" - preferredAvailabilityZone: us-east-1b - slots: 0-5999 - status: available - - nodeGroupID: "2222" - nodeGroupMembers: - - cacheClusterID: rg-cme-2222-001 - cacheNodeID: "0001" - preferredAvailabilityZone: us-east-1c - - cacheClusterID: rg-cme-2222-002 - cacheNodeID: "0001" - preferredAvailabilityZone: us-east-1d - - cacheClusterID: rg-cme-2222-003 - cacheNodeID: "0001" - preferredAvailabilityZone: us-east-1a - - cacheClusterID: rg-cme-2222-004 - cacheNodeID: "0001" - preferredAvailabilityZone: us-east-1b - slots: 6000-16383 - status: available - pendingModifiedValues: {} - status: available \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cme_shard_mismatch.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cme_shard_mismatch.yaml deleted file mode 100644 index ba840422..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cme_shard_mismatch.yaml +++ /dev/null @@ -1,115 +0,0 @@ -apiVersion: 
elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.micro - cacheSubnetGroupName: default - engine: redis - nodeGroupConfiguration: - - nodeGroupID: "1111" - primaryAvailabilityZone: us-east-1a - replicaAvailabilityZones: - - us-east-1b - replicaCount: 1 - slots: 0-5999 - - nodeGroupID: "2222" - primaryAvailabilityZone: us-east-1c - replicaAvailabilityZones: - - us-east-1d - - us-east-1a - - us-east-1b - replicaCount: 3 - slots: 6000-16383 - numNodeGroups: 3 # this is the mismatch; 3 shards indicated but only 2 are specified in above nodeGroupConfiguration - description: cluster-mode enabled RG - replicationGroupID: rg-cme -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cme - ownerAccountID: "" - region: "" - allowedScaleUpModifications: - - cache.m3.large - - cache.m4.10xlarge - - cache.m4.2xlarge - - cache.m4.4xlarge - - cache.m4.large - - cache.m4.xlarge - - cache.m5.24xlarge - - cache.m5.2xlarge - - cache.m5.4xlarge - - cache.m5.large - - cache.m5.xlarge - - cache.m6g.large - - cache.r4.2xlarge - - cache.r4.4xlarge - - cache.r4.8xlarge - - cache.r4.large - - cache.r4.xlarge - - cache.r5.12xlarge - - cache.r5.24xlarge - - cache.r5.2xlarge - - cache.r5.4xlarge - - cache.r5.large - - cache.r5.xlarge - - cache.r6g.2xlarge - - cache.r6g.4xlarge - - cache.r6g.8xlarge - - cache.r6g.large - - cache.r6g.xlarge - - cache.t2.medium - - cache.t2.micro - - cache.t2.small - - cache.t3.medium - - cache.t3.small - authTokenEnabled: false - automaticFailover: enabled - clusterEnabled: true - conditions: - - status: "True" - type: ACK.ResourceSynced - configurationEndpoint: - address: rg-cme.xxxxxx.clustercfg.use1.cache.amazonaws.com - port: 6379 - description: cluster-mode enabled RG - events: - - date: "2021-04-14T19:36:01Z" - message: Replication group rg-cme created - globalReplicationGroupInfo: {} - memberClusters: - - rg-cme-1111-001 - - rg-cme-1111-002 - - 
rg-cme-2222-001 - - rg-cme-2222-002 - - rg-cme-2222-003 - - rg-cme-2222-004 - multiAZ: disabled - nodeGroups: - - nodeGroupID: "1111" - nodeGroupMembers: - - cacheClusterID: rg-cme-1111-001 - cacheNodeID: "0001" - preferredAvailabilityZone: us-east-1a - - cacheClusterID: rg-cme-1111-002 - cacheNodeID: "0001" - preferredAvailabilityZone: us-east-1b - slots: 0-5999 - status: available - - nodeGroupID: "2222" - nodeGroupMembers: - - cacheClusterID: rg-cme-2222-001 - cacheNodeID: "0001" - preferredAvailabilityZone: us-east-1c - - cacheClusterID: rg-cme-2222-002 - cacheNodeID: "0001" - preferredAvailabilityZone: us-east-1d - - cacheClusterID: rg-cme-2222-003 - cacheNodeID: "0001" - preferredAvailabilityZone: us-east-1a - - cacheClusterID: rg-cme-2222-004 - cacheNodeID: "0001" - preferredAvailabilityZone: us-east-1b - slots: 6000-16383 - status: available - pendingModifiedValues: {} - status: available \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/create/rg_cmd_creating.json b/pkg/resource/replication_group/testdata/replication_group/create/rg_cmd_creating.json deleted file mode 100644 index f10c3ad2..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/create/rg_cmd_creating.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "ReplicationGroup": { - "Status": "creating", - "MultiAZ": "disabled", - "Description": "cluster-mode disabled RG", - "AtRestEncryptionEnabled": false, - "ClusterEnabled": false, - "ReplicationGroupId": "rg-cmd", - "GlobalReplicationGroupInfo": {}, - "SnapshotRetentionLimit": 0, - "AutomaticFailover": "disabled", - "TransitEncryptionEnabled": false, - "SnapshotWindow": "09:00-10:00", - "MemberClusters": [ - "rg-cmd-001", - "rg-cmd-002" - ], - "CacheNodeType": "cache.t3.micro", - "ARN": "arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd", - "PendingModifiedValues": {} - } -} \ No newline at end of file diff --git 
a/pkg/resource/replication_group/testdata/replication_group/create/rg_cme_creating.json b/pkg/resource/replication_group/testdata/replication_group/create/rg_cme_creating.json deleted file mode 100644 index f311d4b2..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/create/rg_cme_creating.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "ReplicationGroup": { - "Status": "creating", - "MultiAZ": "disabled", - "Description": "cluster-mode enabled RG", - "AtRestEncryptionEnabled": false, - "ClusterEnabled": true, - "ReplicationGroupId": "rg-cme", - "GlobalReplicationGroupInfo": {}, - "SnapshotRetentionLimit": 0, - "AutomaticFailover": "enabled", - "TransitEncryptionEnabled": false, - "SnapshotWindow": "08:00-09:00", - "MemberClusters": [ - "rg-cme-1111-001", - "rg-cme-1111-002", - "rg-cme-2222-001", - "rg-cme-2222-002", - "rg-cme-2222-003", - "rg-cme-2222-004" - ], - "CacheNodeType": "cache.t3.micro", - "ARN": "arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cme", - "PendingModifiedValues": {} - } -} \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/delete/rg_cmd_delete_initiated.json b/pkg/resource/replication_group/testdata/replication_group/delete/rg_cmd_delete_initiated.json deleted file mode 100644 index e34ffa8e..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/delete/rg_cmd_delete_initiated.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "ReplicationGroup": { - "Status": "deleting", - "MultiAZ": "disabled", - "Description": "cluster-mode disabled RG", - "AtRestEncryptionEnabled": false, - "ReplicationGroupId": "rg-cmd", - "GlobalReplicationGroupInfo": {}, - "SnapshotRetentionLimit": 0, - "AutomaticFailover": "disabled", - "TransitEncryptionEnabled": false, - "SnapshotWindow": "10:00-11:00", - "ARN": "arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd", - "PendingModifiedValues": {} - } -} \ No newline at end of file diff --git 
a/pkg/resource/replication_group/testdata/replication_group/read_one/rg_cmd_create_completed.json b/pkg/resource/replication_group/testdata/replication_group/read_one/rg_cmd_create_completed.json deleted file mode 100644 index 2c2a695f..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/read_one/rg_cmd_create_completed.json +++ /dev/null @@ -1,61 +0,0 @@ -{ - "ReplicationGroups": [ - { - "Status": "available", - "MultiAZ": "disabled", - "Description": "cluster-mode disabled RG", - "NodeGroups": [ - { - "Status": "available", - "NodeGroupMembers": [ - { - "CurrentRole": "primary", - "PreferredAvailabilityZone": "us-east-1b", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Port": 6379, - "Address": "rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com" - }, - "CacheClusterId": "rg-cmd-001" - }, - { - "CurrentRole": "replica", - "PreferredAvailabilityZone": "us-east-1d", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Port": 6379, - "Address": "rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com" - }, - "CacheClusterId": "rg-cmd-002" - } - ], - "ReaderEndpoint": { - "Port": 6379, - "Address": "rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com" - }, - "NodeGroupId": "0001", - "PrimaryEndpoint": { - "Port": 6379, - "Address": "rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com" - } - } - ], - "AuthTokenEnabled": false, - "AtRestEncryptionEnabled": false, - "ClusterEnabled": false, - "ReplicationGroupId": "rg-cmd", - "GlobalReplicationGroupInfo": {}, - "SnapshotRetentionLimit": 0, - "AutomaticFailover": "disabled", - "TransitEncryptionEnabled": false, - "SnapshotWindow": "10:00-11:00", - "MemberClusters": [ - "rg-cmd-001", - "rg-cmd-002" - ], - "CacheNodeType": "cache.t3.micro", - "ARN": "arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd", - "PendingModifiedValues": {} - } - ] -} \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/read_one/rg_cmd_delete_initiated.json 
b/pkg/resource/replication_group/testdata/replication_group/read_one/rg_cmd_delete_initiated.json deleted file mode 100644 index ab423000..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/read_one/rg_cmd_delete_initiated.json +++ /dev/null @@ -1,61 +0,0 @@ -{ - "ReplicationGroups": [ - { - "Status": "deleting", - "MultiAZ": "disabled", - "Description": "cluster-mode disabled RG", - "NodeGroups": [ - { - "Status": "available", - "NodeGroupMembers": [ - { - "CurrentRole": "primary", - "PreferredAvailabilityZone": "us-east-1b", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Port": 6379, - "Address": "rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com" - }, - "CacheClusterId": "rg-cmd-001" - }, - { - "CurrentRole": "replica", - "PreferredAvailabilityZone": "us-east-1d", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Port": 6379, - "Address": "rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com" - }, - "CacheClusterId": "rg-cmd-002" - } - ], - "ReaderEndpoint": { - "Port": 6379, - "Address": "rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com" - }, - "NodeGroupId": "0001", - "PrimaryEndpoint": { - "Port": 6379, - "Address": "rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com" - } - } - ], - "AuthTokenEnabled": false, - "AtRestEncryptionEnabled": false, - "ClusterEnabled": false, - "ReplicationGroupId": "rg-cmd", - "GlobalReplicationGroupInfo": {}, - "SnapshotRetentionLimit": 0, - "AutomaticFailover": "disabled", - "TransitEncryptionEnabled": false, - "SnapshotWindow": "10:00-11:00", - "MemberClusters": [ - "rg-cmd-001", - "rg-cmd-002" - ], - "CacheNodeType": "cache.t3.micro", - "ARN": "arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd", - "PendingModifiedValues": {} - } - ] -} \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/read_one/tags.json b/pkg/resource/replication_group/testdata/replication_group/read_one/tags.json deleted file mode 100644 index 1345a376..00000000 --- 
a/pkg/resource/replication_group/testdata/replication_group/read_one/tags.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "TagList": [] -} \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/update/rg_cmd_engine_upgrade_initiated.json b/pkg/resource/replication_group/testdata/replication_group/update/rg_cmd_engine_upgrade_initiated.json deleted file mode 100644 index 9a02cd6a..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/update/rg_cmd_engine_upgrade_initiated.json +++ /dev/null @@ -1,59 +0,0 @@ -{ - "ReplicationGroup": { - "Status": "modifying", - "MultiAZ": "disabled", - "Description": "cluster-mode disabled RG", - "NodeGroups": [ - { - "Status": "modifying", - "NodeGroupMembers": [ - { - "CurrentRole": "primary", - "PreferredAvailabilityZone": "us-east-1b", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Port": 6379, - "Address": "rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com" - }, - "CacheClusterId": "rg-cmd-001" - }, - { - "CurrentRole": "replica", - "PreferredAvailabilityZone": "us-east-1d", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Port": 6379, - "Address": "rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com" - }, - "CacheClusterId": "rg-cmd-002" - } - ], - "ReaderEndpoint": { - "Port": 6379, - "Address": "rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com" - }, - "NodeGroupId": "0001", - "PrimaryEndpoint": { - "Port": 6379, - "Address": "rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com" - } - } - ], - "AtRestEncryptionEnabled": false, - "AuthTokenEnabled": false, - "ClusterEnabled": false, - "ReplicationGroupId": "rg-cmd", - "GlobalReplicationGroupInfo": {}, - "SnapshotRetentionLimit": 0, - "AutomaticFailover": "disabled", - "TransitEncryptionEnabled": false, - "SnapshotWindow": "06:30-07:30", - "MemberClusters": [ - "rg-cmd-001", - "rg-cmd-002" - ], - "CacheNodeType": "cache.t3.micro", - "ARN": "arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd", - "PendingModifiedValues": 
{} - } -} \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/update/rg_cmd_increase_replica_initiated.json b/pkg/resource/replication_group/testdata/replication_group/update/rg_cmd_increase_replica_initiated.json deleted file mode 100644 index 51606a39..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/update/rg_cmd_increase_replica_initiated.json +++ /dev/null @@ -1,60 +0,0 @@ -{ - "ReplicationGroup": { - "Status": "modifying", - "MultiAZ": "disabled", - "Description": "cluster-mode disabled RG", - "NodeGroups": [ - { - "Status": "modifying", - "NodeGroupMembers": [ - { - "CurrentRole": "primary", - "PreferredAvailabilityZone": "us-east-1b", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Port": 6379, - "Address": "rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com" - }, - "CacheClusterId": "rg-cmd-001" - }, - { - "CurrentRole": "replica", - "PreferredAvailabilityZone": "us-east-1d", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Port": 6379, - "Address": "rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com" - }, - "CacheClusterId": "rg-cmd-002" - } - ], - "ReaderEndpoint": { - "Port": 6379, - "Address": "rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com" - }, - "NodeGroupId": "0001", - "PrimaryEndpoint": { - "Port": 6379, - "Address": "rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com" - } - } - ], - "AtRestEncryptionEnabled": false, - "AuthTokenEnabled": false, - "ClusterEnabled": false, - "ReplicationGroupId": "rg-cmd", - "GlobalReplicationGroupInfo": {}, - "SnapshotRetentionLimit": 0, - "AutomaticFailover": "disabled", - "TransitEncryptionEnabled": false, - "SnapshotWindow": "10:00-11:00", - "MemberClusters": [ - "rg-cmd-001", - "rg-cmd-002", - "rg-cmd-003" - ], - "CacheNodeType": "cache.t3.micro", - "ARN": "arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd", - "PendingModifiedValues": {} - } -} \ No newline at end of file diff --git 
a/pkg/resource/replication_group/testdata/replication_group/update/rg_cmd_scale_up_initiated.json b/pkg/resource/replication_group/testdata/replication_group/update/rg_cmd_scale_up_initiated.json deleted file mode 100644 index 7aace9d7..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/update/rg_cmd_scale_up_initiated.json +++ /dev/null @@ -1,58 +0,0 @@ -{ - "ReplicationGroup": { - "Status": "modifying", - "MultiAZ": "disabled", - "Description": "cluster-mode disabled RG", - "NodeGroups": [ - { - "Status": "modifying", - "NodeGroupMembers": [ - { - "CurrentRole": "primary", - "PreferredAvailabilityZone": "us-east-1d", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Port": 6379, - "Address": "rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com" - }, - "CacheClusterId": "rg-cmd-001" - }, - { - "CurrentRole": "replica", - "PreferredAvailabilityZone": "us-east-1c", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Port": 6379, - "Address": "rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com" - }, - "CacheClusterId": "rg-cmd-002" - } - ], - "ReaderEndpoint": { - "Port": 6379, - "Address": "rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com" - }, - "NodeGroupId": "0001", - "PrimaryEndpoint": { - "Port": 6379, - "Address": "rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com" - } - } - ], - "AtRestEncryptionEnabled": false, - "ClusterEnabled": false, - "ReplicationGroupId": "rg-cmd", - "GlobalReplicationGroupInfo": {}, - "SnapshotRetentionLimit": 0, - "AutomaticFailover": "disabled", - "TransitEncryptionEnabled": false, - "SnapshotWindow": "09:00-10:00", - "MemberClusters": [ - "rg-cmd-001", - "rg-cmd-002" - ], - "CacheNodeType": "cache.t3.small", - "ARN": "arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd", - "PendingModifiedValues": {} - } -} diff --git a/pkg/resource/replication_group/testdata/test_suite.yaml b/pkg/resource/replication_group/testdata/test_suite.yaml deleted file mode 100644 index 0037876e..00000000 --- 
a/pkg/resource/replication_group/testdata/test_suite.yaml +++ /dev/null @@ -1,164 +0,0 @@ -tests: - - name: "Cluster mode disabled replication group" - description: "Cluster mode disabled replication group CRUD tests" - scenarios: - - name: "ReadOne=DNE" - description: "Given that the resource doesn't exist, expect an error" - given: - desired_state: "replication_group/cr/rg_cmd_before_create.yaml" - svc_api: - - operation: DescribeReplicationGroupsWithContext - error: - code: ReplicationGroupNotFoundFault - message: "ReplicationGroup rg-cmd not found" - invoke: ReadOne # Unit under test. Possible values: Create | ReadOne | Update | Delete - expect: # no explicit latest_state expectation; returned resource may be non-nil - error: resource not found - - name: "Create=InvalidInput" - description: "Given one of the parameters is invalid, ko.Status shows a terminal condition" - given: - desired_state: "replication_group/cr/rg_cmd_invalid_before_create.yaml" - svc_api: - - operation: CreateReplicationGroupWithContext - error: - code: InvalidParameterValue - message: "The number of replicas per node group must be within 0 and 5." - invoke: Create - expect: - latest_state: "replication_group/cr/rg_cmd_invalid_create_attempted.yaml" - error: resource is in terminal condition - - name: "Create" - description: "Create a new replication group; ensure ko.Status shows that this create has been initiated" - given: - desired_state: "replication_group/cr/rg_cmd_before_create.yaml" - svc_api: - - operation: CreateReplicationGroupWithContext - output_fixture: "replication_group/create/rg_cmd_creating.json" - invoke: Create - expect: - latest_state: "replication_group/cr/rg_cmd_create_initiated.yaml" - error: nil - - name: "ReadOne=NewlyCreated" - description: "Given that the creation of this RG completes, ko.Status reflects that the RG is ready (e.g. 
ResourceSynced condition True)" - given: - desired_state: "replication_group/cr/rg_cmd_create_initiated.yaml" # RG is creating, but creating has not yet finished - svc_api: - - operation: DescribeReplicationGroupsWithContext - output_fixture: "replication_group/read_one/rg_cmd_create_completed.json" - - operation: ListAllowedNodeTypeModifications - output_fixture: "allowed_node_types/read_many/rg_cmd_allowed_node_types.json" - - operation: DescribeEventsWithContext - output_fixture: "events/read_many/rg_cmd_events.json" - - operation: DescribeCacheClustersWithContext - output_fixture: "cache_clusters/read_many/rg_cmd_primary_cache_node.json" - - operation: ListTagsForResourceWithContext - output_fixture: "replication_group/read_one/tags.json" - invoke: ReadOne - expect: - latest_state: "replication_group/cr/rg_cmd_create_completed_not_yet_latest.yaml" - error: nil - - name: "ReadOne=NoDiff" - description: "Given desired state matches with server side resource data, ko.Status remain unchanged (resource is stable)" - given: # fixture - desired_state: "replication_group/cr/rg_cmd_create_completed.yaml" - svc_api: - - operation: DescribeReplicationGroupsWithContext - output_fixture: "replication_group/read_one/rg_cmd_create_completed.json" - - operation: ListAllowedNodeTypeModifications - output_fixture: "allowed_node_types/read_many/rg_cmd_allowed_node_types.json" - - operation: DescribeEventsWithContext - output_fixture: "events/read_many/rg_cmd_events.json" - - operation: DescribeCacheClustersWithContext - output_fixture: "cache_clusters/read_many/rg_cmd_primary_cache_node.json" - - operation: ListTagsForResourceWithContext - output_fixture: "replication_group/read_one/tags.json" - invoke: ReadOne - expect: - latest_state: "replication_group/cr/rg_cmd_create_completed_latest.yaml" - error: nil - - name: "Update=IncreaseReplicaCount" - description: "Ensure a replica is added once a new config is provided" - given: - desired_state: 
"replication_group/cr/rg_cmd_before_increase_replica.yaml" - latest_state: "replication_group/cr/rg_cmd_before_increase_replica_latest.yaml" - svc_api: - - operation: IncreaseReplicaCountWithContext - output_fixture: "replication_group/update/rg_cmd_increase_replica_initiated.json" - invoke: Update - expect: - latest_state: "replication_group/cr/rg_cmd_increase_replica_initiated.yaml" - error: nil - - name: "Update=ScaleUp" - description: "Scale up replication group to larger instance type" - given: - desired_state: "replication_group/cr/rg_cmd_before_scale_up_desired.yaml" - latest_state: "replication_group/cr/rg_cmd_before_scale_up_latest.yaml" - svc_api: - - operation: ModifyReplicationGroupWithContext - output_fixture: "replication_group/update/rg_cmd_scale_up_initiated.json" - invoke: Update - expect: - latest_state: "replication_group/cr/rg_cmd_scale_up_initiated.yaml" - error: nil - - name: "Update=UpgradeEngine" - description: "Upgrade Redis engine version from 5.0.0 to a newer version" - given: - desired_state: "replication_group/cr/rg_cmd_before_engine_version_upgrade.yaml" - latest_state: "replication_group/cr/rg_cmd_before_engine_version_upgrade_latest.yaml" - svc_api: - - operation: ModifyReplicationGroupWithContext - output_fixture: "replication_group/update/rg_cmd_engine_upgrade_initiated.json" - - operation: DescribeCacheClustersWithContext - output_fixture: "cache_clusters/read_many/rg_cmd_primary_cache_node.json" - invoke: Update - expect: - latest_state: "replication_group/cr/rg_cmd_engine_upgrade_initiated.yaml" - error: nil - - name: "DeleteInitiated" - description: "Delete cluster mode-disabled RG. RG moves from available to deleting state." 
- given: - desired_state: "replication_group/cr/rg_cmd_create_completed.yaml" - svc_api: - - operation: DeleteReplicationGroupWithContext - output_fixture: "replication_group/delete/rg_cmd_delete_initiated.json" - - operation: DescribeReplicationGroupsWithContext - output_fixture: "replication_group/read_one/rg_cmd_delete_initiated.json" - invoke: Delete - expect: - error: "Delete is in progress." - - name: "Deleting" - description: "Delete cluster mode-disabled RG. Retry scenario, RG is in deleting state." - given: - desired_state: "replication_group/cr/rg_cmd_delete_initiated.yaml" - svc_api: - invoke: Delete - expect: - error: "Delete is in progress." - - name: Cluster mode enabled replication group - description: Cluster mode enabled replication group CRUD tests - scenarios: - - name: "Create=CustomShardConfig" - description: Create CME RG with custom node group configuration - given: - desired_state: "replication_group/cr/rg_cme_before_create.yaml" - svc_api: - - operation: CreateReplicationGroupWithContext - output_fixture: "replication_group/create/rg_cme_creating.json" - invoke: Create - expect: - latest_state: "replication_group/cr/rg_cme_create_initiated.yaml" - error: nil - - name: "Update=ShardConfigMismatch" - description: Increasing NumNodeGroups without changing NodeGroupConfiguration should result in a terminal condition - given: - desired_state: "replication_group/cr/rg_cme_shard_mismatch.yaml" - latest_state: "replication_group/cr/rg_cme_shard_mismatch.yaml" - svc_api: - - operation: ModifyReplicationGroupShardConfigurationWithContext - error: - code: InvalidParameterValue - message: Configuration for all the node groups should be provided. 
- invoke: Update - expect: - latest_state: "replication_group/cr/rg_cme_invalid_scale_out_attempted.yaml" - error: resource is in terminal condition diff --git a/pkg/resource/snapshot/custom_create_api_test.go b/pkg/resource/snapshot/custom_create_api_test.go deleted file mode 100644 index 3c29c0d2..00000000 --- a/pkg/resource/snapshot/custom_create_api_test.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package snapshot - -import ( - "context" - svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" - "github.com/stretchr/testify/assert" - "testing" -) - -// Helper methods to setup tests -// provideResourceManager returns pointer to resourceManager -func provideResourceManager() *resourceManager { - return &resourceManager{ - rr: nil, - awsAccountID: "", - awsRegion: "", - sess: nil, - sdkapi: nil, - } -} - -// provideResource returns pointer to resource -func provideResource() *resource { - return &resource{ - ko: &svcapitypes.Snapshot{}, - } -} - -func Test_CustomCreateSnapshot_NotCopySnapshot(t *testing.T) { - assert := assert.New(t) - // Setup - rm := provideResourceManager() - - desired := provideResource() - - var ctx context.Context - - res, err := rm.CustomCreateSnapshot(ctx, desired) - assert.Nil(res) - assert.Nil(err) -} - -func Test_CustomCreateSnapshot_InvalidParam(t *testing.T) { - assert := assert.New(t) - // Setup - rm := provideResourceManager() - desired := 
provideResource() - sourceSnapshotName := "test-rg-backup" - rgId := "rgId" - desired.ko.Spec = svcapitypes.SnapshotSpec{SourceSnapshotName: &sourceSnapshotName, - ReplicationGroupID: &rgId} - var ctx context.Context - - res, err := rm.CustomCreateSnapshot(ctx, desired) - assert.Nil(res) - assert.NotNil(err) - assert.Equal(err.Error(), "InvalidParameterCombination: Cannot specify CacheClusteId or ReplicationGroupId while SourceSnapshotName is specified") -} diff --git a/pkg/resource/snapshot/custom_set_output.go b/pkg/resource/snapshot/custom_set_output.go index 1e0981d8..2875e231 100644 --- a/pkg/resource/snapshot/custom_set_output.go +++ b/pkg/resource/snapshot/custom_set_output.go @@ -15,6 +15,7 @@ package snapshot import ( "context" + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" "github.com/aws/aws-sdk-go/service/elasticache" diff --git a/pkg/resource/snapshot/custom_update_api.go b/pkg/resource/snapshot/custom_update_api.go index 19ed766f..04492bc7 100644 --- a/pkg/resource/snapshot/custom_update_api.go +++ b/pkg/resource/snapshot/custom_update_api.go @@ -15,6 +15,7 @@ package snapshot import ( "context" + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" ) diff --git a/pkg/resource/snapshot/descriptor.go b/pkg/resource/snapshot/descriptor.go index fda7ad92..a40d9ed1 100644 --- a/pkg/resource/snapshot/descriptor.go +++ b/pkg/resource/snapshot/descriptor.go @@ -28,7 +28,7 @@ import ( ) const ( - finalizerString = "finalizers.elasticache.services.k8s.aws/Snapshot" + FinalizerString = "finalizers.elasticache.services.k8s.aws/Snapshot" ) var ( @@ -88,8 +88,8 @@ func (d *resourceDescriptor) IsManaged( // https://github.com/kubernetes-sigs/controller-runtime/issues/994 is // fixed. 
This should be able to be: // - // return k8sctrlutil.ContainsFinalizer(obj, finalizerString) - return containsFinalizer(obj, finalizerString) + // return k8sctrlutil.ContainsFinalizer(obj, FinalizerString) + return containsFinalizer(obj, FinalizerString) } // Remove once https://github.com/kubernetes-sigs/controller-runtime/issues/994 @@ -118,7 +118,7 @@ func (d *resourceDescriptor) MarkManaged( // Should not happen. If it does, there is a bug in the code panic("nil RuntimeMetaObject in AWSResource") } - k8sctrlutil.AddFinalizer(obj, finalizerString) + k8sctrlutil.AddFinalizer(obj, FinalizerString) } // MarkUnmanaged removes the supplied resource from management by ACK. What @@ -133,7 +133,7 @@ func (d *resourceDescriptor) MarkUnmanaged( // Should not happen. If it does, there is a bug in the code panic("nil RuntimeMetaObject in AWSResource") } - k8sctrlutil.RemoveFinalizer(obj, finalizerString) + k8sctrlutil.RemoveFinalizer(obj, FinalizerString) } // MarkAdopted places descriptors on the custom resource that indicate the diff --git a/pkg/resource/snapshot/manager.go b/pkg/resource/snapshot/manager.go index 644d0d46..c3c2d7cb 100644 --- a/pkg/resource/snapshot/manager.go +++ b/pkg/resource/snapshot/manager.go @@ -32,9 +32,8 @@ import ( acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" ackutil "github.com/aws-controllers-k8s/runtime/pkg/util" - "github.com/aws/aws-sdk-go/aws/session" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" - svcsdkapi "github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" @@ -59,6 +58,9 @@ type resourceManager struct { // cfg is a copy of the ackcfg.Config object passed on start of the service // controller cfg ackcfg.Config + // clientcfg is a copy of the client configuration passed on start of the + // 
service controller + clientcfg aws.Config // log refers to the logr.Logger object handling logging for the service // controller log logr.Logger @@ -73,12 +75,9 @@ type resourceManager struct { awsAccountID ackv1alpha1.AWSAccountID // The AWS Region that this resource manager targets awsRegion ackv1alpha1.AWSRegion - // sess is the AWS SDK Session object used to communicate with the backend - // AWS service API - sess *session.Session - // sdk is a pointer to the AWS service API interface exposed by the - // aws-sdk-go/services/{alias}/{alias}iface package. - sdkapi svcsdkapi.ElastiCacheAPI + // sdk is a pointer to the AWS service API client exposed by the + // aws-sdk-go-v2/services/{alias} package. + sdkapi *svcsdk.Client } // concreteResource returns a pointer to a resource from the supplied @@ -299,24 +298,25 @@ func (rm *resourceManager) EnsureTags( // newResourceManager returns a new struct implementing // acktypes.AWSResourceManager +// This is for AWS-SDK-GO-V2 - Created newResourceManager With AWS sdk-Go-ClientV2 func newResourceManager( cfg ackcfg.Config, + clientcfg aws.Config, log logr.Logger, metrics *ackmetrics.Metrics, rr acktypes.Reconciler, - sess *session.Session, id ackv1alpha1.AWSAccountID, region ackv1alpha1.AWSRegion, ) (*resourceManager, error) { return &resourceManager{ cfg: cfg, + clientcfg: clientcfg, log: log, metrics: metrics, rr: rr, awsAccountID: id, awsRegion: region, - sess: sess, - sdkapi: svcsdk.New(sess), + sdkapi: svcsdk.NewFromConfig(clientcfg), }, nil } diff --git a/pkg/resource/snapshot/manager_factory.go b/pkg/resource/snapshot/manager_factory.go index 6140bba4..e9011e66 100644 --- a/pkg/resource/snapshot/manager_factory.go +++ b/pkg/resource/snapshot/manager_factory.go @@ -23,7 +23,7 @@ import ( ackcfg "github.com/aws-controllers-k8s/runtime/pkg/config" ackmetrics "github.com/aws-controllers-k8s/runtime/pkg/metrics" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" - "github.com/aws/aws-sdk-go/aws/session" + 
"github.com/aws/aws-sdk-go-v2/aws" "github.com/go-logr/logr" svcresource "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource" @@ -47,14 +47,18 @@ func (f *resourceManagerFactory) ResourceDescriptor() acktypes.AWSResourceDescri // supplied AWS account func (f *resourceManagerFactory) ManagerFor( cfg ackcfg.Config, + clientcfg aws.Config, log logr.Logger, metrics *ackmetrics.Metrics, rr acktypes.Reconciler, - sess *session.Session, id ackv1alpha1.AWSAccountID, region ackv1alpha1.AWSRegion, + roleARN ackv1alpha1.AWSResourceName, ) (acktypes.AWSResourceManager, error) { - rmId := fmt.Sprintf("%s/%s", id, region) + // We use the account ID, region, and role ARN to uniquely identify a + // resource manager. This helps us to avoid creating multiple resource + // managers for the same account/region/roleARN combination. + rmId := fmt.Sprintf("%s/%s/%s", id, region, roleARN) f.RLock() rm, found := f.rmCache[rmId] f.RUnlock() @@ -66,7 +70,7 @@ func (f *resourceManagerFactory) ManagerFor( f.Lock() defer f.Unlock() - rm, err := newResourceManager(cfg, log, metrics, rr, sess, id, region) + rm, err := newResourceManager(cfg, clientcfg, log, metrics, rr, id, region) if err != nil { return nil, err } diff --git a/pkg/resource/snapshot/references.go b/pkg/resource/snapshot/references.go index ebd5ffdc..78f25a3e 100644 --- a/pkg/resource/snapshot/references.go +++ b/pkg/resource/snapshot/references.go @@ -17,6 +17,7 @@ package snapshot import ( "context" + "sigs.k8s.io/controller-runtime/pkg/client" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" diff --git a/pkg/resource/snapshot/resource.go b/pkg/resource/snapshot/resource.go index 91a3d63d..486a71d4 100644 --- a/pkg/resource/snapshot/resource.go +++ b/pkg/resource/snapshot/resource.go @@ -93,6 +93,17 @@ func (r *resource) SetIdentifiers(identifier *ackv1alpha1.AWSIdentifiers) error return nil } +// PopulateResourceFromAnnotation populates the fields passed from adoption annotation +func (r *resource) 
PopulateResourceFromAnnotation(fields map[string]string) error { + tmp, ok := fields["snapshotName"] + if !ok { + return ackerrors.MissingNameIdentifier + } + r.ko.Spec.SnapshotName = &tmp + + return nil +} + // DeepCopy will return a copy of the resource func (r *resource) DeepCopy() acktypes.AWSResource { koCopy := r.ko.DeepCopy() diff --git a/pkg/resource/snapshot/sdk.go b/pkg/resource/snapshot/sdk.go index d0353ff1..7f9bd481 100644 --- a/pkg/resource/snapshot/sdk.go +++ b/pkg/resource/snapshot/sdk.go @@ -28,8 +28,10 @@ import ( ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" - "github.com/aws/aws-sdk-go/aws" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + svcsdktypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" + smithy "github.com/aws/smithy-go" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -40,8 +42,7 @@ import ( var ( _ = &metav1.Time{} _ = strings.ToLower("") - _ = &aws.JSONValue{} - _ = &svcsdk.ElastiCache{} + _ = &svcsdk.Client{} _ = &svcapitypes.Snapshot{} _ = ackv1alpha1.AWSAccountID("") _ = &ackerr.NotFound @@ -49,6 +50,7 @@ var ( _ = &reflect.Value{} _ = fmt.Sprintf("") _ = &ackrequeue.NoRequeue{} + _ = &aws.Config{} ) // sdkFind returns SDK-specific information about a supplied resource @@ -73,10 +75,11 @@ func (rm *resourceManager) sdkFind( return nil, err } var resp *svcsdk.DescribeSnapshotsOutput - resp, err = rm.sdkapi.DescribeSnapshotsWithContext(ctx, input) + resp, err = rm.sdkapi.DescribeSnapshots(ctx, input) rm.metrics.RecordAPICall("READ_MANY", "DescribeSnapshots", err) if err != nil { - if awsErr, ok := ackerr.AWSError(err); ok && awsErr.Code() == "CacheClusterNotFound" { + var awsErr smithy.APIError + if errors.As(err, &awsErr) && awsErr.ErrorCode() == 
"CacheClusterNotFound" { return nil, ackerr.NotFound } return nil, err @@ -100,8 +103,8 @@ func (rm *resourceManager) sdkFind( } else { ko.Status.AutoMinorVersionUpgrade = nil } - if elem.AutomaticFailover != nil { - ko.Status.AutomaticFailover = elem.AutomaticFailover + if elem.AutomaticFailover != "" { + ko.Status.AutomaticFailover = aws.String(string(elem.AutomaticFailover)) } else { ko.Status.AutomaticFailover = nil } @@ -130,8 +133,8 @@ func (rm *resourceManager) sdkFind( } else { ko.Status.CacheSubnetGroupName = nil } - if elem.DataTiering != nil { - ko.Status.DataTiering = elem.DataTiering + if elem.DataTiering != "" { + ko.Status.DataTiering = aws.String(string(elem.DataTiering)) } else { ko.Status.DataTiering = nil } @@ -178,25 +181,14 @@ func (rm *resourceManager) sdkFind( f12elemf4.PrimaryOutpostARN = f12iter.NodeGroupConfiguration.PrimaryOutpostArn } if f12iter.NodeGroupConfiguration.ReplicaAvailabilityZones != nil { - f12elemf4f3 := []*string{} - for _, f12elemf4f3iter := range f12iter.NodeGroupConfiguration.ReplicaAvailabilityZones { - var f12elemf4f3elem string - f12elemf4f3elem = *f12elemf4f3iter - f12elemf4f3 = append(f12elemf4f3, &f12elemf4f3elem) - } - f12elemf4.ReplicaAvailabilityZones = f12elemf4f3 + f12elemf4.ReplicaAvailabilityZones = aws.StringSlice(f12iter.NodeGroupConfiguration.ReplicaAvailabilityZones) } if f12iter.NodeGroupConfiguration.ReplicaCount != nil { - f12elemf4.ReplicaCount = f12iter.NodeGroupConfiguration.ReplicaCount + replicaCountCopy := int64(*f12iter.NodeGroupConfiguration.ReplicaCount) + f12elemf4.ReplicaCount = &replicaCountCopy } if f12iter.NodeGroupConfiguration.ReplicaOutpostArns != nil { - f12elemf4f5 := []*string{} - for _, f12elemf4f5iter := range f12iter.NodeGroupConfiguration.ReplicaOutpostArns { - var f12elemf4f5elem string - f12elemf4f5elem = *f12elemf4f5iter - f12elemf4f5 = append(f12elemf4f5, &f12elemf4f5elem) - } - f12elemf4.ReplicaOutpostARNs = f12elemf4f5 + f12elemf4.ReplicaOutpostARNs = 
aws.StringSlice(f12iter.NodeGroupConfiguration.ReplicaOutpostArns) } if f12iter.NodeGroupConfiguration.Slots != nil { f12elemf4.Slots = f12iter.NodeGroupConfiguration.Slots @@ -216,17 +208,20 @@ func (rm *resourceManager) sdkFind( ko.Status.NodeSnapshots = nil } if elem.NumCacheNodes != nil { - ko.Status.NumCacheNodes = elem.NumCacheNodes + numCacheNodesCopy := int64(*elem.NumCacheNodes) + ko.Status.NumCacheNodes = &numCacheNodesCopy } else { ko.Status.NumCacheNodes = nil } if elem.NumNodeGroups != nil { - ko.Status.NumNodeGroups = elem.NumNodeGroups + numNodeGroupsCopy := int64(*elem.NumNodeGroups) + ko.Status.NumNodeGroups = &numNodeGroupsCopy } else { ko.Status.NumNodeGroups = nil } if elem.Port != nil { - ko.Status.Port = elem.Port + portCopy := int64(*elem.Port) + ko.Status.Port = &portCopy } else { ko.Status.Port = nil } @@ -261,7 +256,8 @@ func (rm *resourceManager) sdkFind( ko.Spec.SnapshotName = nil } if elem.SnapshotRetentionLimit != nil { - ko.Status.SnapshotRetentionLimit = elem.SnapshotRetentionLimit + snapshotRetentionLimitCopy := int64(*elem.SnapshotRetentionLimit) + ko.Status.SnapshotRetentionLimit = &snapshotRetentionLimitCopy } else { ko.Status.SnapshotRetentionLimit = nil } @@ -324,7 +320,7 @@ func (rm *resourceManager) newListRequestPayload( res := &svcsdk.DescribeSnapshotsInput{} if r.ko.Spec.SnapshotName != nil { - res.SetSnapshotName(*r.ko.Spec.SnapshotName) + res.SnapshotName = r.ko.Spec.SnapshotName } return res, nil @@ -353,7 +349,7 @@ func (rm *resourceManager) sdkCreate( var resp *svcsdk.CreateSnapshotOutput _ = resp - resp, err = rm.sdkapi.CreateSnapshotWithContext(ctx, input) + resp, err = rm.sdkapi.CreateSnapshot(ctx, input) rm.metrics.RecordAPICall("CREATE", "CreateSnapshot", err) if err != nil { return nil, err @@ -374,8 +370,8 @@ func (rm *resourceManager) sdkCreate( } else { ko.Status.AutoMinorVersionUpgrade = nil } - if resp.Snapshot.AutomaticFailover != nil { - ko.Status.AutomaticFailover = resp.Snapshot.AutomaticFailover + if 
resp.Snapshot.AutomaticFailover != "" { + ko.Status.AutomaticFailover = aws.String(string(resp.Snapshot.AutomaticFailover)) } else { ko.Status.AutomaticFailover = nil } @@ -404,8 +400,8 @@ func (rm *resourceManager) sdkCreate( } else { ko.Status.CacheSubnetGroupName = nil } - if resp.Snapshot.DataTiering != nil { - ko.Status.DataTiering = resp.Snapshot.DataTiering + if resp.Snapshot.DataTiering != "" { + ko.Status.DataTiering = aws.String(string(resp.Snapshot.DataTiering)) } else { ko.Status.DataTiering = nil } @@ -452,25 +448,14 @@ func (rm *resourceManager) sdkCreate( f12elemf4.PrimaryOutpostARN = f12iter.NodeGroupConfiguration.PrimaryOutpostArn } if f12iter.NodeGroupConfiguration.ReplicaAvailabilityZones != nil { - f12elemf4f3 := []*string{} - for _, f12elemf4f3iter := range f12iter.NodeGroupConfiguration.ReplicaAvailabilityZones { - var f12elemf4f3elem string - f12elemf4f3elem = *f12elemf4f3iter - f12elemf4f3 = append(f12elemf4f3, &f12elemf4f3elem) - } - f12elemf4.ReplicaAvailabilityZones = f12elemf4f3 + f12elemf4.ReplicaAvailabilityZones = aws.StringSlice(f12iter.NodeGroupConfiguration.ReplicaAvailabilityZones) } if f12iter.NodeGroupConfiguration.ReplicaCount != nil { - f12elemf4.ReplicaCount = f12iter.NodeGroupConfiguration.ReplicaCount + replicaCountCopy := int64(*f12iter.NodeGroupConfiguration.ReplicaCount) + f12elemf4.ReplicaCount = &replicaCountCopy } if f12iter.NodeGroupConfiguration.ReplicaOutpostArns != nil { - f12elemf4f5 := []*string{} - for _, f12elemf4f5iter := range f12iter.NodeGroupConfiguration.ReplicaOutpostArns { - var f12elemf4f5elem string - f12elemf4f5elem = *f12elemf4f5iter - f12elemf4f5 = append(f12elemf4f5, &f12elemf4f5elem) - } - f12elemf4.ReplicaOutpostARNs = f12elemf4f5 + f12elemf4.ReplicaOutpostARNs = aws.StringSlice(f12iter.NodeGroupConfiguration.ReplicaOutpostArns) } if f12iter.NodeGroupConfiguration.Slots != nil { f12elemf4.Slots = f12iter.NodeGroupConfiguration.Slots @@ -490,17 +475,20 @@ func (rm *resourceManager) sdkCreate( 
ko.Status.NodeSnapshots = nil } if resp.Snapshot.NumCacheNodes != nil { - ko.Status.NumCacheNodes = resp.Snapshot.NumCacheNodes + numCacheNodesCopy := int64(*resp.Snapshot.NumCacheNodes) + ko.Status.NumCacheNodes = &numCacheNodesCopy } else { ko.Status.NumCacheNodes = nil } if resp.Snapshot.NumNodeGroups != nil { - ko.Status.NumNodeGroups = resp.Snapshot.NumNodeGroups + numNodeGroupsCopy := int64(*resp.Snapshot.NumNodeGroups) + ko.Status.NumNodeGroups = &numNodeGroupsCopy } else { ko.Status.NumNodeGroups = nil } if resp.Snapshot.Port != nil { - ko.Status.Port = resp.Snapshot.Port + portCopy := int64(*resp.Snapshot.Port) + ko.Status.Port = &portCopy } else { ko.Status.Port = nil } @@ -535,7 +523,8 @@ func (rm *resourceManager) sdkCreate( ko.Spec.SnapshotName = nil } if resp.Snapshot.SnapshotRetentionLimit != nil { - ko.Status.SnapshotRetentionLimit = resp.Snapshot.SnapshotRetentionLimit + snapshotRetentionLimitCopy := int64(*resp.Snapshot.SnapshotRetentionLimit) + ko.Status.SnapshotRetentionLimit = &snapshotRetentionLimitCopy } else { ko.Status.SnapshotRetentionLimit = nil } @@ -583,30 +572,30 @@ func (rm *resourceManager) newCreateRequestPayload( res := &svcsdk.CreateSnapshotInput{} if r.ko.Spec.CacheClusterID != nil { - res.SetCacheClusterId(*r.ko.Spec.CacheClusterID) + res.CacheClusterId = r.ko.Spec.CacheClusterID } if r.ko.Spec.KMSKeyID != nil { - res.SetKmsKeyId(*r.ko.Spec.KMSKeyID) + res.KmsKeyId = r.ko.Spec.KMSKeyID } if r.ko.Spec.ReplicationGroupID != nil { - res.SetReplicationGroupId(*r.ko.Spec.ReplicationGroupID) + res.ReplicationGroupId = r.ko.Spec.ReplicationGroupID } if r.ko.Spec.SnapshotName != nil { - res.SetSnapshotName(*r.ko.Spec.SnapshotName) + res.SnapshotName = r.ko.Spec.SnapshotName } if r.ko.Spec.Tags != nil { - f4 := []*svcsdk.Tag{} + f4 := []svcsdktypes.Tag{} for _, f4iter := range r.ko.Spec.Tags { - f4elem := &svcsdk.Tag{} + f4elem := &svcsdktypes.Tag{} if f4iter.Key != nil { - f4elem.SetKey(*f4iter.Key) + f4elem.Key = f4iter.Key } if 
f4iter.Value != nil { - f4elem.SetValue(*f4iter.Value) + f4elem.Value = f4iter.Value } - f4 = append(f4, f4elem) + f4 = append(f4, *f4elem) } - res.SetTags(f4) + res.Tags = f4 } return res, nil @@ -639,7 +628,7 @@ func (rm *resourceManager) sdkDelete( } var resp *svcsdk.DeleteSnapshotOutput _ = resp - resp, err = rm.sdkapi.DeleteSnapshotWithContext(ctx, input) + resp, err = rm.sdkapi.DeleteSnapshot(ctx, input) rm.metrics.RecordAPICall("DELETE", "DeleteSnapshot", err) return nil, err } @@ -652,7 +641,7 @@ func (rm *resourceManager) newDeleteRequestPayload( res := &svcsdk.DeleteSnapshotInput{} if r.ko.Spec.SnapshotName != nil { - res.SetSnapshotName(*r.ko.Spec.SnapshotName) + res.SnapshotName = r.ko.Spec.SnapshotName } return res, nil @@ -762,11 +751,12 @@ func (rm *resourceManager) terminalAWSError(err error) bool { if err == nil { return false } - awsErr, ok := ackerr.AWSError(err) - if !ok { + + var terminalErr smithy.APIError + if !errors.As(err, &terminalErr) { return false } - switch awsErr.Code() { + switch terminalErr.ErrorCode() { case "InvalidParameter", "InvalidParameterValue", "InvalidParameterCombination", diff --git a/pkg/resource/user/custom_update.go b/pkg/resource/user/custom_update.go index 7b5cefa5..c977f252 100644 --- a/pkg/resource/user/custom_update.go +++ b/pkg/resource/user/custom_update.go @@ -15,6 +15,7 @@ package user import ( "context" + "github.com/pkg/errors" ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" diff --git a/pkg/resource/user/delta.go b/pkg/resource/user/delta.go index 746c7b38..cd2a9791 100644 --- a/pkg/resource/user/delta.go +++ b/pkg/resource/user/delta.go @@ -50,6 +50,24 @@ func newResourceDelta( delta.Add("Spec.AccessString", a.ko.Spec.AccessString, b.ko.Spec.AccessString) } } + if ackcompare.HasNilDifference(a.ko.Spec.AuthenticationMode, b.ko.Spec.AuthenticationMode) { + delta.Add("Spec.AuthenticationMode", a.ko.Spec.AuthenticationMode, b.ko.Spec.AuthenticationMode) + } else if 
a.ko.Spec.AuthenticationMode != nil && b.ko.Spec.AuthenticationMode != nil { + if len(a.ko.Spec.AuthenticationMode.Passwords) != len(b.ko.Spec.AuthenticationMode.Passwords) { + delta.Add("Spec.AuthenticationMode.Passwords", a.ko.Spec.AuthenticationMode.Passwords, b.ko.Spec.AuthenticationMode.Passwords) + } else if len(a.ko.Spec.AuthenticationMode.Passwords) > 0 { + if !ackcompare.SliceStringPEqual(a.ko.Spec.AuthenticationMode.Passwords, b.ko.Spec.AuthenticationMode.Passwords) { + delta.Add("Spec.AuthenticationMode.Passwords", a.ko.Spec.AuthenticationMode.Passwords, b.ko.Spec.AuthenticationMode.Passwords) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.AuthenticationMode.Type, b.ko.Spec.AuthenticationMode.Type) { + delta.Add("Spec.AuthenticationMode.Type", a.ko.Spec.AuthenticationMode.Type, b.ko.Spec.AuthenticationMode.Type) + } else if a.ko.Spec.AuthenticationMode.Type != nil && b.ko.Spec.AuthenticationMode.Type != nil { + if *a.ko.Spec.AuthenticationMode.Type != *b.ko.Spec.AuthenticationMode.Type { + delta.Add("Spec.AuthenticationMode.Type", a.ko.Spec.AuthenticationMode.Type, b.ko.Spec.AuthenticationMode.Type) + } + } + } if ackcompare.HasNilDifference(a.ko.Spec.Engine, b.ko.Spec.Engine) { delta.Add("Spec.Engine", a.ko.Spec.Engine, b.ko.Spec.Engine) } else if a.ko.Spec.Engine != nil && b.ko.Spec.Engine != nil { diff --git a/pkg/resource/user/delta_util.go b/pkg/resource/user/delta_util.go index f36de45e..353a7cc2 100644 --- a/pkg/resource/user/delta_util.go +++ b/pkg/resource/user/delta_util.go @@ -13,8 +13,10 @@ package user -import ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" -import "github.com/aws-controllers-k8s/elasticache-controller/pkg/common" +import ( + "github.com/aws-controllers-k8s/elasticache-controller/pkg/common" + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" +) // remove differences which are not meaningful (i.e. 
ones that don't warrant a call to rm.Update) func filterDelta( diff --git a/pkg/resource/user/descriptor.go b/pkg/resource/user/descriptor.go index eb1e1faf..73d0041b 100644 --- a/pkg/resource/user/descriptor.go +++ b/pkg/resource/user/descriptor.go @@ -28,7 +28,7 @@ import ( ) const ( - finalizerString = "finalizers.elasticache.services.k8s.aws/User" + FinalizerString = "finalizers.elasticache.services.k8s.aws/User" ) var ( @@ -88,8 +88,8 @@ func (d *resourceDescriptor) IsManaged( // https://github.com/kubernetes-sigs/controller-runtime/issues/994 is // fixed. This should be able to be: // - // return k8sctrlutil.ContainsFinalizer(obj, finalizerString) - return containsFinalizer(obj, finalizerString) + // return k8sctrlutil.ContainsFinalizer(obj, FinalizerString) + return containsFinalizer(obj, FinalizerString) } // Remove once https://github.com/kubernetes-sigs/controller-runtime/issues/994 @@ -118,7 +118,7 @@ func (d *resourceDescriptor) MarkManaged( // Should not happen. If it does, there is a bug in the code panic("nil RuntimeMetaObject in AWSResource") } - k8sctrlutil.AddFinalizer(obj, finalizerString) + k8sctrlutil.AddFinalizer(obj, FinalizerString) } // MarkUnmanaged removes the supplied resource from management by ACK. What @@ -133,7 +133,7 @@ func (d *resourceDescriptor) MarkUnmanaged( // Should not happen. 
If it does, there is a bug in the code panic("nil RuntimeMetaObject in AWSResource") } - k8sctrlutil.RemoveFinalizer(obj, finalizerString) + k8sctrlutil.RemoveFinalizer(obj, FinalizerString) } // MarkAdopted places descriptors on the custom resource that indicate the diff --git a/pkg/resource/user/manager.go b/pkg/resource/user/manager.go index 79704ac9..a4d4ff5f 100644 --- a/pkg/resource/user/manager.go +++ b/pkg/resource/user/manager.go @@ -32,9 +32,8 @@ import ( acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" ackutil "github.com/aws-controllers-k8s/runtime/pkg/util" - "github.com/aws/aws-sdk-go/aws/session" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" - svcsdkapi "github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" @@ -59,6 +58,9 @@ type resourceManager struct { // cfg is a copy of the ackcfg.Config object passed on start of the service // controller cfg ackcfg.Config + // clientcfg is a copy of the client configuration passed on start of the + // service controller + clientcfg aws.Config // log refers to the logr.Logger object handling logging for the service // controller log logr.Logger @@ -73,12 +75,9 @@ type resourceManager struct { awsAccountID ackv1alpha1.AWSAccountID // The AWS Region that this resource manager targets awsRegion ackv1alpha1.AWSRegion - // sess is the AWS SDK Session object used to communicate with the backend - // AWS service API - sess *session.Session - // sdk is a pointer to the AWS service API interface exposed by the - // aws-sdk-go/services/{alias}/{alias}iface package. - sdkapi svcsdkapi.ElastiCacheAPI + // sdk is a pointer to the AWS service API client exposed by the + // aws-sdk-go-v2/services/{alias} package. 
+ sdkapi *svcsdk.Client } // concreteResource returns a pointer to a resource from the supplied @@ -299,24 +298,25 @@ func (rm *resourceManager) EnsureTags( // newResourceManager returns a new struct implementing // acktypes.AWSResourceManager +// This is for AWS-SDK-GO-V2 - Created newResourceManager With AWS sdk-Go-ClientV2 func newResourceManager( cfg ackcfg.Config, + clientcfg aws.Config, log logr.Logger, metrics *ackmetrics.Metrics, rr acktypes.Reconciler, - sess *session.Session, id ackv1alpha1.AWSAccountID, region ackv1alpha1.AWSRegion, ) (*resourceManager, error) { return &resourceManager{ cfg: cfg, + clientcfg: clientcfg, log: log, metrics: metrics, rr: rr, awsAccountID: id, awsRegion: region, - sess: sess, - sdkapi: svcsdk.New(sess), + sdkapi: svcsdk.NewFromConfig(clientcfg), }, nil } diff --git a/pkg/resource/user/manager_factory.go b/pkg/resource/user/manager_factory.go index d5b128a0..64f4166f 100644 --- a/pkg/resource/user/manager_factory.go +++ b/pkg/resource/user/manager_factory.go @@ -23,7 +23,7 @@ import ( ackcfg "github.com/aws-controllers-k8s/runtime/pkg/config" ackmetrics "github.com/aws-controllers-k8s/runtime/pkg/metrics" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" - "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/go-logr/logr" svcresource "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource" @@ -47,14 +47,18 @@ func (f *resourceManagerFactory) ResourceDescriptor() acktypes.AWSResourceDescri // supplied AWS account func (f *resourceManagerFactory) ManagerFor( cfg ackcfg.Config, + clientcfg aws.Config, log logr.Logger, metrics *ackmetrics.Metrics, rr acktypes.Reconciler, - sess *session.Session, id ackv1alpha1.AWSAccountID, region ackv1alpha1.AWSRegion, + roleARN ackv1alpha1.AWSResourceName, ) (acktypes.AWSResourceManager, error) { - rmId := fmt.Sprintf("%s/%s", id, region) + // We use the account ID, region, and role ARN to uniquely identify a + // resource manager. 
This helps us to avoid creating multiple resource + // managers for the same account/region/roleARN combination. + rmId := fmt.Sprintf("%s/%s/%s", id, region, roleARN) f.RLock() rm, found := f.rmCache[rmId] f.RUnlock() @@ -66,7 +70,7 @@ func (f *resourceManagerFactory) ManagerFor( f.Lock() defer f.Unlock() - rm, err := newResourceManager(cfg, log, metrics, rr, sess, id, region) + rm, err := newResourceManager(cfg, clientcfg, log, metrics, rr, id, region) if err != nil { return nil, err } diff --git a/pkg/resource/user/references.go b/pkg/resource/user/references.go index 3eb0a444..14bba7e8 100644 --- a/pkg/resource/user/references.go +++ b/pkg/resource/user/references.go @@ -17,6 +17,7 @@ package user import ( "context" + "sigs.k8s.io/controller-runtime/pkg/client" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" diff --git a/pkg/resource/user/resource.go b/pkg/resource/user/resource.go index d99907fe..66124d11 100644 --- a/pkg/resource/user/resource.go +++ b/pkg/resource/user/resource.go @@ -93,6 +93,17 @@ func (r *resource) SetIdentifiers(identifier *ackv1alpha1.AWSIdentifiers) error return nil } +// PopulateResourceFromAnnotation populates the fields passed from adoption annotation +func (r *resource) PopulateResourceFromAnnotation(fields map[string]string) error { + tmp, ok := fields["userID"] + if !ok { + return ackerrors.MissingNameIdentifier + } + r.ko.Spec.UserID = &tmp + + return nil +} + // DeepCopy will return a copy of the resource func (r *resource) DeepCopy() acktypes.AWSResource { koCopy := r.ko.DeepCopy() diff --git a/pkg/resource/user/sdk.go b/pkg/resource/user/sdk.go index e533f848..ca1e8fc2 100644 --- a/pkg/resource/user/sdk.go +++ b/pkg/resource/user/sdk.go @@ -28,8 +28,10 @@ import ( ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" - "github.com/aws/aws-sdk-go/aws" - svcsdk 
"github.com/aws/aws-sdk-go/service/elasticache" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + svcsdktypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" + smithy "github.com/aws/smithy-go" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -40,8 +42,7 @@ import ( var ( _ = &metav1.Time{} _ = strings.ToLower("") - _ = &aws.JSONValue{} - _ = &svcsdk.ElastiCache{} + _ = &svcsdk.Client{} _ = &svcapitypes.User{} _ = ackv1alpha1.AWSAccountID("") _ = &ackerr.NotFound @@ -49,6 +50,7 @@ var ( _ = &reflect.Value{} _ = fmt.Sprintf("") _ = &ackrequeue.NoRequeue{} + _ = &aws.Config{} ) // sdkFind returns SDK-specific information about a supplied resource @@ -73,10 +75,11 @@ func (rm *resourceManager) sdkFind( return nil, err } var resp *svcsdk.DescribeUsersOutput - resp, err = rm.sdkapi.DescribeUsersWithContext(ctx, input) + resp, err = rm.sdkapi.DescribeUsers(ctx, input) rm.metrics.RecordAPICall("READ_MANY", "DescribeUsers", err) if err != nil { - if awsErr, ok := ackerr.AWSError(err); ok && awsErr.Code() == "UserNotFound" { + var awsErr smithy.APIError + if errors.As(err, &awsErr) && awsErr.ErrorCode() == "UserNotFound" { return nil, ackerr.NotFound } return nil, err @@ -103,10 +106,11 @@ func (rm *resourceManager) sdkFind( if elem.Authentication != nil { f2 := &svcapitypes.Authentication{} if elem.Authentication.PasswordCount != nil { - f2.PasswordCount = elem.Authentication.PasswordCount + passwordCountCopy := int64(*elem.Authentication.PasswordCount) + f2.PasswordCount = &passwordCountCopy } - if elem.Authentication.Type != nil { - f2.Type = elem.Authentication.Type + if elem.Authentication.Type != "" { + f2.Type = aws.String(string(elem.Authentication.Type)) } ko.Status.Authentication = f2 } else { @@ -128,13 +132,7 @@ func (rm *resourceManager) sdkFind( ko.Status.Status = nil } if elem.UserGroupIds != nil { - f6 := []*string{} - for _, f6iter := range elem.UserGroupIds { - var f6elem 
string - f6elem = *f6iter - f6 = append(f6, &f6elem) - } - ko.Status.UserGroupIDs = f6 + ko.Status.UserGroupIDs = aws.StringSlice(elem.UserGroupIds) } else { ko.Status.UserGroupIDs = nil } @@ -178,7 +176,7 @@ func (rm *resourceManager) newListRequestPayload( res := &svcsdk.DescribeUsersInput{} if r.ko.Spec.UserID != nil { - res.SetUserId(*r.ko.Spec.UserID) + res.UserId = r.ko.Spec.UserID } return res, nil @@ -203,7 +201,7 @@ func (rm *resourceManager) sdkCreate( var resp *svcsdk.CreateUserOutput _ = resp - resp, err = rm.sdkapi.CreateUserWithContext(ctx, input) + resp, err = rm.sdkapi.CreateUser(ctx, input) rm.metrics.RecordAPICall("CREATE", "CreateUser", err) if err != nil { return nil, err @@ -227,10 +225,11 @@ func (rm *resourceManager) sdkCreate( if resp.Authentication != nil { f2 := &svcapitypes.Authentication{} if resp.Authentication.PasswordCount != nil { - f2.PasswordCount = resp.Authentication.PasswordCount + passwordCountCopy := int64(*resp.Authentication.PasswordCount) + f2.PasswordCount = &passwordCountCopy } - if resp.Authentication.Type != nil { - f2.Type = resp.Authentication.Type + if resp.Authentication.Type != "" { + f2.Type = aws.String(string(resp.Authentication.Type)) } ko.Status.Authentication = f2 } else { @@ -252,13 +251,7 @@ func (rm *resourceManager) sdkCreate( ko.Status.Status = nil } if resp.UserGroupIds != nil { - f6 := []*string{} - for _, f6iter := range resp.UserGroupIds { - var f6elem string - f6elem = *f6iter - f6 = append(f6, &f6elem) - } - ko.Status.UserGroupIDs = f6 + ko.Status.UserGroupIDs = aws.StringSlice(resp.UserGroupIds) } else { ko.Status.UserGroupIDs = nil } @@ -292,50 +285,66 @@ func (rm *resourceManager) newCreateRequestPayload( res := &svcsdk.CreateUserInput{} if r.ko.Spec.AccessString != nil { - res.SetAccessString(*r.ko.Spec.AccessString) + res.AccessString = r.ko.Spec.AccessString + } + if r.ko.Spec.AuthenticationMode != nil { + f1 := &svcsdktypes.AuthenticationMode{} + if r.ko.Spec.AuthenticationMode.Passwords != 
nil { + f1f0 := []string{} + for _, f1f0iter := range r.ko.Spec.AuthenticationMode.Passwords { + var f1f0elem string + f1f0elem = f1f0iter + f1f0 = append(f1f0, f1f0elem) + } + f1.Passwords = f1f0 + } + if r.ko.Spec.AuthenticationMode.Type != nil { + f1.Type = svcsdktypes.InputAuthenticationType(*r.ko.Spec.AuthenticationMode.Type) + } + res.AuthenticationMode = f1 } if r.ko.Spec.Engine != nil { - res.SetEngine(*r.ko.Spec.Engine) + res.Engine = r.ko.Spec.Engine } if r.ko.Spec.NoPasswordRequired != nil { - res.SetNoPasswordRequired(*r.ko.Spec.NoPasswordRequired) + res.NoPasswordRequired = r.ko.Spec.NoPasswordRequired } if r.ko.Spec.Passwords != nil { - f3 := []*string{} - for _, f3iter := range r.ko.Spec.Passwords { - var f3elem string - if f3iter != nil { - tmpSecret, err := rm.rr.SecretValueFromReference(ctx, f3iter) + f4 := []string{} + for _, f4iter := range r.ko.Spec.Passwords { + var f4elem string + if f4iter != nil { + tmpSecret, err := rm.rr.SecretValueFromReference(ctx, f4iter) if err != nil { return nil, ackrequeue.Needed(err) } if tmpSecret != "" { - f3elem = tmpSecret + f4elem = tmpSecret } } - f3 = append(f3, &f3elem) + f4 = append(f4, f4elem) } - res.SetPasswords(f3) + res.Passwords = f4 } if r.ko.Spec.Tags != nil { - f4 := []*svcsdk.Tag{} - for _, f4iter := range r.ko.Spec.Tags { - f4elem := &svcsdk.Tag{} - if f4iter.Key != nil { - f4elem.SetKey(*f4iter.Key) + f5 := []svcsdktypes.Tag{} + for _, f5iter := range r.ko.Spec.Tags { + f5elem := &svcsdktypes.Tag{} + if f5iter.Key != nil { + f5elem.Key = f5iter.Key } - if f4iter.Value != nil { - f4elem.SetValue(*f4iter.Value) + if f5iter.Value != nil { + f5elem.Value = f5iter.Value } - f4 = append(f4, f4elem) + f5 = append(f5, *f5elem) } - res.SetTags(f4) + res.Tags = f5 } if r.ko.Spec.UserID != nil { - res.SetUserId(*r.ko.Spec.UserID) + res.UserId = r.ko.Spec.UserID } if r.ko.Spec.UserName != nil { - res.SetUserName(*r.ko.Spec.UserName) + res.UserName = r.ko.Spec.UserName } return res, nil @@ -366,7 +375,7 @@ 
func (rm *resourceManager) sdkUpdate( var resp *svcsdk.ModifyUserOutput _ = resp - resp, err = rm.sdkapi.ModifyUserWithContext(ctx, input) + resp, err = rm.sdkapi.ModifyUser(ctx, input) rm.metrics.RecordAPICall("UPDATE", "ModifyUser", err) if err != nil { return nil, err @@ -390,10 +399,11 @@ func (rm *resourceManager) sdkUpdate( if resp.Authentication != nil { f2 := &svcapitypes.Authentication{} if resp.Authentication.PasswordCount != nil { - f2.PasswordCount = resp.Authentication.PasswordCount + passwordCountCopy := int64(*resp.Authentication.PasswordCount) + f2.PasswordCount = &passwordCountCopy } - if resp.Authentication.Type != nil { - f2.Type = resp.Authentication.Type + if resp.Authentication.Type != "" { + f2.Type = aws.String(string(resp.Authentication.Type)) } ko.Status.Authentication = f2 } else { @@ -415,13 +425,7 @@ func (rm *resourceManager) sdkUpdate( ko.Status.Status = nil } if resp.UserGroupIds != nil { - f6 := []*string{} - for _, f6iter := range resp.UserGroupIds { - var f6elem string - f6elem = *f6iter - f6 = append(f6, &f6elem) - } - ko.Status.UserGroupIDs = f6 + ko.Status.UserGroupIDs = aws.StringSlice(resp.UserGroupIds) } else { ko.Status.UserGroupIDs = nil } @@ -455,8 +459,27 @@ func (rm *resourceManager) newUpdateRequestPayload( ) (*svcsdk.ModifyUserInput, error) { res := &svcsdk.ModifyUserInput{} + if r.ko.Spec.AuthenticationMode != nil { + f1 := &svcsdktypes.AuthenticationMode{} + if r.ko.Spec.AuthenticationMode.Passwords != nil { + f1f0 := []string{} + for _, f1f0iter := range r.ko.Spec.AuthenticationMode.Passwords { + var f1f0elem string + f1f0elem = f1f0iter + f1f0 = append(f1f0, f1f0elem) + } + f1.Passwords = f1f0 + } + if r.ko.Spec.AuthenticationMode.Type != nil { + f1.Type = svcsdktypes.InputAuthenticationType(*r.ko.Spec.AuthenticationMode.Type) + } + res.AuthenticationMode = f1 + } + if r.ko.Spec.Engine != nil { + res.Engine = r.ko.Spec.Engine + } if r.ko.Spec.UserID != nil { - res.SetUserId(*r.ko.Spec.UserID) + res.UserId = 
r.ko.Spec.UserID } return res, nil @@ -478,7 +501,7 @@ func (rm *resourceManager) sdkDelete( } var resp *svcsdk.DeleteUserOutput _ = resp - resp, err = rm.sdkapi.DeleteUserWithContext(ctx, input) + resp, err = rm.sdkapi.DeleteUser(ctx, input) rm.metrics.RecordAPICall("DELETE", "DeleteUser", err) return nil, err } @@ -491,7 +514,7 @@ func (rm *resourceManager) newDeleteRequestPayload( res := &svcsdk.DeleteUserInput{} if r.ko.Spec.UserID != nil { - res.SetUserId(*r.ko.Spec.UserID) + res.UserId = r.ko.Spec.UserID } return res, nil @@ -599,11 +622,12 @@ func (rm *resourceManager) terminalAWSError(err error) bool { if err == nil { return false } - awsErr, ok := ackerr.AWSError(err) - if !ok { + + var terminalErr smithy.APIError + if !errors.As(err, &terminalErr) { return false } - switch awsErr.Code() { + switch terminalErr.ErrorCode() { case "UserAlreadyExists", "UserQuotaExceeded", "DuplicateUserName", diff --git a/pkg/resource/user_group/custom_set_output.go b/pkg/resource/user_group/custom_set_output.go index 6f3f6fbf..11fd45f1 100644 --- a/pkg/resource/user_group/custom_set_output.go +++ b/pkg/resource/user_group/custom_set_output.go @@ -15,6 +15,7 @@ package user_group import ( "context" + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" "github.com/aws/aws-sdk-go/service/elasticache" diff --git a/pkg/resource/user_group/custom_update_api.go b/pkg/resource/user_group/custom_update_api.go index d3f56e01..3563cd2b 100644 --- a/pkg/resource/user_group/custom_update_api.go +++ b/pkg/resource/user_group/custom_update_api.go @@ -16,6 +16,7 @@ package user_group import ( "context" "errors" + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" diff --git a/pkg/resource/user_group/descriptor.go 
b/pkg/resource/user_group/descriptor.go index bbafa39c..7e0965bc 100644 --- a/pkg/resource/user_group/descriptor.go +++ b/pkg/resource/user_group/descriptor.go @@ -28,7 +28,7 @@ import ( ) const ( - finalizerString = "finalizers.elasticache.services.k8s.aws/UserGroup" + FinalizerString = "finalizers.elasticache.services.k8s.aws/UserGroup" ) var ( @@ -88,8 +88,8 @@ func (d *resourceDescriptor) IsManaged( // https://github.com/kubernetes-sigs/controller-runtime/issues/994 is // fixed. This should be able to be: // - // return k8sctrlutil.ContainsFinalizer(obj, finalizerString) - return containsFinalizer(obj, finalizerString) + // return k8sctrlutil.ContainsFinalizer(obj, FinalizerString) + return containsFinalizer(obj, FinalizerString) } // Remove once https://github.com/kubernetes-sigs/controller-runtime/issues/994 @@ -118,7 +118,7 @@ func (d *resourceDescriptor) MarkManaged( // Should not happen. If it does, there is a bug in the code panic("nil RuntimeMetaObject in AWSResource") } - k8sctrlutil.AddFinalizer(obj, finalizerString) + k8sctrlutil.AddFinalizer(obj, FinalizerString) } // MarkUnmanaged removes the supplied resource from management by ACK. What @@ -133,7 +133,7 @@ func (d *resourceDescriptor) MarkUnmanaged( // Should not happen. 
If it does, there is a bug in the code panic("nil RuntimeMetaObject in AWSResource") } - k8sctrlutil.RemoveFinalizer(obj, finalizerString) + k8sctrlutil.RemoveFinalizer(obj, FinalizerString) } // MarkAdopted places descriptors on the custom resource that indicate the diff --git a/pkg/resource/user_group/manager.go b/pkg/resource/user_group/manager.go index 320d777d..d66aef4a 100644 --- a/pkg/resource/user_group/manager.go +++ b/pkg/resource/user_group/manager.go @@ -32,9 +32,8 @@ import ( acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" ackutil "github.com/aws-controllers-k8s/runtime/pkg/util" - "github.com/aws/aws-sdk-go/aws/session" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" - svcsdkapi "github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" @@ -59,6 +58,9 @@ type resourceManager struct { // cfg is a copy of the ackcfg.Config object passed on start of the service // controller cfg ackcfg.Config + // clientcfg is a copy of the client configuration passed on start of the + // service controller + clientcfg aws.Config // log refers to the logr.Logger object handling logging for the service // controller log logr.Logger @@ -73,12 +75,9 @@ type resourceManager struct { awsAccountID ackv1alpha1.AWSAccountID // The AWS Region that this resource manager targets awsRegion ackv1alpha1.AWSRegion - // sess is the AWS SDK Session object used to communicate with the backend - // AWS service API - sess *session.Session - // sdk is a pointer to the AWS service API interface exposed by the - // aws-sdk-go/services/{alias}/{alias}iface package. - sdkapi svcsdkapi.ElastiCacheAPI + // sdk is a pointer to the AWS service API client exposed by the + // aws-sdk-go-v2/services/{alias} package. 
+ sdkapi *svcsdk.Client } // concreteResource returns a pointer to a resource from the supplied @@ -299,24 +298,25 @@ func (rm *resourceManager) EnsureTags( // newResourceManager returns a new struct implementing // acktypes.AWSResourceManager +// This is for AWS-SDK-GO-V2 - Created newResourceManager With AWS sdk-Go-ClientV2 func newResourceManager( cfg ackcfg.Config, + clientcfg aws.Config, log logr.Logger, metrics *ackmetrics.Metrics, rr acktypes.Reconciler, - sess *session.Session, id ackv1alpha1.AWSAccountID, region ackv1alpha1.AWSRegion, ) (*resourceManager, error) { return &resourceManager{ cfg: cfg, + clientcfg: clientcfg, log: log, metrics: metrics, rr: rr, awsAccountID: id, awsRegion: region, - sess: sess, - sdkapi: svcsdk.New(sess), + sdkapi: svcsdk.NewFromConfig(clientcfg), }, nil } diff --git a/pkg/resource/user_group/manager_factory.go b/pkg/resource/user_group/manager_factory.go index db798305..3bf74221 100644 --- a/pkg/resource/user_group/manager_factory.go +++ b/pkg/resource/user_group/manager_factory.go @@ -23,7 +23,7 @@ import ( ackcfg "github.com/aws-controllers-k8s/runtime/pkg/config" ackmetrics "github.com/aws-controllers-k8s/runtime/pkg/metrics" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" - "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/go-logr/logr" svcresource "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource" @@ -47,14 +47,18 @@ func (f *resourceManagerFactory) ResourceDescriptor() acktypes.AWSResourceDescri // supplied AWS account func (f *resourceManagerFactory) ManagerFor( cfg ackcfg.Config, + clientcfg aws.Config, log logr.Logger, metrics *ackmetrics.Metrics, rr acktypes.Reconciler, - sess *session.Session, id ackv1alpha1.AWSAccountID, region ackv1alpha1.AWSRegion, + roleARN ackv1alpha1.AWSResourceName, ) (acktypes.AWSResourceManager, error) { - rmId := fmt.Sprintf("%s/%s", id, region) + // We use the account ID, region, and role ARN to uniquely identify a + // 
resource manager. This helps us to avoid creating multiple resource + // managers for the same account/region/roleARN combination. + rmId := fmt.Sprintf("%s/%s/%s", id, region, roleARN) f.RLock() rm, found := f.rmCache[rmId] f.RUnlock() @@ -66,7 +70,7 @@ func (f *resourceManagerFactory) ManagerFor( f.Lock() defer f.Unlock() - rm, err := newResourceManager(cfg, log, metrics, rr, sess, id, region) + rm, err := newResourceManager(cfg, clientcfg, log, metrics, rr, id, region) if err != nil { return nil, err } diff --git a/pkg/resource/user_group/references.go b/pkg/resource/user_group/references.go index ccb5aaa4..8557d535 100644 --- a/pkg/resource/user_group/references.go +++ b/pkg/resource/user_group/references.go @@ -17,6 +17,7 @@ package user_group import ( "context" + "sigs.k8s.io/controller-runtime/pkg/client" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" diff --git a/pkg/resource/user_group/resource.go b/pkg/resource/user_group/resource.go index 340b70e7..24431730 100644 --- a/pkg/resource/user_group/resource.go +++ b/pkg/resource/user_group/resource.go @@ -93,6 +93,17 @@ func (r *resource) SetIdentifiers(identifier *ackv1alpha1.AWSIdentifiers) error return nil } +// PopulateResourceFromAnnotation populates the fields passed from adoption annotation +func (r *resource) PopulateResourceFromAnnotation(fields map[string]string) error { + tmp, ok := fields["userGroupID"] + if !ok { + return ackerrors.MissingNameIdentifier + } + r.ko.Spec.UserGroupID = &tmp + + return nil +} + // DeepCopy will return a copy of the resource func (r *resource) DeepCopy() acktypes.AWSResource { koCopy := r.ko.DeepCopy() diff --git a/pkg/resource/user_group/sdk.go b/pkg/resource/user_group/sdk.go index a9c736d8..32af0446 100644 --- a/pkg/resource/user_group/sdk.go +++ b/pkg/resource/user_group/sdk.go @@ -28,8 +28,10 @@ import ( ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" ackrtlog 
"github.com/aws-controllers-k8s/runtime/pkg/runtime/log" - "github.com/aws/aws-sdk-go/aws" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + svcsdktypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" + smithy "github.com/aws/smithy-go" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -40,8 +42,7 @@ import ( var ( _ = &metav1.Time{} _ = strings.ToLower("") - _ = &aws.JSONValue{} - _ = &svcsdk.ElastiCache{} + _ = &svcsdk.Client{} _ = &svcapitypes.UserGroup{} _ = ackv1alpha1.AWSAccountID("") _ = &ackerr.NotFound @@ -49,6 +50,7 @@ var ( _ = &reflect.Value{} _ = fmt.Sprintf("") _ = &ackrequeue.NoRequeue{} + _ = &aws.Config{} ) // sdkFind returns SDK-specific information about a supplied resource @@ -73,10 +75,11 @@ func (rm *resourceManager) sdkFind( return nil, err } var resp *svcsdk.DescribeUserGroupsOutput - resp, err = rm.sdkapi.DescribeUserGroupsWithContext(ctx, input) + resp, err = rm.sdkapi.DescribeUserGroups(ctx, input) rm.metrics.RecordAPICall("READ_MANY", "DescribeUserGroups", err) if err != nil { - if awsErr, ok := ackerr.AWSError(err); ok && awsErr.Code() == "UserGroupNotFound" { + var awsErr smithy.APIError + if errors.As(err, &awsErr) && awsErr.ErrorCode() == "UserGroupNotFound" { return nil, ackerr.NotFound } return nil, err @@ -108,38 +111,25 @@ func (rm *resourceManager) sdkFind( if elem.PendingChanges != nil { f3 := &svcapitypes.UserGroupPendingChanges{} if elem.PendingChanges.UserIdsToAdd != nil { - f3f0 := []*string{} - for _, f3f0iter := range elem.PendingChanges.UserIdsToAdd { - var f3f0elem string - f3f0elem = *f3f0iter - f3f0 = append(f3f0, &f3f0elem) - } - f3.UserIDsToAdd = f3f0 + f3.UserIDsToAdd = aws.StringSlice(elem.PendingChanges.UserIdsToAdd) } if elem.PendingChanges.UserIdsToRemove != nil { - f3f1 := []*string{} - for _, f3f1iter := range elem.PendingChanges.UserIdsToRemove { - var f3f1elem string 
- f3f1elem = *f3f1iter - f3f1 = append(f3f1, &f3f1elem) - } - f3.UserIDsToRemove = f3f1 + f3.UserIDsToRemove = aws.StringSlice(elem.PendingChanges.UserIdsToRemove) } ko.Status.PendingChanges = f3 } else { ko.Status.PendingChanges = nil } if elem.ReplicationGroups != nil { - f4 := []*string{} - for _, f4iter := range elem.ReplicationGroups { - var f4elem string - f4elem = *f4iter - f4 = append(f4, &f4elem) - } - ko.Status.ReplicationGroups = f4 + ko.Status.ReplicationGroups = aws.StringSlice(elem.ReplicationGroups) } else { ko.Status.ReplicationGroups = nil } + if elem.ServerlessCaches != nil { + ko.Status.ServerlessCaches = aws.StringSlice(elem.ServerlessCaches) + } else { + ko.Status.ServerlessCaches = nil + } if elem.Status != nil { ko.Status.Status = elem.Status } else { @@ -151,13 +141,7 @@ func (rm *resourceManager) sdkFind( ko.Spec.UserGroupID = nil } if elem.UserIds != nil { - f7 := []*string{} - for _, f7iter := range elem.UserIds { - var f7elem string - f7elem = *f7iter - f7 = append(f7, &f7elem) - } - ko.Spec.UserIDs = f7 + ko.Spec.UserIDs = aws.StringSlice(elem.UserIds) } else { ko.Spec.UserIDs = nil } @@ -195,7 +179,7 @@ func (rm *resourceManager) newListRequestPayload( res := &svcsdk.DescribeUserGroupsInput{} if r.ko.Spec.UserGroupID != nil { - res.SetUserGroupId(*r.ko.Spec.UserGroupID) + res.UserGroupId = r.ko.Spec.UserGroupID } return res, nil @@ -220,7 +204,7 @@ func (rm *resourceManager) sdkCreate( var resp *svcsdk.CreateUserGroupOutput _ = resp - resp, err = rm.sdkapi.CreateUserGroupWithContext(ctx, input) + resp, err = rm.sdkapi.CreateUserGroup(ctx, input) rm.metrics.RecordAPICall("CREATE", "CreateUserGroup", err) if err != nil { return nil, err @@ -249,38 +233,25 @@ func (rm *resourceManager) sdkCreate( if resp.PendingChanges != nil { f3 := &svcapitypes.UserGroupPendingChanges{} if resp.PendingChanges.UserIdsToAdd != nil { - f3f0 := []*string{} - for _, f3f0iter := range resp.PendingChanges.UserIdsToAdd { - var f3f0elem string - f3f0elem = 
*f3f0iter - f3f0 = append(f3f0, &f3f0elem) - } - f3.UserIDsToAdd = f3f0 + f3.UserIDsToAdd = aws.StringSlice(resp.PendingChanges.UserIdsToAdd) } if resp.PendingChanges.UserIdsToRemove != nil { - f3f1 := []*string{} - for _, f3f1iter := range resp.PendingChanges.UserIdsToRemove { - var f3f1elem string - f3f1elem = *f3f1iter - f3f1 = append(f3f1, &f3f1elem) - } - f3.UserIDsToRemove = f3f1 + f3.UserIDsToRemove = aws.StringSlice(resp.PendingChanges.UserIdsToRemove) } ko.Status.PendingChanges = f3 } else { ko.Status.PendingChanges = nil } if resp.ReplicationGroups != nil { - f4 := []*string{} - for _, f4iter := range resp.ReplicationGroups { - var f4elem string - f4elem = *f4iter - f4 = append(f4, &f4elem) - } - ko.Status.ReplicationGroups = f4 + ko.Status.ReplicationGroups = aws.StringSlice(resp.ReplicationGroups) } else { ko.Status.ReplicationGroups = nil } + if resp.ServerlessCaches != nil { + ko.Status.ServerlessCaches = aws.StringSlice(resp.ServerlessCaches) + } else { + ko.Status.ServerlessCaches = nil + } if resp.Status != nil { ko.Status.Status = resp.Status } else { @@ -292,13 +263,7 @@ func (rm *resourceManager) sdkCreate( ko.Spec.UserGroupID = nil } if resp.UserIds != nil { - f7 := []*string{} - for _, f7iter := range resp.UserIds { - var f7elem string - f7elem = *f7iter - f7 = append(f7, &f7elem) - } - ko.Spec.UserIDs = f7 + ko.Spec.UserIDs = aws.StringSlice(resp.UserIds) } else { ko.Spec.UserIDs = nil } @@ -321,33 +286,27 @@ func (rm *resourceManager) newCreateRequestPayload( res := &svcsdk.CreateUserGroupInput{} if r.ko.Spec.Engine != nil { - res.SetEngine(*r.ko.Spec.Engine) + res.Engine = r.ko.Spec.Engine } if r.ko.Spec.Tags != nil { - f1 := []*svcsdk.Tag{} + f1 := []svcsdktypes.Tag{} for _, f1iter := range r.ko.Spec.Tags { - f1elem := &svcsdk.Tag{} + f1elem := &svcsdktypes.Tag{} if f1iter.Key != nil { - f1elem.SetKey(*f1iter.Key) + f1elem.Key = f1iter.Key } if f1iter.Value != nil { - f1elem.SetValue(*f1iter.Value) + f1elem.Value = f1iter.Value } - f1 = 
append(f1, f1elem) + f1 = append(f1, *f1elem) } - res.SetTags(f1) + res.Tags = f1 } if r.ko.Spec.UserGroupID != nil { - res.SetUserGroupId(*r.ko.Spec.UserGroupID) + res.UserGroupId = r.ko.Spec.UserGroupID } if r.ko.Spec.UserIDs != nil { - f3 := []*string{} - for _, f3iter := range r.ko.Spec.UserIDs { - var f3elem string - f3elem = *f3iter - f3 = append(f3, &f3elem) - } - res.SetUserIds(f3) + res.UserIds = aws.ToStringSlice(r.ko.Spec.UserIDs) } return res, nil @@ -380,7 +339,7 @@ func (rm *resourceManager) sdkDelete( } var resp *svcsdk.DeleteUserGroupOutput _ = resp - resp, err = rm.sdkapi.DeleteUserGroupWithContext(ctx, input) + resp, err = rm.sdkapi.DeleteUserGroup(ctx, input) rm.metrics.RecordAPICall("DELETE", "DeleteUserGroup", err) return nil, err } @@ -393,7 +352,7 @@ func (rm *resourceManager) newDeleteRequestPayload( res := &svcsdk.DeleteUserGroupInput{} if r.ko.Spec.UserGroupID != nil { - res.SetUserGroupId(*r.ko.Spec.UserGroupID) + res.UserGroupId = r.ko.Spec.UserGroupID } return res, nil @@ -501,11 +460,12 @@ func (rm *resourceManager) terminalAWSError(err error) bool { if err == nil { return false } - awsErr, ok := ackerr.AWSError(err) - if !ok { + + var terminalErr smithy.APIError + if !errors.As(err, &terminalErr) { return false } - switch awsErr.Code() { + switch terminalErr.ErrorCode() { case "DuplicateUserNameFault", "UserGroupAlreadyExistsFault", "InvalidParameterCombination", diff --git a/pkg/testutil/test_suite_config.go b/pkg/testutil/test_suite_config.go deleted file mode 100644 index 01a1c366..00000000 --- a/pkg/testutil/test_suite_config.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. 
This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package testutil - -// TestSuite represents instructions to run unit tests using test fixtures and mock service apis -type TestSuite struct { - Tests []TestConfig `json:"tests"` -} - -// TestConfig represents declarative unit test -type TestConfig struct { - Name string `json:"name"` - Description string `json:"description"` - Scenarios []TestScenario `json:"scenarios"` -} - -// TestScenario represents declarative test scenario details -type TestScenario struct { - Name string `json:"name"` - Description string `json:"description"` - // Fixture lets you specify test scenario given input fixtures - Fixture Fixture `json:"given"` - // UnitUnderTest lets you specify the unit to test - // For example resource manager API: ReadOne, Create, Update, Delete - UnitUnderTest string `json:"invoke"` - // Expect lets you specify test scenario expected outcome fixtures - Expect Expect `json:"expect"` -} - -// Fixture represents test scenario fixture to load from file paths -type Fixture struct { - // DesiredState lets you specify fixture path to load the desired state fixture - DesiredState string `json:"desired_state"` - // LatestState lets you specify fixture path to load the current state fixture - LatestState string `json:"latest_state"` - // ServiceAPIs lets you specify fixture path to mock service sdk api response - ServiceAPIs []ServiceAPI `json:"svc_api"` -} - -// ServiceAPI represents details about the the service sdk api and fixture path to mock its response -type ServiceAPI struct { - Operation string `json:"operation"` - Output string `json:"output_fixture"` - ServiceAPIError *ServiceAPIError `json:"error,omitempty"` -} - -// ServiceAPIError contains the specification for the error of the mock API response -type ServiceAPIError struct { - 
// Code here is usually the type of fault/error, not the HTTP status code - Code string `json:"code"` - Message string `json:"message"` -} - -// Expect represents test scenario expected outcome fixture to load from file path -type Expect struct { - LatestState string `json:"latest_state"` - // Error is a string matching the message of the expected error returned from the ResourceManager operation. - // Possible errors can be found in runtime/pkg/errors/error.go - Error string `json:"error"` -} diff --git a/pkg/testutil/test_suite_runner.go b/pkg/testutil/test_suite_runner.go deleted file mode 100644 index fbe872e7..00000000 --- a/pkg/testutil/test_suite_runner.go +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package testutil - -import ( - "context" - "errors" - "fmt" - "path/filepath" - "strings" - "testing" - - acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - - mocksvcsdkapi "github.com/aws-controllers-k8s/elasticache-controller/mocks/aws-sdk-go/elasticache" -) - -// TestSuiteRunner runs given test suite config with the help of delegate supplied to it -type TestSuiteRunner struct { - TestSuite *TestSuite - Delegate TestRunnerDelegate -} - -// fixtureContext is runtime context for test scenario given fixture. 
-type fixtureContext struct { - desired acktypes.AWSResource - latest acktypes.AWSResource - mocksdkapi *mocksvcsdkapi.ElastiCacheAPI - resourceManager acktypes.AWSResourceManager -} - -// TODO: remove if no longer used -// expectContext is runtime context for test scenario expectation fixture. -type expectContext struct { - latest acktypes.AWSResource - err error -} - -// TestRunnerDelegate provides interface for custom resource tests to implement. -// TestSuiteRunner depends on it to run tests for custom resource. -type TestRunnerDelegate interface { - ResourceDescriptor() acktypes.AWSResourceDescriptor - Equal(desired acktypes.AWSResource, latest acktypes.AWSResource) bool // remove it when ResourceDescriptor.Delta() is available - ResourceManager(*mocksvcsdkapi.ElastiCacheAPI) acktypes.AWSResourceManager - EmptyServiceAPIOutput(apiName string) (interface{}, error) - GoTestRunner() *testing.T -} - -// RunTests runs the tests from the test suite -func (runner *TestSuiteRunner) RunTests() { - if runner.TestSuite == nil || runner.Delegate == nil { - panic(errors.New("failed to run test suite")) - } - - for _, test := range runner.TestSuite.Tests { - fmt.Printf("Starting test: %s\n", test.Name) - for _, scenario := range test.Scenarios { - fmt.Printf("Running test scenario: %s\n", scenario.Name) - fixtureCxt := runner.setupFixtureContext(&scenario.Fixture) - runner.runTestScenario(scenario.Name, fixtureCxt, scenario.UnitUnderTest, &scenario.Expect) - } - fmt.Printf("Test: %s completed.\n", test.Name) - } -} - -// runTestScenario runs given test scenario which is expressed as: given fixture context, unit to test, expected fixture context. 
-func (runner *TestSuiteRunner) runTestScenario(scenarioName string, fixtureCxt *fixtureContext, unitUnderTest string, expectation *Expect) { - t := runner.Delegate.GoTestRunner() - t.Run(scenarioName, func(t *testing.T) { - rm := fixtureCxt.resourceManager - assert := assert.New(t) - - var actual acktypes.AWSResource = nil - var err error = nil - switch unitUnderTest { - case "ReadOne": - actual, err = rm.ReadOne(context.Background(), fixtureCxt.desired) - case "Create": - actual, err = rm.Create(context.Background(), fixtureCxt.desired) - case "Update": - delta := runner.Delegate.ResourceDescriptor().Delta(fixtureCxt.desired, fixtureCxt.latest) - actual, err = rm.Update(context.Background(), fixtureCxt.desired, fixtureCxt.latest, delta) - case "Delete": - actual, err = rm.Delete(context.Background(), fixtureCxt.desired) - default: - panic(errors.New(fmt.Sprintf("unit under test: %s not supported", unitUnderTest))) - } - runner.assertExpectations(assert, expectation, actual, err) - }) -} - -/* - assertExpectations validates the actual outcome against the expected outcome. - -There are two components to the expected outcome, corresponding to the return values of the resource manager's CRUD operation: - 1. the actual return value of type AWSResource ("expect.latest_state" in test_suite.yaml) - 2. the error ("expect.error" in test_suite.yaml) - -With each of these components, there are three possibilities in test_suite.yaml, which are interpreted as follows: - 1. the key does not exist, or was provided with no value: no explicit expectations, don't assert anything - 2. the key was provided with value "nil": explicit expectation; assert that the error or return value is nil - 3. the key was provided with value other than "nil": explicit expectation; assert that the value matches the - expected value - -However, if neither expect.latest_state nor error are provided, assertExpectations will fail the test case. 
-*/ -func (runner *TestSuiteRunner) assertExpectations(assert *assert.Assertions, expectation *Expect, actual acktypes.AWSResource, err error) { - if expectation.LatestState == "" && expectation.Error == "" { - fmt.Println("Invalid test case: no expectation given for either latest_state or error") - assert.True(false) - return - } - - // expectation exists for at least one of LatestState and Error; assert results independently - if expectation.LatestState == "nil" { - assert.Nil(actual) - } else if expectation.LatestState != "" { - expectedLatest := runner.loadAWSResource(expectation.LatestState) - assert.NotNil(actual) - - delta := runner.Delegate.ResourceDescriptor().Delta(expectedLatest, actual) - assert.Equal(0, len(delta.Differences)) - if len(delta.Differences) > 0 { - fmt.Println("Unexpected differences:") - for _, difference := range delta.Differences { - fmt.Printf("Path: %v, expected: %v, actual: %v\n", difference.Path, difference.A, difference.B) - } - } - - // Delta only contains `Spec` differences. Thus, we need Delegate.Equal to compare `Status`. - assert.True(runner.Delegate.Equal(expectedLatest, actual), "Expected status, spec details did not match with actual.") - } - - if expectation.Error == "nil" { - assert.Nil(err) - } else if expectation.Error != "" { - expectedError := errors.New(expectation.Error) - assert.NotNil(err) - - assert.Equal(expectedError.Error(), err.Error()) - } -} - -// setupFixtureContext provides runtime context for test scenario given fixture. 
-func (runner *TestSuiteRunner) setupFixtureContext(fixture *Fixture) *fixtureContext { - if fixture == nil { - return nil - } - var cxt = fixtureContext{} - if fixture.DesiredState != "" { - cxt.desired = runner.loadAWSResource(fixture.DesiredState) - } - if fixture.LatestState != "" { - cxt.latest = runner.loadAWSResource(fixture.LatestState) - } - mocksdkapi := &mocksvcsdkapi.ElastiCacheAPI{} - for _, serviceApi := range fixture.ServiceAPIs { - if serviceApi.Operation != "" { - - if serviceApi.ServiceAPIError != nil { - mockError := CreateAWSError(*serviceApi.ServiceAPIError) - mocksdkapi.On(serviceApi.Operation, mock.Anything, mock.Anything).Return(nil, mockError) - } else if serviceApi.Operation != "" && serviceApi.Output != "" { - var outputObj, err = runner.Delegate.EmptyServiceAPIOutput(serviceApi.Operation) - apiOutputFixturePath := append([]string{"testdata"}, strings.Split(serviceApi.Output, "/")...) - LoadFromFixture(filepath.Join(apiOutputFixturePath...), outputObj) - mocksdkapi.On(serviceApi.Operation, mock.Anything, mock.Anything).Return(outputObj, nil) - if err != nil { - panic(err) - } - } - } - } - cxt.mocksdkapi = mocksdkapi - cxt.resourceManager = runner.Delegate.ResourceManager(mocksdkapi) - return &cxt -} - -// loadAWSResource loads AWSResource from the supplied fixture file. -func (runner *TestSuiteRunner) loadAWSResource(resourceFixtureFilePath string) acktypes.AWSResource { - if resourceFixtureFilePath == "" { - panic(errors.New(fmt.Sprintf("resourceFixtureFilePath not specified"))) - } - var rd = runner.Delegate.ResourceDescriptor() - ro := rd.EmptyRuntimeObject() - path := append([]string{"testdata"}, strings.Split(resourceFixtureFilePath, "/")...) - LoadFromFixture(filepath.Join(path...), ro) - return rd.ResourceFromRuntimeObject(ro) -} diff --git a/pkg/testutil/util.go b/pkg/testutil/util.go deleted file mode 100644 index 25006859..00000000 --- a/pkg/testutil/util.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright Amazon.com Inc. 
or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package testutil - -import ( - "encoding/json" - "errors" - "fmt" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/ghodss/yaml" - "io/ioutil" - "path" - "strings" -) - -// LoadFromFixture fills an empty pointer variable with the -// data from a fixture JSON/YAML file. -func LoadFromFixture( - fixturePath string, - output interface{}, // output should be an addressable type (i.e. a pointer) -) { - contents, err := ioutil.ReadFile(fixturePath) - if err != nil { - panic(err) - } - if strings.HasSuffix(fixturePath, ".json") { - err = json.Unmarshal(contents, output) - } else if strings.HasSuffix(fixturePath, ".yaml") || - strings.HasSuffix(fixturePath, ".yml") { - err = yaml.Unmarshal(contents, output) - } else { - panic(errors.New( - fmt.Sprintf("fixture file format not supported: %s", path.Base(fixturePath)))) - } - if err != nil { - panic(err) - } -} - -// CreateAWSError is used for mocking the types of errors received from aws-sdk-go -// so that the expected code path executes. 
Support for specifying the HTTP status code and request ID -// can be added in the future if needed -func CreateAWSError(awsError ServiceAPIError) awserr.RequestFailure { - error := awserr.New(awsError.Code, awsError.Message, nil) - return awserr.NewRequestFailure(error, 0, "") -} diff --git a/pkg/testutil/util_test.go b/pkg/testutil/util_test.go deleted file mode 100644 index 2fedba01..00000000 --- a/pkg/testutil/util_test.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package testutil - -import ( - ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" - "github.com/stretchr/testify/assert" - "testing" -) - -func TestCreateAWSError(t *testing.T) { - assert := assert.New(t) - - // Basic case to test type conversion and extraction of error code/message - t.Run("CreateAWSError", func(t *testing.T) { - errorSpec := ServiceAPIError{Code: "ReplicationGroupNotFoundFault", Message: "ReplicationGroup rg-cmd not found"} - respErr := CreateAWSError(errorSpec) - - awsErr, ok := ackerr.AWSError(respErr) - - assert.True(ok) - assert.Equal("ReplicationGroupNotFoundFault", awsErr.Code()) - assert.Equal("ReplicationGroup rg-cmd not found", awsErr.Message()) - }) - -} diff --git a/pkg/util/engine_version_test.go b/pkg/util/engine_version_test.go deleted file mode 100644 index 8da8d33f..00000000 --- a/pkg/util/engine_version_test.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. 
All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package util_test - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/aws-controllers-k8s/elasticache-controller/pkg/util" -) - -func TestEngineVersionsMatch(t *testing.T) { - tests := []struct { - desiredVersion string - latestVersion string - expected bool - }{ - { - desiredVersion: "6.3", - latestVersion: "6.2.6", - expected: false, - }, - { - desiredVersion: "6.2", - latestVersion: "6.2.6", - expected: true, - }, - { - desiredVersion: "6.x", - latestVersion: "6.0.5", - expected: true, - }, - { - desiredVersion: "13.x", - latestVersion: "6.0.6", - expected: false, - }, - { - desiredVersion: "5.0.3", - latestVersion: "5.0.3", - expected: true, - }, - { - desiredVersion: "5.0.3", - latestVersion: "5.0.4", - expected: false, - }, - } - - for i, tt := range tests { - t.Run(fmt.Sprintf("test-%d", i+1), func(t *testing.T) { - require := require.New(t) - require.Equal(util.EngineVersionsMatch(tt.desiredVersion, tt.latestVersion), tt.expected) - }) - } -} diff --git a/scripts/install-mockery.sh b/scripts/install-mockery.sh deleted file mode 100755 index 462283ad..00000000 --- a/scripts/install-mockery.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env bash - -# A script that installs the mockery CLI tool that is used to build Go mocks -# for our interfaces to use in unit testing. This script installs mockery into -# the bin/mockery path and really should just be used in testing scripts. 
- -set -eo pipefail - -SCRIPTS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -ROOT_DIR="$SCRIPTS_DIR/.." -BIN_DIR="$ROOT_DIR/bin" - -OS=$(uname -s) -ARCH=$(uname -m) -VERSION=2.2.2 -MOCKERY_RELEASE_URL="https://github.com/vektra/mockery/releases/download/v${VERSION}/mockery_${VERSION}_${OS}_${ARCH}.tar.gz" - -if [[ ! -f $BIN_DIR/mockery ]]; then - echo -n "Installing mockery into bin/mockery ... " - mkdir -p $BIN_DIR - cd $BIN_DIR - wget -q --no-check-certificate --content-disposition $MOCKERY_RELEASE_URL -O mockery.tar.gz - tar -xf mockery.tar.gz - echo "ok." -fi From 19994cf53e5cb0405e1a2f45e930c1683112b169 Mon Sep 17 00:00:00 2001 From: Arush Sharma Date: Fri, 7 Feb 2025 14:43:27 -0800 Subject: [PATCH 2/2] refactor controller hooks/tests --- Makefile | 37 +- OWNERS_ALIASES | 1 + apis/v1alpha1/ack-generate-metadata.yaml | 10 +- apis/v1alpha1/cache_subnet_group.go | 5 - apis/v1alpha1/generator.yaml | 37 +- apis/v1alpha1/replication_group.go | 35 - apis/v1alpha1/types.go | 14 +- apis/v1alpha1/user.go | 2 - apis/v1alpha1/user_group.go | 4 - apis/v1alpha1/zz_generated.deepcopy.go | 114 - config/controller/kustomization.yaml | 2 +- ...icache.services.k8s.aws_cacheclusters.yaml | 10 + ...he.services.k8s.aws_cachesubnetgroups.yaml | 14 +- ...he.services.k8s.aws_replicationgroups.yaml | 57 +- ...asticache.services.k8s.aws_usergroups.yaml | 7 - .../elasticache.services.k8s.aws_users.yaml | 10 - generator.yaml | 37 +- go.mod | 4 - go.sum | 2 - helm/Chart.yaml | 4 +- ...he.services.k8s.aws_cachesubnetgroups.yaml | 12 - ...he.services.k8s.aws_replicationgroups.yaml | 51 - ...asticache.services.k8s.aws_usergroups.yaml | 7 - .../elasticache.services.k8s.aws_users.yaml | 10 - helm/templates/NOTES.txt | 2 +- helm/values.yaml | 2 +- metadata.yaml | 2 +- .../aws-sdk-go/elasticache/ElastiCacheAPI.go | 7362 ----------------- .../cache_cluster/custom_set_output.go | 14 - pkg/resource/cache_cluster/hooks.go | 15 + pkg/resource/cache_cluster/sdk.go | 4 +- 
.../custom_set_output.go | 59 - .../custom_update_api.go | 122 - .../{custom_api.go => hooks.go} | 216 +- .../{custom_set_output.go => hooks.go} | 29 +- pkg/resource/cache_subnet_group/sdk.go | 60 - pkg/resource/replication_group/annotations.go | 35 - .../replication_group/custom_set_output.go | 361 - .../replication_group/custom_update_api.go | 772 -- pkg/resource/replication_group/delta.go | 35 - pkg/resource/replication_group/delta_util.go | 163 - pkg/resource/replication_group/hooks.go | 1391 +++- .../replication_group/post_set_output.go | 154 - pkg/resource/replication_group/sdk.go | 1251 ++- .../snapshot/custom_set_conditions.go | 60 - pkg/resource/snapshot/custom_set_output.go | 105 - pkg/resource/snapshot/custom_update_api.go | 30 - .../{custom_create_api.go => hooks.go} | 179 +- pkg/resource/user/custom_set_output.go | 58 - pkg/resource/user/custom_update.go | 42 - pkg/resource/user/delta.go | 18 - pkg/resource/user/delta_util.go | 37 - pkg/resource/user/hooks.go | 166 + pkg/resource/user/post_build_request.go | 39 - pkg/resource/user/post_set_output.go | 63 - pkg/resource/user/sdk.go | 66 +- pkg/resource/user_group/custom_set_output.go | 90 - .../{custom_update_api.go => hooks.go} | 115 +- pkg/resource/user_group/sdk.go | 10 - pkg/util/tags.go | 22 +- .../sdk_update_post_set_output.go.tpl | 2 +- .../sdk_delete_post_request.go.tpl | 2 +- .../replication_group/sdk_file_end.go.tpl | 3 +- ...k_file_end_set_output_post_populate.go.tpl | 1 + .../sdk_update_post_build_request.go.tpl | 18 +- test/e2e/bootstrap_resources.py | 2 +- .../resources/replicationgroup_authtoken.yaml | 17 - .../resources/replicationgroup_cme_ngc.yaml | 24 - ...ml => replicationgroup_create_delete.yaml} | 0 .../replicationgroup_input_coverage.yaml | 56 - .../replicationgroup_largecluster.yaml | 11 - test/e2e/resources/replicationgroup_rpng.yaml | 15 - ...misc.yaml => replicationgroup_update.yaml} | 4 +- test/e2e/service_cleanup.py | 2 +- test/e2e/tests/test_cache_cluster.py | 6 + 
test/e2e/tests/test_replicationgroup.py | 695 +- .../test_replicationgroup_largecluster.py | 74 - test/e2e/tests/test_user.py | 2 - test/e2e/util.py | 12 +- 79 files changed, 2853 insertions(+), 11728 deletions(-) delete mode 100644 mocks/aws-sdk-go/elasticache/ElastiCacheAPI.go delete mode 100644 pkg/resource/cache_cluster/custom_set_output.go delete mode 100644 pkg/resource/cache_parameter_group/custom_set_output.go delete mode 100644 pkg/resource/cache_parameter_group/custom_update_api.go rename pkg/resource/cache_parameter_group/{custom_api.go => hooks.go} (54%) rename pkg/resource/cache_subnet_group/{custom_set_output.go => hooks.go} (77%) delete mode 100644 pkg/resource/replication_group/annotations.go delete mode 100644 pkg/resource/replication_group/custom_set_output.go delete mode 100644 pkg/resource/replication_group/custom_update_api.go delete mode 100644 pkg/resource/replication_group/delta_util.go delete mode 100644 pkg/resource/replication_group/post_set_output.go delete mode 100644 pkg/resource/snapshot/custom_set_conditions.go delete mode 100644 pkg/resource/snapshot/custom_set_output.go delete mode 100644 pkg/resource/snapshot/custom_update_api.go rename pkg/resource/snapshot/{custom_create_api.go => hooks.go} (52%) delete mode 100644 pkg/resource/user/custom_set_output.go delete mode 100644 pkg/resource/user/custom_update.go delete mode 100644 pkg/resource/user/delta_util.go create mode 100644 pkg/resource/user/hooks.go delete mode 100644 pkg/resource/user/post_build_request.go delete mode 100644 pkg/resource/user/post_set_output.go delete mode 100644 pkg/resource/user_group/custom_set_output.go rename pkg/resource/user_group/{custom_update_api.go => hooks.go} (63%) create mode 100644 templates/hooks/replication_group/sdk_file_end_set_output_post_populate.go.tpl delete mode 100644 test/e2e/resources/replicationgroup_authtoken.yaml delete mode 100644 test/e2e/resources/replicationgroup_cme_ngc.yaml rename 
test/e2e/resources/{replicationgroup_cmd_update.yaml => replicationgroup_create_delete.yaml} (100%) delete mode 100644 test/e2e/resources/replicationgroup_input_coverage.yaml delete mode 100644 test/e2e/resources/replicationgroup_largecluster.yaml delete mode 100644 test/e2e/resources/replicationgroup_rpng.yaml rename test/e2e/resources/{replicationgroup_cme_misc.yaml => replicationgroup_update.yaml} (65%) delete mode 100644 test/e2e/tests/test_replicationgroup_largecluster.py diff --git a/Makefile b/Makefile index 57ad62ba..8eb9a3b5 100644 --- a/Makefile +++ b/Makefile @@ -3,11 +3,6 @@ SHELL := /bin/bash # Use bash syntax # Set up variables GO111MODULE=on -AWS_SDK_GO_VERSION="$(shell echo $(shell go list -m -f '{{.Version}}' github.com/aws/aws-sdk-go))" -AWS_SDK_GO_VERSIONED_PATH="$(shell echo github.com/aws/aws-sdk-go@$(AWS_SDK_GO_VERSION))" -ELASTICACHE_API_PATH="$(shell echo $(shell go env GOPATH))/pkg/mod/$(AWS_SDK_GO_VERSIONED_PATH)/service/elasticache/elasticacheiface" -SERVICE_CONTROLLER_SRC_PATH="$(shell pwd)" - # Build ldflags VERSION ?= "v0.0.0" GITCOMMIT=$(shell git rev-parse HEAD) @@ -16,34 +11,22 @@ GO_LDFLAGS=-ldflags "-X main.version=$(VERSION) \ -X main.buildHash=$(GITCOMMIT) \ -X main.buildDate=$(BUILDDATE)" -.PHONY: all test local-test clean-mocks mocks +.PHONY: all test local-test all: test -test: | mocks ## Run code tests - go test -v ./... +local-run-controller: ## Run a controller image locally for SERVICE + @go run ./cmd/controller/main.go \ + --aws-region=us-west-2 \ + --enable-development-logging \ + --log-level=debug -test-cover: | mocks ## Run code tests with resources coverage - go test -coverpkg=./pkg/resource/... -covermode=count -coverprofile=coverage.out ./... - go tool cover -func=coverage.out +test: ## Run code tests + go test -v ./... -local-test: | mocks ## Run code tests using go.local.mod file +local-test: ## Run code tests using go.local.mod file go test -modfile=go.local.mod -v ./... 
-clean-mocks: ## Remove mocks directory - rm -rf mocks - -install-mockery: - @scripts/install-mockery.sh - -mocks: install-mockery ## Build mocks - go get -d $(AWS_SDK_GO_VERSIONED_PATH) - @echo "building mocks for $(ELASTICACHE_API_PATH) ... " - @pushd $(ELASTICACHE_API_PATH) 1>/dev/null; \ - $(SERVICE_CONTROLLER_SRC_PATH)/bin/mockery --all --dir=. --output=$(SERVICE_CONTROLLER_SRC_PATH)/mocks/aws-sdk-go/elasticache/ ; \ - popd 1>/dev/null; - @echo "ok." - help: ## Show this help. @grep -F -h "##" $(MAKEFILE_LIST) | grep -F -v grep | sed -e 's/\\$$//' \ - | awk -F'[:#]' '{print $$1 = sprintf("%-30s", $$1), $$4}' + | awk -F'[:#]' '{print $$1 = sprintf("%-30s", $$1), $$4}' \ No newline at end of file diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 127083e5..3332f9fa 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -6,6 +6,7 @@ aliases: - jaypipes - mhausenblas - a-hilaly + - rushmash91 - RedbackThomson - vijtrip2 # TODO: Add your team members to your team controller alias diff --git a/apis/v1alpha1/ack-generate-metadata.yaml b/apis/v1alpha1/ack-generate-metadata.yaml index 580140a8..32b410f1 100755 --- a/apis/v1alpha1/ack-generate-metadata.yaml +++ b/apis/v1alpha1/ack-generate-metadata.yaml @@ -1,13 +1,13 @@ ack_generate_info: - build_date: "2025-02-07T19:16:11Z" - build_hash: 3d74f13b9de7134b4c76ab7526a9c578c4857602 + build_date: "2025-02-17T19:36:08Z" + build_hash: 8762917215d9902b2011a2b0b1b0c776855a683e go_version: go1.23.4 - version: v0.41.0-18-g3d74f13 -api_directory_checksum: eb643965cba3c68f76c1b45f100874d07d39c935 + version: v0.42.0 +api_directory_checksum: 0f68037c7970cd69c9365d191e78762f7ed5a7c5 api_version: v1alpha1 aws_sdk_go_version: v1.32.6 generator_config_info: - file_checksum: 3c359b3f45716af86c99ab2ea0f2ab50eeae5dc9 + file_checksum: 6cf2b7211e26d4a764cd0c6e0959cc77b2b6cbed original_file_name: generator.yaml last_modification: reason: API generation diff --git a/apis/v1alpha1/cache_subnet_group.go b/apis/v1alpha1/cache_subnet_group.go index 
150f4ffa..0286553e 100644 --- a/apis/v1alpha1/cache_subnet_group.go +++ b/apis/v1alpha1/cache_subnet_group.go @@ -67,11 +67,6 @@ type CacheSubnetGroupStatus struct { // A list of subnets associated with the cache subnet group. // +kubebuilder:validation:Optional Subnets []*Subnet `json:"subnets,omitempty"` - // Either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Valkey - // 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine - // version 1.6.6 and above on all instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). - // +kubebuilder:validation:Optional - SupportedNetworkTypes []*string `json:"supportedNetworkTypes,omitempty"` // The Amazon Virtual Private Cloud identifier (VPC ID) of the cache subnet // group. // +kubebuilder:validation:Optional diff --git a/apis/v1alpha1/generator.yaml b/apis/v1alpha1/generator.yaml index 4183d7a4..f5e398b3 100644 --- a/apis/v1alpha1/generator.yaml +++ b/apis/v1alpha1/generator.yaml @@ -173,6 +173,7 @@ resources: PrimaryClusterId: # note: "PrimaryClusterID" will not function properly compare: is_ignored: true + hooks: sdk_read_many_post_set_output: template_path: hooks/replication_group/sdk_read_many_post_set_output.go.tpl @@ -189,7 +190,7 @@ resources: sdk_file_end: template_path: hooks/replication_group/sdk_file_end.go.tpl sdk_file_end_set_output_post_populate: - code: "rm.customSetOutput(obj, ko) // custom set output from obj" + code: "rm.customSetOutput(ctx, *obj, ko) // custom set output from obj" renames: operations: CreateReplicationGroup: @@ -303,7 +304,7 @@ operations: custom_implementation: CustomModifyReplicationGroup set_output_custom_method_name: CustomModifyReplicationGroupSetOutput override_values: - ApplyImmediately: true + ApplyImmediately: aws.Bool(true) CreateSnapshot: custom_implementation: CustomCreateSnapshot set_output_custom_method_name: CustomCreateSnapshotSetOutput @@ -327,7 +328,7 @@ operations: ModifyCacheCluster: set_output_custom_method_name: 
customModifyCacheClusterSetOutput override_values: - ApplyImmediately: true + ApplyImmediately: aws.Bool(true) ignore: resource_names: - ServerlessCache @@ -348,4 +349,32 @@ ignore: - CreateReplicationGroupInput.AutoMinorVersionUpgrade - CreateReplicationGroupInput.NumCacheClusters - CacheCluster.LogDeliveryConfigurations - - PendingModifiedValues.LogDeliveryConfigurations \ No newline at end of file + - PendingModifiedValues.LogDeliveryConfigurations + - CreateUserInput.AuthenticationMode + - ModifyUserInput.AuthenticationMode + - CreateCacheSubnetGroupOutput.CacheSubnetGroup.SupportedNetworkTypes + - CreateCacheSubnetGroupOutput.CacheSubnetGroup.Subnets.SupportedNetworkTypes + - ModifyCacheSubnetGroupOutput.CacheSubnetGroup.Subnets.SupportedNetworkTypes + - CreateUserGroupOutput.ServerlessCaches + - CreateReplicationGroupOutput.ReplicationGroup.PendingModifiedValues.TransitEncryptionEnabled + - ModifyReplicationGroupOutput.ReplicationGroup.PendingModifiedValues.TransitEncryptionEnabled + # - CreateReplicationGroupOutput.ReplicationGroup.TransitEncryptionEnabled + # - ModifyReplicationGroupInput.TransitEncryptionEnabled + # - CreateReplicationGroupInput.TransitEncryptionEnabled + - CreateReplicationGroupOutput.ReplicationGroup.PendingModifiedValues.TransitEncryptionMode + - CreateReplicationGroupOutput.ReplicationGroup.TransitEncryptionMode + - CreateReplicationGroupInput.TransitEncryptionMode + - ModifyReplicationGroupOutput.ReplicationGroup.PendingModifiedValues.TransitEncryptionMode + - CreateReplicationGroupOutput.ReplicationGroup.ClusterMode + - CreateReplicationGroupOutput.ReplicationGroup.PendingModifiedValues.ClusterMode + - CreateReplicationGroupInput.ClusterMode + - ModifyReplicationGroupOutput.ReplicationGroup.ClusterMode + - ModifyReplicationGroupOutput.ReplicationGroup.PendingModifiedValues.ClusterMode + - CreateReplicationGroupInput.IpDiscovery + - CreateReplicationGroupOutput.ReplicationGroup.IpDiscovery + - Subnet.SupportedNetworkTypes + - 
CreateReplicationGroupInput.ServerlessCacheSnapshotName + - CreateReplicationGroupOutput.ReplicationGroup.NetworkType + - CreateReplicationGroupInput.NetworkType + # - ModifyReplicationGroupOutput.ReplicationGroup.ipDiscovery + # - ModifyReplicationGroupInput.ipDiscovery \ No newline at end of file diff --git a/apis/v1alpha1/replication_group.go b/apis/v1alpha1/replication_group.go index c35db1a9..571b7ccf 100644 --- a/apis/v1alpha1/replication_group.go +++ b/apis/v1alpha1/replication_group.go @@ -143,13 +143,6 @@ type ReplicationGroupSpec struct { // see Subnets and Subnet Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SubnetGroups.html). CacheSubnetGroupName *string `json:"cacheSubnetGroupName,omitempty"` CacheSubnetGroupRef *ackv1alpha1.AWSResourceReferenceWrapper `json:"cacheSubnetGroupRef,omitempty"` - // Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you - // must first set the cluster mode to Compatible. Compatible mode allows your - // Valkey or Redis OSS clients to connect using both cluster mode enabled and - // cluster mode disabled. After you migrate all Valkey or Redis OSS clients - // to use cluster mode enabled, you can then complete cluster mode configuration - // and set the cluster mode to Enabled. - ClusterMode *string `json:"clusterMode,omitempty"` // Enables data tiering. Data tiering is only supported for replication groups // using the r6gd node type. This parameter must be set to true when using r6gd // nodes. For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/data-tiering.html). @@ -171,11 +164,6 @@ type ReplicationGroupSpec struct { // existing cluster or replication group and create it anew with the earlier // engine version. EngineVersion *string `json:"engineVersion,omitempty"` - // The network type you choose when creating a replication group, either ipv4 - // | ipv6. 
IPv6 is supported for workloads using Valkey 7.2 and above, Redis - // OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above - // on all instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). - IPDiscovery *string `json:"ipDiscovery,omitempty"` // The ID of the KMS key used to encrypt the disk in the cluster. KMSKeyID *string `json:"kmsKeyID,omitempty"` // Specifies the destination, format and type of the logs. @@ -183,11 +171,6 @@ type ReplicationGroupSpec struct { // A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. // For more information, see Minimizing Downtime: Multi-AZ (http://docs.aws.amazon.com/AmazonElastiCache/latest/dg/AutoFailover.html). MultiAZEnabled *bool `json:"multiAZEnabled,omitempty"` - // Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads - // using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached - // engine version 1.6.6 and above on all instances built on the Nitro system - // (http://aws.amazon.com/ec2/nitro/). - NetworkType *string `json:"networkType,omitempty"` // A list of node group (shard) configuration options. Each node group (shard) // configuration has the following members: PrimaryAvailabilityZone, ReplicaAvailabilityZones, // ReplicaCount, and Slots. @@ -279,9 +262,6 @@ type ReplicationGroupSpec struct { // Virtual Private Cloud (Amazon VPC). SecurityGroupIDs []*string `json:"securityGroupIDs,omitempty"` SecurityGroupRefs []*ackv1alpha1.AWSResourceReferenceWrapper `json:"securityGroupRefs,omitempty"` - // The name of the snapshot used to create a replication group. Available for - // Valkey, Redis OSS only. - ServerlessCacheSnapshotName *string `json:"serverlessCacheSnapshotName,omitempty"` // A list of Amazon Resource Names (ARN) that uniquely identify the Valkey or // Redis OSS RDB snapshot files stored in Amazon S3. The snapshot files are // used to populate the new replication group. 
The Amazon S3 object name in @@ -331,21 +311,6 @@ type ReplicationGroupSpec struct { // For HIPAA compliance, you must specify TransitEncryptionEnabled as true, // an AuthToken, and a CacheSubnetGroup. TransitEncryptionEnabled *bool `json:"transitEncryptionEnabled,omitempty"` - // A setting that allows you to migrate your clients to use in-transit encryption, - // with no downtime. - // - // When setting TransitEncryptionEnabled to true, you can set your TransitEncryptionMode - // to preferred in the same request, to allow both encrypted and unencrypted - // connections at the same time. Once you migrate all your Valkey or Redis OSS - // clients to use encrypted connections you can modify the value to required - // to allow encrypted connections only. - // - // Setting TransitEncryptionMode to required is a two-step process that requires - // you to first set the TransitEncryptionMode to preferred, after that you can - // set TransitEncryptionMode to required. - // - // This process will not trigger the replacement of the replication group. - TransitEncryptionMode *string `json:"transitEncryptionMode,omitempty"` // The user group to associate with the replication group. UserGroupIDs []*string `json:"userGroupIDs,omitempty"` } diff --git a/apis/v1alpha1/types.go b/apis/v1alpha1/types.go index bb7254ef..79f8c59a 100644 --- a/apis/v1alpha1/types.go +++ b/apis/v1alpha1/types.go @@ -37,7 +37,6 @@ type Authentication struct { // Specifies the authentication mode to use. type AuthenticationMode struct { Passwords []*string `json:"passwords,omitempty"` - Type *string `json:"type_,omitempty"` } // Describes an Availability Zone in which the cluster is launched. 
@@ -247,7 +246,6 @@ type CacheSubnetGroup_SDK struct { CacheSubnetGroupDescription *string `json:"cacheSubnetGroupDescription,omitempty"` CacheSubnetGroupName *string `json:"cacheSubnetGroupName,omitempty"` Subnets []*Subnet `json:"subnets,omitempty"` - SupportedNetworkTypes []*string `json:"supportedNetworkTypes,omitempty"` VPCID *string `json:"vpcID,omitempty"` } @@ -549,13 +547,10 @@ type RegionalConfiguration struct { type ReplicationGroupPendingModifiedValues struct { AuthTokenStatus *string `json:"authTokenStatus,omitempty"` AutomaticFailoverStatus *string `json:"automaticFailoverStatus,omitempty"` - ClusterMode *string `json:"clusterMode,omitempty"` LogDeliveryConfigurations []*PendingLogDeliveryConfiguration `json:"logDeliveryConfigurations,omitempty"` PrimaryClusterID *string `json:"primaryClusterID,omitempty"` // The status of an online resharding operation. - Resharding *ReshardingStatus `json:"resharding,omitempty"` - TransitEncryptionEnabled *bool `json:"transitEncryptionEnabled,omitempty"` - TransitEncryptionMode *string `json:"transitEncryptionMode,omitempty"` + Resharding *ReshardingStatus `json:"resharding,omitempty"` // The status of the user group update. UserGroups *UserGroupsUpdateStatus `json:"userGroups,omitempty"` } @@ -571,7 +566,6 @@ type ReplicationGroup_SDK struct { AutomaticFailover *string `json:"automaticFailover,omitempty"` CacheNodeType *string `json:"cacheNodeType,omitempty"` ClusterEnabled *bool `json:"clusterEnabled,omitempty"` - ClusterMode *string `json:"clusterMode,omitempty"` // Represents the information required for client programs to connect to a cache // node. This value is read-only. ConfigurationEndpoint *Endpoint `json:"configurationEndpoint,omitempty"` @@ -581,13 +575,11 @@ type ReplicationGroup_SDK struct { // The name of the Global datastore and role of this replication group in the // Global datastore. 
GlobalReplicationGroupInfo *GlobalReplicationGroupInfo `json:"globalReplicationGroupInfo,omitempty"` - IPDiscovery *string `json:"ipDiscovery,omitempty"` KMSKeyID *string `json:"kmsKeyID,omitempty"` LogDeliveryConfigurations []*LogDeliveryConfiguration `json:"logDeliveryConfigurations,omitempty"` MemberClusters []*string `json:"memberClusters,omitempty"` MemberClustersOutpostARNs []*string `json:"memberClustersOutpostARNs,omitempty"` MultiAZ *string `json:"multiAZ,omitempty"` - NetworkType *string `json:"networkType,omitempty"` NodeGroups []*NodeGroup `json:"nodeGroups,omitempty"` // The settings to be applied to the Valkey or Redis OSS replication group, // either immediately or during the next maintenance window. @@ -599,7 +591,6 @@ type ReplicationGroup_SDK struct { SnapshottingClusterID *string `json:"snapshottingClusterID,omitempty"` Status *string `json:"status,omitempty"` TransitEncryptionEnabled *bool `json:"transitEncryptionEnabled,omitempty"` - TransitEncryptionMode *string `json:"transitEncryptionMode,omitempty"` UserGroupIDs []*string `json:"userGroupIDs,omitempty"` } @@ -751,8 +742,7 @@ type Subnet struct { SubnetAvailabilityZone *AvailabilityZone `json:"subnetAvailabilityZone,omitempty"` SubnetIdentifier *string `json:"subnetIdentifier,omitempty"` // The ID of the outpost subnet. - SubnetOutpost *SubnetOutpost `json:"subnetOutpost,omitempty"` - SupportedNetworkTypes []*string `json:"supportedNetworkTypes,omitempty"` + SubnetOutpost *SubnetOutpost `json:"subnetOutpost,omitempty"` } // The ID of the outpost subnet. diff --git a/apis/v1alpha1/user.go b/apis/v1alpha1/user.go index 899bbb25..e0c1bfe4 100644 --- a/apis/v1alpha1/user.go +++ b/apis/v1alpha1/user.go @@ -28,8 +28,6 @@ type UserSpec struct { // Access permissions string used for this user. // +kubebuilder:validation:Required AccessString *string `json:"accessString"` - // Specifies how to authenticate the user. 
- AuthenticationMode *AuthenticationMode `json:"authenticationMode,omitempty"` // The current supported value is Redis. // +kubebuilder:validation:Required Engine *string `json:"engine"` diff --git a/apis/v1alpha1/user_group.go b/apis/v1alpha1/user_group.go index af23842e..ea432a4e 100644 --- a/apis/v1alpha1/user_group.go +++ b/apis/v1alpha1/user_group.go @@ -61,10 +61,6 @@ type UserGroupStatus struct { // A list of replication groups that the user group can access. // +kubebuilder:validation:Optional ReplicationGroups []*string `json:"replicationGroups,omitempty"` - // Indicates which serverless caches the specified user group is associated - // with. Available for Valkey, Redis OSS and Serverless Memcached only. - // +kubebuilder:validation:Optional - ServerlessCaches []*string `json:"serverlessCaches,omitempty"` // Indicates user group status. Can be "creating", "active", "modifying", "deleting". // +kubebuilder:validation:Optional Status *string `json:"status,omitempty"` diff --git a/apis/v1alpha1/zz_generated.deepcopy.go b/apis/v1alpha1/zz_generated.deepcopy.go index e2afe033..adca7530 100644 --- a/apis/v1alpha1/zz_generated.deepcopy.go +++ b/apis/v1alpha1/zz_generated.deepcopy.go @@ -63,11 +63,6 @@ func (in *AuthenticationMode) DeepCopyInto(out *AuthenticationMode) { } } } - if in.Type != nil { - in, out := &in.Type, &out.Type - *out = new(string) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationMode. 
@@ -1381,17 +1376,6 @@ func (in *CacheSubnetGroupStatus) DeepCopyInto(out *CacheSubnetGroupStatus) { } } } - if in.SupportedNetworkTypes != nil { - in, out := &in.SupportedNetworkTypes, &out.SupportedNetworkTypes - *out = make([]*string, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } - } - } if in.VPCID != nil { in, out := &in.VPCID, &out.VPCID *out = new(string) @@ -1438,17 +1422,6 @@ func (in *CacheSubnetGroup_SDK) DeepCopyInto(out *CacheSubnetGroup_SDK) { } } } - if in.SupportedNetworkTypes != nil { - in, out := &in.SupportedNetworkTypes, &out.SupportedNetworkTypes - *out = make([]*string, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } - } - } if in.VPCID != nil { in, out := &in.VPCID, &out.VPCID *out = new(string) @@ -2678,11 +2651,6 @@ func (in *ReplicationGroupPendingModifiedValues) DeepCopyInto(out *ReplicationGr *out = new(string) **out = **in } - if in.ClusterMode != nil { - in, out := &in.ClusterMode, &out.ClusterMode - *out = new(string) - **out = **in - } if in.LogDeliveryConfigurations != nil { in, out := &in.LogDeliveryConfigurations, &out.LogDeliveryConfigurations *out = make([]*PendingLogDeliveryConfiguration, len(*in)) @@ -2704,16 +2672,6 @@ func (in *ReplicationGroupPendingModifiedValues) DeepCopyInto(out *ReplicationGr *out = new(ReshardingStatus) (*in).DeepCopyInto(*out) } - if in.TransitEncryptionEnabled != nil { - in, out := &in.TransitEncryptionEnabled, &out.TransitEncryptionEnabled - *out = new(bool) - **out = **in - } - if in.TransitEncryptionMode != nil { - in, out := &in.TransitEncryptionMode, &out.TransitEncryptionMode - *out = new(string) - **out = **in - } if in.UserGroups != nil { in, out := &in.UserGroups, &out.UserGroups *out = new(UserGroupsUpdateStatus) @@ -2785,11 +2743,6 @@ func (in *ReplicationGroupSpec) DeepCopyInto(out *ReplicationGroupSpec) { *out = 
new(corev1alpha1.AWSResourceReferenceWrapper) (*in).DeepCopyInto(*out) } - if in.ClusterMode != nil { - in, out := &in.ClusterMode, &out.ClusterMode - *out = new(string) - **out = **in - } if in.DataTieringEnabled != nil { in, out := &in.DataTieringEnabled, &out.DataTieringEnabled *out = new(bool) @@ -2810,11 +2763,6 @@ func (in *ReplicationGroupSpec) DeepCopyInto(out *ReplicationGroupSpec) { *out = new(string) **out = **in } - if in.IPDiscovery != nil { - in, out := &in.IPDiscovery, &out.IPDiscovery - *out = new(string) - **out = **in - } if in.KMSKeyID != nil { in, out := &in.KMSKeyID, &out.KMSKeyID *out = new(string) @@ -2836,11 +2784,6 @@ func (in *ReplicationGroupSpec) DeepCopyInto(out *ReplicationGroupSpec) { *out = new(bool) **out = **in } - if in.NetworkType != nil { - in, out := &in.NetworkType, &out.NetworkType - *out = new(string) - **out = **in - } if in.NodeGroupConfiguration != nil { in, out := &in.NodeGroupConfiguration, &out.NodeGroupConfiguration *out = make([]*NodeGroupConfiguration, len(*in)) @@ -2920,11 +2863,6 @@ func (in *ReplicationGroupSpec) DeepCopyInto(out *ReplicationGroupSpec) { } } } - if in.ServerlessCacheSnapshotName != nil { - in, out := &in.ServerlessCacheSnapshotName, &out.ServerlessCacheSnapshotName - *out = new(string) - **out = **in - } if in.SnapshotARNs != nil { in, out := &in.SnapshotARNs, &out.SnapshotARNs *out = make([]*string, len(*in)) @@ -2967,11 +2905,6 @@ func (in *ReplicationGroupSpec) DeepCopyInto(out *ReplicationGroupSpec) { *out = new(bool) **out = **in } - if in.TransitEncryptionMode != nil { - in, out := &in.TransitEncryptionMode, &out.TransitEncryptionMode - *out = new(string) - **out = **in - } if in.UserGroupIDs != nil { in, out := &in.UserGroupIDs, &out.UserGroupIDs *out = make([]*string, len(*in)) @@ -3208,11 +3141,6 @@ func (in *ReplicationGroup_SDK) DeepCopyInto(out *ReplicationGroup_SDK) { *out = new(bool) **out = **in } - if in.ClusterMode != nil { - in, out := &in.ClusterMode, &out.ClusterMode - *out = 
new(string) - **out = **in - } if in.ConfigurationEndpoint != nil { in, out := &in.ConfigurationEndpoint, &out.ConfigurationEndpoint *out = new(Endpoint) @@ -3238,11 +3166,6 @@ func (in *ReplicationGroup_SDK) DeepCopyInto(out *ReplicationGroup_SDK) { *out = new(GlobalReplicationGroupInfo) (*in).DeepCopyInto(*out) } - if in.IPDiscovery != nil { - in, out := &in.IPDiscovery, &out.IPDiscovery - *out = new(string) - **out = **in - } if in.KMSKeyID != nil { in, out := &in.KMSKeyID, &out.KMSKeyID *out = new(string) @@ -3286,11 +3209,6 @@ func (in *ReplicationGroup_SDK) DeepCopyInto(out *ReplicationGroup_SDK) { *out = new(string) **out = **in } - if in.NetworkType != nil { - in, out := &in.NetworkType, &out.NetworkType - *out = new(string) - **out = **in - } if in.NodeGroups != nil { in, out := &in.NodeGroups, &out.NodeGroups *out = make([]*NodeGroup, len(*in)) @@ -3341,11 +3259,6 @@ func (in *ReplicationGroup_SDK) DeepCopyInto(out *ReplicationGroup_SDK) { *out = new(bool) **out = **in } - if in.TransitEncryptionMode != nil { - in, out := &in.TransitEncryptionMode, &out.TransitEncryptionMode - *out = new(string) - **out = **in - } if in.UserGroupIDs != nil { in, out := &in.UserGroupIDs, &out.UserGroupIDs *out = make([]*string, len(*in)) @@ -4263,17 +4176,6 @@ func (in *Subnet) DeepCopyInto(out *Subnet) { *out = new(SubnetOutpost) (*in).DeepCopyInto(*out) } - if in.SupportedNetworkTypes != nil { - in, out := &in.SupportedNetworkTypes, &out.SupportedNetworkTypes - *out = make([]*string, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } - } - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subnet. 
@@ -4665,17 +4567,6 @@ func (in *UserGroupStatus) DeepCopyInto(out *UserGroupStatus) { } } } - if in.ServerlessCaches != nil { - in, out := &in.ServerlessCaches, &out.ServerlessCaches - *out = make([]*string, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } - } - } if in.Status != nil { in, out := &in.Status, &out.Status *out = new(string) @@ -4848,11 +4739,6 @@ func (in *UserSpec) DeepCopyInto(out *UserSpec) { *out = new(string) **out = **in } - if in.AuthenticationMode != nil { - in, out := &in.AuthenticationMode, &out.AuthenticationMode - *out = new(AuthenticationMode) - (*in).DeepCopyInto(*out) - } if in.Engine != nil { in, out := &in.Engine, &out.Engine *out = new(string) diff --git a/config/controller/kustomization.yaml b/config/controller/kustomization.yaml index 5c69fbb5..38a413ef 100644 --- a/config/controller/kustomization.yaml +++ b/config/controller/kustomization.yaml @@ -6,4 +6,4 @@ kind: Kustomization images: - name: controller newName: public.ecr.aws/aws-controllers-k8s/elasticache-controller - newTag: 1.6.0 + newTag: 0.1.0 diff --git a/config/crd/bases/elasticache.services.k8s.aws_cacheclusters.yaml b/config/crd/bases/elasticache.services.k8s.aws_cacheclusters.yaml index ece8c670..d28e6014 100644 --- a/config/crd/bases/elasticache.services.k8s.aws_cacheclusters.yaml +++ b/config/crd/bases/elasticache.services.k8s.aws_cacheclusters.yaml @@ -195,6 +195,8 @@ spec: properties: name: type: string + namespace: + type: string type: object type: object cacheSecurityGroupNames: @@ -230,6 +232,8 @@ spec: properties: name: type: string + namespace: + type: string type: object type: object engine: @@ -319,6 +323,8 @@ spec: properties: name: type: string + namespace: + type: string type: object type: object numCacheNodes: @@ -413,6 +419,8 @@ spec: properties: name: type: string + namespace: + type: string type: object type: object securityGroupIDs: @@ -458,6 +466,8 @@ spec: 
properties: name: type: string + namespace: + type: string type: object type: object snapshotRetentionLimit: diff --git a/config/crd/bases/elasticache.services.k8s.aws_cachesubnetgroups.yaml b/config/crd/bases/elasticache.services.k8s.aws_cachesubnetgroups.yaml index 93f825a9..7433e5f8 100644 --- a/config/crd/bases/elasticache.services.k8s.aws_cachesubnetgroups.yaml +++ b/config/crd/bases/elasticache.services.k8s.aws_cachesubnetgroups.yaml @@ -76,6 +76,8 @@ spec: properties: name: type: string + namespace: + type: string type: object type: object type: array @@ -213,20 +215,8 @@ spec: subnetOutpostARN: type: string type: object - supportedNetworkTypes: - items: - type: string - type: array type: object type: array - supportedNetworkTypes: - description: |- - Either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Valkey - 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine - version 1.6.6 and above on all instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). - items: - type: string - type: array vpcID: description: |- The Amazon Virtual Private Cloud identifier (VPC ID) of the cache subnet diff --git a/config/crd/bases/elasticache.services.k8s.aws_replicationgroups.yaml b/config/crd/bases/elasticache.services.k8s.aws_replicationgroups.yaml index de3642c9..8d9dc023 100644 --- a/config/crd/bases/elasticache.services.k8s.aws_replicationgroups.yaml +++ b/config/crd/bases/elasticache.services.k8s.aws_replicationgroups.yaml @@ -188,6 +188,8 @@ spec: properties: name: type: string + namespace: + type: string type: object type: object cacheSecurityGroupNames: @@ -217,17 +219,10 @@ spec: properties: name: type: string + namespace: + type: string type: object type: object - clusterMode: - description: |- - Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you - must first set the cluster mode to Compatible. 
Compatible mode allows your - Valkey or Redis OSS clients to connect using both cluster mode enabled and - cluster mode disabled. After you migrate all Valkey or Redis OSS clients - to use cluster mode enabled, you can then complete cluster mode configuration - and set the cluster mode to Enabled. - type: string dataTieringEnabled: description: |- Enables data tiering. Data tiering is only supported for replication groups @@ -255,13 +250,6 @@ spec: existing cluster or replication group and create it anew with the earlier engine version. type: string - ipDiscovery: - description: |- - The network type you choose when creating a replication group, either ipv4 - | ipv6. IPv6 is supported for workloads using Valkey 7.2 and above, Redis - OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above - on all instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). - type: string kmsKeyID: description: The ID of the KMS key used to encrypt the disk in the cluster. @@ -306,13 +294,6 @@ spec: A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. For more information, see Minimizing Downtime: Multi-AZ (http://docs.aws.amazon.com/AmazonElastiCache/latest/dg/AutoFailover.html). type: boolean - networkType: - description: |- - Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads - using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached - engine version 1.6.6 and above on all instances built on the Nitro system - (http://aws.amazon.com/ec2/nitro/). - type: string nodeGroupConfiguration: description: |- A list of node group (shard) configuration options. Each node group (shard) @@ -468,14 +449,11 @@ spec: properties: name: type: string + namespace: + type: string type: object type: object type: array - serverlessCacheSnapshotName: - description: |- - The name of the snapshot used to create a replication group. Available for - Valkey, Redis OSS only. 
- type: string snapshotARNs: description: |- A list of Amazon Resource Names (ARN) that uniquely identify the Valkey or @@ -554,23 +532,6 @@ spec: For HIPAA compliance, you must specify TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup. type: boolean - transitEncryptionMode: - description: |- - A setting that allows you to migrate your clients to use in-transit encryption, - with no downtime. - - When setting TransitEncryptionEnabled to true, you can set your TransitEncryptionMode - to preferred in the same request, to allow both encrypted and unencrypted - connections at the same time. Once you migrate all your Valkey or Redis OSS - clients to use encrypted connections you can modify the value to required - to allow encrypted connections only. - - Setting TransitEncryptionMode to required is a two-step process that requires - you to first set the TransitEncryptionMode to preferred, after that you can - set TransitEncryptionMode to required. - - This process will not trigger the replacement of the replication group. - type: string userGroupIDs: description: The user group to associate with the replication group. items: @@ -877,8 +838,6 @@ spec: type: string automaticFailoverStatus: type: string - clusterMode: - type: string logDeliveryConfigurations: items: description: The log delivery configurations being modified @@ -924,10 +883,6 @@ spec: type: number type: object type: object - transitEncryptionEnabled: - type: boolean - transitEncryptionMode: - type: string userGroups: description: The status of the user group update. 
properties: diff --git a/config/crd/bases/elasticache.services.k8s.aws_usergroups.yaml b/config/crd/bases/elasticache.services.k8s.aws_usergroups.yaml index 4761393d..6e2bb2d8 100644 --- a/config/crd/bases/elasticache.services.k8s.aws_usergroups.yaml +++ b/config/crd/bases/elasticache.services.k8s.aws_usergroups.yaml @@ -162,13 +162,6 @@ spec: items: type: string type: array - serverlessCaches: - description: |- - Indicates which serverless caches the specified user group is associated - with. Available for Valkey, Redis OSS and Serverless Memcached only. - items: - type: string - type: array status: description: Indicates user group status. Can be "creating", "active", "modifying", "deleting". diff --git a/config/crd/bases/elasticache.services.k8s.aws_users.yaml b/config/crd/bases/elasticache.services.k8s.aws_users.yaml index 20322f8c..98b1e262 100644 --- a/config/crd/bases/elasticache.services.k8s.aws_users.yaml +++ b/config/crd/bases/elasticache.services.k8s.aws_users.yaml @@ -41,16 +41,6 @@ spec: accessString: description: Access permissions string used for this user. type: string - authenticationMode: - description: Specifies how to authenticate the user. - properties: - passwords: - items: - type: string - type: array - type_: - type: string - type: object engine: description: The current supported value is Redis. 
type: string diff --git a/generator.yaml b/generator.yaml index 4183d7a4..f5e398b3 100644 --- a/generator.yaml +++ b/generator.yaml @@ -173,6 +173,7 @@ resources: PrimaryClusterId: # note: "PrimaryClusterID" will not function properly compare: is_ignored: true + hooks: sdk_read_many_post_set_output: template_path: hooks/replication_group/sdk_read_many_post_set_output.go.tpl @@ -189,7 +190,7 @@ resources: sdk_file_end: template_path: hooks/replication_group/sdk_file_end.go.tpl sdk_file_end_set_output_post_populate: - code: "rm.customSetOutput(obj, ko) // custom set output from obj" + code: "rm.customSetOutput(ctx, *obj, ko) // custom set output from obj" renames: operations: CreateReplicationGroup: @@ -303,7 +304,7 @@ operations: custom_implementation: CustomModifyReplicationGroup set_output_custom_method_name: CustomModifyReplicationGroupSetOutput override_values: - ApplyImmediately: true + ApplyImmediately: aws.Bool(true) CreateSnapshot: custom_implementation: CustomCreateSnapshot set_output_custom_method_name: CustomCreateSnapshotSetOutput @@ -327,7 +328,7 @@ operations: ModifyCacheCluster: set_output_custom_method_name: customModifyCacheClusterSetOutput override_values: - ApplyImmediately: true + ApplyImmediately: aws.Bool(true) ignore: resource_names: - ServerlessCache @@ -348,4 +349,32 @@ ignore: - CreateReplicationGroupInput.AutoMinorVersionUpgrade - CreateReplicationGroupInput.NumCacheClusters - CacheCluster.LogDeliveryConfigurations - - PendingModifiedValues.LogDeliveryConfigurations \ No newline at end of file + - PendingModifiedValues.LogDeliveryConfigurations + - CreateUserInput.AuthenticationMode + - ModifyUserInput.AuthenticationMode + - CreateCacheSubnetGroupOutput.CacheSubnetGroup.SupportedNetworkTypes + - CreateCacheSubnetGroupOutput.CacheSubnetGroup.Subnets.SupportedNetworkTypes + - ModifyCacheSubnetGroupOutput.CacheSubnetGroup.Subnets.SupportedNetworkTypes + - CreateUserGroupOutput.ServerlessCaches + - 
CreateReplicationGroupOutput.ReplicationGroup.PendingModifiedValues.TransitEncryptionEnabled + - ModifyReplicationGroupOutput.ReplicationGroup.PendingModifiedValues.TransitEncryptionEnabled + # - CreateReplicationGroupOutput.ReplicationGroup.TransitEncryptionEnabled + # - ModifyReplicationGroupInput.TransitEncryptionEnabled + # - CreateReplicationGroupInput.TransitEncryptionEnabled + - CreateReplicationGroupOutput.ReplicationGroup.PendingModifiedValues.TransitEncryptionMode + - CreateReplicationGroupOutput.ReplicationGroup.TransitEncryptionMode + - CreateReplicationGroupInput.TransitEncryptionMode + - ModifyReplicationGroupOutput.ReplicationGroup.PendingModifiedValues.TransitEncryptionMode + - CreateReplicationGroupOutput.ReplicationGroup.ClusterMode + - CreateReplicationGroupOutput.ReplicationGroup.PendingModifiedValues.ClusterMode + - CreateReplicationGroupInput.ClusterMode + - ModifyReplicationGroupOutput.ReplicationGroup.ClusterMode + - ModifyReplicationGroupOutput.ReplicationGroup.PendingModifiedValues.ClusterMode + - CreateReplicationGroupInput.IpDiscovery + - CreateReplicationGroupOutput.ReplicationGroup.IpDiscovery + - Subnet.SupportedNetworkTypes + - CreateReplicationGroupInput.ServerlessCacheSnapshotName + - CreateReplicationGroupOutput.ReplicationGroup.NetworkType + - CreateReplicationGroupInput.NetworkType + # - ModifyReplicationGroupOutput.ReplicationGroup.ipDiscovery + # - ModifyReplicationGroupInput.ipDiscovery \ No newline at end of file diff --git a/go.mod b/go.mod index ea17f515..71fffe11 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,6 @@ require ( github.com/go-logr/logr v1.4.2 github.com/pkg/errors v0.9.1 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.9.0 k8s.io/api v0.31.0 k8s.io/apimachinery v0.31.0 k8s.io/client-go v0.31.0 @@ -57,20 +56,17 @@ require ( github.com/itchyny/gojq v0.12.6 // indirect github.com/itchyny/timefmt-go v0.1.3 // indirect github.com/jaypipes/envutil v1.0.0 // indirect - github.com/jmespath/go-jmespath 
v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_golang v1.19.1 // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.55.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/samber/lo v1.37.0 // indirect - github.com/stretchr/objx v0.5.2 // indirect github.com/x448/float16 v0.8.4 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.26.0 // indirect diff --git a/go.sum b/go.sum index 69a2275a..404de65c 100644 --- a/go.sum +++ b/go.sum @@ -97,8 +97,6 @@ github.com/jaypipes/envutil v1.0.0 h1:u6Vwy9HwruFihoZrL0bxDLCa/YNadGVwKyPElNmZWo github.com/jaypipes/envutil v1.0.0/go.mod h1:vgIRDly+xgBq0eeZRcflOHMMobMwgC6MkMbxo/Nw65M= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= diff --git a/helm/Chart.yaml b/helm/Chart.yaml index ef5f5eb2..d776d053 100644 --- a/helm/Chart.yaml +++ b/helm/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v1 name: elasticache-chart description: A Helm chart for 
the ACK service controller for Amazon ElastiCache (ElastiCache) -version: 1.6.0 -appVersion: 1.6.0 +version: 0.1.0 +appVersion: 0.1.0 home: https://github.com/aws-controllers-k8s/elasticache-controller icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png sources: diff --git a/helm/crds/elasticache.services.k8s.aws_cachesubnetgroups.yaml b/helm/crds/elasticache.services.k8s.aws_cachesubnetgroups.yaml index d47ddb62..0333f34c 100644 --- a/helm/crds/elasticache.services.k8s.aws_cachesubnetgroups.yaml +++ b/helm/crds/elasticache.services.k8s.aws_cachesubnetgroups.yaml @@ -215,20 +215,8 @@ spec: subnetOutpostARN: type: string type: object - supportedNetworkTypes: - items: - type: string - type: array type: object type: array - supportedNetworkTypes: - description: |- - Either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Valkey - 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine - version 1.6.6 and above on all instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). - items: - type: string - type: array vpcID: description: |- The Amazon Virtual Private Cloud identifier (VPC ID) of the cache subnet diff --git a/helm/crds/elasticache.services.k8s.aws_replicationgroups.yaml b/helm/crds/elasticache.services.k8s.aws_replicationgroups.yaml index 29ec95de..2a52fdf5 100644 --- a/helm/crds/elasticache.services.k8s.aws_replicationgroups.yaml +++ b/helm/crds/elasticache.services.k8s.aws_replicationgroups.yaml @@ -223,15 +223,6 @@ spec: type: string type: object type: object - clusterMode: - description: |- - Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you - must first set the cluster mode to Compatible. Compatible mode allows your - Valkey or Redis OSS clients to connect using both cluster mode enabled and - cluster mode disabled. 
After you migrate all Valkey or Redis OSS clients - to use cluster mode enabled, you can then complete cluster mode configuration - and set the cluster mode to Enabled. - type: string dataTieringEnabled: description: |- Enables data tiering. Data tiering is only supported for replication groups @@ -259,13 +250,6 @@ spec: existing cluster or replication group and create it anew with the earlier engine version. type: string - ipDiscovery: - description: |- - The network type you choose when creating a replication group, either ipv4 - | ipv6. IPv6 is supported for workloads using Valkey 7.2 and above, Redis - OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above - on all instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). - type: string kmsKeyID: description: The ID of the KMS key used to encrypt the disk in the cluster. @@ -310,13 +294,6 @@ spec: A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. For more information, see Minimizing Downtime: Multi-AZ (http://docs.aws.amazon.com/AmazonElastiCache/latest/dg/AutoFailover.html). type: boolean - networkType: - description: |- - Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads - using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached - engine version 1.6.6 and above on all instances built on the Nitro system - (http://aws.amazon.com/ec2/nitro/). - type: string nodeGroupConfiguration: description: |- A list of node group (shard) configuration options. Each node group (shard) @@ -477,11 +454,6 @@ spec: type: object type: object type: array - serverlessCacheSnapshotName: - description: |- - The name of the snapshot used to create a replication group. Available for - Valkey, Redis OSS only. 
- type: string snapshotARNs: description: |- A list of Amazon Resource Names (ARN) that uniquely identify the Valkey or @@ -560,23 +532,6 @@ spec: For HIPAA compliance, you must specify TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup. type: boolean - transitEncryptionMode: - description: |- - A setting that allows you to migrate your clients to use in-transit encryption, - with no downtime. - - When setting TransitEncryptionEnabled to true, you can set your TransitEncryptionMode - to preferred in the same request, to allow both encrypted and unencrypted - connections at the same time. Once you migrate all your Valkey or Redis OSS - clients to use encrypted connections you can modify the value to required - to allow encrypted connections only. - - Setting TransitEncryptionMode to required is a two-step process that requires - you to first set the TransitEncryptionMode to preferred, after that you can - set TransitEncryptionMode to required. - - This process will not trigger the replacement of the replication group. - type: string userGroupIDs: description: The user group to associate with the replication group. items: @@ -883,8 +838,6 @@ spec: type: string automaticFailoverStatus: type: string - clusterMode: - type: string logDeliveryConfigurations: items: description: The log delivery configurations being modified @@ -930,10 +883,6 @@ spec: type: number type: object type: object - transitEncryptionEnabled: - type: boolean - transitEncryptionMode: - type: string userGroups: description: The status of the user group update. 
properties: diff --git a/helm/crds/elasticache.services.k8s.aws_usergroups.yaml b/helm/crds/elasticache.services.k8s.aws_usergroups.yaml index 4761393d..6e2bb2d8 100644 --- a/helm/crds/elasticache.services.k8s.aws_usergroups.yaml +++ b/helm/crds/elasticache.services.k8s.aws_usergroups.yaml @@ -162,13 +162,6 @@ spec: items: type: string type: array - serverlessCaches: - description: |- - Indicates which serverless caches the specified user group is associated - with. Available for Valkey, Redis OSS and Serverless Memcached only. - items: - type: string - type: array status: description: Indicates user group status. Can be "creating", "active", "modifying", "deleting". diff --git a/helm/crds/elasticache.services.k8s.aws_users.yaml b/helm/crds/elasticache.services.k8s.aws_users.yaml index 20322f8c..98b1e262 100644 --- a/helm/crds/elasticache.services.k8s.aws_users.yaml +++ b/helm/crds/elasticache.services.k8s.aws_users.yaml @@ -41,16 +41,6 @@ spec: accessString: description: Access permissions string used for this user. type: string - authenticationMode: - description: Specifies how to authenticate the user. - properties: - passwords: - items: - type: string - type: array - type_: - type: string - type: object engine: description: The current supported value is Redis. type: string diff --git a/helm/templates/NOTES.txt b/helm/templates/NOTES.txt index 3b3f27dd..752add27 100644 --- a/helm/templates/NOTES.txt +++ b/helm/templates/NOTES.txt @@ -1,5 +1,5 @@ {{ .Chart.Name }} has been installed. -This chart deploys "public.ecr.aws/aws-controllers-k8s/elasticache-controller:1.6.0". +This chart deploys "public.ecr.aws/aws-controllers-k8s/elasticache-controller:0.1.0". 
Check its status by running: kubectl --namespace {{ .Release.Namespace }} get pods -l "app.kubernetes.io/instance={{ .Release.Name }}" diff --git a/helm/values.yaml b/helm/values.yaml index f1221ed7..ebb124b4 100644 --- a/helm/values.yaml +++ b/helm/values.yaml @@ -4,7 +4,7 @@ image: repository: public.ecr.aws/aws-controllers-k8s/elasticache-controller - tag: 1.6.0 + tag: 0.1.0 pullPolicy: IfNotPresent pullSecrets: [] diff --git a/metadata.yaml b/metadata.yaml index 77faee84..70ec689a 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -2,7 +2,7 @@ service: full_name: "Amazon ElastiCache" short_name: "ElastiCache" link: "https://aws.amazon.com/elasticache/" - documentation: "https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/WhatIs.html" + documentation: "https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/WhatIs.html" api_versions: - api_version: v1alpha1 status: available diff --git a/mocks/aws-sdk-go/elasticache/ElastiCacheAPI.go b/mocks/aws-sdk-go/elasticache/ElastiCacheAPI.go deleted file mode 100644 index 4e4bd5f0..00000000 --- a/mocks/aws-sdk-go/elasticache/ElastiCacheAPI.go +++ /dev/null @@ -1,7362 +0,0 @@ -// Code generated by mockery v2.33.2. DO NOT EDIT. 
- -package mocks - -import ( - context "context" - - elasticache "github.com/aws/aws-sdk-go/service/elasticache" - - mock "github.com/stretchr/testify/mock" - - request "github.com/aws/aws-sdk-go/aws/request" -) - -// ElastiCacheAPI is an autogenerated mock type for the ElastiCacheAPI type -type ElastiCacheAPI struct { - mock.Mock -} - -// AddTagsToResource provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) AddTagsToResource(_a0 *elasticache.AddTagsToResourceInput) (*elasticache.TagListMessage, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.TagListMessage - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.AddTagsToResourceInput) (*elasticache.TagListMessage, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.AddTagsToResourceInput) *elasticache.TagListMessage); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.TagListMessage) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.AddTagsToResourceInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AddTagsToResourceRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) AddTagsToResourceRequest(_a0 *elasticache.AddTagsToResourceInput) (*request.Request, *elasticache.TagListMessage) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.TagListMessage - if rf, ok := ret.Get(0).(func(*elasticache.AddTagsToResourceInput) (*request.Request, *elasticache.TagListMessage)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.AddTagsToResourceInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.AddTagsToResourceInput) *elasticache.TagListMessage); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.TagListMessage) - } - } - - return r0, r1 -} - -// 
AddTagsToResourceWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) AddTagsToResourceWithContext(_a0 context.Context, _a1 *elasticache.AddTagsToResourceInput, _a2 ...request.Option) (*elasticache.TagListMessage, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.TagListMessage - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.AddTagsToResourceInput, ...request.Option) (*elasticache.TagListMessage, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.AddTagsToResourceInput, ...request.Option) *elasticache.TagListMessage); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.TagListMessage) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.AddTagsToResourceInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AuthorizeCacheSecurityGroupIngress provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) AuthorizeCacheSecurityGroupIngress(_a0 *elasticache.AuthorizeCacheSecurityGroupIngressInput) (*elasticache.AuthorizeCacheSecurityGroupIngressOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.AuthorizeCacheSecurityGroupIngressOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.AuthorizeCacheSecurityGroupIngressInput) (*elasticache.AuthorizeCacheSecurityGroupIngressOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.AuthorizeCacheSecurityGroupIngressInput) *elasticache.AuthorizeCacheSecurityGroupIngressOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.AuthorizeCacheSecurityGroupIngressOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.AuthorizeCacheSecurityGroupIngressInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AuthorizeCacheSecurityGroupIngressRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) AuthorizeCacheSecurityGroupIngressRequest(_a0 *elasticache.AuthorizeCacheSecurityGroupIngressInput) (*request.Request, *elasticache.AuthorizeCacheSecurityGroupIngressOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.AuthorizeCacheSecurityGroupIngressOutput - if rf, ok := ret.Get(0).(func(*elasticache.AuthorizeCacheSecurityGroupIngressInput) (*request.Request, *elasticache.AuthorizeCacheSecurityGroupIngressOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.AuthorizeCacheSecurityGroupIngressInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.AuthorizeCacheSecurityGroupIngressInput) 
*elasticache.AuthorizeCacheSecurityGroupIngressOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.AuthorizeCacheSecurityGroupIngressOutput) - } - } - - return r0, r1 -} - -// AuthorizeCacheSecurityGroupIngressWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) AuthorizeCacheSecurityGroupIngressWithContext(_a0 context.Context, _a1 *elasticache.AuthorizeCacheSecurityGroupIngressInput, _a2 ...request.Option) (*elasticache.AuthorizeCacheSecurityGroupIngressOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.AuthorizeCacheSecurityGroupIngressOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.AuthorizeCacheSecurityGroupIngressInput, ...request.Option) (*elasticache.AuthorizeCacheSecurityGroupIngressOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.AuthorizeCacheSecurityGroupIngressInput, ...request.Option) *elasticache.AuthorizeCacheSecurityGroupIngressOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.AuthorizeCacheSecurityGroupIngressOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.AuthorizeCacheSecurityGroupIngressInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BatchApplyUpdateAction provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) BatchApplyUpdateAction(_a0 *elasticache.BatchApplyUpdateActionInput) (*elasticache.BatchApplyUpdateActionOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.BatchApplyUpdateActionOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.BatchApplyUpdateActionInput) (*elasticache.BatchApplyUpdateActionOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.BatchApplyUpdateActionInput) *elasticache.BatchApplyUpdateActionOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.BatchApplyUpdateActionOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.BatchApplyUpdateActionInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BatchApplyUpdateActionRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) BatchApplyUpdateActionRequest(_a0 *elasticache.BatchApplyUpdateActionInput) (*request.Request, *elasticache.BatchApplyUpdateActionOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.BatchApplyUpdateActionOutput - if rf, ok := ret.Get(0).(func(*elasticache.BatchApplyUpdateActionInput) (*request.Request, *elasticache.BatchApplyUpdateActionOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.BatchApplyUpdateActionInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.BatchApplyUpdateActionInput) *elasticache.BatchApplyUpdateActionOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.BatchApplyUpdateActionOutput) - } - } - - return r0, r1 -} - -// BatchApplyUpdateActionWithContext provides a mock function with given fields: _a0, 
_a1, _a2 -func (_m *ElastiCacheAPI) BatchApplyUpdateActionWithContext(_a0 context.Context, _a1 *elasticache.BatchApplyUpdateActionInput, _a2 ...request.Option) (*elasticache.BatchApplyUpdateActionOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.BatchApplyUpdateActionOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.BatchApplyUpdateActionInput, ...request.Option) (*elasticache.BatchApplyUpdateActionOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.BatchApplyUpdateActionInput, ...request.Option) *elasticache.BatchApplyUpdateActionOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.BatchApplyUpdateActionOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.BatchApplyUpdateActionInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BatchStopUpdateAction provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) BatchStopUpdateAction(_a0 *elasticache.BatchStopUpdateActionInput) (*elasticache.BatchStopUpdateActionOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.BatchStopUpdateActionOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.BatchStopUpdateActionInput) (*elasticache.BatchStopUpdateActionOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.BatchStopUpdateActionInput) *elasticache.BatchStopUpdateActionOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.BatchStopUpdateActionOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.BatchStopUpdateActionInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BatchStopUpdateActionRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) BatchStopUpdateActionRequest(_a0 *elasticache.BatchStopUpdateActionInput) (*request.Request, *elasticache.BatchStopUpdateActionOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.BatchStopUpdateActionOutput - if rf, ok := ret.Get(0).(func(*elasticache.BatchStopUpdateActionInput) (*request.Request, *elasticache.BatchStopUpdateActionOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.BatchStopUpdateActionInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.BatchStopUpdateActionInput) *elasticache.BatchStopUpdateActionOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.BatchStopUpdateActionOutput) - } - } - - return r0, r1 -} - -// BatchStopUpdateActionWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m 
*ElastiCacheAPI) BatchStopUpdateActionWithContext(_a0 context.Context, _a1 *elasticache.BatchStopUpdateActionInput, _a2 ...request.Option) (*elasticache.BatchStopUpdateActionOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.BatchStopUpdateActionOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.BatchStopUpdateActionInput, ...request.Option) (*elasticache.BatchStopUpdateActionOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.BatchStopUpdateActionInput, ...request.Option) *elasticache.BatchStopUpdateActionOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.BatchStopUpdateActionOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.BatchStopUpdateActionInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CompleteMigration provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CompleteMigration(_a0 *elasticache.CompleteMigrationInput) (*elasticache.CompleteMigrationOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.CompleteMigrationOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.CompleteMigrationInput) (*elasticache.CompleteMigrationOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.CompleteMigrationInput) *elasticache.CompleteMigrationOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CompleteMigrationOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.CompleteMigrationInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CompleteMigrationRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CompleteMigrationRequest(_a0 *elasticache.CompleteMigrationInput) (*request.Request, *elasticache.CompleteMigrationOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.CompleteMigrationOutput - if rf, ok := ret.Get(0).(func(*elasticache.CompleteMigrationInput) (*request.Request, *elasticache.CompleteMigrationOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.CompleteMigrationInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.CompleteMigrationInput) *elasticache.CompleteMigrationOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.CompleteMigrationOutput) - } - } - - return r0, r1 -} - -// CompleteMigrationWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) CompleteMigrationWithContext(_a0 context.Context, _a1 
*elasticache.CompleteMigrationInput, _a2 ...request.Option) (*elasticache.CompleteMigrationOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.CompleteMigrationOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CompleteMigrationInput, ...request.Option) (*elasticache.CompleteMigrationOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CompleteMigrationInput, ...request.Option) *elasticache.CompleteMigrationOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CompleteMigrationOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.CompleteMigrationInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CopyServerlessCacheSnapshot provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CopyServerlessCacheSnapshot(_a0 *elasticache.CopyServerlessCacheSnapshotInput) (*elasticache.CopyServerlessCacheSnapshotOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.CopyServerlessCacheSnapshotOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.CopyServerlessCacheSnapshotInput) (*elasticache.CopyServerlessCacheSnapshotOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.CopyServerlessCacheSnapshotInput) *elasticache.CopyServerlessCacheSnapshotOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CopyServerlessCacheSnapshotOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.CopyServerlessCacheSnapshotInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// 
CopyServerlessCacheSnapshotRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CopyServerlessCacheSnapshotRequest(_a0 *elasticache.CopyServerlessCacheSnapshotInput) (*request.Request, *elasticache.CopyServerlessCacheSnapshotOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.CopyServerlessCacheSnapshotOutput - if rf, ok := ret.Get(0).(func(*elasticache.CopyServerlessCacheSnapshotInput) (*request.Request, *elasticache.CopyServerlessCacheSnapshotOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.CopyServerlessCacheSnapshotInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.CopyServerlessCacheSnapshotInput) *elasticache.CopyServerlessCacheSnapshotOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.CopyServerlessCacheSnapshotOutput) - } - } - - return r0, r1 -} - -// CopyServerlessCacheSnapshotWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) CopyServerlessCacheSnapshotWithContext(_a0 context.Context, _a1 *elasticache.CopyServerlessCacheSnapshotInput, _a2 ...request.Option) (*elasticache.CopyServerlessCacheSnapshotOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.CopyServerlessCacheSnapshotOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CopyServerlessCacheSnapshotInput, ...request.Option) (*elasticache.CopyServerlessCacheSnapshotOutput, error)); ok { - return rf(_a0, _a1, _a2...) 
- } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CopyServerlessCacheSnapshotInput, ...request.Option) *elasticache.CopyServerlessCacheSnapshotOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CopyServerlessCacheSnapshotOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.CopyServerlessCacheSnapshotInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CopySnapshot provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CopySnapshot(_a0 *elasticache.CopySnapshotInput) (*elasticache.CopySnapshotOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.CopySnapshotOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.CopySnapshotInput) (*elasticache.CopySnapshotOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.CopySnapshotInput) *elasticache.CopySnapshotOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CopySnapshotOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.CopySnapshotInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CopySnapshotRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CopySnapshotRequest(_a0 *elasticache.CopySnapshotInput) (*request.Request, *elasticache.CopySnapshotOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.CopySnapshotOutput - if rf, ok := ret.Get(0).(func(*elasticache.CopySnapshotInput) (*request.Request, *elasticache.CopySnapshotOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.CopySnapshotInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.CopySnapshotInput) 
*elasticache.CopySnapshotOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.CopySnapshotOutput) - } - } - - return r0, r1 -} - -// CopySnapshotWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) CopySnapshotWithContext(_a0 context.Context, _a1 *elasticache.CopySnapshotInput, _a2 ...request.Option) (*elasticache.CopySnapshotOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.CopySnapshotOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CopySnapshotInput, ...request.Option) (*elasticache.CopySnapshotOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CopySnapshotInput, ...request.Option) *elasticache.CopySnapshotOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CopySnapshotOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.CopySnapshotInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateCacheCluster provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateCacheCluster(_a0 *elasticache.CreateCacheClusterInput) (*elasticache.CreateCacheClusterOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.CreateCacheClusterOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.CreateCacheClusterInput) (*elasticache.CreateCacheClusterOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.CreateCacheClusterInput) *elasticache.CreateCacheClusterOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateCacheClusterOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.CreateCacheClusterInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateCacheClusterRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateCacheClusterRequest(_a0 *elasticache.CreateCacheClusterInput) (*request.Request, *elasticache.CreateCacheClusterOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.CreateCacheClusterOutput - if rf, ok := ret.Get(0).(func(*elasticache.CreateCacheClusterInput) (*request.Request, *elasticache.CreateCacheClusterOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.CreateCacheClusterInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.CreateCacheClusterInput) *elasticache.CreateCacheClusterOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.CreateCacheClusterOutput) - } - } - - return r0, r1 -} - -// CreateCacheClusterWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) CreateCacheClusterWithContext(_a0 context.Context, _a1 
*elasticache.CreateCacheClusterInput, _a2 ...request.Option) (*elasticache.CreateCacheClusterOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.CreateCacheClusterOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CreateCacheClusterInput, ...request.Option) (*elasticache.CreateCacheClusterOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CreateCacheClusterInput, ...request.Option) *elasticache.CreateCacheClusterOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateCacheClusterOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.CreateCacheClusterInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateCacheParameterGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateCacheParameterGroup(_a0 *elasticache.CreateCacheParameterGroupInput) (*elasticache.CreateCacheParameterGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.CreateCacheParameterGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.CreateCacheParameterGroupInput) (*elasticache.CreateCacheParameterGroupOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.CreateCacheParameterGroupInput) *elasticache.CreateCacheParameterGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateCacheParameterGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.CreateCacheParameterGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateCacheParameterGroupRequest 
provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateCacheParameterGroupRequest(_a0 *elasticache.CreateCacheParameterGroupInput) (*request.Request, *elasticache.CreateCacheParameterGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.CreateCacheParameterGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.CreateCacheParameterGroupInput) (*request.Request, *elasticache.CreateCacheParameterGroupOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.CreateCacheParameterGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.CreateCacheParameterGroupInput) *elasticache.CreateCacheParameterGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.CreateCacheParameterGroupOutput) - } - } - - return r0, r1 -} - -// CreateCacheParameterGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) CreateCacheParameterGroupWithContext(_a0 context.Context, _a1 *elasticache.CreateCacheParameterGroupInput, _a2 ...request.Option) (*elasticache.CreateCacheParameterGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.CreateCacheParameterGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CreateCacheParameterGroupInput, ...request.Option) (*elasticache.CreateCacheParameterGroupOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CreateCacheParameterGroupInput, ...request.Option) *elasticache.CreateCacheParameterGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) 
- } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateCacheParameterGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.CreateCacheParameterGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateCacheSecurityGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateCacheSecurityGroup(_a0 *elasticache.CreateCacheSecurityGroupInput) (*elasticache.CreateCacheSecurityGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.CreateCacheSecurityGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.CreateCacheSecurityGroupInput) (*elasticache.CreateCacheSecurityGroupOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.CreateCacheSecurityGroupInput) *elasticache.CreateCacheSecurityGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateCacheSecurityGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.CreateCacheSecurityGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateCacheSecurityGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateCacheSecurityGroupRequest(_a0 *elasticache.CreateCacheSecurityGroupInput) (*request.Request, *elasticache.CreateCacheSecurityGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.CreateCacheSecurityGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.CreateCacheSecurityGroupInput) (*request.Request, *elasticache.CreateCacheSecurityGroupOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.CreateCacheSecurityGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := 
ret.Get(1).(func(*elasticache.CreateCacheSecurityGroupInput) *elasticache.CreateCacheSecurityGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.CreateCacheSecurityGroupOutput) - } - } - - return r0, r1 -} - -// CreateCacheSecurityGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) CreateCacheSecurityGroupWithContext(_a0 context.Context, _a1 *elasticache.CreateCacheSecurityGroupInput, _a2 ...request.Option) (*elasticache.CreateCacheSecurityGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.CreateCacheSecurityGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CreateCacheSecurityGroupInput, ...request.Option) (*elasticache.CreateCacheSecurityGroupOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CreateCacheSecurityGroupInput, ...request.Option) *elasticache.CreateCacheSecurityGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateCacheSecurityGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.CreateCacheSecurityGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateCacheSubnetGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateCacheSubnetGroup(_a0 *elasticache.CreateCacheSubnetGroupInput) (*elasticache.CreateCacheSubnetGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.CreateCacheSubnetGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.CreateCacheSubnetGroupInput) (*elasticache.CreateCacheSubnetGroupOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.CreateCacheSubnetGroupInput) *elasticache.CreateCacheSubnetGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateCacheSubnetGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.CreateCacheSubnetGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateCacheSubnetGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateCacheSubnetGroupRequest(_a0 *elasticache.CreateCacheSubnetGroupInput) (*request.Request, *elasticache.CreateCacheSubnetGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.CreateCacheSubnetGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.CreateCacheSubnetGroupInput) (*request.Request, *elasticache.CreateCacheSubnetGroupOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.CreateCacheSubnetGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.CreateCacheSubnetGroupInput) *elasticache.CreateCacheSubnetGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.CreateCacheSubnetGroupOutput) - } - } - - return r0, r1 -} - -// CreateCacheSubnetGroupWithContext provides a mock function with given fields: _a0, 
_a1, _a2 -func (_m *ElastiCacheAPI) CreateCacheSubnetGroupWithContext(_a0 context.Context, _a1 *elasticache.CreateCacheSubnetGroupInput, _a2 ...request.Option) (*elasticache.CreateCacheSubnetGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.CreateCacheSubnetGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CreateCacheSubnetGroupInput, ...request.Option) (*elasticache.CreateCacheSubnetGroupOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CreateCacheSubnetGroupInput, ...request.Option) *elasticache.CreateCacheSubnetGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateCacheSubnetGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.CreateCacheSubnetGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateGlobalReplicationGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateGlobalReplicationGroup(_a0 *elasticache.CreateGlobalReplicationGroupInput) (*elasticache.CreateGlobalReplicationGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.CreateGlobalReplicationGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.CreateGlobalReplicationGroupInput) (*elasticache.CreateGlobalReplicationGroupOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.CreateGlobalReplicationGroupInput) *elasticache.CreateGlobalReplicationGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateGlobalReplicationGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.CreateGlobalReplicationGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateGlobalReplicationGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateGlobalReplicationGroupRequest(_a0 *elasticache.CreateGlobalReplicationGroupInput) (*request.Request, *elasticache.CreateGlobalReplicationGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.CreateGlobalReplicationGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.CreateGlobalReplicationGroupInput) (*request.Request, *elasticache.CreateGlobalReplicationGroupOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.CreateGlobalReplicationGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.CreateGlobalReplicationGroupInput) *elasticache.CreateGlobalReplicationGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = 
ret.Get(1).(*elasticache.CreateGlobalReplicationGroupOutput) - } - } - - return r0, r1 -} - -// CreateGlobalReplicationGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) CreateGlobalReplicationGroupWithContext(_a0 context.Context, _a1 *elasticache.CreateGlobalReplicationGroupInput, _a2 ...request.Option) (*elasticache.CreateGlobalReplicationGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.CreateGlobalReplicationGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CreateGlobalReplicationGroupInput, ...request.Option) (*elasticache.CreateGlobalReplicationGroupOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CreateGlobalReplicationGroupInput, ...request.Option) *elasticache.CreateGlobalReplicationGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateGlobalReplicationGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.CreateGlobalReplicationGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateReplicationGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateReplicationGroup(_a0 *elasticache.CreateReplicationGroupInput) (*elasticache.CreateReplicationGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.CreateReplicationGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.CreateReplicationGroupInput) (*elasticache.CreateReplicationGroupOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.CreateReplicationGroupInput) *elasticache.CreateReplicationGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateReplicationGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.CreateReplicationGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateReplicationGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateReplicationGroupRequest(_a0 *elasticache.CreateReplicationGroupInput) (*request.Request, *elasticache.CreateReplicationGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.CreateReplicationGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.CreateReplicationGroupInput) (*request.Request, *elasticache.CreateReplicationGroupOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.CreateReplicationGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.CreateReplicationGroupInput) *elasticache.CreateReplicationGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.CreateReplicationGroupOutput) - } - } - - return r0, r1 -} - -// CreateReplicationGroupWithContext provides a mock function with given fields: _a0, 
_a1, _a2 -func (_m *ElastiCacheAPI) CreateReplicationGroupWithContext(_a0 context.Context, _a1 *elasticache.CreateReplicationGroupInput, _a2 ...request.Option) (*elasticache.CreateReplicationGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.CreateReplicationGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CreateReplicationGroupInput, ...request.Option) (*elasticache.CreateReplicationGroupOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CreateReplicationGroupInput, ...request.Option) *elasticache.CreateReplicationGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateReplicationGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.CreateReplicationGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateServerlessCache provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateServerlessCache(_a0 *elasticache.CreateServerlessCacheInput) (*elasticache.CreateServerlessCacheOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.CreateServerlessCacheOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.CreateServerlessCacheInput) (*elasticache.CreateServerlessCacheOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.CreateServerlessCacheInput) *elasticache.CreateServerlessCacheOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateServerlessCacheOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.CreateServerlessCacheInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateServerlessCacheRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateServerlessCacheRequest(_a0 *elasticache.CreateServerlessCacheInput) (*request.Request, *elasticache.CreateServerlessCacheOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.CreateServerlessCacheOutput - if rf, ok := ret.Get(0).(func(*elasticache.CreateServerlessCacheInput) (*request.Request, *elasticache.CreateServerlessCacheOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.CreateServerlessCacheInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.CreateServerlessCacheInput) *elasticache.CreateServerlessCacheOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.CreateServerlessCacheOutput) - } - } - - return r0, r1 -} - -// CreateServerlessCacheSnapshot provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) 
CreateServerlessCacheSnapshot(_a0 *elasticache.CreateServerlessCacheSnapshotInput) (*elasticache.CreateServerlessCacheSnapshotOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.CreateServerlessCacheSnapshotOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.CreateServerlessCacheSnapshotInput) (*elasticache.CreateServerlessCacheSnapshotOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.CreateServerlessCacheSnapshotInput) *elasticache.CreateServerlessCacheSnapshotOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateServerlessCacheSnapshotOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.CreateServerlessCacheSnapshotInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateServerlessCacheSnapshotRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateServerlessCacheSnapshotRequest(_a0 *elasticache.CreateServerlessCacheSnapshotInput) (*request.Request, *elasticache.CreateServerlessCacheSnapshotOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.CreateServerlessCacheSnapshotOutput - if rf, ok := ret.Get(0).(func(*elasticache.CreateServerlessCacheSnapshotInput) (*request.Request, *elasticache.CreateServerlessCacheSnapshotOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.CreateServerlessCacheSnapshotInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.CreateServerlessCacheSnapshotInput) *elasticache.CreateServerlessCacheSnapshotOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.CreateServerlessCacheSnapshotOutput) - } - } - - return r0, r1 -} - -// CreateServerlessCacheSnapshotWithContext provides a mock function with given fields: _a0, _a1, 
_a2 -func (_m *ElastiCacheAPI) CreateServerlessCacheSnapshotWithContext(_a0 context.Context, _a1 *elasticache.CreateServerlessCacheSnapshotInput, _a2 ...request.Option) (*elasticache.CreateServerlessCacheSnapshotOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.CreateServerlessCacheSnapshotOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CreateServerlessCacheSnapshotInput, ...request.Option) (*elasticache.CreateServerlessCacheSnapshotOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CreateServerlessCacheSnapshotInput, ...request.Option) *elasticache.CreateServerlessCacheSnapshotOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateServerlessCacheSnapshotOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.CreateServerlessCacheSnapshotInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateServerlessCacheWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) CreateServerlessCacheWithContext(_a0 context.Context, _a1 *elasticache.CreateServerlessCacheInput, _a2 ...request.Option) (*elasticache.CreateServerlessCacheOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
- - var r0 *elasticache.CreateServerlessCacheOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CreateServerlessCacheInput, ...request.Option) (*elasticache.CreateServerlessCacheOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CreateServerlessCacheInput, ...request.Option) *elasticache.CreateServerlessCacheOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateServerlessCacheOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.CreateServerlessCacheInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateSnapshot provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateSnapshot(_a0 *elasticache.CreateSnapshotInput) (*elasticache.CreateSnapshotOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.CreateSnapshotOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.CreateSnapshotInput) (*elasticache.CreateSnapshotOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.CreateSnapshotInput) *elasticache.CreateSnapshotOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateSnapshotOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.CreateSnapshotInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateSnapshotRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateSnapshotRequest(_a0 *elasticache.CreateSnapshotInput) (*request.Request, *elasticache.CreateSnapshotOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.CreateSnapshotOutput - if rf, ok := ret.Get(0).(func(*elasticache.CreateSnapshotInput) (*request.Request, *elasticache.CreateSnapshotOutput)); ok { - 
return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.CreateSnapshotInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.CreateSnapshotInput) *elasticache.CreateSnapshotOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.CreateSnapshotOutput) - } - } - - return r0, r1 -} - -// CreateSnapshotWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) CreateSnapshotWithContext(_a0 context.Context, _a1 *elasticache.CreateSnapshotInput, _a2 ...request.Option) (*elasticache.CreateSnapshotOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.CreateSnapshotOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CreateSnapshotInput, ...request.Option) (*elasticache.CreateSnapshotOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CreateSnapshotInput, ...request.Option) *elasticache.CreateSnapshotOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateSnapshotOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.CreateSnapshotInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateUser provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateUser(_a0 *elasticache.CreateUserInput) (*elasticache.CreateUserOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.CreateUserOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.CreateUserInput) (*elasticache.CreateUserOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.CreateUserInput) *elasticache.CreateUserOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateUserOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.CreateUserInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateUserGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateUserGroup(_a0 *elasticache.CreateUserGroupInput) (*elasticache.CreateUserGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.CreateUserGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.CreateUserGroupInput) (*elasticache.CreateUserGroupOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.CreateUserGroupInput) *elasticache.CreateUserGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateUserGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.CreateUserGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateUserGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateUserGroupRequest(_a0 *elasticache.CreateUserGroupInput) (*request.Request, *elasticache.CreateUserGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.CreateUserGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.CreateUserGroupInput) 
(*request.Request, *elasticache.CreateUserGroupOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.CreateUserGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.CreateUserGroupInput) *elasticache.CreateUserGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.CreateUserGroupOutput) - } - } - - return r0, r1 -} - -// CreateUserGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) CreateUserGroupWithContext(_a0 context.Context, _a1 *elasticache.CreateUserGroupInput, _a2 ...request.Option) (*elasticache.CreateUserGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.CreateUserGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CreateUserGroupInput, ...request.Option) (*elasticache.CreateUserGroupOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CreateUserGroupInput, ...request.Option) *elasticache.CreateUserGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateUserGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.CreateUserGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateUserRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateUserRequest(_a0 *elasticache.CreateUserInput) (*request.Request, *elasticache.CreateUserOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.CreateUserOutput - if rf, ok := ret.Get(0).(func(*elasticache.CreateUserInput) (*request.Request, *elasticache.CreateUserOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.CreateUserInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.CreateUserInput) *elasticache.CreateUserOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.CreateUserOutput) - } - } - - return r0, r1 -} - -// CreateUserWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) CreateUserWithContext(_a0 context.Context, _a1 *elasticache.CreateUserInput, _a2 ...request.Option) (*elasticache.CreateUserOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.CreateUserOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CreateUserInput, ...request.Option) (*elasticache.CreateUserOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CreateUserInput, ...request.Option) *elasticache.CreateUserOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateUserOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.CreateUserInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DecreaseNodeGroupsInGlobalReplicationGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DecreaseNodeGroupsInGlobalReplicationGroup(_a0 *elasticache.DecreaseNodeGroupsInGlobalReplicationGroupInput) (*elasticache.DecreaseNodeGroupsInGlobalReplicationGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DecreaseNodeGroupsInGlobalReplicationGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DecreaseNodeGroupsInGlobalReplicationGroupInput) (*elasticache.DecreaseNodeGroupsInGlobalReplicationGroupOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DecreaseNodeGroupsInGlobalReplicationGroupInput) *elasticache.DecreaseNodeGroupsInGlobalReplicationGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DecreaseNodeGroupsInGlobalReplicationGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DecreaseNodeGroupsInGlobalReplicationGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DecreaseNodeGroupsInGlobalReplicationGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DecreaseNodeGroupsInGlobalReplicationGroupRequest(_a0 *elasticache.DecreaseNodeGroupsInGlobalReplicationGroupInput) (*request.Request, *elasticache.DecreaseNodeGroupsInGlobalReplicationGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DecreaseNodeGroupsInGlobalReplicationGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.DecreaseNodeGroupsInGlobalReplicationGroupInput) (*request.Request, *elasticache.DecreaseNodeGroupsInGlobalReplicationGroupOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DecreaseNodeGroupsInGlobalReplicationGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = 
ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DecreaseNodeGroupsInGlobalReplicationGroupInput) *elasticache.DecreaseNodeGroupsInGlobalReplicationGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DecreaseNodeGroupsInGlobalReplicationGroupOutput) - } - } - - return r0, r1 -} - -// DecreaseNodeGroupsInGlobalReplicationGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DecreaseNodeGroupsInGlobalReplicationGroupWithContext(_a0 context.Context, _a1 *elasticache.DecreaseNodeGroupsInGlobalReplicationGroupInput, _a2 ...request.Option) (*elasticache.DecreaseNodeGroupsInGlobalReplicationGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DecreaseNodeGroupsInGlobalReplicationGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DecreaseNodeGroupsInGlobalReplicationGroupInput, ...request.Option) (*elasticache.DecreaseNodeGroupsInGlobalReplicationGroupOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DecreaseNodeGroupsInGlobalReplicationGroupInput, ...request.Option) *elasticache.DecreaseNodeGroupsInGlobalReplicationGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DecreaseNodeGroupsInGlobalReplicationGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DecreaseNodeGroupsInGlobalReplicationGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DecreaseReplicaCount provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DecreaseReplicaCount(_a0 *elasticache.DecreaseReplicaCountInput) (*elasticache.DecreaseReplicaCountOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DecreaseReplicaCountOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DecreaseReplicaCountInput) (*elasticache.DecreaseReplicaCountOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DecreaseReplicaCountInput) *elasticache.DecreaseReplicaCountOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DecreaseReplicaCountOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DecreaseReplicaCountInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DecreaseReplicaCountRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DecreaseReplicaCountRequest(_a0 *elasticache.DecreaseReplicaCountInput) (*request.Request, *elasticache.DecreaseReplicaCountOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DecreaseReplicaCountOutput - if rf, ok := ret.Get(0).(func(*elasticache.DecreaseReplicaCountInput) (*request.Request, *elasticache.DecreaseReplicaCountOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DecreaseReplicaCountInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DecreaseReplicaCountInput) *elasticache.DecreaseReplicaCountOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DecreaseReplicaCountOutput) - } - } - - return r0, r1 -} - -// DecreaseReplicaCountWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) 
DecreaseReplicaCountWithContext(_a0 context.Context, _a1 *elasticache.DecreaseReplicaCountInput, _a2 ...request.Option) (*elasticache.DecreaseReplicaCountOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DecreaseReplicaCountOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DecreaseReplicaCountInput, ...request.Option) (*elasticache.DecreaseReplicaCountOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DecreaseReplicaCountInput, ...request.Option) *elasticache.DecreaseReplicaCountOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DecreaseReplicaCountOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DecreaseReplicaCountInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteCacheCluster provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteCacheCluster(_a0 *elasticache.DeleteCacheClusterInput) (*elasticache.DeleteCacheClusterOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DeleteCacheClusterOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DeleteCacheClusterInput) (*elasticache.DeleteCacheClusterOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DeleteCacheClusterInput) *elasticache.DeleteCacheClusterOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteCacheClusterOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DeleteCacheClusterInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteCacheClusterRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteCacheClusterRequest(_a0 *elasticache.DeleteCacheClusterInput) (*request.Request, *elasticache.DeleteCacheClusterOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DeleteCacheClusterOutput - if rf, ok := ret.Get(0).(func(*elasticache.DeleteCacheClusterInput) (*request.Request, *elasticache.DeleteCacheClusterOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DeleteCacheClusterInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DeleteCacheClusterInput) *elasticache.DeleteCacheClusterOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DeleteCacheClusterOutput) - } - } - - return r0, r1 -} - -// DeleteCacheClusterWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DeleteCacheClusterWithContext(_a0 context.Context, _a1 
*elasticache.DeleteCacheClusterInput, _a2 ...request.Option) (*elasticache.DeleteCacheClusterOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DeleteCacheClusterOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DeleteCacheClusterInput, ...request.Option) (*elasticache.DeleteCacheClusterOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DeleteCacheClusterInput, ...request.Option) *elasticache.DeleteCacheClusterOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteCacheClusterOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DeleteCacheClusterInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteCacheParameterGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteCacheParameterGroup(_a0 *elasticache.DeleteCacheParameterGroupInput) (*elasticache.DeleteCacheParameterGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DeleteCacheParameterGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DeleteCacheParameterGroupInput) (*elasticache.DeleteCacheParameterGroupOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DeleteCacheParameterGroupInput) *elasticache.DeleteCacheParameterGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteCacheParameterGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DeleteCacheParameterGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteCacheParameterGroupRequest 
provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteCacheParameterGroupRequest(_a0 *elasticache.DeleteCacheParameterGroupInput) (*request.Request, *elasticache.DeleteCacheParameterGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DeleteCacheParameterGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.DeleteCacheParameterGroupInput) (*request.Request, *elasticache.DeleteCacheParameterGroupOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DeleteCacheParameterGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DeleteCacheParameterGroupInput) *elasticache.DeleteCacheParameterGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DeleteCacheParameterGroupOutput) - } - } - - return r0, r1 -} - -// DeleteCacheParameterGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DeleteCacheParameterGroupWithContext(_a0 context.Context, _a1 *elasticache.DeleteCacheParameterGroupInput, _a2 ...request.Option) (*elasticache.DeleteCacheParameterGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DeleteCacheParameterGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DeleteCacheParameterGroupInput, ...request.Option) (*elasticache.DeleteCacheParameterGroupOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DeleteCacheParameterGroupInput, ...request.Option) *elasticache.DeleteCacheParameterGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) 
- } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteCacheParameterGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DeleteCacheParameterGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteCacheSecurityGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteCacheSecurityGroup(_a0 *elasticache.DeleteCacheSecurityGroupInput) (*elasticache.DeleteCacheSecurityGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DeleteCacheSecurityGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DeleteCacheSecurityGroupInput) (*elasticache.DeleteCacheSecurityGroupOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DeleteCacheSecurityGroupInput) *elasticache.DeleteCacheSecurityGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteCacheSecurityGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DeleteCacheSecurityGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteCacheSecurityGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteCacheSecurityGroupRequest(_a0 *elasticache.DeleteCacheSecurityGroupInput) (*request.Request, *elasticache.DeleteCacheSecurityGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DeleteCacheSecurityGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.DeleteCacheSecurityGroupInput) (*request.Request, *elasticache.DeleteCacheSecurityGroupOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DeleteCacheSecurityGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := 
ret.Get(1).(func(*elasticache.DeleteCacheSecurityGroupInput) *elasticache.DeleteCacheSecurityGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DeleteCacheSecurityGroupOutput) - } - } - - return r0, r1 -} - -// DeleteCacheSecurityGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DeleteCacheSecurityGroupWithContext(_a0 context.Context, _a1 *elasticache.DeleteCacheSecurityGroupInput, _a2 ...request.Option) (*elasticache.DeleteCacheSecurityGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DeleteCacheSecurityGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DeleteCacheSecurityGroupInput, ...request.Option) (*elasticache.DeleteCacheSecurityGroupOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DeleteCacheSecurityGroupInput, ...request.Option) *elasticache.DeleteCacheSecurityGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteCacheSecurityGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DeleteCacheSecurityGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteCacheSubnetGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteCacheSubnetGroup(_a0 *elasticache.DeleteCacheSubnetGroupInput) (*elasticache.DeleteCacheSubnetGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DeleteCacheSubnetGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DeleteCacheSubnetGroupInput) (*elasticache.DeleteCacheSubnetGroupOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DeleteCacheSubnetGroupInput) *elasticache.DeleteCacheSubnetGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteCacheSubnetGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DeleteCacheSubnetGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteCacheSubnetGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteCacheSubnetGroupRequest(_a0 *elasticache.DeleteCacheSubnetGroupInput) (*request.Request, *elasticache.DeleteCacheSubnetGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DeleteCacheSubnetGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.DeleteCacheSubnetGroupInput) (*request.Request, *elasticache.DeleteCacheSubnetGroupOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DeleteCacheSubnetGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DeleteCacheSubnetGroupInput) *elasticache.DeleteCacheSubnetGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DeleteCacheSubnetGroupOutput) - } - } - - return r0, r1 -} - -// DeleteCacheSubnetGroupWithContext provides a mock function with given fields: _a0, 
_a1, _a2 -func (_m *ElastiCacheAPI) DeleteCacheSubnetGroupWithContext(_a0 context.Context, _a1 *elasticache.DeleteCacheSubnetGroupInput, _a2 ...request.Option) (*elasticache.DeleteCacheSubnetGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DeleteCacheSubnetGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DeleteCacheSubnetGroupInput, ...request.Option) (*elasticache.DeleteCacheSubnetGroupOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DeleteCacheSubnetGroupInput, ...request.Option) *elasticache.DeleteCacheSubnetGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteCacheSubnetGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DeleteCacheSubnetGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteGlobalReplicationGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteGlobalReplicationGroup(_a0 *elasticache.DeleteGlobalReplicationGroupInput) (*elasticache.DeleteGlobalReplicationGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DeleteGlobalReplicationGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DeleteGlobalReplicationGroupInput) (*elasticache.DeleteGlobalReplicationGroupOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DeleteGlobalReplicationGroupInput) *elasticache.DeleteGlobalReplicationGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteGlobalReplicationGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DeleteGlobalReplicationGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteGlobalReplicationGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteGlobalReplicationGroupRequest(_a0 *elasticache.DeleteGlobalReplicationGroupInput) (*request.Request, *elasticache.DeleteGlobalReplicationGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DeleteGlobalReplicationGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.DeleteGlobalReplicationGroupInput) (*request.Request, *elasticache.DeleteGlobalReplicationGroupOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DeleteGlobalReplicationGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DeleteGlobalReplicationGroupInput) *elasticache.DeleteGlobalReplicationGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = 
ret.Get(1).(*elasticache.DeleteGlobalReplicationGroupOutput) - } - } - - return r0, r1 -} - -// DeleteGlobalReplicationGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DeleteGlobalReplicationGroupWithContext(_a0 context.Context, _a1 *elasticache.DeleteGlobalReplicationGroupInput, _a2 ...request.Option) (*elasticache.DeleteGlobalReplicationGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DeleteGlobalReplicationGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DeleteGlobalReplicationGroupInput, ...request.Option) (*elasticache.DeleteGlobalReplicationGroupOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DeleteGlobalReplicationGroupInput, ...request.Option) *elasticache.DeleteGlobalReplicationGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteGlobalReplicationGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DeleteGlobalReplicationGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteReplicationGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteReplicationGroup(_a0 *elasticache.DeleteReplicationGroupInput) (*elasticache.DeleteReplicationGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DeleteReplicationGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DeleteReplicationGroupInput) (*elasticache.DeleteReplicationGroupOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DeleteReplicationGroupInput) *elasticache.DeleteReplicationGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteReplicationGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DeleteReplicationGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteReplicationGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteReplicationGroupRequest(_a0 *elasticache.DeleteReplicationGroupInput) (*request.Request, *elasticache.DeleteReplicationGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DeleteReplicationGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.DeleteReplicationGroupInput) (*request.Request, *elasticache.DeleteReplicationGroupOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DeleteReplicationGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DeleteReplicationGroupInput) *elasticache.DeleteReplicationGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DeleteReplicationGroupOutput) - } - } - - return r0, r1 -} - -// DeleteReplicationGroupWithContext provides a mock function with given fields: _a0, 
_a1, _a2 -func (_m *ElastiCacheAPI) DeleteReplicationGroupWithContext(_a0 context.Context, _a1 *elasticache.DeleteReplicationGroupInput, _a2 ...request.Option) (*elasticache.DeleteReplicationGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DeleteReplicationGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DeleteReplicationGroupInput, ...request.Option) (*elasticache.DeleteReplicationGroupOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DeleteReplicationGroupInput, ...request.Option) *elasticache.DeleteReplicationGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteReplicationGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DeleteReplicationGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteServerlessCache provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteServerlessCache(_a0 *elasticache.DeleteServerlessCacheInput) (*elasticache.DeleteServerlessCacheOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DeleteServerlessCacheOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DeleteServerlessCacheInput) (*elasticache.DeleteServerlessCacheOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DeleteServerlessCacheInput) *elasticache.DeleteServerlessCacheOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteServerlessCacheOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DeleteServerlessCacheInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteServerlessCacheRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteServerlessCacheRequest(_a0 *elasticache.DeleteServerlessCacheInput) (*request.Request, *elasticache.DeleteServerlessCacheOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DeleteServerlessCacheOutput - if rf, ok := ret.Get(0).(func(*elasticache.DeleteServerlessCacheInput) (*request.Request, *elasticache.DeleteServerlessCacheOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DeleteServerlessCacheInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DeleteServerlessCacheInput) *elasticache.DeleteServerlessCacheOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DeleteServerlessCacheOutput) - } - } - - return r0, r1 -} - -// DeleteServerlessCacheSnapshot provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) 
DeleteServerlessCacheSnapshot(_a0 *elasticache.DeleteServerlessCacheSnapshotInput) (*elasticache.DeleteServerlessCacheSnapshotOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DeleteServerlessCacheSnapshotOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DeleteServerlessCacheSnapshotInput) (*elasticache.DeleteServerlessCacheSnapshotOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DeleteServerlessCacheSnapshotInput) *elasticache.DeleteServerlessCacheSnapshotOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteServerlessCacheSnapshotOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DeleteServerlessCacheSnapshotInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteServerlessCacheSnapshotRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteServerlessCacheSnapshotRequest(_a0 *elasticache.DeleteServerlessCacheSnapshotInput) (*request.Request, *elasticache.DeleteServerlessCacheSnapshotOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DeleteServerlessCacheSnapshotOutput - if rf, ok := ret.Get(0).(func(*elasticache.DeleteServerlessCacheSnapshotInput) (*request.Request, *elasticache.DeleteServerlessCacheSnapshotOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DeleteServerlessCacheSnapshotInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DeleteServerlessCacheSnapshotInput) *elasticache.DeleteServerlessCacheSnapshotOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DeleteServerlessCacheSnapshotOutput) - } - } - - return r0, r1 -} - -// DeleteServerlessCacheSnapshotWithContext provides a mock function with given fields: _a0, _a1, 
_a2 -func (_m *ElastiCacheAPI) DeleteServerlessCacheSnapshotWithContext(_a0 context.Context, _a1 *elasticache.DeleteServerlessCacheSnapshotInput, _a2 ...request.Option) (*elasticache.DeleteServerlessCacheSnapshotOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DeleteServerlessCacheSnapshotOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DeleteServerlessCacheSnapshotInput, ...request.Option) (*elasticache.DeleteServerlessCacheSnapshotOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DeleteServerlessCacheSnapshotInput, ...request.Option) *elasticache.DeleteServerlessCacheSnapshotOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteServerlessCacheSnapshotOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DeleteServerlessCacheSnapshotInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteServerlessCacheWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DeleteServerlessCacheWithContext(_a0 context.Context, _a1 *elasticache.DeleteServerlessCacheInput, _a2 ...request.Option) (*elasticache.DeleteServerlessCacheOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
- - var r0 *elasticache.DeleteServerlessCacheOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DeleteServerlessCacheInput, ...request.Option) (*elasticache.DeleteServerlessCacheOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DeleteServerlessCacheInput, ...request.Option) *elasticache.DeleteServerlessCacheOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteServerlessCacheOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DeleteServerlessCacheInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteSnapshot provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteSnapshot(_a0 *elasticache.DeleteSnapshotInput) (*elasticache.DeleteSnapshotOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DeleteSnapshotOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DeleteSnapshotInput) (*elasticache.DeleteSnapshotOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DeleteSnapshotInput) *elasticache.DeleteSnapshotOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteSnapshotOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DeleteSnapshotInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteSnapshotRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteSnapshotRequest(_a0 *elasticache.DeleteSnapshotInput) (*request.Request, *elasticache.DeleteSnapshotOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DeleteSnapshotOutput - if rf, ok := ret.Get(0).(func(*elasticache.DeleteSnapshotInput) (*request.Request, *elasticache.DeleteSnapshotOutput)); ok { - 
return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DeleteSnapshotInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DeleteSnapshotInput) *elasticache.DeleteSnapshotOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DeleteSnapshotOutput) - } - } - - return r0, r1 -} - -// DeleteSnapshotWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DeleteSnapshotWithContext(_a0 context.Context, _a1 *elasticache.DeleteSnapshotInput, _a2 ...request.Option) (*elasticache.DeleteSnapshotOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DeleteSnapshotOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DeleteSnapshotInput, ...request.Option) (*elasticache.DeleteSnapshotOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DeleteSnapshotInput, ...request.Option) *elasticache.DeleteSnapshotOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteSnapshotOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DeleteSnapshotInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteUser provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteUser(_a0 *elasticache.DeleteUserInput) (*elasticache.DeleteUserOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DeleteUserOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DeleteUserInput) (*elasticache.DeleteUserOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DeleteUserInput) *elasticache.DeleteUserOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteUserOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DeleteUserInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteUserGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteUserGroup(_a0 *elasticache.DeleteUserGroupInput) (*elasticache.DeleteUserGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DeleteUserGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DeleteUserGroupInput) (*elasticache.DeleteUserGroupOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DeleteUserGroupInput) *elasticache.DeleteUserGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteUserGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DeleteUserGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteUserGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteUserGroupRequest(_a0 *elasticache.DeleteUserGroupInput) (*request.Request, *elasticache.DeleteUserGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DeleteUserGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.DeleteUserGroupInput) 
(*request.Request, *elasticache.DeleteUserGroupOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DeleteUserGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DeleteUserGroupInput) *elasticache.DeleteUserGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DeleteUserGroupOutput) - } - } - - return r0, r1 -} - -// DeleteUserGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DeleteUserGroupWithContext(_a0 context.Context, _a1 *elasticache.DeleteUserGroupInput, _a2 ...request.Option) (*elasticache.DeleteUserGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DeleteUserGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DeleteUserGroupInput, ...request.Option) (*elasticache.DeleteUserGroupOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DeleteUserGroupInput, ...request.Option) *elasticache.DeleteUserGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteUserGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DeleteUserGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteUserRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteUserRequest(_a0 *elasticache.DeleteUserInput) (*request.Request, *elasticache.DeleteUserOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DeleteUserOutput - if rf, ok := ret.Get(0).(func(*elasticache.DeleteUserInput) (*request.Request, *elasticache.DeleteUserOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DeleteUserInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DeleteUserInput) *elasticache.DeleteUserOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DeleteUserOutput) - } - } - - return r0, r1 -} - -// DeleteUserWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DeleteUserWithContext(_a0 context.Context, _a1 *elasticache.DeleteUserInput, _a2 ...request.Option) (*elasticache.DeleteUserOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DeleteUserOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DeleteUserInput, ...request.Option) (*elasticache.DeleteUserOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DeleteUserInput, ...request.Option) *elasticache.DeleteUserOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteUserOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DeleteUserInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeCacheClusters provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeCacheClusters(_a0 *elasticache.DescribeCacheClustersInput) (*elasticache.DescribeCacheClustersOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeCacheClustersOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheClustersInput) (*elasticache.DescribeCacheClustersOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheClustersInput) *elasticache.DescribeCacheClustersOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeCacheClustersOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeCacheClustersInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeCacheClustersPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeCacheClustersPages(_a0 *elasticache.DescribeCacheClustersInput, _a1 func(*elasticache.DescribeCacheClustersOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheClustersInput, func(*elasticache.DescribeCacheClustersOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeCacheClustersPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeCacheClustersPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeCacheClustersInput, _a2 func(*elasticache.DescribeCacheClustersOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
- - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheClustersInput, func(*elasticache.DescribeCacheClustersOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeCacheClustersRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeCacheClustersRequest(_a0 *elasticache.DescribeCacheClustersInput) (*request.Request, *elasticache.DescribeCacheClustersOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DescribeCacheClustersOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheClustersInput) (*request.Request, *elasticache.DescribeCacheClustersOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheClustersInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeCacheClustersInput) *elasticache.DescribeCacheClustersOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeCacheClustersOutput) - } - } - - return r0, r1 -} - -// DescribeCacheClustersWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeCacheClustersWithContext(_a0 context.Context, _a1 *elasticache.DescribeCacheClustersInput, _a2 ...request.Option) (*elasticache.DescribeCacheClustersOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
- - var r0 *elasticache.DescribeCacheClustersOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheClustersInput, ...request.Option) (*elasticache.DescribeCacheClustersOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheClustersInput, ...request.Option) *elasticache.DescribeCacheClustersOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeCacheClustersOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeCacheClustersInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeCacheEngineVersions provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeCacheEngineVersions(_a0 *elasticache.DescribeCacheEngineVersionsInput) (*elasticache.DescribeCacheEngineVersionsOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeCacheEngineVersionsOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheEngineVersionsInput) (*elasticache.DescribeCacheEngineVersionsOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheEngineVersionsInput) *elasticache.DescribeCacheEngineVersionsOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeCacheEngineVersionsOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeCacheEngineVersionsInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeCacheEngineVersionsPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeCacheEngineVersionsPages(_a0 *elasticache.DescribeCacheEngineVersionsInput, _a1 func(*elasticache.DescribeCacheEngineVersionsOutput, bool) bool) error { - ret := _m.Called(_a0, 
_a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheEngineVersionsInput, func(*elasticache.DescribeCacheEngineVersionsOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeCacheEngineVersionsPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeCacheEngineVersionsPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeCacheEngineVersionsInput, _a2 func(*elasticache.DescribeCacheEngineVersionsOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheEngineVersionsInput, func(*elasticache.DescribeCacheEngineVersionsOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) 
- } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeCacheEngineVersionsRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeCacheEngineVersionsRequest(_a0 *elasticache.DescribeCacheEngineVersionsInput) (*request.Request, *elasticache.DescribeCacheEngineVersionsOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DescribeCacheEngineVersionsOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheEngineVersionsInput) (*request.Request, *elasticache.DescribeCacheEngineVersionsOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheEngineVersionsInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeCacheEngineVersionsInput) *elasticache.DescribeCacheEngineVersionsOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeCacheEngineVersionsOutput) - } - } - - return r0, r1 -} - -// DescribeCacheEngineVersionsWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeCacheEngineVersionsWithContext(_a0 context.Context, _a1 *elasticache.DescribeCacheEngineVersionsInput, _a2 ...request.Option) (*elasticache.DescribeCacheEngineVersionsOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DescribeCacheEngineVersionsOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheEngineVersionsInput, ...request.Option) (*elasticache.DescribeCacheEngineVersionsOutput, error)); ok { - return rf(_a0, _a1, _a2...) 
- } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheEngineVersionsInput, ...request.Option) *elasticache.DescribeCacheEngineVersionsOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeCacheEngineVersionsOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeCacheEngineVersionsInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeCacheParameterGroups provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeCacheParameterGroups(_a0 *elasticache.DescribeCacheParameterGroupsInput) (*elasticache.DescribeCacheParameterGroupsOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeCacheParameterGroupsOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheParameterGroupsInput) (*elasticache.DescribeCacheParameterGroupsOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheParameterGroupsInput) *elasticache.DescribeCacheParameterGroupsOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeCacheParameterGroupsOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeCacheParameterGroupsInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeCacheParameterGroupsPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeCacheParameterGroupsPages(_a0 *elasticache.DescribeCacheParameterGroupsInput, _a1 func(*elasticache.DescribeCacheParameterGroupsOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheParameterGroupsInput, func(*elasticache.DescribeCacheParameterGroupsOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = 
ret.Error(0) - } - - return r0 -} - -// DescribeCacheParameterGroupsPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeCacheParameterGroupsPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeCacheParameterGroupsInput, _a2 func(*elasticache.DescribeCacheParameterGroupsOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheParameterGroupsInput, func(*elasticache.DescribeCacheParameterGroupsOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeCacheParameterGroupsRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeCacheParameterGroupsRequest(_a0 *elasticache.DescribeCacheParameterGroupsInput) (*request.Request, *elasticache.DescribeCacheParameterGroupsOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DescribeCacheParameterGroupsOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheParameterGroupsInput) (*request.Request, *elasticache.DescribeCacheParameterGroupsOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheParameterGroupsInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeCacheParameterGroupsInput) *elasticache.DescribeCacheParameterGroupsOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeCacheParameterGroupsOutput) - } - } - - return r0, r1 -} - -// DescribeCacheParameterGroupsWithContext provides a mock 
function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeCacheParameterGroupsWithContext(_a0 context.Context, _a1 *elasticache.DescribeCacheParameterGroupsInput, _a2 ...request.Option) (*elasticache.DescribeCacheParameterGroupsOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DescribeCacheParameterGroupsOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheParameterGroupsInput, ...request.Option) (*elasticache.DescribeCacheParameterGroupsOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheParameterGroupsInput, ...request.Option) *elasticache.DescribeCacheParameterGroupsOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeCacheParameterGroupsOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeCacheParameterGroupsInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeCacheParameters provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeCacheParameters(_a0 *elasticache.DescribeCacheParametersInput) (*elasticache.DescribeCacheParametersOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeCacheParametersOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheParametersInput) (*elasticache.DescribeCacheParametersOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheParametersInput) *elasticache.DescribeCacheParametersOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeCacheParametersOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeCacheParametersInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeCacheParametersPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeCacheParametersPages(_a0 *elasticache.DescribeCacheParametersInput, _a1 func(*elasticache.DescribeCacheParametersOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheParametersInput, func(*elasticache.DescribeCacheParametersOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeCacheParametersPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeCacheParametersPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeCacheParametersInput, _a2 func(*elasticache.DescribeCacheParametersOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) 
- ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheParametersInput, func(*elasticache.DescribeCacheParametersOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeCacheParametersRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeCacheParametersRequest(_a0 *elasticache.DescribeCacheParametersInput) (*request.Request, *elasticache.DescribeCacheParametersOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DescribeCacheParametersOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheParametersInput) (*request.Request, *elasticache.DescribeCacheParametersOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheParametersInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeCacheParametersInput) *elasticache.DescribeCacheParametersOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeCacheParametersOutput) - } - } - - return r0, r1 -} - -// DescribeCacheParametersWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeCacheParametersWithContext(_a0 context.Context, _a1 *elasticache.DescribeCacheParametersInput, _a2 ...request.Option) (*elasticache.DescribeCacheParametersOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
- - var r0 *elasticache.DescribeCacheParametersOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheParametersInput, ...request.Option) (*elasticache.DescribeCacheParametersOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheParametersInput, ...request.Option) *elasticache.DescribeCacheParametersOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeCacheParametersOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeCacheParametersInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeCacheSecurityGroups provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeCacheSecurityGroups(_a0 *elasticache.DescribeCacheSecurityGroupsInput) (*elasticache.DescribeCacheSecurityGroupsOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeCacheSecurityGroupsOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheSecurityGroupsInput) (*elasticache.DescribeCacheSecurityGroupsOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheSecurityGroupsInput) *elasticache.DescribeCacheSecurityGroupsOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeCacheSecurityGroupsOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeCacheSecurityGroupsInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeCacheSecurityGroupsPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeCacheSecurityGroupsPages(_a0 *elasticache.DescribeCacheSecurityGroupsInput, _a1 func(*elasticache.DescribeCacheSecurityGroupsOutput, bool) bool) error { - ret := 
_m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheSecurityGroupsInput, func(*elasticache.DescribeCacheSecurityGroupsOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeCacheSecurityGroupsPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeCacheSecurityGroupsPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeCacheSecurityGroupsInput, _a2 func(*elasticache.DescribeCacheSecurityGroupsOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheSecurityGroupsInput, func(*elasticache.DescribeCacheSecurityGroupsOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) 
- } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeCacheSecurityGroupsRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeCacheSecurityGroupsRequest(_a0 *elasticache.DescribeCacheSecurityGroupsInput) (*request.Request, *elasticache.DescribeCacheSecurityGroupsOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DescribeCacheSecurityGroupsOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheSecurityGroupsInput) (*request.Request, *elasticache.DescribeCacheSecurityGroupsOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheSecurityGroupsInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeCacheSecurityGroupsInput) *elasticache.DescribeCacheSecurityGroupsOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeCacheSecurityGroupsOutput) - } - } - - return r0, r1 -} - -// DescribeCacheSecurityGroupsWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeCacheSecurityGroupsWithContext(_a0 context.Context, _a1 *elasticache.DescribeCacheSecurityGroupsInput, _a2 ...request.Option) (*elasticache.DescribeCacheSecurityGroupsOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DescribeCacheSecurityGroupsOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheSecurityGroupsInput, ...request.Option) (*elasticache.DescribeCacheSecurityGroupsOutput, error)); ok { - return rf(_a0, _a1, _a2...) 
- } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheSecurityGroupsInput, ...request.Option) *elasticache.DescribeCacheSecurityGroupsOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeCacheSecurityGroupsOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeCacheSecurityGroupsInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeCacheSubnetGroups provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeCacheSubnetGroups(_a0 *elasticache.DescribeCacheSubnetGroupsInput) (*elasticache.DescribeCacheSubnetGroupsOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeCacheSubnetGroupsOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheSubnetGroupsInput) (*elasticache.DescribeCacheSubnetGroupsOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheSubnetGroupsInput) *elasticache.DescribeCacheSubnetGroupsOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeCacheSubnetGroupsOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeCacheSubnetGroupsInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeCacheSubnetGroupsPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeCacheSubnetGroupsPages(_a0 *elasticache.DescribeCacheSubnetGroupsInput, _a1 func(*elasticache.DescribeCacheSubnetGroupsOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheSubnetGroupsInput, func(*elasticache.DescribeCacheSubnetGroupsOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// 
DescribeCacheSubnetGroupsPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeCacheSubnetGroupsPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeCacheSubnetGroupsInput, _a2 func(*elasticache.DescribeCacheSubnetGroupsOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheSubnetGroupsInput, func(*elasticache.DescribeCacheSubnetGroupsOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeCacheSubnetGroupsRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeCacheSubnetGroupsRequest(_a0 *elasticache.DescribeCacheSubnetGroupsInput) (*request.Request, *elasticache.DescribeCacheSubnetGroupsOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DescribeCacheSubnetGroupsOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheSubnetGroupsInput) (*request.Request, *elasticache.DescribeCacheSubnetGroupsOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheSubnetGroupsInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeCacheSubnetGroupsInput) *elasticache.DescribeCacheSubnetGroupsOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeCacheSubnetGroupsOutput) - } - } - - return r0, r1 -} - -// DescribeCacheSubnetGroupsWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) 
DescribeCacheSubnetGroupsWithContext(_a0 context.Context, _a1 *elasticache.DescribeCacheSubnetGroupsInput, _a2 ...request.Option) (*elasticache.DescribeCacheSubnetGroupsOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DescribeCacheSubnetGroupsOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheSubnetGroupsInput, ...request.Option) (*elasticache.DescribeCacheSubnetGroupsOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheSubnetGroupsInput, ...request.Option) *elasticache.DescribeCacheSubnetGroupsOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeCacheSubnetGroupsOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeCacheSubnetGroupsInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeEngineDefaultParameters provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeEngineDefaultParameters(_a0 *elasticache.DescribeEngineDefaultParametersInput) (*elasticache.DescribeEngineDefaultParametersOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeEngineDefaultParametersOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeEngineDefaultParametersInput) (*elasticache.DescribeEngineDefaultParametersOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeEngineDefaultParametersInput) *elasticache.DescribeEngineDefaultParametersOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeEngineDefaultParametersOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeEngineDefaultParametersInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeEngineDefaultParametersPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeEngineDefaultParametersPages(_a0 *elasticache.DescribeEngineDefaultParametersInput, _a1 func(*elasticache.DescribeEngineDefaultParametersOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeEngineDefaultParametersInput, func(*elasticache.DescribeEngineDefaultParametersOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeEngineDefaultParametersPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeEngineDefaultParametersPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeEngineDefaultParametersInput, _a2 func(*elasticache.DescribeEngineDefaultParametersOutput, bool) bool, _a3 ...request.Option) error { - _va := 
make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeEngineDefaultParametersInput, func(*elasticache.DescribeEngineDefaultParametersOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeEngineDefaultParametersRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeEngineDefaultParametersRequest(_a0 *elasticache.DescribeEngineDefaultParametersInput) (*request.Request, *elasticache.DescribeEngineDefaultParametersOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DescribeEngineDefaultParametersOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeEngineDefaultParametersInput) (*request.Request, *elasticache.DescribeEngineDefaultParametersOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeEngineDefaultParametersInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeEngineDefaultParametersInput) *elasticache.DescribeEngineDefaultParametersOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeEngineDefaultParametersOutput) - } - } - - return r0, r1 -} - -// DescribeEngineDefaultParametersWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeEngineDefaultParametersWithContext(_a0 context.Context, _a1 *elasticache.DescribeEngineDefaultParametersInput, _a2 ...request.Option) (*elasticache.DescribeEngineDefaultParametersOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - 
var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DescribeEngineDefaultParametersOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeEngineDefaultParametersInput, ...request.Option) (*elasticache.DescribeEngineDefaultParametersOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeEngineDefaultParametersInput, ...request.Option) *elasticache.DescribeEngineDefaultParametersOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeEngineDefaultParametersOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeEngineDefaultParametersInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeEvents provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeEvents(_a0 *elasticache.DescribeEventsInput) (*elasticache.DescribeEventsOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeEventsOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeEventsInput) (*elasticache.DescribeEventsOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeEventsInput) *elasticache.DescribeEventsOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeEventsOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeEventsInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeEventsPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeEventsPages(_a0 *elasticache.DescribeEventsInput, _a1 func(*elasticache.DescribeEventsOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 
error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeEventsInput, func(*elasticache.DescribeEventsOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeEventsPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeEventsPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeEventsInput, _a2 func(*elasticache.DescribeEventsOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeEventsInput, func(*elasticache.DescribeEventsOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeEventsRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeEventsRequest(_a0 *elasticache.DescribeEventsInput) (*request.Request, *elasticache.DescribeEventsOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DescribeEventsOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeEventsInput) (*request.Request, *elasticache.DescribeEventsOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeEventsInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeEventsInput) *elasticache.DescribeEventsOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeEventsOutput) - } - } - - return r0, r1 -} - -// DescribeEventsWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) 
DescribeEventsWithContext(_a0 context.Context, _a1 *elasticache.DescribeEventsInput, _a2 ...request.Option) (*elasticache.DescribeEventsOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DescribeEventsOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeEventsInput, ...request.Option) (*elasticache.DescribeEventsOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeEventsInput, ...request.Option) *elasticache.DescribeEventsOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeEventsOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeEventsInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeGlobalReplicationGroups provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeGlobalReplicationGroups(_a0 *elasticache.DescribeGlobalReplicationGroupsInput) (*elasticache.DescribeGlobalReplicationGroupsOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeGlobalReplicationGroupsOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeGlobalReplicationGroupsInput) (*elasticache.DescribeGlobalReplicationGroupsOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeGlobalReplicationGroupsInput) *elasticache.DescribeGlobalReplicationGroupsOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeGlobalReplicationGroupsOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeGlobalReplicationGroupsInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeGlobalReplicationGroupsPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeGlobalReplicationGroupsPages(_a0 *elasticache.DescribeGlobalReplicationGroupsInput, _a1 func(*elasticache.DescribeGlobalReplicationGroupsOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeGlobalReplicationGroupsInput, func(*elasticache.DescribeGlobalReplicationGroupsOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeGlobalReplicationGroupsPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeGlobalReplicationGroupsPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeGlobalReplicationGroupsInput, _a2 func(*elasticache.DescribeGlobalReplicationGroupsOutput, bool) bool, _a3 ...request.Option) error { - _va := 
make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeGlobalReplicationGroupsInput, func(*elasticache.DescribeGlobalReplicationGroupsOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeGlobalReplicationGroupsRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeGlobalReplicationGroupsRequest(_a0 *elasticache.DescribeGlobalReplicationGroupsInput) (*request.Request, *elasticache.DescribeGlobalReplicationGroupsOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DescribeGlobalReplicationGroupsOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeGlobalReplicationGroupsInput) (*request.Request, *elasticache.DescribeGlobalReplicationGroupsOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeGlobalReplicationGroupsInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeGlobalReplicationGroupsInput) *elasticache.DescribeGlobalReplicationGroupsOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeGlobalReplicationGroupsOutput) - } - } - - return r0, r1 -} - -// DescribeGlobalReplicationGroupsWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeGlobalReplicationGroupsWithContext(_a0 context.Context, _a1 *elasticache.DescribeGlobalReplicationGroupsInput, _a2 ...request.Option) (*elasticache.DescribeGlobalReplicationGroupsOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - 
var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DescribeGlobalReplicationGroupsOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeGlobalReplicationGroupsInput, ...request.Option) (*elasticache.DescribeGlobalReplicationGroupsOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeGlobalReplicationGroupsInput, ...request.Option) *elasticache.DescribeGlobalReplicationGroupsOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeGlobalReplicationGroupsOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeGlobalReplicationGroupsInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeReplicationGroups provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeReplicationGroups(_a0 *elasticache.DescribeReplicationGroupsInput) (*elasticache.DescribeReplicationGroupsOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeReplicationGroupsOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeReplicationGroupsInput) (*elasticache.DescribeReplicationGroupsOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeReplicationGroupsInput) *elasticache.DescribeReplicationGroupsOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeReplicationGroupsOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeReplicationGroupsInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeReplicationGroupsPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) 
DescribeReplicationGroupsPages(_a0 *elasticache.DescribeReplicationGroupsInput, _a1 func(*elasticache.DescribeReplicationGroupsOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeReplicationGroupsInput, func(*elasticache.DescribeReplicationGroupsOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeReplicationGroupsPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeReplicationGroupsPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeReplicationGroupsInput, _a2 func(*elasticache.DescribeReplicationGroupsOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeReplicationGroupsInput, func(*elasticache.DescribeReplicationGroupsOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) 
- } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeReplicationGroupsRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeReplicationGroupsRequest(_a0 *elasticache.DescribeReplicationGroupsInput) (*request.Request, *elasticache.DescribeReplicationGroupsOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DescribeReplicationGroupsOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeReplicationGroupsInput) (*request.Request, *elasticache.DescribeReplicationGroupsOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeReplicationGroupsInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeReplicationGroupsInput) *elasticache.DescribeReplicationGroupsOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeReplicationGroupsOutput) - } - } - - return r0, r1 -} - -// DescribeReplicationGroupsWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeReplicationGroupsWithContext(_a0 context.Context, _a1 *elasticache.DescribeReplicationGroupsInput, _a2 ...request.Option) (*elasticache.DescribeReplicationGroupsOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DescribeReplicationGroupsOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeReplicationGroupsInput, ...request.Option) (*elasticache.DescribeReplicationGroupsOutput, error)); ok { - return rf(_a0, _a1, _a2...) 
- } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeReplicationGroupsInput, ...request.Option) *elasticache.DescribeReplicationGroupsOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeReplicationGroupsOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeReplicationGroupsInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeReservedCacheNodes provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeReservedCacheNodes(_a0 *elasticache.DescribeReservedCacheNodesInput) (*elasticache.DescribeReservedCacheNodesOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeReservedCacheNodesOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeReservedCacheNodesInput) (*elasticache.DescribeReservedCacheNodesOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeReservedCacheNodesInput) *elasticache.DescribeReservedCacheNodesOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeReservedCacheNodesOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeReservedCacheNodesInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeReservedCacheNodesOfferings provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeReservedCacheNodesOfferings(_a0 *elasticache.DescribeReservedCacheNodesOfferingsInput) (*elasticache.DescribeReservedCacheNodesOfferingsOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeReservedCacheNodesOfferingsOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeReservedCacheNodesOfferingsInput) (*elasticache.DescribeReservedCacheNodesOfferingsOutput, error)); ok { - return rf(_a0) - } 
- if rf, ok := ret.Get(0).(func(*elasticache.DescribeReservedCacheNodesOfferingsInput) *elasticache.DescribeReservedCacheNodesOfferingsOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeReservedCacheNodesOfferingsOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeReservedCacheNodesOfferingsInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeReservedCacheNodesOfferingsPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeReservedCacheNodesOfferingsPages(_a0 *elasticache.DescribeReservedCacheNodesOfferingsInput, _a1 func(*elasticache.DescribeReservedCacheNodesOfferingsOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeReservedCacheNodesOfferingsInput, func(*elasticache.DescribeReservedCacheNodesOfferingsOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeReservedCacheNodesOfferingsPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeReservedCacheNodesOfferingsPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeReservedCacheNodesOfferingsInput, _a2 func(*elasticache.DescribeReservedCacheNodesOfferingsOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeReservedCacheNodesOfferingsInput, func(*elasticache.DescribeReservedCacheNodesOfferingsOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) 
- } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeReservedCacheNodesOfferingsRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeReservedCacheNodesOfferingsRequest(_a0 *elasticache.DescribeReservedCacheNodesOfferingsInput) (*request.Request, *elasticache.DescribeReservedCacheNodesOfferingsOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DescribeReservedCacheNodesOfferingsOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeReservedCacheNodesOfferingsInput) (*request.Request, *elasticache.DescribeReservedCacheNodesOfferingsOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeReservedCacheNodesOfferingsInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeReservedCacheNodesOfferingsInput) *elasticache.DescribeReservedCacheNodesOfferingsOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeReservedCacheNodesOfferingsOutput) - } - } - - return r0, r1 -} - -// DescribeReservedCacheNodesOfferingsWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeReservedCacheNodesOfferingsWithContext(_a0 context.Context, _a1 *elasticache.DescribeReservedCacheNodesOfferingsInput, _a2 ...request.Option) (*elasticache.DescribeReservedCacheNodesOfferingsOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
- - var r0 *elasticache.DescribeReservedCacheNodesOfferingsOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeReservedCacheNodesOfferingsInput, ...request.Option) (*elasticache.DescribeReservedCacheNodesOfferingsOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeReservedCacheNodesOfferingsInput, ...request.Option) *elasticache.DescribeReservedCacheNodesOfferingsOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeReservedCacheNodesOfferingsOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeReservedCacheNodesOfferingsInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeReservedCacheNodesPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeReservedCacheNodesPages(_a0 *elasticache.DescribeReservedCacheNodesInput, _a1 func(*elasticache.DescribeReservedCacheNodesOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeReservedCacheNodesInput, func(*elasticache.DescribeReservedCacheNodesOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeReservedCacheNodesPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeReservedCacheNodesPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeReservedCacheNodesInput, _a2 func(*elasticache.DescribeReservedCacheNodesOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
- - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeReservedCacheNodesInput, func(*elasticache.DescribeReservedCacheNodesOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeReservedCacheNodesRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeReservedCacheNodesRequest(_a0 *elasticache.DescribeReservedCacheNodesInput) (*request.Request, *elasticache.DescribeReservedCacheNodesOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DescribeReservedCacheNodesOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeReservedCacheNodesInput) (*request.Request, *elasticache.DescribeReservedCacheNodesOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeReservedCacheNodesInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeReservedCacheNodesInput) *elasticache.DescribeReservedCacheNodesOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeReservedCacheNodesOutput) - } - } - - return r0, r1 -} - -// DescribeReservedCacheNodesWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeReservedCacheNodesWithContext(_a0 context.Context, _a1 *elasticache.DescribeReservedCacheNodesInput, _a2 ...request.Option) (*elasticache.DescribeReservedCacheNodesOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
- - var r0 *elasticache.DescribeReservedCacheNodesOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeReservedCacheNodesInput, ...request.Option) (*elasticache.DescribeReservedCacheNodesOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeReservedCacheNodesInput, ...request.Option) *elasticache.DescribeReservedCacheNodesOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeReservedCacheNodesOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeReservedCacheNodesInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeServerlessCacheSnapshots provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeServerlessCacheSnapshots(_a0 *elasticache.DescribeServerlessCacheSnapshotsInput) (*elasticache.DescribeServerlessCacheSnapshotsOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeServerlessCacheSnapshotsOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeServerlessCacheSnapshotsInput) (*elasticache.DescribeServerlessCacheSnapshotsOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeServerlessCacheSnapshotsInput) *elasticache.DescribeServerlessCacheSnapshotsOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeServerlessCacheSnapshotsOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeServerlessCacheSnapshotsInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeServerlessCacheSnapshotsPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeServerlessCacheSnapshotsPages(_a0 
*elasticache.DescribeServerlessCacheSnapshotsInput, _a1 func(*elasticache.DescribeServerlessCacheSnapshotsOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeServerlessCacheSnapshotsInput, func(*elasticache.DescribeServerlessCacheSnapshotsOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeServerlessCacheSnapshotsPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeServerlessCacheSnapshotsPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeServerlessCacheSnapshotsInput, _a2 func(*elasticache.DescribeServerlessCacheSnapshotsOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeServerlessCacheSnapshotsInput, func(*elasticache.DescribeServerlessCacheSnapshotsOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) 
- } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeServerlessCacheSnapshotsRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeServerlessCacheSnapshotsRequest(_a0 *elasticache.DescribeServerlessCacheSnapshotsInput) (*request.Request, *elasticache.DescribeServerlessCacheSnapshotsOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DescribeServerlessCacheSnapshotsOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeServerlessCacheSnapshotsInput) (*request.Request, *elasticache.DescribeServerlessCacheSnapshotsOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeServerlessCacheSnapshotsInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeServerlessCacheSnapshotsInput) *elasticache.DescribeServerlessCacheSnapshotsOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeServerlessCacheSnapshotsOutput) - } - } - - return r0, r1 -} - -// DescribeServerlessCacheSnapshotsWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeServerlessCacheSnapshotsWithContext(_a0 context.Context, _a1 *elasticache.DescribeServerlessCacheSnapshotsInput, _a2 ...request.Option) (*elasticache.DescribeServerlessCacheSnapshotsOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DescribeServerlessCacheSnapshotsOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeServerlessCacheSnapshotsInput, ...request.Option) (*elasticache.DescribeServerlessCacheSnapshotsOutput, error)); ok { - return rf(_a0, _a1, _a2...) 
- } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeServerlessCacheSnapshotsInput, ...request.Option) *elasticache.DescribeServerlessCacheSnapshotsOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeServerlessCacheSnapshotsOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeServerlessCacheSnapshotsInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeServerlessCaches provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeServerlessCaches(_a0 *elasticache.DescribeServerlessCachesInput) (*elasticache.DescribeServerlessCachesOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeServerlessCachesOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeServerlessCachesInput) (*elasticache.DescribeServerlessCachesOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeServerlessCachesInput) *elasticache.DescribeServerlessCachesOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeServerlessCachesOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeServerlessCachesInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeServerlessCachesPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeServerlessCachesPages(_a0 *elasticache.DescribeServerlessCachesInput, _a1 func(*elasticache.DescribeServerlessCachesOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeServerlessCachesInput, func(*elasticache.DescribeServerlessCachesOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// 
DescribeServerlessCachesPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeServerlessCachesPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeServerlessCachesInput, _a2 func(*elasticache.DescribeServerlessCachesOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeServerlessCachesInput, func(*elasticache.DescribeServerlessCachesOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeServerlessCachesRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeServerlessCachesRequest(_a0 *elasticache.DescribeServerlessCachesInput) (*request.Request, *elasticache.DescribeServerlessCachesOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DescribeServerlessCachesOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeServerlessCachesInput) (*request.Request, *elasticache.DescribeServerlessCachesOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeServerlessCachesInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeServerlessCachesInput) *elasticache.DescribeServerlessCachesOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeServerlessCachesOutput) - } - } - - return r0, r1 -} - -// DescribeServerlessCachesWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeServerlessCachesWithContext(_a0 
context.Context, _a1 *elasticache.DescribeServerlessCachesInput, _a2 ...request.Option) (*elasticache.DescribeServerlessCachesOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DescribeServerlessCachesOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeServerlessCachesInput, ...request.Option) (*elasticache.DescribeServerlessCachesOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeServerlessCachesInput, ...request.Option) *elasticache.DescribeServerlessCachesOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeServerlessCachesOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeServerlessCachesInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeServiceUpdates provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeServiceUpdates(_a0 *elasticache.DescribeServiceUpdatesInput) (*elasticache.DescribeServiceUpdatesOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeServiceUpdatesOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeServiceUpdatesInput) (*elasticache.DescribeServiceUpdatesOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeServiceUpdatesInput) *elasticache.DescribeServiceUpdatesOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeServiceUpdatesOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeServiceUpdatesInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeServiceUpdatesPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeServiceUpdatesPages(_a0 *elasticache.DescribeServiceUpdatesInput, _a1 func(*elasticache.DescribeServiceUpdatesOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeServiceUpdatesInput, func(*elasticache.DescribeServiceUpdatesOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeServiceUpdatesPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeServiceUpdatesPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeServiceUpdatesInput, _a2 func(*elasticache.DescribeServiceUpdatesOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
- - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeServiceUpdatesInput, func(*elasticache.DescribeServiceUpdatesOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeServiceUpdatesRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeServiceUpdatesRequest(_a0 *elasticache.DescribeServiceUpdatesInput) (*request.Request, *elasticache.DescribeServiceUpdatesOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DescribeServiceUpdatesOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeServiceUpdatesInput) (*request.Request, *elasticache.DescribeServiceUpdatesOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeServiceUpdatesInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeServiceUpdatesInput) *elasticache.DescribeServiceUpdatesOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeServiceUpdatesOutput) - } - } - - return r0, r1 -} - -// DescribeServiceUpdatesWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeServiceUpdatesWithContext(_a0 context.Context, _a1 *elasticache.DescribeServiceUpdatesInput, _a2 ...request.Option) (*elasticache.DescribeServiceUpdatesOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
- - var r0 *elasticache.DescribeServiceUpdatesOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeServiceUpdatesInput, ...request.Option) (*elasticache.DescribeServiceUpdatesOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeServiceUpdatesInput, ...request.Option) *elasticache.DescribeServiceUpdatesOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeServiceUpdatesOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeServiceUpdatesInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeSnapshots provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeSnapshots(_a0 *elasticache.DescribeSnapshotsInput) (*elasticache.DescribeSnapshotsOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeSnapshotsOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeSnapshotsInput) (*elasticache.DescribeSnapshotsOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeSnapshotsInput) *elasticache.DescribeSnapshotsOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeSnapshotsOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeSnapshotsInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeSnapshotsPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeSnapshotsPages(_a0 *elasticache.DescribeSnapshotsInput, _a1 func(*elasticache.DescribeSnapshotsOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeSnapshotsInput, func(*elasticache.DescribeSnapshotsOutput, bool) 
bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeSnapshotsPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeSnapshotsPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeSnapshotsInput, _a2 func(*elasticache.DescribeSnapshotsOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeSnapshotsInput, func(*elasticache.DescribeSnapshotsOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeSnapshotsRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeSnapshotsRequest(_a0 *elasticache.DescribeSnapshotsInput) (*request.Request, *elasticache.DescribeSnapshotsOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DescribeSnapshotsOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeSnapshotsInput) (*request.Request, *elasticache.DescribeSnapshotsOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeSnapshotsInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeSnapshotsInput) *elasticache.DescribeSnapshotsOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeSnapshotsOutput) - } - } - - return r0, r1 -} - -// DescribeSnapshotsWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeSnapshotsWithContext(_a0 context.Context, _a1 
*elasticache.DescribeSnapshotsInput, _a2 ...request.Option) (*elasticache.DescribeSnapshotsOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DescribeSnapshotsOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeSnapshotsInput, ...request.Option) (*elasticache.DescribeSnapshotsOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeSnapshotsInput, ...request.Option) *elasticache.DescribeSnapshotsOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeSnapshotsOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeSnapshotsInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeUpdateActions provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeUpdateActions(_a0 *elasticache.DescribeUpdateActionsInput) (*elasticache.DescribeUpdateActionsOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeUpdateActionsOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeUpdateActionsInput) (*elasticache.DescribeUpdateActionsOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeUpdateActionsInput) *elasticache.DescribeUpdateActionsOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeUpdateActionsOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeUpdateActionsInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeUpdateActionsPages provides a mock function with given fields: _a0, _a1 -func 
(_m *ElastiCacheAPI) DescribeUpdateActionsPages(_a0 *elasticache.DescribeUpdateActionsInput, _a1 func(*elasticache.DescribeUpdateActionsOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeUpdateActionsInput, func(*elasticache.DescribeUpdateActionsOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeUpdateActionsPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeUpdateActionsPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeUpdateActionsInput, _a2 func(*elasticache.DescribeUpdateActionsOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeUpdateActionsInput, func(*elasticache.DescribeUpdateActionsOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) 
- } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeUpdateActionsRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeUpdateActionsRequest(_a0 *elasticache.DescribeUpdateActionsInput) (*request.Request, *elasticache.DescribeUpdateActionsOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DescribeUpdateActionsOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeUpdateActionsInput) (*request.Request, *elasticache.DescribeUpdateActionsOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeUpdateActionsInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeUpdateActionsInput) *elasticache.DescribeUpdateActionsOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeUpdateActionsOutput) - } - } - - return r0, r1 -} - -// DescribeUpdateActionsWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeUpdateActionsWithContext(_a0 context.Context, _a1 *elasticache.DescribeUpdateActionsInput, _a2 ...request.Option) (*elasticache.DescribeUpdateActionsOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DescribeUpdateActionsOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeUpdateActionsInput, ...request.Option) (*elasticache.DescribeUpdateActionsOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeUpdateActionsInput, ...request.Option) *elasticache.DescribeUpdateActionsOutput); ok { - r0 = rf(_a0, _a1, _a2...) 
- } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeUpdateActionsOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeUpdateActionsInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeUserGroups provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeUserGroups(_a0 *elasticache.DescribeUserGroupsInput) (*elasticache.DescribeUserGroupsOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeUserGroupsOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeUserGroupsInput) (*elasticache.DescribeUserGroupsOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeUserGroupsInput) *elasticache.DescribeUserGroupsOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeUserGroupsOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeUserGroupsInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeUserGroupsPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeUserGroupsPages(_a0 *elasticache.DescribeUserGroupsInput, _a1 func(*elasticache.DescribeUserGroupsOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeUserGroupsInput, func(*elasticache.DescribeUserGroupsOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeUserGroupsPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeUserGroupsPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeUserGroupsInput, _a2 func(*elasticache.DescribeUserGroupsOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, 
len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeUserGroupsInput, func(*elasticache.DescribeUserGroupsOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeUserGroupsRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeUserGroupsRequest(_a0 *elasticache.DescribeUserGroupsInput) (*request.Request, *elasticache.DescribeUserGroupsOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DescribeUserGroupsOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeUserGroupsInput) (*request.Request, *elasticache.DescribeUserGroupsOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeUserGroupsInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeUserGroupsInput) *elasticache.DescribeUserGroupsOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeUserGroupsOutput) - } - } - - return r0, r1 -} - -// DescribeUserGroupsWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeUserGroupsWithContext(_a0 context.Context, _a1 *elasticache.DescribeUserGroupsInput, _a2 ...request.Option) (*elasticache.DescribeUserGroupsOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
- - var r0 *elasticache.DescribeUserGroupsOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeUserGroupsInput, ...request.Option) (*elasticache.DescribeUserGroupsOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeUserGroupsInput, ...request.Option) *elasticache.DescribeUserGroupsOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeUserGroupsOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeUserGroupsInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeUsers provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeUsers(_a0 *elasticache.DescribeUsersInput) (*elasticache.DescribeUsersOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeUsersOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeUsersInput) (*elasticache.DescribeUsersOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeUsersInput) *elasticache.DescribeUsersOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeUsersOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeUsersInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeUsersPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeUsersPages(_a0 *elasticache.DescribeUsersInput, _a1 func(*elasticache.DescribeUsersOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeUsersInput, func(*elasticache.DescribeUsersOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// 
DescribeUsersPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeUsersPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeUsersInput, _a2 func(*elasticache.DescribeUsersOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeUsersInput, func(*elasticache.DescribeUsersOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeUsersRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeUsersRequest(_a0 *elasticache.DescribeUsersInput) (*request.Request, *elasticache.DescribeUsersOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DescribeUsersOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeUsersInput) (*request.Request, *elasticache.DescribeUsersOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DescribeUsersInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DescribeUsersInput) *elasticache.DescribeUsersOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeUsersOutput) - } - } - - return r0, r1 -} - -// DescribeUsersWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeUsersWithContext(_a0 context.Context, _a1 *elasticache.DescribeUsersInput, _a2 ...request.Option) (*elasticache.DescribeUsersOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - 
var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DescribeUsersOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeUsersInput, ...request.Option) (*elasticache.DescribeUsersOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeUsersInput, ...request.Option) *elasticache.DescribeUsersOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeUsersOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeUsersInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DisassociateGlobalReplicationGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DisassociateGlobalReplicationGroup(_a0 *elasticache.DisassociateGlobalReplicationGroupInput) (*elasticache.DisassociateGlobalReplicationGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DisassociateGlobalReplicationGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.DisassociateGlobalReplicationGroupInput) (*elasticache.DisassociateGlobalReplicationGroupOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DisassociateGlobalReplicationGroupInput) *elasticache.DisassociateGlobalReplicationGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DisassociateGlobalReplicationGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DisassociateGlobalReplicationGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DisassociateGlobalReplicationGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DisassociateGlobalReplicationGroupRequest(_a0 
*elasticache.DisassociateGlobalReplicationGroupInput) (*request.Request, *elasticache.DisassociateGlobalReplicationGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.DisassociateGlobalReplicationGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.DisassociateGlobalReplicationGroupInput) (*request.Request, *elasticache.DisassociateGlobalReplicationGroupOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.DisassociateGlobalReplicationGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.DisassociateGlobalReplicationGroupInput) *elasticache.DisassociateGlobalReplicationGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DisassociateGlobalReplicationGroupOutput) - } - } - - return r0, r1 -} - -// DisassociateGlobalReplicationGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DisassociateGlobalReplicationGroupWithContext(_a0 context.Context, _a1 *elasticache.DisassociateGlobalReplicationGroupInput, _a2 ...request.Option) (*elasticache.DisassociateGlobalReplicationGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DisassociateGlobalReplicationGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DisassociateGlobalReplicationGroupInput, ...request.Option) (*elasticache.DisassociateGlobalReplicationGroupOutput, error)); ok { - return rf(_a0, _a1, _a2...) 
- } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DisassociateGlobalReplicationGroupInput, ...request.Option) *elasticache.DisassociateGlobalReplicationGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DisassociateGlobalReplicationGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DisassociateGlobalReplicationGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ExportServerlessCacheSnapshot provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ExportServerlessCacheSnapshot(_a0 *elasticache.ExportServerlessCacheSnapshotInput) (*elasticache.ExportServerlessCacheSnapshotOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.ExportServerlessCacheSnapshotOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.ExportServerlessCacheSnapshotInput) (*elasticache.ExportServerlessCacheSnapshotOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.ExportServerlessCacheSnapshotInput) *elasticache.ExportServerlessCacheSnapshotOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ExportServerlessCacheSnapshotOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.ExportServerlessCacheSnapshotInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ExportServerlessCacheSnapshotRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ExportServerlessCacheSnapshotRequest(_a0 *elasticache.ExportServerlessCacheSnapshotInput) (*request.Request, *elasticache.ExportServerlessCacheSnapshotOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.ExportServerlessCacheSnapshotOutput - if rf, ok := ret.Get(0).(func(*elasticache.ExportServerlessCacheSnapshotInput) (*request.Request, 
*elasticache.ExportServerlessCacheSnapshotOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.ExportServerlessCacheSnapshotInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.ExportServerlessCacheSnapshotInput) *elasticache.ExportServerlessCacheSnapshotOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.ExportServerlessCacheSnapshotOutput) - } - } - - return r0, r1 -} - -// ExportServerlessCacheSnapshotWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) ExportServerlessCacheSnapshotWithContext(_a0 context.Context, _a1 *elasticache.ExportServerlessCacheSnapshotInput, _a2 ...request.Option) (*elasticache.ExportServerlessCacheSnapshotOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.ExportServerlessCacheSnapshotOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ExportServerlessCacheSnapshotInput, ...request.Option) (*elasticache.ExportServerlessCacheSnapshotOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ExportServerlessCacheSnapshotInput, ...request.Option) *elasticache.ExportServerlessCacheSnapshotOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ExportServerlessCacheSnapshotOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.ExportServerlessCacheSnapshotInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FailoverGlobalReplicationGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) FailoverGlobalReplicationGroup(_a0 *elasticache.FailoverGlobalReplicationGroupInput) (*elasticache.FailoverGlobalReplicationGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.FailoverGlobalReplicationGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.FailoverGlobalReplicationGroupInput) (*elasticache.FailoverGlobalReplicationGroupOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.FailoverGlobalReplicationGroupInput) *elasticache.FailoverGlobalReplicationGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.FailoverGlobalReplicationGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.FailoverGlobalReplicationGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FailoverGlobalReplicationGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) FailoverGlobalReplicationGroupRequest(_a0 *elasticache.FailoverGlobalReplicationGroupInput) (*request.Request, *elasticache.FailoverGlobalReplicationGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.FailoverGlobalReplicationGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.FailoverGlobalReplicationGroupInput) (*request.Request, *elasticache.FailoverGlobalReplicationGroupOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.FailoverGlobalReplicationGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.FailoverGlobalReplicationGroupInput) *elasticache.FailoverGlobalReplicationGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = 
ret.Get(1).(*elasticache.FailoverGlobalReplicationGroupOutput) - } - } - - return r0, r1 -} - -// FailoverGlobalReplicationGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) FailoverGlobalReplicationGroupWithContext(_a0 context.Context, _a1 *elasticache.FailoverGlobalReplicationGroupInput, _a2 ...request.Option) (*elasticache.FailoverGlobalReplicationGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.FailoverGlobalReplicationGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.FailoverGlobalReplicationGroupInput, ...request.Option) (*elasticache.FailoverGlobalReplicationGroupOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.FailoverGlobalReplicationGroupInput, ...request.Option) *elasticache.FailoverGlobalReplicationGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.FailoverGlobalReplicationGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.FailoverGlobalReplicationGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// IncreaseNodeGroupsInGlobalReplicationGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) IncreaseNodeGroupsInGlobalReplicationGroup(_a0 *elasticache.IncreaseNodeGroupsInGlobalReplicationGroupInput) (*elasticache.IncreaseNodeGroupsInGlobalReplicationGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.IncreaseNodeGroupsInGlobalReplicationGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.IncreaseNodeGroupsInGlobalReplicationGroupInput) (*elasticache.IncreaseNodeGroupsInGlobalReplicationGroupOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.IncreaseNodeGroupsInGlobalReplicationGroupInput) *elasticache.IncreaseNodeGroupsInGlobalReplicationGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.IncreaseNodeGroupsInGlobalReplicationGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.IncreaseNodeGroupsInGlobalReplicationGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// IncreaseNodeGroupsInGlobalReplicationGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) IncreaseNodeGroupsInGlobalReplicationGroupRequest(_a0 *elasticache.IncreaseNodeGroupsInGlobalReplicationGroupInput) (*request.Request, *elasticache.IncreaseNodeGroupsInGlobalReplicationGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.IncreaseNodeGroupsInGlobalReplicationGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.IncreaseNodeGroupsInGlobalReplicationGroupInput) (*request.Request, *elasticache.IncreaseNodeGroupsInGlobalReplicationGroupOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.IncreaseNodeGroupsInGlobalReplicationGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = 
ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.IncreaseNodeGroupsInGlobalReplicationGroupInput) *elasticache.IncreaseNodeGroupsInGlobalReplicationGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.IncreaseNodeGroupsInGlobalReplicationGroupOutput) - } - } - - return r0, r1 -} - -// IncreaseNodeGroupsInGlobalReplicationGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) IncreaseNodeGroupsInGlobalReplicationGroupWithContext(_a0 context.Context, _a1 *elasticache.IncreaseNodeGroupsInGlobalReplicationGroupInput, _a2 ...request.Option) (*elasticache.IncreaseNodeGroupsInGlobalReplicationGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.IncreaseNodeGroupsInGlobalReplicationGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.IncreaseNodeGroupsInGlobalReplicationGroupInput, ...request.Option) (*elasticache.IncreaseNodeGroupsInGlobalReplicationGroupOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.IncreaseNodeGroupsInGlobalReplicationGroupInput, ...request.Option) *elasticache.IncreaseNodeGroupsInGlobalReplicationGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.IncreaseNodeGroupsInGlobalReplicationGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.IncreaseNodeGroupsInGlobalReplicationGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// IncreaseReplicaCount provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) IncreaseReplicaCount(_a0 *elasticache.IncreaseReplicaCountInput) (*elasticache.IncreaseReplicaCountOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.IncreaseReplicaCountOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.IncreaseReplicaCountInput) (*elasticache.IncreaseReplicaCountOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.IncreaseReplicaCountInput) *elasticache.IncreaseReplicaCountOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.IncreaseReplicaCountOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.IncreaseReplicaCountInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// IncreaseReplicaCountRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) IncreaseReplicaCountRequest(_a0 *elasticache.IncreaseReplicaCountInput) (*request.Request, *elasticache.IncreaseReplicaCountOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.IncreaseReplicaCountOutput - if rf, ok := ret.Get(0).(func(*elasticache.IncreaseReplicaCountInput) (*request.Request, *elasticache.IncreaseReplicaCountOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.IncreaseReplicaCountInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.IncreaseReplicaCountInput) *elasticache.IncreaseReplicaCountOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.IncreaseReplicaCountOutput) - } - } - - return r0, r1 -} - -// IncreaseReplicaCountWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) 
IncreaseReplicaCountWithContext(_a0 context.Context, _a1 *elasticache.IncreaseReplicaCountInput, _a2 ...request.Option) (*elasticache.IncreaseReplicaCountOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.IncreaseReplicaCountOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.IncreaseReplicaCountInput, ...request.Option) (*elasticache.IncreaseReplicaCountOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.IncreaseReplicaCountInput, ...request.Option) *elasticache.IncreaseReplicaCountOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.IncreaseReplicaCountOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.IncreaseReplicaCountInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ListAllowedNodeTypeModifications provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ListAllowedNodeTypeModifications(_a0 *elasticache.ListAllowedNodeTypeModificationsInput) (*elasticache.ListAllowedNodeTypeModificationsOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.ListAllowedNodeTypeModificationsOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.ListAllowedNodeTypeModificationsInput) (*elasticache.ListAllowedNodeTypeModificationsOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.ListAllowedNodeTypeModificationsInput) *elasticache.ListAllowedNodeTypeModificationsOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ListAllowedNodeTypeModificationsOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.ListAllowedNodeTypeModificationsInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ListAllowedNodeTypeModificationsRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ListAllowedNodeTypeModificationsRequest(_a0 *elasticache.ListAllowedNodeTypeModificationsInput) (*request.Request, *elasticache.ListAllowedNodeTypeModificationsOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.ListAllowedNodeTypeModificationsOutput - if rf, ok := ret.Get(0).(func(*elasticache.ListAllowedNodeTypeModificationsInput) (*request.Request, *elasticache.ListAllowedNodeTypeModificationsOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.ListAllowedNodeTypeModificationsInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.ListAllowedNodeTypeModificationsInput) *elasticache.ListAllowedNodeTypeModificationsOutput); ok { - r1 = rf(_a0) - } 
else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.ListAllowedNodeTypeModificationsOutput) - } - } - - return r0, r1 -} - -// ListAllowedNodeTypeModificationsWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) ListAllowedNodeTypeModificationsWithContext(_a0 context.Context, _a1 *elasticache.ListAllowedNodeTypeModificationsInput, _a2 ...request.Option) (*elasticache.ListAllowedNodeTypeModificationsOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.ListAllowedNodeTypeModificationsOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ListAllowedNodeTypeModificationsInput, ...request.Option) (*elasticache.ListAllowedNodeTypeModificationsOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ListAllowedNodeTypeModificationsInput, ...request.Option) *elasticache.ListAllowedNodeTypeModificationsOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ListAllowedNodeTypeModificationsOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.ListAllowedNodeTypeModificationsInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ListTagsForResource provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ListTagsForResource(_a0 *elasticache.ListTagsForResourceInput) (*elasticache.TagListMessage, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.TagListMessage - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.ListTagsForResourceInput) (*elasticache.TagListMessage, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.ListTagsForResourceInput) *elasticache.TagListMessage); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.TagListMessage) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.ListTagsForResourceInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ListTagsForResourceRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ListTagsForResourceRequest(_a0 *elasticache.ListTagsForResourceInput) (*request.Request, *elasticache.TagListMessage) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.TagListMessage - if rf, ok := ret.Get(0).(func(*elasticache.ListTagsForResourceInput) (*request.Request, *elasticache.TagListMessage)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.ListTagsForResourceInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.ListTagsForResourceInput) *elasticache.TagListMessage); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.TagListMessage) - } - } - - return r0, r1 -} - -// ListTagsForResourceWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) ListTagsForResourceWithContext(_a0 context.Context, _a1 *elasticache.ListTagsForResourceInput, _a2 ...request.Option) 
(*elasticache.TagListMessage, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.TagListMessage - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ListTagsForResourceInput, ...request.Option) (*elasticache.TagListMessage, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ListTagsForResourceInput, ...request.Option) *elasticache.TagListMessage); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.TagListMessage) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.ListTagsForResourceInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyCacheCluster provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyCacheCluster(_a0 *elasticache.ModifyCacheClusterInput) (*elasticache.ModifyCacheClusterOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.ModifyCacheClusterOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.ModifyCacheClusterInput) (*elasticache.ModifyCacheClusterOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.ModifyCacheClusterInput) *elasticache.ModifyCacheClusterOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ModifyCacheClusterOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.ModifyCacheClusterInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyCacheClusterRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyCacheClusterRequest(_a0 *elasticache.ModifyCacheClusterInput) (*request.Request, 
*elasticache.ModifyCacheClusterOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.ModifyCacheClusterOutput - if rf, ok := ret.Get(0).(func(*elasticache.ModifyCacheClusterInput) (*request.Request, *elasticache.ModifyCacheClusterOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.ModifyCacheClusterInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.ModifyCacheClusterInput) *elasticache.ModifyCacheClusterOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.ModifyCacheClusterOutput) - } - } - - return r0, r1 -} - -// ModifyCacheClusterWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) ModifyCacheClusterWithContext(_a0 context.Context, _a1 *elasticache.ModifyCacheClusterInput, _a2 ...request.Option) (*elasticache.ModifyCacheClusterOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.ModifyCacheClusterOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ModifyCacheClusterInput, ...request.Option) (*elasticache.ModifyCacheClusterOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ModifyCacheClusterInput, ...request.Option) *elasticache.ModifyCacheClusterOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ModifyCacheClusterOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.ModifyCacheClusterInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyCacheParameterGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyCacheParameterGroup(_a0 *elasticache.ModifyCacheParameterGroupInput) (*elasticache.CacheParameterGroupNameMessage, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.CacheParameterGroupNameMessage - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.ModifyCacheParameterGroupInput) (*elasticache.CacheParameterGroupNameMessage, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.ModifyCacheParameterGroupInput) *elasticache.CacheParameterGroupNameMessage); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CacheParameterGroupNameMessage) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.ModifyCacheParameterGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyCacheParameterGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyCacheParameterGroupRequest(_a0 *elasticache.ModifyCacheParameterGroupInput) (*request.Request, *elasticache.CacheParameterGroupNameMessage) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.CacheParameterGroupNameMessage - if rf, ok := ret.Get(0).(func(*elasticache.ModifyCacheParameterGroupInput) (*request.Request, *elasticache.CacheParameterGroupNameMessage)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.ModifyCacheParameterGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.ModifyCacheParameterGroupInput) *elasticache.CacheParameterGroupNameMessage); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.CacheParameterGroupNameMessage) - } - } - - return r0, r1 -} - -// 
ModifyCacheParameterGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) ModifyCacheParameterGroupWithContext(_a0 context.Context, _a1 *elasticache.ModifyCacheParameterGroupInput, _a2 ...request.Option) (*elasticache.CacheParameterGroupNameMessage, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.CacheParameterGroupNameMessage - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ModifyCacheParameterGroupInput, ...request.Option) (*elasticache.CacheParameterGroupNameMessage, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ModifyCacheParameterGroupInput, ...request.Option) *elasticache.CacheParameterGroupNameMessage); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CacheParameterGroupNameMessage) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.ModifyCacheParameterGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyCacheSubnetGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyCacheSubnetGroup(_a0 *elasticache.ModifyCacheSubnetGroupInput) (*elasticache.ModifyCacheSubnetGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.ModifyCacheSubnetGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.ModifyCacheSubnetGroupInput) (*elasticache.ModifyCacheSubnetGroupOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.ModifyCacheSubnetGroupInput) *elasticache.ModifyCacheSubnetGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ModifyCacheSubnetGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.ModifyCacheSubnetGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyCacheSubnetGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyCacheSubnetGroupRequest(_a0 *elasticache.ModifyCacheSubnetGroupInput) (*request.Request, *elasticache.ModifyCacheSubnetGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.ModifyCacheSubnetGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.ModifyCacheSubnetGroupInput) (*request.Request, *elasticache.ModifyCacheSubnetGroupOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.ModifyCacheSubnetGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.ModifyCacheSubnetGroupInput) *elasticache.ModifyCacheSubnetGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.ModifyCacheSubnetGroupOutput) - } - } - - return r0, r1 -} - -// ModifyCacheSubnetGroupWithContext provides a mock function with given fields: _a0, 
_a1, _a2 -func (_m *ElastiCacheAPI) ModifyCacheSubnetGroupWithContext(_a0 context.Context, _a1 *elasticache.ModifyCacheSubnetGroupInput, _a2 ...request.Option) (*elasticache.ModifyCacheSubnetGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.ModifyCacheSubnetGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ModifyCacheSubnetGroupInput, ...request.Option) (*elasticache.ModifyCacheSubnetGroupOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ModifyCacheSubnetGroupInput, ...request.Option) *elasticache.ModifyCacheSubnetGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ModifyCacheSubnetGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.ModifyCacheSubnetGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyGlobalReplicationGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyGlobalReplicationGroup(_a0 *elasticache.ModifyGlobalReplicationGroupInput) (*elasticache.ModifyGlobalReplicationGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.ModifyGlobalReplicationGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.ModifyGlobalReplicationGroupInput) (*elasticache.ModifyGlobalReplicationGroupOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.ModifyGlobalReplicationGroupInput) *elasticache.ModifyGlobalReplicationGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ModifyGlobalReplicationGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.ModifyGlobalReplicationGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyGlobalReplicationGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyGlobalReplicationGroupRequest(_a0 *elasticache.ModifyGlobalReplicationGroupInput) (*request.Request, *elasticache.ModifyGlobalReplicationGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.ModifyGlobalReplicationGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.ModifyGlobalReplicationGroupInput) (*request.Request, *elasticache.ModifyGlobalReplicationGroupOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.ModifyGlobalReplicationGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.ModifyGlobalReplicationGroupInput) *elasticache.ModifyGlobalReplicationGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = 
ret.Get(1).(*elasticache.ModifyGlobalReplicationGroupOutput) - } - } - - return r0, r1 -} - -// ModifyGlobalReplicationGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) ModifyGlobalReplicationGroupWithContext(_a0 context.Context, _a1 *elasticache.ModifyGlobalReplicationGroupInput, _a2 ...request.Option) (*elasticache.ModifyGlobalReplicationGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.ModifyGlobalReplicationGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ModifyGlobalReplicationGroupInput, ...request.Option) (*elasticache.ModifyGlobalReplicationGroupOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ModifyGlobalReplicationGroupInput, ...request.Option) *elasticache.ModifyGlobalReplicationGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ModifyGlobalReplicationGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.ModifyGlobalReplicationGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyReplicationGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyReplicationGroup(_a0 *elasticache.ModifyReplicationGroupInput) (*elasticache.ModifyReplicationGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.ModifyReplicationGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.ModifyReplicationGroupInput) (*elasticache.ModifyReplicationGroupOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.ModifyReplicationGroupInput) *elasticache.ModifyReplicationGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ModifyReplicationGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.ModifyReplicationGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyReplicationGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyReplicationGroupRequest(_a0 *elasticache.ModifyReplicationGroupInput) (*request.Request, *elasticache.ModifyReplicationGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.ModifyReplicationGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.ModifyReplicationGroupInput) (*request.Request, *elasticache.ModifyReplicationGroupOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.ModifyReplicationGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.ModifyReplicationGroupInput) *elasticache.ModifyReplicationGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.ModifyReplicationGroupOutput) - } - } - - return r0, r1 -} - -// ModifyReplicationGroupShardConfiguration provides a mock function with given 
fields: _a0 -func (_m *ElastiCacheAPI) ModifyReplicationGroupShardConfiguration(_a0 *elasticache.ModifyReplicationGroupShardConfigurationInput) (*elasticache.ModifyReplicationGroupShardConfigurationOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.ModifyReplicationGroupShardConfigurationOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.ModifyReplicationGroupShardConfigurationInput) (*elasticache.ModifyReplicationGroupShardConfigurationOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.ModifyReplicationGroupShardConfigurationInput) *elasticache.ModifyReplicationGroupShardConfigurationOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ModifyReplicationGroupShardConfigurationOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.ModifyReplicationGroupShardConfigurationInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyReplicationGroupShardConfigurationRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyReplicationGroupShardConfigurationRequest(_a0 *elasticache.ModifyReplicationGroupShardConfigurationInput) (*request.Request, *elasticache.ModifyReplicationGroupShardConfigurationOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.ModifyReplicationGroupShardConfigurationOutput - if rf, ok := ret.Get(0).(func(*elasticache.ModifyReplicationGroupShardConfigurationInput) (*request.Request, *elasticache.ModifyReplicationGroupShardConfigurationOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.ModifyReplicationGroupShardConfigurationInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.ModifyReplicationGroupShardConfigurationInput) 
*elasticache.ModifyReplicationGroupShardConfigurationOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.ModifyReplicationGroupShardConfigurationOutput) - } - } - - return r0, r1 -} - -// ModifyReplicationGroupShardConfigurationWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) ModifyReplicationGroupShardConfigurationWithContext(_a0 context.Context, _a1 *elasticache.ModifyReplicationGroupShardConfigurationInput, _a2 ...request.Option) (*elasticache.ModifyReplicationGroupShardConfigurationOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.ModifyReplicationGroupShardConfigurationOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ModifyReplicationGroupShardConfigurationInput, ...request.Option) (*elasticache.ModifyReplicationGroupShardConfigurationOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ModifyReplicationGroupShardConfigurationInput, ...request.Option) *elasticache.ModifyReplicationGroupShardConfigurationOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ModifyReplicationGroupShardConfigurationOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.ModifyReplicationGroupShardConfigurationInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyReplicationGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) ModifyReplicationGroupWithContext(_a0 context.Context, _a1 *elasticache.ModifyReplicationGroupInput, _a2 ...request.Option) (*elasticache.ModifyReplicationGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.ModifyReplicationGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ModifyReplicationGroupInput, ...request.Option) (*elasticache.ModifyReplicationGroupOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ModifyReplicationGroupInput, ...request.Option) *elasticache.ModifyReplicationGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ModifyReplicationGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.ModifyReplicationGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyServerlessCache provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyServerlessCache(_a0 *elasticache.ModifyServerlessCacheInput) (*elasticache.ModifyServerlessCacheOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.ModifyServerlessCacheOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.ModifyServerlessCacheInput) (*elasticache.ModifyServerlessCacheOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.ModifyServerlessCacheInput) *elasticache.ModifyServerlessCacheOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ModifyServerlessCacheOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.ModifyServerlessCacheInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyServerlessCacheRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyServerlessCacheRequest(_a0 *elasticache.ModifyServerlessCacheInput) (*request.Request, *elasticache.ModifyServerlessCacheOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.ModifyServerlessCacheOutput - if rf, ok := ret.Get(0).(func(*elasticache.ModifyServerlessCacheInput) (*request.Request, *elasticache.ModifyServerlessCacheOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.ModifyServerlessCacheInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.ModifyServerlessCacheInput) *elasticache.ModifyServerlessCacheOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.ModifyServerlessCacheOutput) - } - } - - return r0, r1 -} - -// ModifyServerlessCacheWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m 
*ElastiCacheAPI) ModifyServerlessCacheWithContext(_a0 context.Context, _a1 *elasticache.ModifyServerlessCacheInput, _a2 ...request.Option) (*elasticache.ModifyServerlessCacheOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.ModifyServerlessCacheOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ModifyServerlessCacheInput, ...request.Option) (*elasticache.ModifyServerlessCacheOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ModifyServerlessCacheInput, ...request.Option) *elasticache.ModifyServerlessCacheOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ModifyServerlessCacheOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.ModifyServerlessCacheInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyUser provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyUser(_a0 *elasticache.ModifyUserInput) (*elasticache.ModifyUserOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.ModifyUserOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.ModifyUserInput) (*elasticache.ModifyUserOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.ModifyUserInput) *elasticache.ModifyUserOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ModifyUserOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.ModifyUserInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyUserGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyUserGroup(_a0 *elasticache.ModifyUserGroupInput) (*elasticache.ModifyUserGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.ModifyUserGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.ModifyUserGroupInput) (*elasticache.ModifyUserGroupOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.ModifyUserGroupInput) *elasticache.ModifyUserGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ModifyUserGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.ModifyUserGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyUserGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyUserGroupRequest(_a0 *elasticache.ModifyUserGroupInput) (*request.Request, *elasticache.ModifyUserGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.ModifyUserGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.ModifyUserGroupInput) 
(*request.Request, *elasticache.ModifyUserGroupOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.ModifyUserGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.ModifyUserGroupInput) *elasticache.ModifyUserGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.ModifyUserGroupOutput) - } - } - - return r0, r1 -} - -// ModifyUserGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) ModifyUserGroupWithContext(_a0 context.Context, _a1 *elasticache.ModifyUserGroupInput, _a2 ...request.Option) (*elasticache.ModifyUserGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.ModifyUserGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ModifyUserGroupInput, ...request.Option) (*elasticache.ModifyUserGroupOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ModifyUserGroupInput, ...request.Option) *elasticache.ModifyUserGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ModifyUserGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.ModifyUserGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyUserRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyUserRequest(_a0 *elasticache.ModifyUserInput) (*request.Request, *elasticache.ModifyUserOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.ModifyUserOutput - if rf, ok := ret.Get(0).(func(*elasticache.ModifyUserInput) (*request.Request, *elasticache.ModifyUserOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.ModifyUserInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.ModifyUserInput) *elasticache.ModifyUserOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.ModifyUserOutput) - } - } - - return r0, r1 -} - -// ModifyUserWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) ModifyUserWithContext(_a0 context.Context, _a1 *elasticache.ModifyUserInput, _a2 ...request.Option) (*elasticache.ModifyUserOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.ModifyUserOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ModifyUserInput, ...request.Option) (*elasticache.ModifyUserOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ModifyUserInput, ...request.Option) *elasticache.ModifyUserOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ModifyUserOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.ModifyUserInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// PurchaseReservedCacheNodesOffering provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) PurchaseReservedCacheNodesOffering(_a0 *elasticache.PurchaseReservedCacheNodesOfferingInput) (*elasticache.PurchaseReservedCacheNodesOfferingOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.PurchaseReservedCacheNodesOfferingOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.PurchaseReservedCacheNodesOfferingInput) (*elasticache.PurchaseReservedCacheNodesOfferingOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.PurchaseReservedCacheNodesOfferingInput) *elasticache.PurchaseReservedCacheNodesOfferingOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.PurchaseReservedCacheNodesOfferingOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.PurchaseReservedCacheNodesOfferingInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// PurchaseReservedCacheNodesOfferingRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) PurchaseReservedCacheNodesOfferingRequest(_a0 *elasticache.PurchaseReservedCacheNodesOfferingInput) (*request.Request, *elasticache.PurchaseReservedCacheNodesOfferingOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.PurchaseReservedCacheNodesOfferingOutput - if rf, ok := ret.Get(0).(func(*elasticache.PurchaseReservedCacheNodesOfferingInput) (*request.Request, *elasticache.PurchaseReservedCacheNodesOfferingOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.PurchaseReservedCacheNodesOfferingInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.PurchaseReservedCacheNodesOfferingInput) 
*elasticache.PurchaseReservedCacheNodesOfferingOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.PurchaseReservedCacheNodesOfferingOutput) - } - } - - return r0, r1 -} - -// PurchaseReservedCacheNodesOfferingWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) PurchaseReservedCacheNodesOfferingWithContext(_a0 context.Context, _a1 *elasticache.PurchaseReservedCacheNodesOfferingInput, _a2 ...request.Option) (*elasticache.PurchaseReservedCacheNodesOfferingOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.PurchaseReservedCacheNodesOfferingOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.PurchaseReservedCacheNodesOfferingInput, ...request.Option) (*elasticache.PurchaseReservedCacheNodesOfferingOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.PurchaseReservedCacheNodesOfferingInput, ...request.Option) *elasticache.PurchaseReservedCacheNodesOfferingOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.PurchaseReservedCacheNodesOfferingOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.PurchaseReservedCacheNodesOfferingInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// RebalanceSlotsInGlobalReplicationGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) RebalanceSlotsInGlobalReplicationGroup(_a0 *elasticache.RebalanceSlotsInGlobalReplicationGroupInput) (*elasticache.RebalanceSlotsInGlobalReplicationGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.RebalanceSlotsInGlobalReplicationGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.RebalanceSlotsInGlobalReplicationGroupInput) (*elasticache.RebalanceSlotsInGlobalReplicationGroupOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.RebalanceSlotsInGlobalReplicationGroupInput) *elasticache.RebalanceSlotsInGlobalReplicationGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.RebalanceSlotsInGlobalReplicationGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.RebalanceSlotsInGlobalReplicationGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// RebalanceSlotsInGlobalReplicationGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) RebalanceSlotsInGlobalReplicationGroupRequest(_a0 *elasticache.RebalanceSlotsInGlobalReplicationGroupInput) (*request.Request, *elasticache.RebalanceSlotsInGlobalReplicationGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.RebalanceSlotsInGlobalReplicationGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.RebalanceSlotsInGlobalReplicationGroupInput) (*request.Request, *elasticache.RebalanceSlotsInGlobalReplicationGroupOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.RebalanceSlotsInGlobalReplicationGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := 
ret.Get(1).(func(*elasticache.RebalanceSlotsInGlobalReplicationGroupInput) *elasticache.RebalanceSlotsInGlobalReplicationGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.RebalanceSlotsInGlobalReplicationGroupOutput) - } - } - - return r0, r1 -} - -// RebalanceSlotsInGlobalReplicationGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) RebalanceSlotsInGlobalReplicationGroupWithContext(_a0 context.Context, _a1 *elasticache.RebalanceSlotsInGlobalReplicationGroupInput, _a2 ...request.Option) (*elasticache.RebalanceSlotsInGlobalReplicationGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.RebalanceSlotsInGlobalReplicationGroupOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.RebalanceSlotsInGlobalReplicationGroupInput, ...request.Option) (*elasticache.RebalanceSlotsInGlobalReplicationGroupOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.RebalanceSlotsInGlobalReplicationGroupInput, ...request.Option) *elasticache.RebalanceSlotsInGlobalReplicationGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.RebalanceSlotsInGlobalReplicationGroupOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.RebalanceSlotsInGlobalReplicationGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// RebootCacheCluster provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) RebootCacheCluster(_a0 *elasticache.RebootCacheClusterInput) (*elasticache.RebootCacheClusterOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.RebootCacheClusterOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.RebootCacheClusterInput) (*elasticache.RebootCacheClusterOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.RebootCacheClusterInput) *elasticache.RebootCacheClusterOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.RebootCacheClusterOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.RebootCacheClusterInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// RebootCacheClusterRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) RebootCacheClusterRequest(_a0 *elasticache.RebootCacheClusterInput) (*request.Request, *elasticache.RebootCacheClusterOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.RebootCacheClusterOutput - if rf, ok := ret.Get(0).(func(*elasticache.RebootCacheClusterInput) (*request.Request, *elasticache.RebootCacheClusterOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.RebootCacheClusterInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.RebootCacheClusterInput) *elasticache.RebootCacheClusterOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.RebootCacheClusterOutput) - } - } - - return r0, r1 -} - -// RebootCacheClusterWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) RebootCacheClusterWithContext(_a0 context.Context, _a1 
*elasticache.RebootCacheClusterInput, _a2 ...request.Option) (*elasticache.RebootCacheClusterOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.RebootCacheClusterOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.RebootCacheClusterInput, ...request.Option) (*elasticache.RebootCacheClusterOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.RebootCacheClusterInput, ...request.Option) *elasticache.RebootCacheClusterOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.RebootCacheClusterOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.RebootCacheClusterInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// RemoveTagsFromResource provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) RemoveTagsFromResource(_a0 *elasticache.RemoveTagsFromResourceInput) (*elasticache.TagListMessage, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.TagListMessage - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.RemoveTagsFromResourceInput) (*elasticache.TagListMessage, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.RemoveTagsFromResourceInput) *elasticache.TagListMessage); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.TagListMessage) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.RemoveTagsFromResourceInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// RemoveTagsFromResourceRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) RemoveTagsFromResourceRequest(_a0 
*elasticache.RemoveTagsFromResourceInput) (*request.Request, *elasticache.TagListMessage) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.TagListMessage - if rf, ok := ret.Get(0).(func(*elasticache.RemoveTagsFromResourceInput) (*request.Request, *elasticache.TagListMessage)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.RemoveTagsFromResourceInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.RemoveTagsFromResourceInput) *elasticache.TagListMessage); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.TagListMessage) - } - } - - return r0, r1 -} - -// RemoveTagsFromResourceWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) RemoveTagsFromResourceWithContext(_a0 context.Context, _a1 *elasticache.RemoveTagsFromResourceInput, _a2 ...request.Option) (*elasticache.TagListMessage, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.TagListMessage - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.RemoveTagsFromResourceInput, ...request.Option) (*elasticache.TagListMessage, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.RemoveTagsFromResourceInput, ...request.Option) *elasticache.TagListMessage); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.TagListMessage) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.RemoveTagsFromResourceInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ResetCacheParameterGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ResetCacheParameterGroup(_a0 *elasticache.ResetCacheParameterGroupInput) (*elasticache.CacheParameterGroupNameMessage, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.CacheParameterGroupNameMessage - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.ResetCacheParameterGroupInput) (*elasticache.CacheParameterGroupNameMessage, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.ResetCacheParameterGroupInput) *elasticache.CacheParameterGroupNameMessage); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CacheParameterGroupNameMessage) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.ResetCacheParameterGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ResetCacheParameterGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ResetCacheParameterGroupRequest(_a0 *elasticache.ResetCacheParameterGroupInput) (*request.Request, *elasticache.CacheParameterGroupNameMessage) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.CacheParameterGroupNameMessage - if rf, ok := ret.Get(0).(func(*elasticache.ResetCacheParameterGroupInput) (*request.Request, *elasticache.CacheParameterGroupNameMessage)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.ResetCacheParameterGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.ResetCacheParameterGroupInput) *elasticache.CacheParameterGroupNameMessage); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.CacheParameterGroupNameMessage) - } - } - - return r0, r1 -} - -// ResetCacheParameterGroupWithContext 
provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) ResetCacheParameterGroupWithContext(_a0 context.Context, _a1 *elasticache.ResetCacheParameterGroupInput, _a2 ...request.Option) (*elasticache.CacheParameterGroupNameMessage, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.CacheParameterGroupNameMessage - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ResetCacheParameterGroupInput, ...request.Option) (*elasticache.CacheParameterGroupNameMessage, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ResetCacheParameterGroupInput, ...request.Option) *elasticache.CacheParameterGroupNameMessage); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CacheParameterGroupNameMessage) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.ResetCacheParameterGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// RevokeCacheSecurityGroupIngress provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) RevokeCacheSecurityGroupIngress(_a0 *elasticache.RevokeCacheSecurityGroupIngressInput) (*elasticache.RevokeCacheSecurityGroupIngressOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.RevokeCacheSecurityGroupIngressOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.RevokeCacheSecurityGroupIngressInput) (*elasticache.RevokeCacheSecurityGroupIngressOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.RevokeCacheSecurityGroupIngressInput) *elasticache.RevokeCacheSecurityGroupIngressOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.RevokeCacheSecurityGroupIngressOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.RevokeCacheSecurityGroupIngressInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// RevokeCacheSecurityGroupIngressRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) RevokeCacheSecurityGroupIngressRequest(_a0 *elasticache.RevokeCacheSecurityGroupIngressInput) (*request.Request, *elasticache.RevokeCacheSecurityGroupIngressOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.RevokeCacheSecurityGroupIngressOutput - if rf, ok := ret.Get(0).(func(*elasticache.RevokeCacheSecurityGroupIngressInput) (*request.Request, *elasticache.RevokeCacheSecurityGroupIngressOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.RevokeCacheSecurityGroupIngressInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.RevokeCacheSecurityGroupIngressInput) *elasticache.RevokeCacheSecurityGroupIngressOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) 
!= nil { - r1 = ret.Get(1).(*elasticache.RevokeCacheSecurityGroupIngressOutput) - } - } - - return r0, r1 -} - -// RevokeCacheSecurityGroupIngressWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) RevokeCacheSecurityGroupIngressWithContext(_a0 context.Context, _a1 *elasticache.RevokeCacheSecurityGroupIngressInput, _a2 ...request.Option) (*elasticache.RevokeCacheSecurityGroupIngressOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.RevokeCacheSecurityGroupIngressOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.RevokeCacheSecurityGroupIngressInput, ...request.Option) (*elasticache.RevokeCacheSecurityGroupIngressOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.RevokeCacheSecurityGroupIngressInput, ...request.Option) *elasticache.RevokeCacheSecurityGroupIngressOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.RevokeCacheSecurityGroupIngressOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.RevokeCacheSecurityGroupIngressInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// StartMigration provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) StartMigration(_a0 *elasticache.StartMigrationInput) (*elasticache.StartMigrationOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.StartMigrationOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.StartMigrationInput) (*elasticache.StartMigrationOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.StartMigrationInput) *elasticache.StartMigrationOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.StartMigrationOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.StartMigrationInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// StartMigrationRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) StartMigrationRequest(_a0 *elasticache.StartMigrationInput) (*request.Request, *elasticache.StartMigrationOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.StartMigrationOutput - if rf, ok := ret.Get(0).(func(*elasticache.StartMigrationInput) (*request.Request, *elasticache.StartMigrationOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.StartMigrationInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.StartMigrationInput) *elasticache.StartMigrationOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.StartMigrationOutput) - } - } - - return r0, r1 -} - -// StartMigrationWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) StartMigrationWithContext(_a0 context.Context, _a1 *elasticache.StartMigrationInput, _a2 ...request.Option) (*elasticache.StartMigrationOutput, 
error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.StartMigrationOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.StartMigrationInput, ...request.Option) (*elasticache.StartMigrationOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.StartMigrationInput, ...request.Option) *elasticache.StartMigrationOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.StartMigrationOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.StartMigrationInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// TestFailover provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) TestFailover(_a0 *elasticache.TestFailoverInput) (*elasticache.TestFailoverOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.TestFailoverOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.TestFailoverInput) (*elasticache.TestFailoverOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.TestFailoverInput) *elasticache.TestFailoverOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.TestFailoverOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.TestFailoverInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// TestFailoverRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) TestFailoverRequest(_a0 *elasticache.TestFailoverInput) (*request.Request, *elasticache.TestFailoverOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.TestFailoverOutput - if rf, 
ok := ret.Get(0).(func(*elasticache.TestFailoverInput) (*request.Request, *elasticache.TestFailoverOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.TestFailoverInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.TestFailoverInput) *elasticache.TestFailoverOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.TestFailoverOutput) - } - } - - return r0, r1 -} - -// TestFailoverWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) TestFailoverWithContext(_a0 context.Context, _a1 *elasticache.TestFailoverInput, _a2 ...request.Option) (*elasticache.TestFailoverOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.TestFailoverOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.TestFailoverInput, ...request.Option) (*elasticache.TestFailoverOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.TestFailoverInput, ...request.Option) *elasticache.TestFailoverOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.TestFailoverOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.TestFailoverInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// TestMigration provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) TestMigration(_a0 *elasticache.TestMigrationInput) (*elasticache.TestMigrationOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.TestMigrationOutput - var r1 error - if rf, ok := ret.Get(0).(func(*elasticache.TestMigrationInput) (*elasticache.TestMigrationOutput, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.TestMigrationInput) *elasticache.TestMigrationOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.TestMigrationOutput) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.TestMigrationInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// TestMigrationRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) TestMigrationRequest(_a0 *elasticache.TestMigrationInput) (*request.Request, *elasticache.TestMigrationOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - var r1 *elasticache.TestMigrationOutput - if rf, ok := ret.Get(0).(func(*elasticache.TestMigrationInput) (*request.Request, *elasticache.TestMigrationOutput)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(*elasticache.TestMigrationInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - if rf, ok := ret.Get(1).(func(*elasticache.TestMigrationInput) *elasticache.TestMigrationOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.TestMigrationOutput) - } - } - - return r0, r1 -} - -// TestMigrationWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) TestMigrationWithContext(_a0 context.Context, _a1 *elasticache.TestMigrationInput, _a2 ...request.Option) (*elasticache.TestMigrationOutput, error) { - _va := 
make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.TestMigrationOutput - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.TestMigrationInput, ...request.Option) (*elasticache.TestMigrationOutput, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.TestMigrationInput, ...request.Option) *elasticache.TestMigrationOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.TestMigrationOutput) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.TestMigrationInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// WaitUntilCacheClusterAvailable provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) WaitUntilCacheClusterAvailable(_a0 *elasticache.DescribeCacheClustersInput) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheClustersInput) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// WaitUntilCacheClusterAvailableWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) WaitUntilCacheClusterAvailableWithContext(_a0 context.Context, _a1 *elasticache.DescribeCacheClustersInput, _a2 ...request.WaiterOption) error { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheClustersInput, ...request.WaiterOption) error); ok { - r0 = rf(_a0, _a1, _a2...) 
- } else { - r0 = ret.Error(0) - } - - return r0 -} - -// WaitUntilCacheClusterDeleted provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) WaitUntilCacheClusterDeleted(_a0 *elasticache.DescribeCacheClustersInput) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheClustersInput) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// WaitUntilCacheClusterDeletedWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) WaitUntilCacheClusterDeletedWithContext(_a0 context.Context, _a1 *elasticache.DescribeCacheClustersInput, _a2 ...request.WaiterOption) error { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheClustersInput, ...request.WaiterOption) error); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// WaitUntilReplicationGroupAvailable provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) WaitUntilReplicationGroupAvailable(_a0 *elasticache.DescribeReplicationGroupsInput) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeReplicationGroupsInput) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// WaitUntilReplicationGroupAvailableWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) WaitUntilReplicationGroupAvailableWithContext(_a0 context.Context, _a1 *elasticache.DescribeReplicationGroupsInput, _a2 ...request.WaiterOption) error { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) 
- ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeReplicationGroupsInput, ...request.WaiterOption) error); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// WaitUntilReplicationGroupDeleted provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) WaitUntilReplicationGroupDeleted(_a0 *elasticache.DescribeReplicationGroupsInput) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeReplicationGroupsInput) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// WaitUntilReplicationGroupDeletedWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) WaitUntilReplicationGroupDeletedWithContext(_a0 context.Context, _a1 *elasticache.DescribeReplicationGroupsInput, _a2 ...request.WaiterOption) error { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeReplicationGroupsInput, ...request.WaiterOption) error); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// NewElastiCacheAPI creates a new instance of ElastiCacheAPI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
-func NewElastiCacheAPI(t interface { - mock.TestingT - Cleanup(func()) -}) *ElastiCacheAPI { - mock := &ElastiCacheAPI{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/pkg/resource/cache_cluster/custom_set_output.go b/pkg/resource/cache_cluster/custom_set_output.go deleted file mode 100644 index 312835ea..00000000 --- a/pkg/resource/cache_cluster/custom_set_output.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. 
- -package cache_cluster diff --git a/pkg/resource/cache_cluster/hooks.go b/pkg/resource/cache_cluster/hooks.go index 7a4b8edf..52a7737f 100644 --- a/pkg/resource/cache_cluster/hooks.go +++ b/pkg/resource/cache_cluster/hooks.go @@ -22,6 +22,7 @@ import ( ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" + "github.com/aws/aws-sdk-go-v2/aws" svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" @@ -196,3 +197,17 @@ func marshalAsAnnotation(val interface{}) string { } return string(data) } + +func Int32OrNil(i *int64) *int32 { + if i != nil { + return aws.Int32(int32(*i)) + } + return aws.Int32(0) +} + +func Int64OrNil(i *int32) *int64 { + if i != nil { + return aws.Int64(int64(*i)) + } + return aws.Int64(0) +} diff --git a/pkg/resource/cache_cluster/sdk.go b/pkg/resource/cache_cluster/sdk.go index 97193456..67ff3ab3 100644 --- a/pkg/resource/cache_cluster/sdk.go +++ b/pkg/resource/cache_cluster/sdk.go @@ -1237,7 +1237,7 @@ func (rm *resourceManager) sdkUpdate( } if pendingModifications := resp.CacheCluster.PendingModifiedValues; pendingModifications != nil { if pendingModifications.NumCacheNodes != nil { - ko.Spec.NumCacheNodes = pendingModifications.NumCacheNodes + ko.Spec.NumCacheNodes = Int64OrNil(pendingModifications.NumCacheNodes) } if pendingModifications.CacheNodeType != nil { ko.Spec.CacheNodeType = pendingModifications.CacheNodeType @@ -1262,7 +1262,7 @@ func (rm *resourceManager) newUpdateRequestPayload( if r.ko.Spec.AZMode != nil { res.AZMode = svcsdktypes.AZMode(*r.ko.Spec.AZMode) } - res.ApplyImmediately = true + res.ApplyImmediately = aws.Bool(true) if r.ko.Spec.AuthToken != nil { tmpSecret, err := rm.rr.SecretValueFromReference(ctx, r.ko.Spec.AuthToken) if err != nil { diff --git a/pkg/resource/cache_parameter_group/custom_set_output.go 
b/pkg/resource/cache_parameter_group/custom_set_output.go deleted file mode 100644 index d41e2efd..00000000 --- a/pkg/resource/cache_parameter_group/custom_set_output.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package cache_parameter_group - -import ( - "context" - - svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" - ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" - corev1 "k8s.io/api/core/v1" -) - -func (rm *resourceManager) CustomDescribeCacheParameterGroupsSetOutput( - ctx context.Context, - r *resource, - resp *svcsdk.DescribeCacheParameterGroupsOutput, - ko *svcapitypes.CacheParameterGroup, -) (*svcapitypes.CacheParameterGroup, error) { - // Retrieve parameters using DescribeCacheParameters API and populate ko.Status.ParameterNameValues - if len(resp.CacheParameterGroups) == 0 { - return ko, nil - } - cpg := resp.CacheParameterGroups[0] - // Populate latest.ko.Spec.ParameterNameValues with latest parameter values - // Populate latest.ko.Status.Parameters with latest detailed parameters - error := rm.customSetOutputDescribeCacheParameters(ctx, cpg.CacheParameterGroupName, ko) - if error != nil { - return nil, error - } - return ko, nil -} - -func (rm *resourceManager) CustomCreateCacheParameterGroupSetOutput( - ctx context.Context, - r *resource, - resp 
*svcsdk.CreateCacheParameterGroupOutput, - ko *svcapitypes.CacheParameterGroup, -) (*svcapitypes.CacheParameterGroup, error) { - if r.ko.Spec.ParameterNameValues != nil && len(r.ko.Spec.ParameterNameValues) != 0 { - // Spec has parameters name and values. Create API does not save these, but Modify API does. - // Thus, Create needs to be followed by Modify call to save parameters from Spec. - // Setting synched condition to false, so that reconciler gets invoked again - // and modify logic gets executed. - rm.setCondition(ko, ackv1alpha1.ConditionTypeResourceSynced, corev1.ConditionFalse) - } - return ko, nil -} diff --git a/pkg/resource/cache_parameter_group/custom_update_api.go b/pkg/resource/cache_parameter_group/custom_update_api.go deleted file mode 100644 index dbf99b65..00000000 --- a/pkg/resource/cache_parameter_group/custom_update_api.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package cache_parameter_group - -import ( - "context" - - svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" - ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" -) - -// Implements specialized logic for update CacheParameterGroup. 
-func (rm *resourceManager) customUpdateCacheParameterGroup( - ctx context.Context, - desired *resource, - latest *resource, - delta *ackcompare.Delta, -) (*resource, error) { - desiredParameters := desired.ko.Spec.ParameterNameValues - latestParameters := latest.ko.Spec.ParameterNameValues - - updated := false - var err error - // Update - if (desiredParameters == nil || len(desiredParameters) == 0) && - (latestParameters != nil && len(latestParameters) > 0) { - updated, err = rm.resetAllParameters(ctx, desired) - if !updated || err != nil { - return nil, err - } - } else { - removedParameters, modifiedParameters, addedParameters := rm.provideDelta(desiredParameters, latestParameters) - if removedParameters != nil && len(removedParameters) > 0 { - updated, err = rm.resetParameters(ctx, desired, removedParameters) - if !updated || err != nil { - return nil, err - } - } - if modifiedParameters != nil && len(modifiedParameters) > 0 { - updated, err = rm.saveParameters(ctx, desired, modifiedParameters) - if !updated || err != nil { - return nil, err - } - } - if addedParameters != nil && len(addedParameters) > 0 { - updated, err = rm.saveParameters(ctx, desired, addedParameters) - if !updated || err != nil { - return nil, err - } - } - } - if updated { - rm.setStatusDefaults(latest.ko) - // Populate latest.ko.Spec.ParameterNameValues with latest parameter values - // Populate latest.ko.Status.Parameters with latest detailed parameters - error := rm.customSetOutputDescribeCacheParameters(ctx, desired.ko.Spec.CacheParameterGroupName, latest.ko) - if error != nil { - return nil, error - } - } - return latest, nil -} - -// provideDelta compares given desired and latest Parameters and returns -// removedParameters, modifiedParameters, addedParameters -func (rm *resourceManager) provideDelta( - desiredParameters []*svcapitypes.ParameterNameValue, - latestParameters []*svcapitypes.ParameterNameValue, -) ([]*svcapitypes.ParameterNameValue, []*svcapitypes.ParameterNameValue, 
[]*svcapitypes.ParameterNameValue) { - - desiredPametersMap := map[string]*svcapitypes.ParameterNameValue{} - for _, parameter := range desiredParameters { - p := *parameter - desiredPametersMap[*p.ParameterName] = &p - } - latestPametersMap := map[string]*svcapitypes.ParameterNameValue{} - for _, parameter := range latestParameters { - p := *parameter - latestPametersMap[*p.ParameterName] = &p - } - - removedParameters := []*svcapitypes.ParameterNameValue{} // available in latest but not found in desired - modifiedParameters := []*svcapitypes.ParameterNameValue{} // available in both desired, latest but values differ - addedParameters := []*svcapitypes.ParameterNameValue{} // available in desired but not found in latest - for latestParameterName, latestParameterNameValue := range latestPametersMap { - desiredParameterNameValue, found := desiredPametersMap[latestParameterName] - if found && desiredParameterNameValue != nil && - desiredParameterNameValue.ParameterValue != nil && *desiredParameterNameValue.ParameterValue != "" { - if *desiredParameterNameValue.ParameterValue != *latestParameterNameValue.ParameterValue { - // available in both desired, latest but values differ - modified := *desiredParameterNameValue - modifiedParameters = append(modifiedParameters, &modified) - } - } else { - // available in latest but not found in desired - removed := *latestParameterNameValue - removedParameters = append(removedParameters, &removed) - } - } - for desiredParameterName, desiredParameterNameValue := range desiredPametersMap { - _, found := latestPametersMap[desiredParameterName] - if !found && desiredParameterNameValue != nil { - // available in desired but not found in latest - added := *desiredParameterNameValue - if added.ParameterValue != nil && *added.ParameterValue != "" { - addedParameters = append(addedParameters, &added) - } - } - } - return removedParameters, modifiedParameters, addedParameters -} diff --git a/pkg/resource/cache_parameter_group/custom_api.go 
b/pkg/resource/cache_parameter_group/hooks.go similarity index 54% rename from pkg/resource/cache_parameter_group/custom_api.go rename to pkg/resource/cache_parameter_group/hooks.go index 3ce6b77e..67b7e1d9 100644 --- a/pkg/resource/cache_parameter_group/custom_api.go +++ b/pkg/resource/cache_parameter_group/hooks.go @@ -18,8 +18,11 @@ import ( svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + svcsdktypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -86,11 +89,11 @@ func (rm *resourceManager) provideEvents( maxRecords int64, ) ([]*svcapitypes.Event, error) { input := &svcsdk.DescribeEventsInput{} - input.SetSourceType("cache-parameter-group") - input.SetSourceIdentifier(*cacheParameterGroupName) - input.SetMaxRecords(maxRecords) - input.SetDuration(eventsDuration) - resp, err := rm.sdkapi.DescribeEventsWithContext(ctx, input) + input.SourceType = svcsdktypes.SourceTypeCacheParameterGroup + input.SourceIdentifier = cacheParameterGroupName + input.MaxRecords = aws.Int32(int32(maxRecords)) + input.Duration = aws.Int32(eventsDuration) + resp, err := rm.sdkapi.DescribeEvents(ctx, input) rm.metrics.RecordAPICall("READ_MANY", "DescribeEvents-CacheParameterGroup", err) if err != nil { rm.log.V(1).Info("Error during DescribeEvents-CacheParameterGroup", "error", err) @@ -129,10 +132,10 @@ func (rm *resourceManager) describeCacheParameters( if err != nil { return nil, err } - response, respErr := rm.sdkapi.DescribeCacheParametersWithContext(ctx, input) + response, respErr := 
rm.sdkapi.DescribeCacheParameters(ctx, input) rm.metrics.RecordAPICall("READ_MANY", "DescribeCacheParameters", respErr) if respErr != nil { - if awsErr, ok := ackerr.AWSError(respErr); ok && awsErr.Code() == "CacheParameterGroupNotFound" { + if awsErr, ok := ackerr.AWSError(respErr); ok && awsErr.ErrorCode() == "CacheParameterGroupNotFound" { return nil, ackerr.NotFound } rm.log.V(1).Info("Error during DescribeCacheParameters", "error", respErr) @@ -176,13 +179,13 @@ func (rm *resourceManager) newDescribeCacheParametersRequestPayload( res := &svcsdk.DescribeCacheParametersInput{} if cacheParameterGroupName != nil { - res.SetCacheParameterGroupName(*cacheParameterGroupName) + res.CacheParameterGroupName = cacheParameterGroupName } if source != nil { - res.SetSource(*source) + res.Source = source } if paginationMarker != nil { - res.SetMarker(*paginationMarker) + res.Marker = paginationMarker } return res, nil } @@ -194,11 +197,11 @@ func (rm *resourceManager) resetAllParameters( ) (bool, error) { input := &svcsdk.ResetCacheParameterGroupInput{} if desired.ko.Spec.CacheParameterGroupName != nil { - input.SetCacheParameterGroupName(*desired.ko.Spec.CacheParameterGroupName) + input.CacheParameterGroupName = desired.ko.Spec.CacheParameterGroupName } - input.SetResetAllParameters(true) + input.ResetAllParameters = aws.Bool(true) - _, err := rm.sdkapi.ResetCacheParameterGroupWithContext(ctx, input) + _, err := rm.sdkapi.ResetCacheParameterGroup(ctx, input) rm.metrics.RecordAPICall("UPDATE", "ResetCacheParameterGroup-ResetAllParameters", err) if err != nil { rm.log.V(1).Info("Error during ResetCacheParameterGroup-ResetAllParameters", "error", err) @@ -215,21 +218,30 @@ func (rm *resourceManager) resetParameters( ) (bool, error) { input := &svcsdk.ResetCacheParameterGroupInput{} if desired.ko.Spec.CacheParameterGroupName != nil { - input.SetCacheParameterGroupName(*desired.ko.Spec.CacheParameterGroupName) + input.CacheParameterGroupName = 
desired.ko.Spec.CacheParameterGroupName } if parameters != nil && len(parameters) > 0 { - parametersToReset := []*svcsdk.ParameterNameValue{} + parametersToReset := []*svcsdktypes.ParameterNameValue{} for _, parameter := range parameters { - parameterToReset := &svcsdk.ParameterNameValue{} + parameterToReset := &svcsdktypes.ParameterNameValue{} if parameter.ParameterName != nil { - parameterToReset.SetParameterName(*parameter.ParameterName) + parameterToReset.ParameterName = parameter.ParameterName + } + if parameter.ParameterValue != nil { + parameterToReset.ParameterValue = parameter.ParameterValue } parametersToReset = append(parametersToReset, parameterToReset) } - input.SetParameterNameValues(parametersToReset) + parameterNameValues := make([]svcsdktypes.ParameterNameValue, len(parametersToReset)) + for i, parameter := range parametersToReset { + if parameter != nil { + parameterNameValues[i] = *parameter + } + } + input.ParameterNameValues = parameterNameValues } - _, err := rm.sdkapi.ResetCacheParameterGroupWithContext(ctx, input) + _, err := rm.sdkapi.ResetCacheParameterGroup(ctx, input) rm.metrics.RecordAPICall("UPDATE", "ResetCacheParameterGroup", err) if err != nil { rm.log.V(1).Info("Error during ResetCacheParameterGroup", "error", err) @@ -247,14 +259,14 @@ func (rm *resourceManager) saveParameters( ) (bool, error) { modifyApiBatchSize := 20 // Paginated save: 20 parameters in single api call - parametersToSave := []*svcsdk.ParameterNameValue{} + parametersToSave := []svcsdktypes.ParameterNameValue{} for _, parameter := range parameters { - parameterToSave := &svcsdk.ParameterNameValue{} + parameterToSave := svcsdktypes.ParameterNameValue{} if parameter.ParameterName != nil { - parameterToSave.SetParameterName(*parameter.ParameterName) + parameterToSave.ParameterName = parameter.ParameterName } if parameter.ParameterValue != nil { - parameterToSave.SetParameterValue(*parameter.ParameterValue) + parameterToSave.ParameterValue = parameter.ParameterValue 
} parametersToSave = append(parametersToSave, parameterToSave) @@ -264,7 +276,7 @@ func (rm *resourceManager) saveParameters( return false, err } // re-init to save next set of parameters - parametersToSave = []*svcsdk.ParameterNameValue{} + parametersToSave = []svcsdktypes.ParameterNameValue{} } } if len(parametersToSave) > 0 { // when len(parameters) % modifyApiBatchSize != 0 @@ -281,16 +293,14 @@ func (rm *resourceManager) saveParameters( func (rm *resourceManager) modifyCacheParameterGroup( ctx context.Context, desired *resource, - parameters []*svcsdk.ParameterNameValue, + parameters []svcsdktypes.ParameterNameValue, ) (bool, error) { input := &svcsdk.ModifyCacheParameterGroupInput{} if desired.ko.Spec.CacheParameterGroupName != nil { - input.SetCacheParameterGroupName(*desired.ko.Spec.CacheParameterGroupName) + input.CacheParameterGroupName = desired.ko.Spec.CacheParameterGroupName } - if parameters != nil && len(parameters) > 0 { - input.SetParameterNameValues(parameters) - } - _, err := rm.sdkapi.ModifyCacheParameterGroupWithContext(ctx, input) + input.ParameterNameValues = parameters + _, err := rm.sdkapi.ModifyCacheParameterGroup(ctx, input) rm.metrics.RecordAPICall("UPDATE", "ModifyCacheParameterGroup", err) if err != nil { rm.log.V(1).Info("Error during ModifyCacheParameterGroup", "error", err) @@ -325,3 +335,147 @@ func (rm *resourceManager) setCondition( condition.Status = cStatus } } + +func (rm *resourceManager) CustomDescribeCacheParameterGroupsSetOutput( + ctx context.Context, + r *resource, + resp *svcsdk.DescribeCacheParameterGroupsOutput, + ko *svcapitypes.CacheParameterGroup, +) (*svcapitypes.CacheParameterGroup, error) { + // Retrieve parameters using DescribeCacheParameters API and populate ko.Status.ParameterNameValues + if len(resp.CacheParameterGroups) == 0 { + return ko, nil + } + cpg := resp.CacheParameterGroups[0] + // Populate latest.ko.Spec.ParameterNameValues with latest parameter values + // Populate latest.ko.Status.Parameters 
with latest detailed parameters + error := rm.customSetOutputDescribeCacheParameters(ctx, cpg.CacheParameterGroupName, ko) + if error != nil { + return nil, error + } + return ko, nil +} + +func (rm *resourceManager) CustomCreateCacheParameterGroupSetOutput( + ctx context.Context, + r *resource, + resp *svcsdk.CreateCacheParameterGroupOutput, + ko *svcapitypes.CacheParameterGroup, +) (*svcapitypes.CacheParameterGroup, error) { + if r.ko.Spec.ParameterNameValues != nil && len(r.ko.Spec.ParameterNameValues) != 0 { + // Spec has parameters name and values. Create API does not save these, but Modify API does. + // Thus, Create needs to be followed by Modify call to save parameters from Spec. + // Setting synched condition to false, so that reconciler gets invoked again + // and modify logic gets executed. + rm.setCondition(ko, ackv1alpha1.ConditionTypeResourceSynced, corev1.ConditionFalse) + } + return ko, nil +} + +// Implements specialized logic for update CacheParameterGroup. +func (rm *resourceManager) customUpdateCacheParameterGroup( + ctx context.Context, + desired *resource, + latest *resource, + delta *ackcompare.Delta, +) (*resource, error) { + desiredParameters := desired.ko.Spec.ParameterNameValues + latestParameters := latest.ko.Spec.ParameterNameValues + + updated := false + var err error + // Update + if (desiredParameters == nil || len(desiredParameters) == 0) && + (latestParameters != nil && len(latestParameters) > 0) { + updated, err = rm.resetAllParameters(ctx, desired) + if !updated || err != nil { + return nil, err + } + } else { + removedParameters, modifiedParameters, addedParameters := rm.provideDelta(desiredParameters, latestParameters) + if removedParameters != nil && len(removedParameters) > 0 { + updated, err = rm.resetParameters(ctx, desired, removedParameters) + if !updated || err != nil { + return nil, err + } + } + if modifiedParameters != nil && len(modifiedParameters) > 0 { + updated, err = rm.saveParameters(ctx, desired, 
modifiedParameters) + if !updated || err != nil { + return nil, err + } + } + if addedParameters != nil && len(addedParameters) > 0 { + updated, err = rm.saveParameters(ctx, desired, addedParameters) + if !updated || err != nil { + return nil, err + } + } + } + if updated { + rm.setStatusDefaults(latest.ko) + // Populate latest.ko.Spec.ParameterNameValues with latest parameter values + // Populate latest.ko.Status.Parameters with latest detailed parameters + error := rm.customSetOutputDescribeCacheParameters(ctx, desired.ko.Spec.CacheParameterGroupName, latest.ko) + if error != nil { + return nil, error + } + } + return latest, nil +} + +// provideDelta compares given desired and latest Parameters and returns +// removedParameters, modifiedParameters, addedParameters +func (rm *resourceManager) provideDelta( + desiredParameters []*svcapitypes.ParameterNameValue, + latestParameters []*svcapitypes.ParameterNameValue, +) ([]*svcapitypes.ParameterNameValue, []*svcapitypes.ParameterNameValue, []*svcapitypes.ParameterNameValue) { + + desiredPametersMap := map[string]*svcapitypes.ParameterNameValue{} + for _, parameter := range desiredParameters { + p := *parameter + desiredPametersMap[*p.ParameterName] = &p + } + latestPametersMap := map[string]*svcapitypes.ParameterNameValue{} + for _, parameter := range latestParameters { + p := *parameter + latestPametersMap[*p.ParameterName] = &p + } + + removedParameters := []*svcapitypes.ParameterNameValue{} // available in latest but not found in desired + modifiedParameters := []*svcapitypes.ParameterNameValue{} // available in both desired, latest but values differ + addedParameters := []*svcapitypes.ParameterNameValue{} // available in desired but not found in latest + for latestParameterName, latestParameterNameValue := range latestPametersMap { + desiredParameterNameValue, found := desiredPametersMap[latestParameterName] + if found && desiredParameterNameValue != nil && + desiredParameterNameValue.ParameterValue != nil && 
*desiredParameterNameValue.ParameterValue != "" { + if *desiredParameterNameValue.ParameterValue != *latestParameterNameValue.ParameterValue { + // available in both desired, latest but values differ + modified := *desiredParameterNameValue + modifiedParameters = append(modifiedParameters, &modified) + } + } else { + // available in latest but not found in desired + removed := *latestParameterNameValue + removedParameters = append(removedParameters, &removed) + } + } + for desiredParameterName, desiredParameterNameValue := range desiredPametersMap { + _, found := latestPametersMap[desiredParameterName] + if !found && desiredParameterNameValue != nil { + // available in desired but not found in latest + added := *desiredParameterNameValue + if added.ParameterValue != nil && *added.ParameterValue != "" { + addedParameters = append(addedParameters, &added) + } + } + } + return removedParameters, modifiedParameters, addedParameters +} + +func Int32OrNil(i *int64) *int32 { + if i == nil { + return nil + } + return aws.Int32(int32(*i)) +} diff --git a/pkg/resource/cache_subnet_group/custom_set_output.go b/pkg/resource/cache_subnet_group/hooks.go similarity index 77% rename from pkg/resource/cache_subnet_group/custom_set_output.go rename to pkg/resource/cache_subnet_group/hooks.go index 79bdbb93..56246c67 100644 --- a/pkg/resource/cache_subnet_group/custom_set_output.go +++ b/pkg/resource/cache_subnet_group/hooks.go @@ -17,7 +17,9 @@ import ( "context" svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" - "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + svcsdktypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -30,14 +32,14 @@ const ( func (rm *resourceManager) CustomDescribeCacheSubnetGroupsSetOutput( ctx context.Context, r *resource, - resp 
*elasticache.DescribeCacheSubnetGroupsOutput, + resp *svcsdk.DescribeCacheSubnetGroupsOutput, ko *svcapitypes.CacheSubnetGroup, ) (*svcapitypes.CacheSubnetGroup, error) { if len(resp.CacheSubnetGroups) == 0 { return ko, nil } elem := resp.CacheSubnetGroups[0] - err := rm.customSetOutputSupplementAPIs(ctx, r, elem, ko) + err := rm.customSetOutputSupplementAPIs(ctx, r, &elem, ko) if err != nil { return nil, err } @@ -47,7 +49,7 @@ func (rm *resourceManager) CustomDescribeCacheSubnetGroupsSetOutput( func (rm *resourceManager) customSetOutputSupplementAPIs( ctx context.Context, r *resource, - subnetGroup *elasticache.CacheSubnetGroup, + subnetGroup *svcsdktypes.CacheSubnetGroup, ko *svcapitypes.CacheSubnetGroup, ) error { events, err := rm.provideEvents(ctx, r.ko.Spec.CacheSubnetGroupName, 20) @@ -63,12 +65,12 @@ func (rm *resourceManager) provideEvents( subnetGroupName *string, maxRecords int64, ) ([]*svcapitypes.Event, error) { - input := &elasticache.DescribeEventsInput{} - input.SetSourceType("cache-subnet-group") - input.SetSourceIdentifier(*subnetGroupName) - input.SetMaxRecords(maxRecords) - input.SetDuration(eventsDuration) - resp, err := rm.sdkapi.DescribeEventsWithContext(ctx, input) + input := &svcsdk.DescribeEventsInput{} + input.SourceType = svcsdktypes.SourceTypeCacheSubnetGroup + input.SourceIdentifier = subnetGroupName + input.MaxRecords = aws.Int32(int32(maxRecords)) + input.Duration = aws.Int32(eventsDuration) + resp, err := rm.sdkapi.DescribeEvents(ctx, input) rm.metrics.RecordAPICall("READ_MANY", "DescribeEvents-CacheSubnetGroup", err) if err != nil { rm.log.V(1).Info("Error during DescribeEvents-CacheSubnetGroup", "error", err) @@ -93,3 +95,10 @@ func (rm *resourceManager) provideEvents( } return events, nil } + +func Int32OrNil(i *int64) *int32 { + if i == nil { + return nil + } + return aws.Int32(int32(*i)) +} diff --git a/pkg/resource/cache_subnet_group/sdk.go b/pkg/resource/cache_subnet_group/sdk.go index 9acdd724..74690536 100644 --- 
a/pkg/resource/cache_subnet_group/sdk.go +++ b/pkg/resource/cache_subnet_group/sdk.go @@ -129,32 +129,12 @@ func (rm *resourceManager) sdkFind( } f3elem.SubnetOutpost = f3elemf2 } - if f3iter.SupportedNetworkTypes != nil { - f3elemf3 := []*string{} - for _, f3elemf3iter := range f3iter.SupportedNetworkTypes { - var f3elemf3elem *string - f3elemf3elem = aws.String(string(f3elemf3iter)) - f3elemf3 = append(f3elemf3, f3elemf3elem) - } - f3elem.SupportedNetworkTypes = f3elemf3 - } f3 = append(f3, f3elem) } ko.Status.Subnets = f3 } else { ko.Status.Subnets = nil } - if elem.SupportedNetworkTypes != nil { - f4 := []*string{} - for _, f4iter := range elem.SupportedNetworkTypes { - var f4elem *string - f4elem = aws.String(string(f4iter)) - f4 = append(f4, f4elem) - } - ko.Status.SupportedNetworkTypes = f4 - } else { - ko.Status.SupportedNetworkTypes = nil - } if elem.VpcId != nil { ko.Status.VPCID = elem.VpcId } else { @@ -275,32 +255,12 @@ func (rm *resourceManager) sdkCreate( } f3elem.SubnetOutpost = f3elemf2 } - if f3iter.SupportedNetworkTypes != nil { - f3elemf3 := []*string{} - for _, f3elemf3iter := range f3iter.SupportedNetworkTypes { - var f3elemf3elem *string - f3elemf3elem = aws.String(string(f3elemf3iter)) - f3elemf3 = append(f3elemf3, f3elemf3elem) - } - f3elem.SupportedNetworkTypes = f3elemf3 - } f3 = append(f3, f3elem) } ko.Status.Subnets = f3 } else { ko.Status.Subnets = nil } - if resp.CacheSubnetGroup.SupportedNetworkTypes != nil { - f4 := []*string{} - for _, f4iter := range resp.CacheSubnetGroup.SupportedNetworkTypes { - var f4elem *string - f4elem = aws.String(string(f4iter)) - f4 = append(f4, f4elem) - } - ko.Status.SupportedNetworkTypes = f4 - } else { - ko.Status.SupportedNetworkTypes = nil - } if resp.CacheSubnetGroup.VpcId != nil { ko.Status.VPCID = resp.CacheSubnetGroup.VpcId } else { @@ -413,32 +373,12 @@ func (rm *resourceManager) sdkUpdate( } f3elem.SubnetOutpost = f3elemf2 } - if f3iter.SupportedNetworkTypes != nil { - f3elemf3 := []*string{} 
- for _, f3elemf3iter := range f3iter.SupportedNetworkTypes { - var f3elemf3elem *string - f3elemf3elem = aws.String(string(f3elemf3iter)) - f3elemf3 = append(f3elemf3, f3elemf3elem) - } - f3elem.SupportedNetworkTypes = f3elemf3 - } f3 = append(f3, f3elem) } ko.Status.Subnets = f3 } else { ko.Status.Subnets = nil } - if resp.CacheSubnetGroup.SupportedNetworkTypes != nil { - f4 := []*string{} - for _, f4iter := range resp.CacheSubnetGroup.SupportedNetworkTypes { - var f4elem *string - f4elem = aws.String(string(f4iter)) - f4 = append(f4, f4elem) - } - ko.Status.SupportedNetworkTypes = f4 - } else { - ko.Status.SupportedNetworkTypes = nil - } if resp.CacheSubnetGroup.VpcId != nil { ko.Status.VPCID = resp.CacheSubnetGroup.VpcId } else { diff --git a/pkg/resource/replication_group/annotations.go b/pkg/resource/replication_group/annotations.go deleted file mode 100644 index 8fb03abf..00000000 --- a/pkg/resource/replication_group/annotations.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. 
- -package replication_group - -import ( - svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" -) - -const ( - // AnnotationLastRequestedLDCs is an annotation whose value is the marshaled list of pointers to - // LogDeliveryConfigurationRequest structs passed in as input to either the create or modify API called most - // recently - AnnotationLastRequestedLDCs = svcapitypes.AnnotationPrefix + "last-requested-log-delivery-configurations" - // AnnotationLastRequestedCNT is an annotation whose value is passed in as input to either the create or modify API - // called most recently - AnnotationLastRequestedCNT = svcapitypes.AnnotationPrefix + "last-requested-cache-node-type" - // AnnotationLastRequestedNNG is an annotation whose value is passed in as input to either the create or modify API - // called most recently - AnnotationLastRequestedNNG = svcapitypes.AnnotationPrefix + "last-requested-num-node-groups" - // AnnotationLastRequestedNGC is an annotation whose value is the marshaled list of pointers to - // NodeGroupConfiguration structs passed in as input to either the create or modify API called most - // recently - AnnotationLastRequestedNGC = svcapitypes.AnnotationPrefix + "last-requested-node-group-configuration" -) diff --git a/pkg/resource/replication_group/custom_set_output.go b/pkg/resource/replication_group/custom_set_output.go deleted file mode 100644 index 9aa8b320..00000000 --- a/pkg/resource/replication_group/custom_set_output.go +++ /dev/null @@ -1,361 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. 
This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package replication_group - -import ( - "context" - "encoding/json" - "strconv" - - svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" - ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" - "github.com/aws/aws-sdk-go/service/elasticache" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - // The number of minutes worth of events to retrieve. - // 14 days in minutes - eventsDuration = 20160 -) - -func (rm *resourceManager) CustomDescribeReplicationGroupsSetOutput( - ctx context.Context, - r *resource, - resp *elasticache.DescribeReplicationGroupsOutput, - ko *svcapitypes.ReplicationGroup, -) (*svcapitypes.ReplicationGroup, error) { - if len(resp.ReplicationGroups) == 0 { - return ko, nil - } - elem := resp.ReplicationGroups[0] - rm.customSetOutput(elem, ko) - err := rm.customSetOutputSupplementAPIs(ctx, r, elem, ko) - if err != nil { - return nil, err - } - return ko, nil -} - -func (rm *resourceManager) CustomCreateReplicationGroupSetOutput( - ctx context.Context, - r *resource, - resp *elasticache.CreateReplicationGroupOutput, - ko *svcapitypes.ReplicationGroup, -) (*svcapitypes.ReplicationGroup, error) { - rm.customSetOutput(resp.ReplicationGroup, ko) - rm.setAnnotationsFields(r, ko) - rm.setLastRequestedNodeGroupConfiguration(r, ko) - rm.setLastRequestedNumNodeGroups(r, ko) - return ko, nil -} - -func (rm *resourceManager) CustomModifyReplicationGroupSetOutput( - ctx context.Context, - r *resource, - resp *elasticache.ModifyReplicationGroupOutput, - ko *svcapitypes.ReplicationGroup, -) (*svcapitypes.ReplicationGroup, error) { - rm.customSetOutput(resp.ReplicationGroup, ko) - - // reset 
latest.spec.LDC to original value in desired to prevent stale data - // from the modify API being merged back into desired upon spec patching - var logDeliveryConfig []*svcapitypes.LogDeliveryConfigurationRequest - for _, ldc := range r.ko.Spec.LogDeliveryConfigurations { - logDeliveryConfig = append(logDeliveryConfig, ldc.DeepCopy()) - } - ko.Spec.LogDeliveryConfigurations = logDeliveryConfig - - // Keep the value of desired for CacheNodeType. - ko.Spec.CacheNodeType = r.ko.Spec.CacheNodeType - - rm.setAnnotationsFields(r, ko) - return ko, nil -} - -func (rm *resourceManager) customSetOutput( - respRG *elasticache.ReplicationGroup, - ko *svcapitypes.ReplicationGroup, -) { - if ko.Status.Conditions == nil { - ko.Status.Conditions = []*ackv1alpha1.Condition{} - } - - allNodeGroupsAvailable := true - nodeGroupMembersCount := 0 - memberClustersCount := 0 - if respRG.NodeGroups != nil { - for _, nodeGroup := range respRG.NodeGroups { - if nodeGroup.Status == nil || *nodeGroup.Status != "available" { - allNodeGroupsAvailable = false - break - } - } - for _, nodeGroup := range respRG.NodeGroups { - if nodeGroup.NodeGroupMembers == nil { - continue - } - nodeGroupMembersCount = nodeGroupMembersCount + len(nodeGroup.NodeGroupMembers) - } - } - if respRG.MemberClusters != nil { - memberClustersCount = len(respRG.MemberClusters) - } - - rgStatus := respRG.Status - syncConditionStatus := corev1.ConditionUnknown - if rgStatus != nil { - if (*rgStatus == "available" && allNodeGroupsAvailable && memberClustersCount == nodeGroupMembersCount) || - *rgStatus == "create-failed" { - syncConditionStatus = corev1.ConditionTrue - } else { - // resource in "creating", "modifying" , "deleting", "snapshotting" - // states is being modified at server end - // thus current status is considered out of sync. 
- syncConditionStatus = corev1.ConditionFalse - } - } - var resourceSyncedCondition *ackv1alpha1.Condition = nil - for _, condition := range ko.Status.Conditions { - if condition.Type == ackv1alpha1.ConditionTypeResourceSynced { - resourceSyncedCondition = condition - break - } - } - if resourceSyncedCondition == nil { - resourceSyncedCondition = &ackv1alpha1.Condition{ - Type: ackv1alpha1.ConditionTypeResourceSynced, - Status: syncConditionStatus, - } - ko.Status.Conditions = append(ko.Status.Conditions, resourceSyncedCondition) - } else { - resourceSyncedCondition.Status = syncConditionStatus - } - - if rgStatus != nil && (*rgStatus == "available" || *rgStatus == "snapshotting") { - input, err := rm.newListAllowedNodeTypeModificationsPayLoad(respRG) - - if err == nil { - resp, apiErr := rm.sdkapi.ListAllowedNodeTypeModifications(input) - rm.metrics.RecordAPICall("READ_MANY", "ListAllowedNodeTypeModifications", apiErr) - // Overwrite the values for ScaleUp and ScaleDown - if apiErr == nil { - ko.Status.AllowedScaleDownModifications = resp.ScaleDownModifications - ko.Status.AllowedScaleUpModifications = resp.ScaleUpModifications - } - } - } else { - ko.Status.AllowedScaleDownModifications = nil - ko.Status.AllowedScaleUpModifications = nil - } - - // populate status logDeliveryConfigurations struct - if respRG.LogDeliveryConfigurations != nil { - var f11 []*svcapitypes.LogDeliveryConfiguration - for _, f11iter := range respRG.LogDeliveryConfigurations { - f11elem := &svcapitypes.LogDeliveryConfiguration{} - if f11iter.DestinationDetails != nil { - f11elemf0 := &svcapitypes.DestinationDetails{} - if f11iter.DestinationDetails.CloudWatchLogsDetails != nil { - f11elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} - if f11iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f11elemf0f0.LogGroup = f11iter.DestinationDetails.CloudWatchLogsDetails.LogGroup - } - f11elemf0.CloudWatchLogsDetails = f11elemf0f0 - } - if 
f11iter.DestinationDetails.KinesisFirehoseDetails != nil { - f11elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} - if f11iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f11elemf0f1.DeliveryStream = f11iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream - } - f11elemf0.KinesisFirehoseDetails = f11elemf0f1 - } - f11elem.DestinationDetails = f11elemf0 - } - if f11iter.DestinationType != nil { - f11elem.DestinationType = f11iter.DestinationType - } - if f11iter.LogFormat != nil { - f11elem.LogFormat = f11iter.LogFormat - } - if f11iter.LogType != nil { - f11elem.LogType = f11iter.LogType - } - if f11iter.Status != nil { - f11elem.Status = f11iter.Status - } - if f11iter.Message != nil { - f11elem.Message = f11iter.Message - } - f11 = append(f11, f11elem) - } - ko.Status.LogDeliveryConfigurations = f11 - } else { - ko.Status.LogDeliveryConfigurations = nil - } -} - -// newListAllowedNodeTypeModificationsPayLoad returns an SDK-specific struct for the HTTP request -// payload of the ListAllowedNodeTypeModifications API call. 
-func (rm *resourceManager) newListAllowedNodeTypeModificationsPayLoad(respRG *elasticache.ReplicationGroup) ( - *svcsdk.ListAllowedNodeTypeModificationsInput, error) { - res := &svcsdk.ListAllowedNodeTypeModificationsInput{} - - if respRG.ReplicationGroupId != nil { - res.SetReplicationGroupId(*respRG.ReplicationGroupId) - } - - return res, nil -} - -func (rm *resourceManager) customSetOutputSupplementAPIs( - ctx context.Context, - r *resource, - respRG *elasticache.ReplicationGroup, - ko *svcapitypes.ReplicationGroup, -) error { - events, err := rm.provideEvents(ctx, r.ko.Spec.ReplicationGroupID, 20) - if err != nil { - return err - } - ko.Status.Events = events - return nil -} - -func (rm *resourceManager) provideEvents( - ctx context.Context, - replicationGroupId *string, - maxRecords int64, -) ([]*svcapitypes.Event, error) { - input := &elasticache.DescribeEventsInput{} - input.SetSourceType("replication-group") - input.SetSourceIdentifier(*replicationGroupId) - input.SetMaxRecords(maxRecords) - input.SetDuration(eventsDuration) - resp, err := rm.sdkapi.DescribeEventsWithContext(ctx, input) - rm.metrics.RecordAPICall("READ_MANY", "DescribeEvents-ReplicationGroup", err) - if err != nil { - rm.log.V(1).Info("Error during DescribeEvents-ReplicationGroup", "error", err) - return nil, err - } - events := []*svcapitypes.Event{} - if resp.Events != nil { - for _, respEvent := range resp.Events { - event := &svcapitypes.Event{} - if respEvent.Message != nil { - event.Message = respEvent.Message - } - if respEvent.Date != nil { - eventDate := metav1.NewTime(*respEvent.Date) - event.Date = &eventDate - } - // Not copying redundant source id (replication id) - // and source type (replication group) - // into each event object - events = append(events, event) - } - } - return events, nil -} - -// setAnnotationsFields copies the desired object's annotations, populates any -// relevant fields, and sets the latest object's annotations to this newly populated map. 
-// Fields that are handled by custom modify implementation are not set here. -// This should only be called upon a successful create or modify call. -func (rm *resourceManager) setAnnotationsFields( - r *resource, - ko *svcapitypes.ReplicationGroup, -) { - annotations := getAnnotationsFields(r, ko) - - rm.setLastRequestedLogDeliveryConfigurations(r, annotations) - rm.setLastRequestedCacheNodeType(r, annotations) - ko.ObjectMeta.Annotations = annotations -} - -// getAnnotationsFields return the annotations map that would be used to set the fields -func getAnnotationsFields( - r *resource, - ko *svcapitypes.ReplicationGroup) map[string]string { - - if ko.ObjectMeta.Annotations != nil { - return ko.ObjectMeta.Annotations - } - - desiredAnnotations := r.ko.ObjectMeta.GetAnnotations() - annotations := make(map[string]string) - for k, v := range desiredAnnotations { - annotations[k] = v - } - - ko.ObjectMeta.Annotations = annotations - return annotations -} - -// setLastRequestedLogDeliveryConfigurations copies desired.Spec.LogDeliveryConfigurations -// into the annotations of the object. -// r is the desired resource, and annotations is the annotations map modified by this method -func (rm *resourceManager) setLastRequestedLogDeliveryConfigurations( - r *resource, - annotations map[string]string, -) { - lastRequestedConfigs, err := json.Marshal(r.ko.Spec.LogDeliveryConfigurations) - if err != nil { - annotations[AnnotationLastRequestedLDCs] = "null" - } else { - annotations[AnnotationLastRequestedLDCs] = string(lastRequestedConfigs) - } -} - -// setLastRequestedCacheNodeType copies desired.Spec.CacheNodeType into the annotation -// of the object. 
-func (rm *resourceManager) setLastRequestedCacheNodeType( - r *resource, - annotations map[string]string, -) { - if r.ko.Spec.CacheNodeType != nil { - annotations[AnnotationLastRequestedCNT] = *r.ko.Spec.CacheNodeType - } -} - -// setLastRequestedNodeGroupConfiguration copies desired.spec.NodeGroupConfiguration into the -// annotation of the object -func (rm *resourceManager) setLastRequestedNodeGroupConfiguration( - r *resource, - ko *svcapitypes.ReplicationGroup, -) { - annotations := getAnnotationsFields(r, ko) - lastRequestedConfigs, err := json.Marshal(r.ko.Spec.NodeGroupConfiguration) - if err != nil { - annotations[AnnotationLastRequestedNGC] = "null" - } else { - annotations[AnnotationLastRequestedNGC] = string(lastRequestedConfigs) - } -} - -// setLastRequestedNumNodeGroups copies desired.spec.NumNodeGroups into the -// annotation of the object -func (rm *resourceManager) setLastRequestedNumNodeGroups( - r *resource, - ko *svcapitypes.ReplicationGroup, -) { - annotations := getAnnotationsFields(r, ko) - if r.ko.Spec.NumNodeGroups != nil { - annotations[AnnotationLastRequestedNNG] = strconv.Itoa(int(*r.ko.Spec.NumNodeGroups)) - } else { - annotations[AnnotationLastRequestedNNG] = "null" - } -} diff --git a/pkg/resource/replication_group/custom_update_api.go b/pkg/resource/replication_group/custom_update_api.go deleted file mode 100644 index e07ce2da..00000000 --- a/pkg/resource/replication_group/custom_update_api.go +++ /dev/null @@ -1,772 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. 
See the License for the specific language governing -// permissions and limitations under the License. - -package replication_group - -import ( - "context" - "encoding/json" - "fmt" - "reflect" - "sort" - "strconv" - - "github.com/aws-controllers-k8s/runtime/pkg/requeue" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/pkg/errors" - - svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" - ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" -) - -// Implements specialized logic for replication group updates. -func (rm *resourceManager) CustomModifyReplicationGroup( - ctx context.Context, - desired *resource, - latest *resource, - delta *ackcompare.Delta, -) (*resource, error) { - - latestRGStatus := latest.ko.Status.Status - - allNodeGroupsAvailable := true - nodeGroupMembersCount := 0 - if latest.ko.Status.NodeGroups != nil { - for _, nodeGroup := range latest.ko.Status.NodeGroups { - if nodeGroup.Status == nil || *nodeGroup.Status != "available" { - allNodeGroupsAvailable = false - break - } - } - for _, nodeGroup := range latest.ko.Status.NodeGroups { - if nodeGroup.NodeGroupMembers == nil { - continue - } - nodeGroupMembersCount = nodeGroupMembersCount + len(nodeGroup.NodeGroupMembers) - } - } - - if latestRGStatus == nil || *latestRGStatus != "available" || !allNodeGroupsAvailable { - return nil, requeue.NeededAfter( - errors.New("Replication Group can not be modified, it is not in 'available' state."), - requeue.DefaultRequeueAfterDuration) - } - - memberClustersCount := 0 - if latest.ko.Status.MemberClusters != nil { - memberClustersCount = len(latest.ko.Status.MemberClusters) - } - if memberClustersCount != nodeGroupMembersCount { - return nil, requeue.NeededAfter( - errors.New("Replication Group can not be modified, "+ - "need to wait for member clusters and node group members."), - requeue.DefaultRequeueAfterDuration) - } - - // Handle the asynchronous 
rollback case for while Scaling down. - // This means that we have already attempted to apply the CacheNodeType once and - // were not successful hence we will set a terminal condition. - if !cacheNodeTypeRequiresUpdate(desired) && delta.DifferentAt("Spec.CacheNodeType") { - return nil, awserr.New("InvalidParameterCombination", "Cannot update CacheNodeType, "+ - "Please refer to Events for more details", nil) - - } - - // Handle the asynchronous rollback for Resharding. - if !nodeGroupRequiresUpdate(desired) && rm.shardConfigurationsDiffer(desired, latest) { - - return nil, awserr.New("InvalidParameterCombination", "Cannot update NodeGroups, "+ - "Please refer to Events for more details", nil) - } - - // Handle NodeGroupConfiguration asynchronous rollback situations other than Resharding. - if !nodeGroupRequiresUpdate(desired) && (rm.replicaCountDifference(desired, latest) != 0 && !delta.DifferentAt("Spec.ReplicasPerNodeGroup")) { - return nil, awserr.New("InvalidParameterCombination", "Cannot update NodeGroupConfiguration, "+ - "Please refer to Events for more details", nil) - } - - // Order of operations when diffs map to multiple updates APIs: - // 1. When automaticFailoverEnabled differs: - // if automaticFailoverEnabled == false; do nothing in this custom logic, let the modify execute first. - // else if automaticFailoverEnabled == true then following logic should execute first. - // 2. When multiAZ differs - // if multiAZ = true then below is fine. - // else if multiAZ = false ; do nothing in custom logic, let the modify execute. - // 3. updateReplicaCount() is invoked Before updateShardConfiguration() - // because both accept availability zones, however the number of - // values depend on replica count. 
- if desired.ko.Spec.AutomaticFailoverEnabled != nil && *desired.ko.Spec.AutomaticFailoverEnabled == false { - latestAutomaticFailoverEnabled := latest.ko.Status.AutomaticFailover != nil && *latest.ko.Status.AutomaticFailover == "enabled" - if latestAutomaticFailoverEnabled != *desired.ko.Spec.AutomaticFailoverEnabled { - return rm.modifyReplicationGroup(ctx, desired, latest, delta) - } - } - if desired.ko.Spec.MultiAZEnabled != nil && *desired.ko.Spec.MultiAZEnabled == false { - latestMultiAZEnabled := latest.ko.Status.MultiAZ != nil && *latest.ko.Status.MultiAZ == "enabled" - if latestMultiAZEnabled != *desired.ko.Spec.MultiAZEnabled { - return rm.modifyReplicationGroup(ctx, desired, latest, delta) - } - } - - // increase/decrease replica count - if diff := rm.replicaCountDifference(desired, latest); diff != 0 { - if diff > 0 { - return rm.increaseReplicaCount(ctx, desired, latest) - } - return rm.decreaseReplicaCount(ctx, desired, latest) - } - - // If there is a scale up modification, then we would prioritize it - // over increase/decrease shards. This is important since performing - // scale in without scale up might fail due to insufficient memory. - if delta.DifferentAt("Spec.CacheNodeType") && desired.ko.Status.AllowedScaleUpModifications != nil { - if desired.ko.Spec.CacheNodeType != nil { - for _, scaleUpInstance := range desired.ko.Status.AllowedScaleUpModifications { - if *scaleUpInstance == *desired.ko.Spec.CacheNodeType { - return nil, nil - } - } - } - } - - // increase/decrease shards - if rm.shardConfigurationsDiffer(desired, latest) { - return rm.updateShardConfiguration(ctx, desired, latest) - } - - return rm.modifyReplicationGroup(ctx, desired, latest, delta) -} - -// modifyReplicationGroup updates replication group -// it handles properties that put replication group in -// modifying state if these are supplied to modify API -// irrespective of apply immediately. 
-func (rm *resourceManager) modifyReplicationGroup( - ctx context.Context, - desired *resource, - latest *resource, - delta *ackcompare.Delta, -) (*resource, error) { - // Method currently handles SecurityGroupIDs, EngineVersion - // Avoid making unnecessary DescribeCacheCluster API call if both fields are nil in spec. - if desired.ko.Spec.SecurityGroupIDs == nil && desired.ko.Spec.EngineVersion == nil { - // no updates done - return nil, nil - } - - // Get details using describe cache cluster to compute diff - latestCacheCluster, err := rm.describeCacheCluster(ctx, latest) - if err != nil { - return nil, err - } - - // SecurityGroupIds, EngineVersion - if rm.securityGroupIdsDiffer(desired, latest, latestCacheCluster) || - delta.DifferentAt("Spec.EngineVersion") { - input := rm.newModifyReplicationGroupRequestPayload(desired, latest, latestCacheCluster, delta) - resp, respErr := rm.sdkapi.ModifyReplicationGroupWithContext(ctx, input) - rm.metrics.RecordAPICall("UPDATE", "ModifyReplicationGroup", respErr) - if respErr != nil { - rm.log.V(1).Info("Error during ModifyReplicationGroup", "error", respErr) - return nil, respErr - } - - return rm.setReplicationGroupOutput(desired, resp.ReplicationGroup) - } - - // no updates done - return nil, nil -} - -// replicaConfigurationsDifference returns -// positive number if desired replica count is greater than latest replica count -// negative number if desired replica count is less than latest replica count -// 0 otherwise -func (rm *resourceManager) replicaCountDifference( - desired *resource, - latest *resource, -) int { - desiredSpec := desired.ko.Spec - - // There are two ways of setting replica counts for NodeGroups in Elasticache ReplicationGroup. - // - The first way is to have the same replica count for all node groups. - // In this case, the Spec.ReplicasPerNodeGroup field is set to a non-nil-value integer pointer. - // - The second way is to set different replica counts per node group. 
- // In this case, the Spec.NodeGroupConfiguration field is set to a non-nil NodeGroupConfiguration slice - // of NodeGroupConfiguration structs that each have a ReplicaCount non-nil-value integer pointer field - // that contains the number of replicas for that particular node group. - if desiredSpec.ReplicasPerNodeGroup != nil { - return int(*desiredSpec.ReplicasPerNodeGroup - *latest.ko.Spec.ReplicasPerNodeGroup) - } else if desiredSpec.NodeGroupConfiguration != nil { - return rm.diffReplicasNodeGroupConfiguration(desired, latest) - } - return 0 -} - -// diffReplicasNodeGroupConfiguration takes desired Spec.NodeGroupConfiguration slice field into account to return -// positive number if desired replica count is greater than latest replica count -// negative number if desired replica count is less than latest replica count -// 0 otherwise -func (rm *resourceManager) diffReplicasNodeGroupConfiguration( - desired *resource, - latest *resource, -) int { - desiredSpec := desired.ko.Spec - latestStatus := latest.ko.Status - // each shard could have different value for replica count - latestReplicaCounts := map[string]int{} - for _, latestShard := range latestStatus.NodeGroups { - if latestShard.NodeGroupID == nil { - continue - } - latestReplicaCount := 0 - if latestShard.NodeGroupMembers != nil { - if len(latestShard.NodeGroupMembers) > 0 { - latestReplicaCount = len(latestShard.NodeGroupMembers) - 1 - } - } - latestReplicaCounts[*latestShard.NodeGroupID] = latestReplicaCount - } - for _, desiredShard := range desiredSpec.NodeGroupConfiguration { - if desiredShard.NodeGroupID == nil || desiredShard.ReplicaCount == nil { - // no specs to compare for this shard - continue - } - latestShardReplicaCount, found := latestReplicaCounts[*desiredShard.NodeGroupID] - if !found { - // shard not present in status - continue - } - if desiredShardReplicaCount := int(*desiredShard.ReplicaCount); desiredShardReplicaCount != latestShardReplicaCount { - rm.log.V(1).Info( - 
"ReplicaCount differs", - "NodeGroup", *desiredShard.NodeGroupID, - "desired", int(*desiredShard.ReplicaCount), - "latest", latestShardReplicaCount, - ) - return desiredShardReplicaCount - latestShardReplicaCount - } - } - return 0 -} - -// shardConfigurationsDiffer returns true if shard -// configuration differs between desired, latest resource. -func (rm *resourceManager) shardConfigurationsDiffer( - desired *resource, - latest *resource, -) bool { - desiredSpec := desired.ko.Spec - latestStatus := latest.ko.Status - - // desired shards - var desiredShardsCount *int64 = desiredSpec.NumNodeGroups - if desiredShardsCount == nil && desiredSpec.NodeGroupConfiguration != nil { - numShards := int64(len(desiredSpec.NodeGroupConfiguration)) - desiredShardsCount = &numShards - } - if desiredShardsCount == nil { - // no shards config in desired specs - return false - } - - // latest shards - var latestShardsCount *int64 = nil - if latestStatus.NodeGroups != nil { - numShards := int64(len(latestStatus.NodeGroups)) - latestShardsCount = &numShards - } - - return latestShardsCount == nil || *desiredShardsCount != *latestShardsCount -} - -func (rm *resourceManager) increaseReplicaCount( - ctx context.Context, - desired *resource, - latest *resource, -) (*resource, error) { - input, err := rm.newIncreaseReplicaCountRequestPayload(desired, latest) - if err != nil { - return nil, err - } - resp, respErr := rm.sdkapi.IncreaseReplicaCountWithContext(ctx, input) - rm.metrics.RecordAPICall("UPDATE", "IncreaseReplicaCount", respErr) - if respErr != nil { - rm.log.V(1).Info("Error during IncreaseReplicaCount", "error", respErr) - return nil, respErr - } - return rm.setReplicationGroupOutput(desired, resp.ReplicationGroup) -} - -func (rm *resourceManager) decreaseReplicaCount( - ctx context.Context, - desired *resource, - latest *resource, -) (*resource, error) { - input, err := rm.newDecreaseReplicaCountRequestPayload(desired, latest) - if err != nil { - return nil, err - } - resp, 
respErr := rm.sdkapi.DecreaseReplicaCountWithContext(ctx, input) - rm.metrics.RecordAPICall("UPDATE", "DecreaseReplicaCount", respErr) - if respErr != nil { - rm.log.V(1).Info("Error during DecreaseReplicaCount", "error", respErr) - return nil, respErr - } - return rm.setReplicationGroupOutput(desired, resp.ReplicationGroup) -} - -func (rm *resourceManager) updateShardConfiguration( - ctx context.Context, - desired *resource, - latest *resource, -) (*resource, error) { - input, err := rm.newUpdateShardConfigurationRequestPayload(desired, latest) - if err != nil { - return nil, err - } - resp, respErr := rm.sdkapi.ModifyReplicationGroupShardConfigurationWithContext(ctx, input) - rm.metrics.RecordAPICall("UPDATE", "ModifyReplicationGroupShardConfiguration", respErr) - if respErr != nil { - rm.log.V(1).Info("Error during ModifyReplicationGroupShardConfiguration", "error", respErr) - return nil, respErr - } - - r, err := rm.setReplicationGroupOutput(desired, resp.ReplicationGroup) - - if err != nil { - return r, err - } - - ko := r.ko.DeepCopy() - // Update the annotations since API call was successful - rm.setLastRequestedNodeGroupConfiguration(desired, ko) - rm.setLastRequestedNumNodeGroups(desired, ko) - return &resource{ko}, nil -} - -// newIncreaseReplicaCountRequestPayload returns an SDK-specific struct for the HTTP request -// payload of the Create API call for the resource -func (rm *resourceManager) newIncreaseReplicaCountRequestPayload( - desired *resource, - latest *resource, -) (*svcsdk.IncreaseReplicaCountInput, error) { - res := &svcsdk.IncreaseReplicaCountInput{} - desiredSpec := desired.ko.Spec - - res.SetApplyImmediately(true) - if desiredSpec.ReplicationGroupID != nil { - res.SetReplicationGroupId(*desiredSpec.ReplicationGroupID) - } - if desiredSpec.ReplicasPerNodeGroup != nil { - res.SetNewReplicaCount(*desiredSpec.ReplicasPerNodeGroup) - } - - latestStatus := latest.ko.Status - // each shard could have different value for replica count - 
latestReplicaCounts := map[string]int{} - for _, latestShard := range latestStatus.NodeGroups { - if latestShard.NodeGroupID == nil { - continue - } - latestReplicaCount := 0 - if latestShard.NodeGroupMembers != nil { - if len(latestShard.NodeGroupMembers) > 0 { - latestReplicaCount = len(latestShard.NodeGroupMembers) - 1 - } - } - latestReplicaCounts[*latestShard.NodeGroupID] = latestReplicaCount - } - - if desiredSpec.NodeGroupConfiguration != nil { - shardsConfig := []*svcsdk.ConfigureShard{} - for _, desiredShard := range desiredSpec.NodeGroupConfiguration { - if desiredShard.NodeGroupID == nil { - continue - } - _, found := latestReplicaCounts[*desiredShard.NodeGroupID] - if !found { - continue - } - // shard has an Id and it is present on server. - shardConfig := &svcsdk.ConfigureShard{} - shardConfig.SetNodeGroupId(*desiredShard.NodeGroupID) - if desiredShard.ReplicaCount != nil { - shardConfig.SetNewReplicaCount(*desiredShard.ReplicaCount) - } - shardAZs := []*string{} - if desiredShard.PrimaryAvailabilityZone != nil { - shardAZs = append(shardAZs, desiredShard.PrimaryAvailabilityZone) - } - if desiredShard.ReplicaAvailabilityZones != nil { - for _, desiredAZ := range desiredShard.ReplicaAvailabilityZones { - shardAZs = append(shardAZs, desiredAZ) - } - } - if len(shardAZs) > 0 { - shardConfig.SetPreferredAvailabilityZones(shardAZs) - } - shardsConfig = append(shardsConfig, shardConfig) - } - res.SetReplicaConfiguration(shardsConfig) - } - - return res, nil -} - -// newDecreaseReplicaCountRequestPayload returns an SDK-specific struct for the HTTP request -// payload of the Create API call for the resource -func (rm *resourceManager) newDecreaseReplicaCountRequestPayload( - desired *resource, - latest *resource, -) (*svcsdk.DecreaseReplicaCountInput, error) { - res := &svcsdk.DecreaseReplicaCountInput{} - desiredSpec := desired.ko.Spec - - res.SetApplyImmediately(true) - if desiredSpec.ReplicationGroupID != nil { - 
res.SetReplicationGroupId(*desiredSpec.ReplicationGroupID) - } - if desiredSpec.ReplicasPerNodeGroup != nil { - res.SetNewReplicaCount(*desiredSpec.ReplicasPerNodeGroup) - } - - latestStatus := latest.ko.Status - // each shard could have different value for replica count - latestReplicaCounts := map[string]int{} - for _, latestShard := range latestStatus.NodeGroups { - if latestShard.NodeGroupID == nil { - continue - } - latestReplicaCount := 0 - if latestShard.NodeGroupMembers != nil { - if len(latestShard.NodeGroupMembers) > 0 { - latestReplicaCount = len(latestShard.NodeGroupMembers) - 1 - } - } - latestReplicaCounts[*latestShard.NodeGroupID] = latestReplicaCount - } - - if desiredSpec.NodeGroupConfiguration != nil { - shardsConfig := []*svcsdk.ConfigureShard{} - for _, desiredShard := range desiredSpec.NodeGroupConfiguration { - if desiredShard.NodeGroupID == nil { - continue - } - _, found := latestReplicaCounts[*desiredShard.NodeGroupID] - if !found { - continue - } - // shard has an Id and it is present on server. 
- shardConfig := &svcsdk.ConfigureShard{} - shardConfig.SetNodeGroupId(*desiredShard.NodeGroupID) - if desiredShard.ReplicaCount != nil { - shardConfig.SetNewReplicaCount(*desiredShard.ReplicaCount) - } - shardAZs := []*string{} - if desiredShard.PrimaryAvailabilityZone != nil { - shardAZs = append(shardAZs, desiredShard.PrimaryAvailabilityZone) - } - if desiredShard.ReplicaAvailabilityZones != nil { - for _, desiredAZ := range desiredShard.ReplicaAvailabilityZones { - shardAZs = append(shardAZs, desiredAZ) - } - } - if len(shardAZs) > 0 { - shardConfig.SetPreferredAvailabilityZones(shardAZs) - } - shardsConfig = append(shardsConfig, shardConfig) - } - res.SetReplicaConfiguration(shardsConfig) - } - - return res, nil -} - -// newUpdateShardConfigurationRequestPayload returns an SDK-specific struct for the HTTP request -// payload of the Update API call for the resource -func (rm *resourceManager) newUpdateShardConfigurationRequestPayload( - desired *resource, - latest *resource, -) (*svcsdk.ModifyReplicationGroupShardConfigurationInput, error) { - res := &svcsdk.ModifyReplicationGroupShardConfigurationInput{} - - desiredSpec := desired.ko.Spec - latestStatus := latest.ko.Status - - // Mandatory arguments - // - ApplyImmediately - // - ReplicationGroupId - // - NodeGroupCount - res.SetApplyImmediately(true) - if desiredSpec.ReplicationGroupID != nil { - res.SetReplicationGroupId(*desiredSpec.ReplicationGroupID) - } - var desiredShardsCount *int64 = desiredSpec.NumNodeGroups - if desiredShardsCount == nil && desiredSpec.NodeGroupConfiguration != nil { - numShards := int64(len(desiredSpec.NodeGroupConfiguration)) - desiredShardsCount = &numShards - } - if desiredShardsCount != nil { - res.SetNodeGroupCount(*desiredShardsCount) - } - - // If desired nodegroup count (number of shards): - // - increases, then (optional) provide ReshardingConfiguration - // - decreases, then (mandatory) provide - // either NodeGroupsToRemove - // or NodeGroupsToRetain - var 
latestShardsCount *int64 = nil - if latestStatus.NodeGroups != nil { - numShards := int64(len(latestStatus.NodeGroups)) - latestShardsCount = &numShards - } - - increase := (desiredShardsCount != nil && latestShardsCount != nil && *desiredShardsCount > *latestShardsCount) || - (desiredShardsCount != nil && latestShardsCount == nil) - decrease := desiredShardsCount != nil && latestShardsCount != nil && *desiredShardsCount < *latestShardsCount - // Additional arguments - shardsConfig := []*svcsdk.ReshardingConfiguration{} - shardsToRetain := []*string{} - if desiredSpec.NodeGroupConfiguration != nil { - for _, desiredShard := range desiredSpec.NodeGroupConfiguration { - shardConfig := &svcsdk.ReshardingConfiguration{} - if desiredShard.NodeGroupID != nil { - shardConfig.SetNodeGroupId(*desiredShard.NodeGroupID) - shardsToRetain = append(shardsToRetain, desiredShard.NodeGroupID) - } - shardAZs := []*string{} - if desiredShard.PrimaryAvailabilityZone != nil { - shardAZs = append(shardAZs, desiredShard.PrimaryAvailabilityZone) - } - if desiredShard.ReplicaAvailabilityZones != nil { - for _, desiredAZ := range desiredShard.ReplicaAvailabilityZones { - shardAZs = append(shardAZs, desiredAZ) - } - shardConfig.SetPreferredAvailabilityZones(shardAZs) - } - shardsConfig = append(shardsConfig, shardConfig) - } - } else if decrease { - for i := 0; i < int(*desiredShardsCount); i++ { - shardsToRetain = append(shardsToRetain, desired.ko.Status.NodeGroups[i].NodeGroupID) - } - } - - if increase { - if len(shardsConfig) > 0 { - res.SetReshardingConfiguration(shardsConfig) - } - } else if decrease { - if len(shardsToRetain) == 0 { - return nil, awserr.New("InvalidParameterValue", "At least one node group should be present.", nil) - } - res.SetNodeGroupsToRetain(shardsToRetain) - } - - return res, nil -} - -// getAnyCacheClusterIDFromNodeGroups returns a cache cluster ID from supplied node groups. -// Any cache cluster Id which is not nil is returned. 
-func (rm *resourceManager) getAnyCacheClusterIDFromNodeGroups( - nodeGroups []*svcapitypes.NodeGroup, -) *string { - if nodeGroups == nil { - return nil - } - - var cacheClusterId *string = nil - for _, nodeGroup := range nodeGroups { - if nodeGroup.NodeGroupMembers == nil { - continue - } - for _, nodeGroupMember := range nodeGroup.NodeGroupMembers { - if nodeGroupMember.CacheClusterID == nil { - continue - } - cacheClusterId = nodeGroupMember.CacheClusterID - break - } - if cacheClusterId != nil { - break - } - } - return cacheClusterId -} - -// describeCacheCluster provides CacheCluster object -// per the supplied latest Replication Group Id -// it invokes DescribeCacheClusters API to do so -func (rm *resourceManager) describeCacheCluster( - ctx context.Context, - resource *resource, -) (*svcsdk.CacheCluster, error) { - input := &svcsdk.DescribeCacheClustersInput{} - - ko := resource.ko - latestStatus := ko.Status - if latestStatus.NodeGroups == nil { - return nil, nil - } - cacheClusterId := rm.getAnyCacheClusterIDFromNodeGroups(latestStatus.NodeGroups) - if cacheClusterId == nil { - return nil, nil - } - - input.SetCacheClusterId(*cacheClusterId) - resp, respErr := rm.sdkapi.DescribeCacheClustersWithContext(ctx, input) - rm.metrics.RecordAPICall("READ_MANY", "DescribeCacheClusters", respErr) - if respErr != nil { - rm.log.V(1).Info("Error during DescribeCacheClusters", "error", respErr) - return nil, respErr - } - if resp.CacheClusters == nil { - return nil, nil - } - - for _, cc := range resp.CacheClusters { - if cc == nil { - continue - } - return cc, nil - } - return nil, fmt.Errorf("could not find a non-nil cache cluster from API response") -} - -// securityGroupIdsDiffer return true if -// Security Group Ids differ between desired spec and latest (from cache cluster) status -func (rm *resourceManager) securityGroupIdsDiffer( - desired *resource, - latest *resource, - latestCacheCluster *svcsdk.CacheCluster, -) bool { - if desired.ko.Spec.SecurityGroupIDs 
== nil { - return false - } - - desiredIds := []*string{} - for _, id := range desired.ko.Spec.SecurityGroupIDs { - if id == nil { - continue - } - var value string - value = *id - desiredIds = append(desiredIds, &value) - } - sort.Slice(desiredIds, func(i, j int) bool { - return *desiredIds[i] < *desiredIds[j] - }) - - latestIds := []*string{} - if latestCacheCluster != nil && latestCacheCluster.SecurityGroups != nil { - for _, latestSG := range latestCacheCluster.SecurityGroups { - if latestSG == nil { - continue - } - var value string - value = *latestSG.SecurityGroupId - latestIds = append(latestIds, &value) - } - } - sort.Slice(latestIds, func(i, j int) bool { - return *latestIds[i] < *latestIds[j] - }) - - if len(desiredIds) != len(latestIds) { - return true // differ - } - for index, desiredId := range desiredIds { - if *desiredId != *latestIds[index] { - return true // differ - } - } - // no difference - return false -} - -// newModifyReplicationGroupRequestPayload provides request input object -func (rm *resourceManager) newModifyReplicationGroupRequestPayload( - desired *resource, - latest *resource, - latestCacheCluster *svcsdk.CacheCluster, - delta *ackcompare.Delta, -) *svcsdk.ModifyReplicationGroupInput { - input := &svcsdk.ModifyReplicationGroupInput{} - - input.SetApplyImmediately(true) - if desired.ko.Spec.ReplicationGroupID != nil { - input.SetReplicationGroupId(*desired.ko.Spec.ReplicationGroupID) - } - - if rm.securityGroupIdsDiffer(desired, latest, latestCacheCluster) && - desired.ko.Spec.SecurityGroupIDs != nil { - ids := []*string{} - for _, id := range desired.ko.Spec.SecurityGroupIDs { - var value string - value = *id - ids = append(ids, &value) - } - input.SetSecurityGroupIds(ids) - } - - if delta.DifferentAt("Spec.EngineVersion") && - desired.ko.Spec.EngineVersion != nil { - input.SetEngineVersion(*desired.ko.Spec.EngineVersion) - } - - return input -} - -// cacheNodeTypeRequiresUpdate retrieves the last requested cacheNodeType saved in 
annotations and compares them -// to the current desired cacheNodeType -func cacheNodeTypeRequiresUpdate(desired *resource) bool { - annotations := desired.ko.ObjectMeta.GetAnnotations() - if val, ok := annotations[AnnotationLastRequestedCNT]; ok && desired.ko.Spec.CacheNodeType != nil { - return val != *desired.ko.Spec.CacheNodeType - } - - // This means there is delta and no value in annotation or in Spec - return true -} - -// nodeGroupRequiresUpdate retrieves the last applied NumNodeGroups and NodeGroupConfiguration and compares them -// to the current desired NumNodeGroups and NodeGroupConfiguration -func nodeGroupRequiresUpdate(desired *resource) bool { - annotations := desired.ko.ObjectMeta.GetAnnotations() - - if val, ok := annotations[AnnotationLastRequestedNNG]; ok && val != "null" { - numNodes, err := strconv.ParseInt(val, 10, 64) - - if err != nil { - return false - } - - if numNodes != *desired.ko.Spec.NumNodeGroups { - return true - } - - return false - } - - desiredNodeGroupConfig := desired.ko.Spec.NodeGroupConfiguration - if val, ok := annotations[AnnotationLastRequestedNGC]; ok && val != "null" { - var lastRequestedNodeGroupConfig []*svcapitypes.NodeGroupConfiguration - _ = json.Unmarshal([]byte(val), &lastRequestedNodeGroupConfig) - return !reflect.DeepEqual(desiredNodeGroupConfig, lastRequestedNodeGroupConfig) - } - - // This means there is delta and no value in annotation or in Spec - return true -} diff --git a/pkg/resource/replication_group/delta.go b/pkg/resource/replication_group/delta.go index 8dfa0aa7..fe220197 100644 --- a/pkg/resource/replication_group/delta.go +++ b/pkg/resource/replication_group/delta.go @@ -91,13 +91,6 @@ func newResourceDelta( if !reflect.DeepEqual(a.ko.Spec.CacheSubnetGroupRef, b.ko.Spec.CacheSubnetGroupRef) { delta.Add("Spec.CacheSubnetGroupRef", a.ko.Spec.CacheSubnetGroupRef, b.ko.Spec.CacheSubnetGroupRef) } - if ackcompare.HasNilDifference(a.ko.Spec.ClusterMode, b.ko.Spec.ClusterMode) { - 
delta.Add("Spec.ClusterMode", a.ko.Spec.ClusterMode, b.ko.Spec.ClusterMode) - } else if a.ko.Spec.ClusterMode != nil && b.ko.Spec.ClusterMode != nil { - if *a.ko.Spec.ClusterMode != *b.ko.Spec.ClusterMode { - delta.Add("Spec.ClusterMode", a.ko.Spec.ClusterMode, b.ko.Spec.ClusterMode) - } - } if ackcompare.HasNilDifference(a.ko.Spec.DataTieringEnabled, b.ko.Spec.DataTieringEnabled) { delta.Add("Spec.DataTieringEnabled", a.ko.Spec.DataTieringEnabled, b.ko.Spec.DataTieringEnabled) } else if a.ko.Spec.DataTieringEnabled != nil && b.ko.Spec.DataTieringEnabled != nil { @@ -126,13 +119,6 @@ func newResourceDelta( delta.Add("Spec.EngineVersion", a.ko.Spec.EngineVersion, b.ko.Spec.EngineVersion) } } - if ackcompare.HasNilDifference(a.ko.Spec.IPDiscovery, b.ko.Spec.IPDiscovery) { - delta.Add("Spec.IPDiscovery", a.ko.Spec.IPDiscovery, b.ko.Spec.IPDiscovery) - } else if a.ko.Spec.IPDiscovery != nil && b.ko.Spec.IPDiscovery != nil { - if *a.ko.Spec.IPDiscovery != *b.ko.Spec.IPDiscovery { - delta.Add("Spec.IPDiscovery", a.ko.Spec.IPDiscovery, b.ko.Spec.IPDiscovery) - } - } if ackcompare.HasNilDifference(a.ko.Spec.KMSKeyID, b.ko.Spec.KMSKeyID) { delta.Add("Spec.KMSKeyID", a.ko.Spec.KMSKeyID, b.ko.Spec.KMSKeyID) } else if a.ko.Spec.KMSKeyID != nil && b.ko.Spec.KMSKeyID != nil { @@ -140,13 +126,6 @@ func newResourceDelta( delta.Add("Spec.KMSKeyID", a.ko.Spec.KMSKeyID, b.ko.Spec.KMSKeyID) } } - if ackcompare.HasNilDifference(a.ko.Spec.NetworkType, b.ko.Spec.NetworkType) { - delta.Add("Spec.NetworkType", a.ko.Spec.NetworkType, b.ko.Spec.NetworkType) - } else if a.ko.Spec.NetworkType != nil && b.ko.Spec.NetworkType != nil { - if *a.ko.Spec.NetworkType != *b.ko.Spec.NetworkType { - delta.Add("Spec.NetworkType", a.ko.Spec.NetworkType, b.ko.Spec.NetworkType) - } - } if len(a.ko.Spec.NodeGroupConfiguration) != len(b.ko.Spec.NodeGroupConfiguration) { delta.Add("Spec.NodeGroupConfiguration", a.ko.Spec.NodeGroupConfiguration, b.ko.Spec.NodeGroupConfiguration) } else if 
len(a.ko.Spec.NodeGroupConfiguration) > 0 { @@ -213,13 +192,6 @@ func newResourceDelta( if !reflect.DeepEqual(a.ko.Spec.SecurityGroupRefs, b.ko.Spec.SecurityGroupRefs) { delta.Add("Spec.SecurityGroupRefs", a.ko.Spec.SecurityGroupRefs, b.ko.Spec.SecurityGroupRefs) } - if ackcompare.HasNilDifference(a.ko.Spec.ServerlessCacheSnapshotName, b.ko.Spec.ServerlessCacheSnapshotName) { - delta.Add("Spec.ServerlessCacheSnapshotName", a.ko.Spec.ServerlessCacheSnapshotName, b.ko.Spec.ServerlessCacheSnapshotName) - } else if a.ko.Spec.ServerlessCacheSnapshotName != nil && b.ko.Spec.ServerlessCacheSnapshotName != nil { - if *a.ko.Spec.ServerlessCacheSnapshotName != *b.ko.Spec.ServerlessCacheSnapshotName { - delta.Add("Spec.ServerlessCacheSnapshotName", a.ko.Spec.ServerlessCacheSnapshotName, b.ko.Spec.ServerlessCacheSnapshotName) - } - } if len(a.ko.Spec.SnapshotARNs) != len(b.ko.Spec.SnapshotARNs) { delta.Add("Spec.SnapshotARNs", a.ko.Spec.SnapshotARNs, b.ko.Spec.SnapshotARNs) } else if len(a.ko.Spec.SnapshotARNs) > 0 { @@ -258,13 +230,6 @@ func newResourceDelta( delta.Add("Spec.TransitEncryptionEnabled", a.ko.Spec.TransitEncryptionEnabled, b.ko.Spec.TransitEncryptionEnabled) } } - if ackcompare.HasNilDifference(a.ko.Spec.TransitEncryptionMode, b.ko.Spec.TransitEncryptionMode) { - delta.Add("Spec.TransitEncryptionMode", a.ko.Spec.TransitEncryptionMode, b.ko.Spec.TransitEncryptionMode) - } else if a.ko.Spec.TransitEncryptionMode != nil && b.ko.Spec.TransitEncryptionMode != nil { - if *a.ko.Spec.TransitEncryptionMode != *b.ko.Spec.TransitEncryptionMode { - delta.Add("Spec.TransitEncryptionMode", a.ko.Spec.TransitEncryptionMode, b.ko.Spec.TransitEncryptionMode) - } - } if len(a.ko.Spec.UserGroupIDs) != len(b.ko.Spec.UserGroupIDs) { delta.Add("Spec.UserGroupIDs", a.ko.Spec.UserGroupIDs, b.ko.Spec.UserGroupIDs) } else if len(a.ko.Spec.UserGroupIDs) > 0 { diff --git a/pkg/resource/replication_group/delta_util.go b/pkg/resource/replication_group/delta_util.go deleted file mode 100644 
index 74cea39d..00000000 --- a/pkg/resource/replication_group/delta_util.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package replication_group - -import ( - "encoding/json" - "reflect" - - ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" - - svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" - "github.com/aws-controllers-k8s/elasticache-controller/pkg/common" - "github.com/aws-controllers-k8s/elasticache-controller/pkg/util" -) - -// modifyDelta removes non-meaningful differences from the delta and adds additional differences if necessary -func modifyDelta( - delta *ackcompare.Delta, - desired *resource, - latest *resource, -) { - - if delta.DifferentAt("Spec.EngineVersion") { - if desired.ko.Spec.EngineVersion != nil && latest.ko.Spec.EngineVersion != nil { - if util.EngineVersionsMatch(*desired.ko.Spec.EngineVersion, *latest.ko.Spec.EngineVersion) { - common.RemoveFromDelta(delta, "Spec.EngineVersion") - } - } - // TODO: handle the case of a nil difference (especially when desired EV is nil) - } - - // if server has given PreferredMaintenanceWindow a default value, no action needs to be taken - if delta.DifferentAt("Spec.PreferredMaintenanceWindow") { - if desired.ko.Spec.PreferredMaintenanceWindow == nil && latest.ko.Spec.PreferredMaintenanceWindow != nil { - common.RemoveFromDelta(delta, "Spec.PreferredMaintenanceWindow") - } - } - - // note that the 
comparison is actually done between desired.Spec.LogDeliveryConfigurations and - // the last requested configurations saved in annotations (as opposed to latest.Spec.LogDeliveryConfigurations) - if logDeliveryRequiresUpdate(desired) { - delta.Add("Spec.LogDeliveryConfigurations", desired.ko.Spec.LogDeliveryConfigurations, - unmarshalLastRequestedLDCs(desired)) - } - - if multiAZRequiresUpdate(desired, latest) { - delta.Add("Spec.MultiAZEnabled", desired.ko.Spec.MultiAZEnabled, latest.ko.Status.MultiAZ) - } - - if autoFailoverRequiresUpdate(desired, latest) { - delta.Add("Spec.AutomaticFailoverEnabled", desired.ko.Spec.AutomaticFailoverEnabled, - latest.ko.Status.AutomaticFailover) - } - - if updateRequired, current := primaryClusterIDRequiresUpdate(desired, latest); updateRequired { - delta.Add("Spec.PrimaryClusterID", desired.ko.Spec.PrimaryClusterID, *current) - } -} - -// logDeliveryRequiresUpdate retrieves the last requested configurations saved in annotations and compares them -// to the current desired configurations -func logDeliveryRequiresUpdate(desired *resource) bool { - desiredConfigs := desired.ko.Spec.LogDeliveryConfigurations - lastRequestedConfigs := unmarshalLastRequestedLDCs(desired) - return !reflect.DeepEqual(desiredConfigs, lastRequestedConfigs) -} - -// unmarshal the value found in annotations for the LogDeliveryConfigurations field requested in the last -// successful create or modify call -func unmarshalLastRequestedLDCs(desired *resource) []*svcapitypes.LogDeliveryConfigurationRequest { - var lastRequestedConfigs []*svcapitypes.LogDeliveryConfigurationRequest - - annotations := desired.ko.ObjectMeta.GetAnnotations() - if val, ok := annotations[AnnotationLastRequestedLDCs]; ok { - _ = json.Unmarshal([]byte(val), &lastRequestedConfigs) - } - - return lastRequestedConfigs -} - -// multiAZRequiresUpdate returns true if the latest multi AZ status does not yet match the -// desired state, and false otherwise -func multiAZRequiresUpdate(desired 
*resource, latest *resource) bool { - // no preference for multi AZ specified; no update required - if desired.ko.Spec.MultiAZEnabled == nil { - return false - } - - // API should return a non-nil value, but if it doesn't then attempt to update - if latest.ko.Status.MultiAZ == nil { - return true - } - - // true maps to "enabled"; false maps to "disabled" - // this accounts for values such as "enabling" and "disabling" - if *desired.ko.Spec.MultiAZEnabled { - return *latest.ko.Status.MultiAZ != string(svcapitypes.MultiAZStatus_enabled) - } else { - return *latest.ko.Status.MultiAZ != string(svcapitypes.MultiAZStatus_disabled) - } -} - -// autoFailoverRequiresUpdate returns true if the latest auto failover status does not yet match the -// desired state, and false otherwise -func autoFailoverRequiresUpdate(desired *resource, latest *resource) bool { - // the logic is exactly analogous to multiAZRequiresUpdate above - if desired.ko.Spec.AutomaticFailoverEnabled == nil { - return false - } - - if latest.ko.Status.AutomaticFailover == nil { - return true - } - - if *desired.ko.Spec.AutomaticFailoverEnabled { - return *latest.ko.Status.AutomaticFailover != string(svcapitypes.AutomaticFailoverStatus_enabled) - } else { - return *latest.ko.Status.AutomaticFailover != string(svcapitypes.AutomaticFailoverStatus_disabled) - } -} - -// primaryClusterIDRequiresUpdate retrieves the current primary cluster ID and determines whether -// an update is required. If no desired state is specified or there is an issue retrieving the -// latest state, return false, nil. 
Otherwise, return false or true depending on equality of -// the latest and desired states, and a non-nil pointer to the latest value -func primaryClusterIDRequiresUpdate(desired *resource, latest *resource) (bool, *string) { - if desired.ko.Spec.PrimaryClusterID == nil { - return false, nil - } - - // primary cluster ID applies to cluster mode disabled only; if API returns multiple - // or no node groups, or the provided node group is nil, there is nothing that can be done - if len(latest.ko.Status.NodeGroups) != 1 || latest.ko.Status.NodeGroups[0] == nil { - return false, nil - } - - // attempt to find primary cluster in node group. If for some reason it is not present, we - // don't have a reliable latest state, so do nothing - ng := *latest.ko.Status.NodeGroups[0] - for _, member := range ng.NodeGroupMembers { - if member == nil { - continue - } - - if member.CurrentRole != nil && *member.CurrentRole == "primary" && member.CacheClusterID != nil { - val := *member.CacheClusterID - return val != *desired.ko.Spec.PrimaryClusterID, &val - } - } - - return false, nil -} diff --git a/pkg/resource/replication_group/hooks.go b/pkg/resource/replication_group/hooks.go index 0b2e1271..89132047 100644 --- a/pkg/resource/replication_group/hooks.go +++ b/pkg/resource/replication_group/hooks.go @@ -15,12 +15,44 @@ package replication_group import ( "context" + "encoding/json" "errors" + "fmt" + "reflect" + "sort" + "strconv" - ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + svcsdktypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" + "github.com/aws/aws-sdk-go/aws/awserr" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" + "github.com/aws-controllers-k8s/elasticache-controller/pkg/common" 
"github.com/aws-controllers-k8s/elasticache-controller/pkg/util" + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + "github.com/aws-controllers-k8s/runtime/pkg/requeue" + ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" +) + +const ( + // AnnotationLastRequestedLDCs is an annotation whose value is the marshaled list of pointers to + // LogDeliveryConfigurationRequest structs passed in as input to either the create or modify API called most + // recently + AnnotationLastRequestedLDCs = svcapitypes.AnnotationPrefix + "last-requested-log-delivery-configurations" + // AnnotationLastRequestedCNT is an annotation whose value is passed in as input to either the create or modify API + // called most recently + AnnotationLastRequestedCNT = svcapitypes.AnnotationPrefix + "last-requested-cache-node-type" + // AnnotationLastRequestedNNG is an annotation whose value is passed in as input to either the create or modify API + // called most recently + AnnotationLastRequestedNNG = svcapitypes.AnnotationPrefix + "last-requested-num-node-groups" + // AnnotationLastRequestedNGC is an annotation whose value is the marshaled list of pointers to + // NodeGroupConfiguration structs passed in as input to either the create or modify API called most + // recently + AnnotationLastRequestedNGC = svcapitypes.AnnotationPrefix + "last-requested-node-group-configuration" ) var ( @@ -101,3 +133,1360 @@ func (rm *resourceManager) syncTags( ) (err error) { return util.SyncTags(ctx, desired.ko.Spec.Tags, latest.ko.Spec.Tags, latest.ko.Status.ACKResourceMetadata, ToACKTags, rm.sdkapi, rm.metrics) } + +const ( + // The number of minutes worth of events to retrieve. 
+ // 14 days in minutes + eventsDuration = 20160 +) + +func (rm *resourceManager) CustomDescribeReplicationGroupsSetOutput( + ctx context.Context, + r *resource, + resp *svcsdk.DescribeReplicationGroupsOutput, + ko *svcapitypes.ReplicationGroup, +) (*svcapitypes.ReplicationGroup, error) { + if len(resp.ReplicationGroups) == 0 { + return ko, nil + } + elem := resp.ReplicationGroups[0] + rm.customSetOutput(ctx, elem, ko) + err := rm.customSetOutputSupplementAPIs(ctx, r, &elem, ko) + if err != nil { + return nil, err + } + return ko, nil +} + +func (rm *resourceManager) CustomCreateReplicationGroupSetOutput( + ctx context.Context, + r *resource, + resp *svcsdk.CreateReplicationGroupOutput, + ko *svcapitypes.ReplicationGroup, +) (*svcapitypes.ReplicationGroup, error) { + rm.customSetOutput(ctx, *resp.ReplicationGroup, ko) + rm.setAnnotationsFields(r, ko) + rm.setLastRequestedNodeGroupConfiguration(r, ko) + rm.setLastRequestedNumNodeGroups(r, ko) + return ko, nil +} + +func (rm *resourceManager) CustomModifyReplicationGroupSetOutput( + ctx context.Context, + r *resource, + resp *svcsdk.ModifyReplicationGroupOutput, + ko *svcapitypes.ReplicationGroup, +) (*svcapitypes.ReplicationGroup, error) { + rm.customSetOutput(ctx, *resp.ReplicationGroup, ko) + + // reset latest.spec.LDC to original value in desired to prevent stale data + // from the modify API being merged back into desired upon spec patching + var logDeliveryConfig []*svcapitypes.LogDeliveryConfigurationRequest + for _, ldc := range r.ko.Spec.LogDeliveryConfigurations { + logDeliveryConfig = append(logDeliveryConfig, ldc.DeepCopy()) + } + ko.Spec.LogDeliveryConfigurations = logDeliveryConfig + + // Keep the value of desired for CacheNodeType. 
+ ko.Spec.CacheNodeType = r.ko.Spec.CacheNodeType + + rm.setAnnotationsFields(r, ko) + return ko, nil +} + +func (rm *resourceManager) customSetOutput( + ctx context.Context, + respRG svcsdktypes.ReplicationGroup, + ko *svcapitypes.ReplicationGroup, +) { + if ko.Status.Conditions == nil { + ko.Status.Conditions = []*ackv1alpha1.Condition{} + } + + allNodeGroupsAvailable := true + nodeGroupMembersCount := 0 + memberClustersCount := 0 + if respRG.NodeGroups != nil { + for _, nodeGroup := range respRG.NodeGroups { + if nodeGroup.Status == nil || *nodeGroup.Status != "available" { + allNodeGroupsAvailable = false + break + } + } + for _, nodeGroup := range respRG.NodeGroups { + if nodeGroup.NodeGroupMembers == nil { + continue + } + nodeGroupMembersCount = nodeGroupMembersCount + len(nodeGroup.NodeGroupMembers) + } + } + if respRG.MemberClusters != nil { + memberClustersCount = len(respRG.MemberClusters) + } + + rgStatus := respRG.Status + syncConditionStatus := corev1.ConditionUnknown + if rgStatus != nil { + if (*rgStatus == "available" && allNodeGroupsAvailable && memberClustersCount == nodeGroupMembersCount) || + *rgStatus == "create-failed" { + syncConditionStatus = corev1.ConditionTrue + } else { + // resource in "creating", "modifying" , "deleting", "snapshotting" + // states is being modified at server end + // thus current status is considered out of sync. 
+ syncConditionStatus = corev1.ConditionFalse + } + } + + var resourceSyncedCondition *ackv1alpha1.Condition = nil + for _, condition := range ko.Status.Conditions { + if condition.Type == ackv1alpha1.ConditionTypeResourceSynced { + resourceSyncedCondition = condition + break + } + } + if resourceSyncedCondition == nil { + resourceSyncedCondition = &ackv1alpha1.Condition{ + Type: ackv1alpha1.ConditionTypeResourceSynced, + Status: syncConditionStatus, + } + ko.Status.Conditions = append(ko.Status.Conditions, resourceSyncedCondition) + } else { + resourceSyncedCondition.Status = syncConditionStatus + } + + if rgStatus != nil && (*rgStatus == "available" || *rgStatus == "snapshotting") { + input, err := rm.newListAllowedNodeTypeModificationsPayLoad(&respRG) + + if err == nil { + resp, apiErr := rm.sdkapi.ListAllowedNodeTypeModifications(ctx, input) + rm.metrics.RecordAPICall("READ_MANY", "ListAllowedNodeTypeModifications", apiErr) + // Overwrite the values for ScaleUp and ScaleDown + if apiErr == nil { + ko.Status.AllowedScaleDownModifications = aws.StringSlice(resp.ScaleDownModifications) + ko.Status.AllowedScaleUpModifications = aws.StringSlice(resp.ScaleUpModifications) + } + } + } else { + ko.Status.AllowedScaleDownModifications = nil + ko.Status.AllowedScaleUpModifications = nil + } + + // populate status logDeliveryConfigurations struct + if respRG.LogDeliveryConfigurations != nil { + var f11 []*svcapitypes.LogDeliveryConfiguration + for _, f11iter := range respRG.LogDeliveryConfigurations { + f11elem := &svcapitypes.LogDeliveryConfiguration{} + if f11iter.DestinationDetails != nil { + f11elemf0 := &svcapitypes.DestinationDetails{} + if f11iter.DestinationDetails.CloudWatchLogsDetails != nil { + f11elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} + if f11iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f11elemf0f0.LogGroup = f11iter.DestinationDetails.CloudWatchLogsDetails.LogGroup + } + f11elemf0.CloudWatchLogsDetails = f11elemf0f0 + 
} + if f11iter.DestinationDetails.KinesisFirehoseDetails != nil { + f11elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} + if f11iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f11elemf0f1.DeliveryStream = f11iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream + } + f11elemf0.KinesisFirehoseDetails = f11elemf0f1 + } + f11elem.DestinationDetails = f11elemf0 + } + if f11iter.DestinationType != "" { + f11elem.DestinationType = aws.String(string(f11iter.DestinationType)) + } + if f11iter.LogFormat != "" { + f11elem.LogFormat = aws.String(string(f11iter.LogFormat)) + } + if f11iter.LogType != "" { + f11elem.LogType = aws.String(string(f11iter.LogType)) + } + if f11iter.Status != "" { + f11elem.Status = aws.String(string(f11iter.Status)) + } + if f11iter.Message != nil && *f11iter.Message != "" { + f11elem.Message = f11iter.Message + } + f11 = append(f11, f11elem) + } + ko.Status.LogDeliveryConfigurations = f11 + } else { + ko.Status.LogDeliveryConfigurations = nil + } +} + +// newListAllowedNodeTypeModificationsPayLoad returns an SDK-specific struct for the HTTP request +// payload of the ListAllowedNodeTypeModifications API call. 
func (rm *resourceManager) newListAllowedNodeTypeModificationsPayLoad(respRG *svcsdktypes.ReplicationGroup) (
	*svcsdk.ListAllowedNodeTypeModificationsInput, error) {
	res := &svcsdk.ListAllowedNodeTypeModificationsInput{}

	if respRG.ReplicationGroupId != nil {
		res.ReplicationGroupId = respRG.ReplicationGroupId
	}

	return res, nil
}

// customSetOutputSupplementAPIs augments the latest observed object with data
// from supplemental APIs (currently only recent service events).
func (rm *resourceManager) customSetOutputSupplementAPIs(
	ctx context.Context,
	r *resource,
	respRG *svcsdktypes.ReplicationGroup,
	ko *svcapitypes.ReplicationGroup,
) error {
	// Fetch up to 20 recent events for this replication group.
	events, err := rm.provideEvents(ctx, r.ko.Spec.ReplicationGroupID, 20)
	if err != nil {
		return err
	}
	ko.Status.Events = events
	return nil
}

// provideEvents calls DescribeEvents for the given replication group and maps
// the responses into API-type Event objects (message + date only). Returns the
// API error, if any, after recording the call in metrics.
func (rm *resourceManager) provideEvents(
	ctx context.Context,
	replicationGroupId *string,
	maxRecords int64,
) ([]*svcapitypes.Event, error) {
	input := &svcsdk.DescribeEventsInput{}
	input.SourceType = svcsdktypes.SourceTypeReplicationGroup
	input.SourceIdentifier = replicationGroupId
	input.MaxRecords = aws.Int32(int32(maxRecords))
	// eventsDuration is a file-level constant limiting the lookback window.
	input.Duration = aws.Int32(eventsDuration)
	resp, err := rm.sdkapi.DescribeEvents(ctx, input)
	rm.metrics.RecordAPICall("READ_MANY", "DescribeEvents-ReplicationGroup", err)
	if err != nil {
		rm.log.V(1).Info("Error during DescribeEvents-ReplicationGroup", "error", err)
		return nil, err
	}
	events := []*svcapitypes.Event{}
	if resp.Events != nil {
		for _, respEvent := range resp.Events {
			event := &svcapitypes.Event{}
			if respEvent.Message != nil {
				event.Message = respEvent.Message
			}
			if respEvent.Date != nil {
				eventDate := metav1.NewTime(*respEvent.Date)
				event.Date = &eventDate
			}
			// Not copying redundant source id (replication id)
			// and source type (replication group)
			// into each event object
			events = append(events, event)
		}
	}
	return events, nil
}

// setAnnotationsFields copies the desired object's annotations, populates any
// relevant fields, and sets the latest object's annotations to this newly populated map.
// Fields that are handled by custom modify implementation are not set here.
// This should only be called upon a successful create or modify call.
func (rm *resourceManager) setAnnotationsFields(
	r *resource,
	ko *svcapitypes.ReplicationGroup,
) {
	annotations := getAnnotationsFields(r, ko)

	rm.setLastRequestedLogDeliveryConfigurations(r, annotations)
	rm.setLastRequestedCacheNodeType(r, annotations)
	ko.ObjectMeta.Annotations = annotations
}

// getAnnotationsFields return the annotations map that would be used to set the fields
// NOTE: when ko has no annotations yet, the desired object's annotations are
// copied into a fresh map which is also assigned to ko (the returned map
// aliases ko.ObjectMeta.Annotations in both branches).
func getAnnotationsFields(
	r *resource,
	ko *svcapitypes.ReplicationGroup) map[string]string {

	if ko.ObjectMeta.Annotations != nil {
		return ko.ObjectMeta.Annotations
	}

	desiredAnnotations := r.ko.ObjectMeta.GetAnnotations()
	annotations := make(map[string]string)
	for k, v := range desiredAnnotations {
		annotations[k] = v
	}

	ko.ObjectMeta.Annotations = annotations
	return annotations
}

// setLastRequestedLogDeliveryConfigurations copies desired.Spec.LogDeliveryConfigurations
// into the annotations of the object.
// r is the desired resource, and annotations is the annotations map modified by this method
func (rm *resourceManager) setLastRequestedLogDeliveryConfigurations(
	r *resource,
	annotations map[string]string,
) {
	lastRequestedConfigs, err := json.Marshal(r.ko.Spec.LogDeliveryConfigurations)
	if err != nil {
		// Marshal failure is recorded as the JSON literal "null" so later
		// comparisons still have a well-formed value to parse.
		annotations[AnnotationLastRequestedLDCs] = "null"
	} else {
		annotations[AnnotationLastRequestedLDCs] = string(lastRequestedConfigs)
	}
}

// setLastRequestedCacheNodeType copies desired.Spec.CacheNodeType into the annotation
// of the object.
+func (rm *resourceManager) setLastRequestedCacheNodeType( + r *resource, + annotations map[string]string, +) { + if r.ko.Spec.CacheNodeType != nil { + annotations[AnnotationLastRequestedCNT] = *r.ko.Spec.CacheNodeType + } +} + +// setLastRequestedNodeGroupConfiguration copies desired.spec.NodeGroupConfiguration into the +// annotation of the object +func (rm *resourceManager) setLastRequestedNodeGroupConfiguration( + r *resource, + ko *svcapitypes.ReplicationGroup, +) { + annotations := getAnnotationsFields(r, ko) + lastRequestedConfigs, err := json.Marshal(r.ko.Spec.NodeGroupConfiguration) + if err != nil { + annotations[AnnotationLastRequestedNGC] = "null" + } else { + annotations[AnnotationLastRequestedNGC] = string(lastRequestedConfigs) + } +} + +// setLastRequestedNumNodeGroups copies desired.spec.NumNodeGroups into the +// annotation of the object +func (rm *resourceManager) setLastRequestedNumNodeGroups( + r *resource, + ko *svcapitypes.ReplicationGroup, +) { + annotations := getAnnotationsFields(r, ko) + if r.ko.Spec.NumNodeGroups != nil { + annotations[AnnotationLastRequestedNNG] = strconv.Itoa(int(*r.ko.Spec.NumNodeGroups)) + } else { + annotations[AnnotationLastRequestedNNG] = "null" + } +} + +// Implements specialized logic for replication group updates. 
// CustomModifyReplicationGroup dispatches a desired/latest delta to the
// appropriate ElastiCache update API (modify, replica count change, or shard
// reconfiguration). It requeues until the replication group and all of its
// node groups are available, and surfaces terminal errors for detected
// asynchronous rollbacks.
func (rm *resourceManager) CustomModifyReplicationGroup(
	ctx context.Context,
	desired *resource,
	latest *resource,
	delta *ackcompare.Delta,
) (*resource, error) {

	latestRGStatus := latest.ko.Status.Status

	allNodeGroupsAvailable := true
	nodeGroupMembersCount := 0
	if latest.ko.Status.NodeGroups != nil {
		for _, nodeGroup := range latest.ko.Status.NodeGroups {
			if nodeGroup.Status == nil || *nodeGroup.Status != "available" {
				allNodeGroupsAvailable = false
				break
			}
		}
		for _, nodeGroup := range latest.ko.Status.NodeGroups {
			if nodeGroup.NodeGroupMembers == nil {
				continue
			}
			nodeGroupMembersCount = nodeGroupMembersCount + len(nodeGroup.NodeGroupMembers)
		}
	}

	if latestRGStatus == nil || *latestRGStatus != "available" || !allNodeGroupsAvailable {
		return nil, requeue.NeededAfter(
			errors.New("Replication Group can not be modified, it is not in 'available' state."),
			requeue.DefaultRequeueAfterDuration)
	}

	memberClustersCount := 0
	if latest.ko.Status.MemberClusters != nil {
		memberClustersCount = len(latest.ko.Status.MemberClusters)
	}
	// Member clusters must match the node-group member total before any
	// modification is attempted; otherwise a previous change is still settling.
	if memberClustersCount != nodeGroupMembersCount {
		return nil, requeue.NeededAfter(
			errors.New("Replication Group can not be modified, "+
				"need to wait for member clusters and node group members."),
			requeue.DefaultRequeueAfterDuration)
	}

	// Handle the asynchronous rollback case for while Scaling down.
	// This means that we have already attempted to apply the CacheNodeType once and
	// were not successful hence we will set a terminal condition.
	if !cacheNodeTypeRequiresUpdate(desired) && delta.DifferentAt("Spec.CacheNodeType") {
		return nil, awserr.New("InvalidParameterCombination", "Cannot update CacheNodeType, "+
			"Please refer to Events for more details", nil)

	}

	// Handle the asynchronous rollback for Resharding.
	if !nodeGroupRequiresUpdate(desired) && rm.shardConfigurationsDiffer(desired, latest) {

		return nil, awserr.New("InvalidParameterCombination", "Cannot update NodeGroups, "+
			"Please refer to Events for more details", nil)
	}

	// Handle NodeGroupConfiguration asynchronous rollback situations other than Resharding.
	if !nodeGroupRequiresUpdate(desired) && (rm.replicaCountDifference(desired, latest) != 0 && !delta.DifferentAt("Spec.ReplicasPerNodeGroup")) {
		return nil, awserr.New("InvalidParameterCombination", "Cannot update NodeGroupConfiguration, "+
			"Please refer to Events for more details", nil)
	}

	// Order of operations when diffs map to multiple updates APIs:
	// 1. When automaticFailoverEnabled differs:
	//    if automaticFailoverEnabled == false; do nothing in this custom logic, let the modify execute first.
	//    else if automaticFailoverEnabled == true then following logic should execute first.
	// 2. When multiAZ differs
	//    if multiAZ = true then below is fine.
	//    else if multiAZ = false ; do nothing in custom logic, let the modify execute.
	// 3. updateReplicaCount() is invoked Before updateShardConfiguration()
	//    because both accept availability zones, however the number of
	//    values depend on replica count.
	if desired.ko.Spec.AutomaticFailoverEnabled != nil && *desired.ko.Spec.AutomaticFailoverEnabled == false {
		latestAutomaticFailoverEnabled := latest.ko.Status.AutomaticFailover != nil && *latest.ko.Status.AutomaticFailover == "enabled"
		if latestAutomaticFailoverEnabled != *desired.ko.Spec.AutomaticFailoverEnabled {
			return rm.modifyReplicationGroup(ctx, desired, latest, delta)
		}
	}
	if desired.ko.Spec.MultiAZEnabled != nil && *desired.ko.Spec.MultiAZEnabled == false {
		latestMultiAZEnabled := latest.ko.Status.MultiAZ != nil && *latest.ko.Status.MultiAZ == "enabled"
		if latestMultiAZEnabled != *desired.ko.Spec.MultiAZEnabled {
			return rm.modifyReplicationGroup(ctx, desired, latest, delta)
		}
	}

	// increase/decrease replica count
	if diff := rm.replicaCountDifference(desired, latest); diff != 0 {
		if diff > 0 {
			return rm.increaseReplicaCount(ctx, desired, latest)
		}
		return rm.decreaseReplicaCount(ctx, desired, latest)
	}

	// If there is a scale up modification, then we would prioritize it
	// over increase/decrease shards. This is important since performing
	// scale in without scale up might fail due to insufficient memory.
	if delta.DifferentAt("Spec.CacheNodeType") && desired.ko.Status.AllowedScaleUpModifications != nil {
		if desired.ko.Spec.CacheNodeType != nil {
			for _, scaleUpInstance := range desired.ko.Status.AllowedScaleUpModifications {
				if *scaleUpInstance == *desired.ko.Spec.CacheNodeType {
					// returning nil, nil defers to the generated modify path
					return nil, nil
				}
			}
		}
	}

	// increase/decrease shards
	if rm.shardConfigurationsDiffer(desired, latest) {
		return rm.updateShardConfiguration(ctx, desired, latest)
	}

	return rm.modifyReplicationGroup(ctx, desired, latest, delta)
}

// modifyReplicationGroup updates replication group
// it handles properties that put replication group in
// modifying state if these are supplied to modify API
// irrespective of apply immediately.
func (rm *resourceManager) modifyReplicationGroup(
	ctx context.Context,
	desired *resource,
	latest *resource,
	delta *ackcompare.Delta,
) (*resource, error) {
	// Method currently handles SecurityGroupIDs, EngineVersion
	// Avoid making unnecessary DescribeCacheCluster API call if both fields are nil in spec.
	if desired.ko.Spec.SecurityGroupIDs == nil && desired.ko.Spec.EngineVersion == nil {
		// no updates done
		return nil, nil
	}

	// Get details using describe cache cluster to compute diff
	latestCacheCluster, err := rm.describeCacheCluster(ctx, latest)
	if err != nil {
		return nil, err
	}

	// SecurityGroupIds, EngineVersion
	if rm.securityGroupIdsDiffer(desired, latest, latestCacheCluster) ||
		delta.DifferentAt("Spec.EngineVersion") {
		input := rm.newModifyReplicationGroupRequestPayload(desired, latest, latestCacheCluster, delta)
		resp, respErr := rm.sdkapi.ModifyReplicationGroup(ctx, input)
		rm.metrics.RecordAPICall("UPDATE", "ModifyReplicationGroup", respErr)
		if respErr != nil {
			rm.log.V(1).Info("Error during ModifyReplicationGroup", "error", respErr)
			return nil, respErr
		}

		return rm.setReplicationGroupOutput(ctx, desired, resp.ReplicationGroup)
	}

	// no updates done
	return nil, nil
}

// replicaConfigurationsDifference returns
// positive number if desired replica count is greater than latest replica count
// negative number if desired replica count is less than latest replica count
// 0 otherwise
func (rm *resourceManager) replicaCountDifference(
	desired *resource,
	latest *resource,
) int {
	desiredSpec := desired.ko.Spec

	// There are two ways of setting replica counts for NodeGroups in Elasticache ReplicationGroup.
	// - The first way is to have the same replica count for all node groups.
	//   In this case, the Spec.ReplicasPerNodeGroup field is set to a non-nil-value integer pointer.
	// - The second way is to set different replica counts per node group.
	//   In this case, the Spec.NodeGroupConfiguration field is set to a non-nil NodeGroupConfiguration slice
	//   of NodeGroupConfiguration structs that each have a ReplicaCount non-nil-value integer pointer field
	//   that contains the number of replicas for that particular node group.
	if desiredSpec.ReplicasPerNodeGroup != nil {
		// NOTE(review): latest.ko.Spec.ReplicasPerNodeGroup is dereferenced
		// without a nil check — this panics if the latest object never had the
		// field populated; confirm sdkFind/updateSpecFields always sets it.
		return int(*desiredSpec.ReplicasPerNodeGroup - *latest.ko.Spec.ReplicasPerNodeGroup)
	} else if desiredSpec.NodeGroupConfiguration != nil {
		return rm.diffReplicasNodeGroupConfiguration(desired, latest)
	}
	return 0
}

// diffReplicasNodeGroupConfiguration takes desired Spec.NodeGroupConfiguration slice field into account to return
// positive number if desired replica count is greater than latest replica count
// negative number if desired replica count is less than latest replica count
// 0 otherwise
func (rm *resourceManager) diffReplicasNodeGroupConfiguration(
	desired *resource,
	latest *resource,
) int {
	desiredSpec := desired.ko.Spec
	latestStatus := latest.ko.Status
	// each shard could have different value for replica count
	latestReplicaCounts := map[string]int{}
	for _, latestShard := range latestStatus.NodeGroups {
		if latestShard.NodeGroupID == nil {
			continue
		}
		latestReplicaCount := 0
		if latestShard.NodeGroupMembers != nil {
			if len(latestShard.NodeGroupMembers) > 0 {
				// one member is the primary; the rest are replicas
				latestReplicaCount = len(latestShard.NodeGroupMembers) - 1
			}
		}
		latestReplicaCounts[*latestShard.NodeGroupID] = latestReplicaCount
	}
	for _, desiredShard := range desiredSpec.NodeGroupConfiguration {
		if desiredShard.NodeGroupID == nil || desiredShard.ReplicaCount == nil {
			// no specs to compare for this shard
			continue
		}
		latestShardReplicaCount, found := latestReplicaCounts[*desiredShard.NodeGroupID]
		if !found {
			// shard not present in status
			continue
		}
		if desiredShardReplicaCount := int(*desiredShard.ReplicaCount); desiredShardReplicaCount != latestShardReplicaCount {
			rm.log.V(1).Info(
				"ReplicaCount differs",
				"NodeGroup", *desiredShard.NodeGroupID,
				"desired", int(*desiredShard.ReplicaCount),
				"latest", latestShardReplicaCount,
			)
			// first differing shard decides the direction of the change
			return desiredShardReplicaCount - latestShardReplicaCount
		}
	}
	return 0
}

// shardConfigurationsDiffer returns true if shard
// configuration differs between desired, latest resource.
func (rm *resourceManager) shardConfigurationsDiffer(
	desired *resource,
	latest *resource,
) bool {
	desiredSpec := desired.ko.Spec
	latestStatus := latest.ko.Status

	// desired shards
	var desiredShardsCount *int64 = desiredSpec.NumNodeGroups
	if desiredShardsCount == nil && desiredSpec.NodeGroupConfiguration != nil {
		numShards := int64(len(desiredSpec.NodeGroupConfiguration))
		desiredShardsCount = &numShards
	}
	if desiredShardsCount == nil {
		// no shards config in desired specs
		return false
	}

	// latest shards
	var latestShardsCount *int64 = nil
	if latestStatus.NodeGroups != nil {
		numShards := int64(len(latestStatus.NodeGroups))
		latestShardsCount = &numShards
	}

	return latestShardsCount == nil || *desiredShardsCount != *latestShardsCount
}

// increaseReplicaCount invokes the IncreaseReplicaCount API and folds the
// response into the returned latest resource.
func (rm *resourceManager) increaseReplicaCount(
	ctx context.Context,
	desired *resource,
	latest *resource,
) (*resource, error) {
	input, err := rm.newIncreaseReplicaCountRequestPayload(desired, latest)
	if err != nil {
		return nil, err
	}
	resp, respErr := rm.sdkapi.IncreaseReplicaCount(ctx, input)
	rm.metrics.RecordAPICall("UPDATE", "IncreaseReplicaCount", respErr)
	if respErr != nil {
		rm.log.V(1).Info("Error during IncreaseReplicaCount", "error", respErr)
		return nil, respErr
	}
	return rm.setReplicationGroupOutput(ctx, desired, resp.ReplicationGroup)
}

// decreaseReplicaCount invokes the DecreaseReplicaCount API and folds the
// response into the returned latest resource.
func (rm *resourceManager) decreaseReplicaCount(
	ctx context.Context,
	desired *resource,
	latest *resource,
) (*resource, error) {
	input, err := rm.newDecreaseReplicaCountRequestPayload(desired, latest)
	if err != nil {
		return nil, err
	}
	resp, respErr := rm.sdkapi.DecreaseReplicaCount(ctx, input)
	rm.metrics.RecordAPICall("UPDATE", "DecreaseReplicaCount", respErr)
	if respErr != nil {
		rm.log.V(1).Info("Error during DecreaseReplicaCount", "error", respErr)
		return nil, respErr
	}
	return rm.setReplicationGroupOutput(ctx, desired, resp.ReplicationGroup)
}

// updateShardConfiguration invokes ModifyReplicationGroupShardConfiguration
// and, on success, refreshes the last-requested node-group annotations.
func (rm *resourceManager) updateShardConfiguration(
	ctx context.Context,
	desired *resource,
	latest *resource,
) (*resource, error) {
	input, err := rm.newUpdateShardConfigurationRequestPayload(desired, latest)
	if err != nil {
		return nil, err
	}
	resp, respErr := rm.sdkapi.ModifyReplicationGroupShardConfiguration(ctx, input)
	rm.metrics.RecordAPICall("UPDATE", "ModifyReplicationGroupShardConfiguration", respErr)
	if respErr != nil {
		rm.log.V(1).Info("Error during ModifyReplicationGroupShardConfiguration", "error", respErr)
		return nil, respErr
	}

	r, err := rm.setReplicationGroupOutput(ctx, desired, resp.ReplicationGroup)

	if err != nil {
		return r, err
	}

	ko := r.ko.DeepCopy()
	// Update the annotations since API call was successful
	rm.setLastRequestedNodeGroupConfiguration(desired, ko)
	rm.setLastRequestedNumNodeGroups(desired, ko)
	return &resource{ko}, nil
}

// newIncreaseReplicaCountRequestPayload returns an SDK-specific struct for the HTTP request
// payload of the Create API call for the resource
func (rm *resourceManager) newIncreaseReplicaCountRequestPayload(
	desired *resource,
	latest *resource,
) (*svcsdk.IncreaseReplicaCountInput, error) {
	res := &svcsdk.IncreaseReplicaCountInput{}
	desiredSpec := desired.ko.Spec

	res.ApplyImmediately = aws.Bool(true)
	if desiredSpec.ReplicationGroupID != nil {
		res.ReplicationGroupId = desiredSpec.ReplicationGroupID
	}
	if desiredSpec.ReplicasPerNodeGroup != nil {
		res.NewReplicaCount = Int32OrNil(desiredSpec.ReplicasPerNodeGroup)
	}

	latestStatus := latest.ko.Status
	// each shard could have different value for replica count
	latestReplicaCounts := map[string]int{}
	for _, latestShard := range latestStatus.NodeGroups {
		if latestShard.NodeGroupID == nil {
			continue
		}
		latestReplicaCount := 0
		if latestShard.NodeGroupMembers != nil {
			if len(latestShard.NodeGroupMembers) > 0 {
				latestReplicaCount = len(latestShard.NodeGroupMembers) - 1
			}
		}
		latestReplicaCounts[*latestShard.NodeGroupID] = latestReplicaCount
	}

	if desiredSpec.NodeGroupConfiguration != nil {
		shardsConfig := []*svcsdktypes.ConfigureShard{}
		for _, desiredShard := range desiredSpec.NodeGroupConfiguration {
			if desiredShard.NodeGroupID == nil {
				continue
			}
			_, found := latestReplicaCounts[*desiredShard.NodeGroupID]
			if !found {
				continue
			}
			// shard has an Id and it is present on server.
			shardConfig := &svcsdktypes.ConfigureShard{}
			shardConfig.NodeGroupId = desiredShard.NodeGroupID
			if desiredShard.ReplicaCount != nil {
				shardConfig.NewReplicaCount = Int32OrNil(desiredShard.ReplicaCount)
			}
			shardAZs := []*string{}
			if desiredShard.PrimaryAvailabilityZone != nil {
				shardAZs = append(shardAZs, desiredShard.PrimaryAvailabilityZone)
			}
			if desiredShard.ReplicaAvailabilityZones != nil {
				for _, desiredAZ := range desiredShard.ReplicaAvailabilityZones {
					shardAZs = append(shardAZs, desiredAZ)
				}
			}
			if len(shardAZs) > 0 {
				stringAZs := make([]string, len(shardAZs))
				for i, az := range shardAZs {
					if az != nil {
						stringAZs[i] = *az
					}
				}
				shardConfig.PreferredAvailabilityZones = stringAZs
			}
			shardsConfig = append(shardsConfig, shardConfig)
		}

		// Convert []*ConfigureShard to []ConfigureShard
		configShards := make([]svcsdktypes.ConfigureShard, len(shardsConfig))
		for i, config := range shardsConfig {
			if config != nil {
				configShards[i] = *config
			}
		}
		res.ReplicaConfiguration = configShards
	}

	return res, nil
}

// newDecreaseReplicaCountRequestPayload returns an SDK-specific struct for the HTTP request
// payload of the Create API call for the resource
func (rm *resourceManager) newDecreaseReplicaCountRequestPayload(
	desired *resource,
	latest *resource,
) (*svcsdk.DecreaseReplicaCountInput, error) {
	res := &svcsdk.DecreaseReplicaCountInput{}
	desiredSpec := desired.ko.Spec

	res.ApplyImmediately = aws.Bool(true)
	if desiredSpec.ReplicationGroupID != nil {
		res.ReplicationGroupId = desiredSpec.ReplicationGroupID
	}
	if desiredSpec.ReplicasPerNodeGroup != nil {
		res.NewReplicaCount = Int32OrNil(desiredSpec.ReplicasPerNodeGroup)
	}

	latestStatus := latest.ko.Status
	// each shard could have different value for replica count
	// NOTE(review): latestReplicaCounts is computed here but never read below —
	// looks like dead code carried over from the increase payload; confirm.
	latestReplicaCounts := map[string]int{}
	for _, latestShard := range latestStatus.NodeGroups {
		if latestShard.NodeGroupID == nil {
			continue
		}
		latestReplicaCount := 0
		if latestShard.NodeGroupMembers != nil {
			if len(latestShard.NodeGroupMembers) > 0 {
				latestReplicaCount = len(latestShard.NodeGroupMembers) - 1
			}
		}
		latestReplicaCounts[*latestShard.NodeGroupID] = latestReplicaCount
	}

	if desiredSpec.NodeGroupConfiguration != nil {
		configShards := make([]svcsdktypes.ConfigureShard, len(desiredSpec.NodeGroupConfiguration))
		for i, config := range desiredSpec.NodeGroupConfiguration {
			// NOTE(review): inner loop shadows i, and *az is dereferenced with
			// no nil guard (the increase payload checks az != nil) — a nil AZ
			// pointer here would panic; verify CRD validation forbids nil entries.
			stringAZs := make([]string, len(config.ReplicaAvailabilityZones))
			for i, az := range config.ReplicaAvailabilityZones {
				stringAZs[i] = *az
			}
			configShards[i] = svcsdktypes.ConfigureShard{
				NodeGroupId:                config.NodeGroupID,
				NewReplicaCount:            Int32OrNil(config.ReplicaCount),
				PreferredAvailabilityZones: stringAZs,
			}
		}
		res.ReplicaConfiguration = configShards
	}

	return res, nil
}

// newUpdateShardConfigurationRequestPayload returns an SDK-specific struct for the HTTP request
// payload of the Update API call for the resource
func (rm *resourceManager) newUpdateShardConfigurationRequestPayload(
	desired *resource,
	latest *resource,
) (*svcsdk.ModifyReplicationGroupShardConfigurationInput, error) {
	res := &svcsdk.ModifyReplicationGroupShardConfigurationInput{}

	desiredSpec := desired.ko.Spec
	latestStatus := latest.ko.Status

	// Mandatory arguments
	//   - ApplyImmediately
	//   - ReplicationGroupId
	//   - NodeGroupCount
	res.ApplyImmediately = aws.Bool(true)
	if desiredSpec.ReplicationGroupID != nil {
		res.ReplicationGroupId = desiredSpec.ReplicationGroupID
	}
	desiredShardsCount := desiredSpec.NumNodeGroups
	if desiredShardsCount == nil && desiredSpec.NodeGroupConfiguration != nil {
		numShards := int64(len(desiredSpec.NodeGroupConfiguration))
		desiredShardsCount = &numShards
	}
	if desiredShardsCount != nil {
		res.NodeGroupCount = Int32OrNil(desiredShardsCount)
	}

	// If desired nodegroup count (number of shards):
	//   - increases, then (optional) provide ReshardingConfiguration
	//   - decreases, then (mandatory) provide
	//     either NodeGroupsToRemove
	//     or NodeGroupsToRetain
	var latestShardsCount *int64 = nil
	if latestStatus.NodeGroups != nil {
		numShards := int64(len(latestStatus.NodeGroups))
		latestShardsCount = &numShards
	}

	increase := (desiredShardsCount != nil && latestShardsCount != nil && *desiredShardsCount > *latestShardsCount) ||
		(desiredShardsCount != nil && latestShardsCount == nil)
	decrease := desiredShardsCount != nil && latestShardsCount != nil && *desiredShardsCount < *latestShardsCount
	// Additional arguments
	shardsConfig := []*svcsdktypes.ReshardingConfiguration{}
	shardsToRetain := []string{}
	if desiredSpec.NodeGroupConfiguration != nil {
		for _, desiredShard := range desiredSpec.NodeGroupConfiguration {
			shardConfig := &svcsdktypes.ReshardingConfiguration{}
			if desiredShard.NodeGroupID != nil {
				shardConfig.NodeGroupId = desiredShard.NodeGroupID
				shardsToRetain = append(shardsToRetain, *desiredShard.NodeGroupID)
			}
			shardAZs := []string{}
			if desiredShard.PrimaryAvailabilityZone != nil {
				shardAZs = append(shardAZs, *desiredShard.PrimaryAvailabilityZone)
			}
			if desiredShard.ReplicaAvailabilityZones != nil {
				for _, desiredAZ := range desiredShard.ReplicaAvailabilityZones {
					shardAZs = append(shardAZs, *desiredAZ)
				}
				// NOTE(review): PreferredAvailabilityZones is only assigned inside
				// this branch, so a shard with only a primary AZ (and no replica
				// AZs) sends no AZ preferences — confirm this is intentional.
				shardConfig.PreferredAvailabilityZones = shardAZs
			}
			shardsConfig = append(shardsConfig, shardConfig)
		}
	} else if decrease {
		// No explicit configuration: retain the first N observed node groups.
		for i := 0; i < int(*desiredShardsCount); i++ {
			if desired.ko.Status.NodeGroups[i] != nil && desired.ko.Status.NodeGroups[i].NodeGroupID != nil {
				shardsToRetain = append(shardsToRetain, *desired.ko.Status.NodeGroups[i].NodeGroupID)
			}
		}
	}

	if increase {
		if len(shardsConfig) > 0 {
			reshardConfig := make([]svcsdktypes.ReshardingConfiguration, len(shardsConfig))
			for i, config := range shardsConfig {
				reshardConfig[i] = *config
			}
			res.ReshardingConfiguration = reshardConfig
		}
	} else if decrease {
		if len(shardsToRetain) == 0 {
			return nil, awserr.New("InvalidParameterValue", "At least one node group should be present.", nil)
		}
		res.NodeGroupsToRetain = shardsToRetain
	}

	return res, nil
}

// getAnyCacheClusterIDFromNodeGroups returns a cache cluster ID from supplied node groups.
// Any cache cluster Id which is not nil is returned.
func (rm *resourceManager) getAnyCacheClusterIDFromNodeGroups(
	nodeGroups []*svcapitypes.NodeGroup,
) *string {
	if nodeGroups == nil {
		return nil
	}

	var cacheClusterId *string = nil
	for _, nodeGroup := range nodeGroups {
		if nodeGroup.NodeGroupMembers == nil {
			continue
		}
		for _, nodeGroupMember := range nodeGroup.NodeGroupMembers {
			if nodeGroupMember.CacheClusterID == nil {
				continue
			}
			cacheClusterId = nodeGroupMember.CacheClusterID
			break
		}
		if cacheClusterId != nil {
			break
		}
	}
	return cacheClusterId
}

// describeCacheCluster provides CacheCluster object
// per the supplied latest Replication Group Id
// it invokes DescribeCacheClusters API to do so
// Returns (nil, nil) when no member cluster can be identified from status.
func (rm *resourceManager) describeCacheCluster(
	ctx context.Context,
	resource *resource,
) (*svcsdktypes.CacheCluster, error) {
	input := &svcsdk.DescribeCacheClustersInput{}

	ko := resource.ko
	latestStatus := ko.Status
	if latestStatus.NodeGroups == nil {
		return nil, nil
	}
	cacheClusterId := rm.getAnyCacheClusterIDFromNodeGroups(latestStatus.NodeGroups)
	if cacheClusterId == nil {
		return nil, nil
	}

	input.CacheClusterId = cacheClusterId
	resp, respErr := rm.sdkapi.DescribeCacheClusters(ctx, input)
	rm.metrics.RecordAPICall("READ_MANY", "DescribeCacheClusters", respErr)
	if respErr != nil {
		rm.log.V(1).Info("Error during DescribeCacheClusters", "error", respErr)
		return nil, respErr
	}
	if resp.CacheClusters == nil {
		return nil, nil
	}

	// return the first cluster in the response with a non-nil id
	for _, cc := range resp.CacheClusters {
		if cc.CacheClusterId == nil {
			continue
		}
		return &cc, nil
	}
	return nil, fmt.Errorf("could not find a non-nil cache cluster from API response")
}

// securityGroupIdsDiffer return true if
// Security Group Ids differ between desired spec and latest (from cache cluster) status
func (rm *resourceManager) securityGroupIdsDiffer(
	desired *resource,
	latest *resource,
	latestCacheCluster *svcsdktypes.CacheCluster,
) bool {
	if desired.ko.Spec.SecurityGroupIDs == nil {
		return false
	}

	// Compare as sorted sets: order in spec/response is not significant.
	desiredIds := []*string{}
	for _, id := range desired.ko.Spec.SecurityGroupIDs {
		if id == nil {
			continue
		}
		var value string
		value = *id
		desiredIds = append(desiredIds, &value)
	}
	sort.Slice(desiredIds, func(i, j int) bool {
		return *desiredIds[i] < *desiredIds[j]
	})

	latestIds := []*string{}
	if latestCacheCluster != nil && latestCacheCluster.SecurityGroups != nil {
		for _, latestSG := range latestCacheCluster.SecurityGroups {
			if latestSG.SecurityGroupId == nil {
				continue
			}
			var value string
			value = *latestSG.SecurityGroupId
			latestIds = append(latestIds, &value)
		}
	}
	sort.Slice(latestIds, func(i, j int) bool {
		return *latestIds[i] < *latestIds[j]
	})

	if len(desiredIds) != len(latestIds) {
		return true // differ
	}
	for index, desiredId := range desiredIds {
		if *desiredId != *latestIds[index] {
			return true // differ
		}
	}
	// no difference
	return false
}

// newModifyReplicationGroupRequestPayload provides request input object
func (rm *resourceManager) newModifyReplicationGroupRequestPayload(
	desired *resource,
	latest *resource,
	latestCacheCluster *svcsdktypes.CacheCluster,
	delta *ackcompare.Delta,
) *svcsdk.ModifyReplicationGroupInput {
	input := &svcsdk.ModifyReplicationGroupInput{}

	input.ApplyImmediately = aws.Bool(true)
	if desired.ko.Spec.ReplicationGroupID != nil {
		input.ReplicationGroupId = desired.ko.Spec.ReplicationGroupID
	}

	if rm.securityGroupIdsDiffer(desired, latest, latestCacheCluster) &&
		desired.ko.Spec.SecurityGroupIDs != nil {
		ids := []string{}
		for _, id := range desired.ko.Spec.SecurityGroupIDs {
			var value string
			value = *id
			ids = append(ids, value)
		}
		input.SecurityGroupIds = ids
	}

	if delta.DifferentAt("Spec.EngineVersion") &&
		desired.ko.Spec.EngineVersion != nil {
		input.EngineVersion = desired.ko.Spec.EngineVersion
	}

	return input
}

// cacheNodeTypeRequiresUpdate retrieves the last requested cacheNodeType saved in annotations and compares them
// to the current desired cacheNodeType
func cacheNodeTypeRequiresUpdate(desired *resource) bool {
	annotations := desired.ko.ObjectMeta.GetAnnotations()
	if val, ok := annotations[AnnotationLastRequestedCNT]; ok && desired.ko.Spec.CacheNodeType != nil {
		return val != *desired.ko.Spec.CacheNodeType
	}

	// This means there is delta and no value in annotation or in Spec
	return true
}

// nodeGroupRequiresUpdate retrieves the last applied NumNodeGroups and NodeGroupConfiguration and compares them
// to the current desired NumNodeGroups and NodeGroupConfiguration
func nodeGroupRequiresUpdate(desired *resource) bool {
	annotations := desired.ko.ObjectMeta.GetAnnotations()

	if val, ok := annotations[AnnotationLastRequestedNNG]; ok && val != "null" {
		numNodes, err := strconv.ParseInt(val, 10, 64)

		if err != nil {
			// unparseable annotation is treated as "no update required"
			return false
		}

		// NOTE(review): desired.ko.Spec.NumNodeGroups is dereferenced without a
		// nil check — if the annotation exists but the user removed the Spec
		// field, this panics; confirm an upstream defaulting rule prevents that.
		if numNodes != *desired.ko.Spec.NumNodeGroups {
			return true
		}

		return false
	}

	desiredNodeGroupConfig := desired.ko.Spec.NodeGroupConfiguration
	if val, ok := annotations[AnnotationLastRequestedNGC]; ok && val != "null" {
		var lastRequestedNodeGroupConfig []*svcapitypes.NodeGroupConfiguration
		// unmarshal error intentionally ignored: a nil slice simply compares unequal
		_ = json.Unmarshal([]byte(val), &lastRequestedNodeGroupConfig)
		return !reflect.DeepEqual(desiredNodeGroupConfig, lastRequestedNodeGroupConfig)
	}

	// This means there is delta and no value in annotation or in Spec
	return true
}

/*
To be called in sdkFind, this function updates the replication group's Spec fields with the latest observed state
This requires extra processing of the API response as well as additional API calls, and is necessary because
sdkFind does not update many of these Spec fields by default. "resource" is a wrapper around "ko", the object
which will eventually be returned as "latest".
+*/ +func (rm *resourceManager) updateSpecFields( + ctx context.Context, + respRG svcsdktypes.ReplicationGroup, + resource *resource, +) { + if isDeleting(resource) { + return + } + // populate relevant ko.Spec fields with observed state of respRG.NodeGroups + setReplicasPerNodeGroup(respRG, resource) + setNodeGroupConfiguration(respRG, resource) + + // updating some Spec fields requires a DescribeCacheClusters call + latestCacheCluster, err := rm.describeCacheCluster(ctx, resource) + if err == nil && latestCacheCluster != nil { + setEngineVersion(latestCacheCluster, resource) + setMaintenanceWindow(latestCacheCluster, resource) + setCacheParameterGroup(latestCacheCluster, resource) + } +} + +// if NodeGroupConfiguration was given in the desired.Spec, update ko.Spec with the latest observed value +func setNodeGroupConfiguration( + respRG svcsdktypes.ReplicationGroup, + resource *resource, +) { + ko := resource.ko + if respRG.NodeGroups != nil && ko.Spec.NodeGroupConfiguration != nil { + nodeGroupConfigurations := []*svcapitypes.NodeGroupConfiguration{} + for _, nodeGroup := range respRG.NodeGroups { + nodeGroupConfiguration := &svcapitypes.NodeGroupConfiguration{} + + if nodeGroup.NodeGroupId != nil { + nodeGroupConfiguration.NodeGroupID = nodeGroup.NodeGroupId + } + replicaAZs := []*string{} + + for _, nodeGroupMember := range nodeGroup.NodeGroupMembers { + if nodeGroupMember.CurrentRole != nil && *nodeGroupMember.CurrentRole == "primary" { + nodeGroupConfiguration.PrimaryAvailabilityZone = nodeGroupMember.PreferredAvailabilityZone + } + + // In this case we cannot say what is primary AZ and replica AZ. 
+ if nodeGroupMember.CurrentRole == nil && nodeGroupConfiguration.PrimaryAvailabilityZone == nil { + // We cannot determine the correct AZ so we would use the first node group member as primary + nodeGroupConfiguration.PrimaryAvailabilityZone = nodeGroupMember.PreferredAvailabilityZone + } + + if nodeGroupConfiguration.PrimaryAvailabilityZone != nil || *nodeGroupMember.CurrentRole == "replica" { + replicaAZs = append(replicaAZs, nodeGroupMember.PreferredAvailabilityZone) + } + } + + if len(replicaAZs) > 0 { + nodeGroupConfiguration.ReplicaAvailabilityZones = replicaAZs + } + + replicaCount := int64(len(replicaAZs)) + nodeGroupConfiguration.ReplicaCount = &replicaCount + } + + ko.Spec.NodeGroupConfiguration = nodeGroupConfigurations + } + + if respRG.NodeGroups != nil && ko.Spec.NumNodeGroups != nil { + *ko.Spec.NumNodeGroups = int64(len(respRG.NodeGroups)) + } +} + +//TODO: for all the fields here, reevaluate if the latest observed state should always be populated, +// even if the corresponding field was not specified in desired + +// if ReplicasPerNodeGroup was given in desired.Spec, update ko.Spec with the latest observed value +func setReplicasPerNodeGroup( + respRG svcsdktypes.ReplicationGroup, + resource *resource, +) { + ko := resource.ko + if respRG.NodeGroups != nil && ko.Spec.ReplicasPerNodeGroup != nil { + // if ReplicasPerNodeGroup is specified, all node groups should have the same # replicas so use the first + nodeGroup := respRG.NodeGroups[0] + if nodeGroup.NodeGroupMembers != nil { + if len(nodeGroup.NodeGroupMembers) > 0 { + *ko.Spec.ReplicasPerNodeGroup = int64(len(nodeGroup.NodeGroupMembers) - 1) + } + } + } +} + +// if EngineVersion was specified in desired.Spec, update ko.Spec with the latest observed value (if non-nil) +func setEngineVersion( + latestCacheCluster *svcsdktypes.CacheCluster, + resource *resource, +) { + ko := resource.ko + if ko.Spec.EngineVersion != nil && latestCacheCluster.EngineVersion != nil { + *ko.Spec.EngineVersion = 
*latestCacheCluster.EngineVersion + } +} + +// update maintenance window (if non-nil in API response) regardless of whether it was specified in desired +func setMaintenanceWindow( + latestCacheCluster *svcsdktypes.CacheCluster, + resource *resource, +) { + ko := resource.ko + if latestCacheCluster.PreferredMaintenanceWindow != nil { + pmw := *latestCacheCluster.PreferredMaintenanceWindow + ko.Spec.PreferredMaintenanceWindow = &pmw + } +} + +// setCacheParameterGroup updates the cache parameter group associated with the replication group +// +// (if non-nil in API response) regardless of whether it was specified in desired +func setCacheParameterGroup( + latestCacheCluster *svcsdktypes.CacheCluster, + resource *resource, +) { + ko := resource.ko + if latestCacheCluster.CacheParameterGroup != nil && latestCacheCluster.CacheParameterGroup.CacheParameterGroupName != nil { + cpgName := *latestCacheCluster.CacheParameterGroup.CacheParameterGroupName + ko.Spec.CacheParameterGroupName = &cpgName + } +} + +// modifyDelta removes non-meaningful differences from the delta and adds additional differences if necessary +func modifyDelta( + delta *ackcompare.Delta, + desired *resource, + latest *resource, +) { + + if delta.DifferentAt("Spec.EngineVersion") { + if desired.ko.Spec.EngineVersion != nil && latest.ko.Spec.EngineVersion != nil { + if util.EngineVersionsMatch(*desired.ko.Spec.EngineVersion, *latest.ko.Spec.EngineVersion) { + common.RemoveFromDelta(delta, "Spec.EngineVersion") + } + } + // TODO: handle the case of a nil difference (especially when desired EV is nil) + } + + // if server has given PreferredMaintenanceWindow a default value, no action needs to be taken + if delta.DifferentAt("Spec.PreferredMaintenanceWindow") { + if desired.ko.Spec.PreferredMaintenanceWindow == nil && latest.ko.Spec.PreferredMaintenanceWindow != nil { + common.RemoveFromDelta(delta, "Spec.PreferredMaintenanceWindow") + } + } + + // note that the comparison is actually done between 
desired.Spec.LogDeliveryConfigurations and + // the last requested configurations saved in annotations (as opposed to latest.Spec.LogDeliveryConfigurations) + if logDeliveryRequiresUpdate(desired) { + delta.Add("Spec.LogDeliveryConfigurations", desired.ko.Spec.LogDeliveryConfigurations, + unmarshalLastRequestedLDCs(desired)) + } + + if multiAZRequiresUpdate(desired, latest) { + delta.Add("Spec.MultiAZEnabled", desired.ko.Spec.MultiAZEnabled, latest.ko.Status.MultiAZ) + } + + if autoFailoverRequiresUpdate(desired, latest) { + delta.Add("Spec.AutomaticFailoverEnabled", desired.ko.Spec.AutomaticFailoverEnabled, + latest.ko.Status.AutomaticFailover) + } + + if updateRequired, current := primaryClusterIDRequiresUpdate(desired, latest); updateRequired { + delta.Add("Spec.PrimaryClusterID", desired.ko.Spec.PrimaryClusterID, *current) + } +} + +// logDeliveryRequiresUpdate retrieves the last requested configurations saved in annotations and compares them +// to the current desired configurations +func logDeliveryRequiresUpdate(desired *resource) bool { + desiredConfigs := desired.ko.Spec.LogDeliveryConfigurations + lastRequestedConfigs := unmarshalLastRequestedLDCs(desired) + return !reflect.DeepEqual(desiredConfigs, lastRequestedConfigs) +} + +// unmarshal the value found in annotations for the LogDeliveryConfigurations field requested in the last +// successful create or modify call +func unmarshalLastRequestedLDCs(desired *resource) []*svcapitypes.LogDeliveryConfigurationRequest { + var lastRequestedConfigs []*svcapitypes.LogDeliveryConfigurationRequest + + annotations := desired.ko.ObjectMeta.GetAnnotations() + if val, ok := annotations[AnnotationLastRequestedLDCs]; ok { + _ = json.Unmarshal([]byte(val), &lastRequestedConfigs) + } + + return lastRequestedConfigs +} + +// multiAZRequiresUpdate returns true if the latest multi AZ status does not yet match the +// desired state, and false otherwise +func multiAZRequiresUpdate(desired *resource, latest *resource) bool { + 
// no preference for multi AZ specified; no update required + if desired.ko.Spec.MultiAZEnabled == nil { + return false + } + + // API should return a non-nil value, but if it doesn't then attempt to update + if latest.ko.Status.MultiAZ == nil { + return true + } + + // true maps to "enabled"; false maps to "disabled" + // this accounts for values such as "enabling" and "disabling" + if *desired.ko.Spec.MultiAZEnabled { + return *latest.ko.Status.MultiAZ != string(svcapitypes.MultiAZStatus_enabled) + } else { + return *latest.ko.Status.MultiAZ != string(svcapitypes.MultiAZStatus_disabled) + } +} + +// autoFailoverRequiresUpdate returns true if the latest auto failover status does not yet match the +// desired state, and false otherwise +func autoFailoverRequiresUpdate(desired *resource, latest *resource) bool { + // the logic is exactly analogous to multiAZRequiresUpdate above + if desired.ko.Spec.AutomaticFailoverEnabled == nil { + return false + } + + if latest.ko.Status.AutomaticFailover == nil { + return true + } + + if *desired.ko.Spec.AutomaticFailoverEnabled { + return *latest.ko.Status.AutomaticFailover != string(svcapitypes.AutomaticFailoverStatus_enabled) + } else { + return *latest.ko.Status.AutomaticFailover != string(svcapitypes.AutomaticFailoverStatus_disabled) + } +} + +// primaryClusterIDRequiresUpdate retrieves the current primary cluster ID and determines whether +// an update is required. If no desired state is specified or there is an issue retrieving the +// latest state, return false, nil. 
Otherwise, return false or true depending on equality of +// the latest and desired states, and a non-nil pointer to the latest value +func primaryClusterIDRequiresUpdate(desired *resource, latest *resource) (bool, *string) { + if desired.ko.Spec.PrimaryClusterID == nil { + return false, nil + } + + // primary cluster ID applies to cluster mode disabled only; if API returns multiple + // or no node groups, or the provided node group is nil, there is nothing that can be done + if len(latest.ko.Status.NodeGroups) != 1 || latest.ko.Status.NodeGroups[0] == nil { + return false, nil + } + + // attempt to find primary cluster in node group. If for some reason it is not present, we + // don't have a reliable latest state, so do nothing + ng := *latest.ko.Status.NodeGroups[0] + for _, member := range ng.NodeGroupMembers { + if member == nil { + continue + } + + if member.CurrentRole != nil && *member.CurrentRole == "primary" && member.CacheClusterID != nil { + val := *member.CacheClusterID + return val != *desired.ko.Spec.PrimaryClusterID, &val + } + } + + return false, nil +} + +func Int32OrNil(i *int64) *int32 { + if i == nil { + return nil + } + return aws.Int32(int32(*i)) +} diff --git a/pkg/resource/replication_group/post_set_output.go b/pkg/resource/replication_group/post_set_output.go deleted file mode 100644 index 29cd6d74..00000000 --- a/pkg/resource/replication_group/post_set_output.go +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. 
See the License for the specific language governing -// permissions and limitations under the License. - -package replication_group - -import ( - "context" - - svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" - - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" -) - -/* -To be called in sdkFind, this function updates the replication group's Spec fields with the latest observed state -This requires extra processing of the API response as well as additional API calls, and is necessary because -sdkFind does not update many of these Spec fields by default. "resource" is a wrapper around "ko", the object -which will eventually be returned as "latest". -*/ -func (rm *resourceManager) updateSpecFields( - ctx context.Context, - respRG *svcsdk.ReplicationGroup, - resource *resource, -) { - if isDeleting(resource) { - return - } - // populate relevant ko.Spec fields with observed state of respRG.NodeGroups - setReplicasPerNodeGroup(respRG, resource) - setNodeGroupConfiguration(respRG, resource) - - // updating some Spec fields requires a DescribeCacheClusters call - latestCacheCluster, err := rm.describeCacheCluster(ctx, resource) - if err == nil && latestCacheCluster != nil { - setEngineVersion(latestCacheCluster, resource) - setMaintenanceWindow(latestCacheCluster, resource) - setCacheParameterGroup(latestCacheCluster, resource) - } -} - -// if NodeGroupConfiguration was given in the desired.Spec, update ko.Spec with the latest observed value -func setNodeGroupConfiguration( - respRG *svcsdk.ReplicationGroup, - resource *resource, -) { - ko := resource.ko - if respRG.NodeGroups != nil && ko.Spec.NodeGroupConfiguration != nil { - nodeGroupConfigurations := []*svcapitypes.NodeGroupConfiguration{} - for _, nodeGroup := range respRG.NodeGroups { - nodeGroupConfiguration := &svcapitypes.NodeGroupConfiguration{} - - if nodeGroup.NodeGroupId != nil { - nodeGroupConfiguration.NodeGroupID = nodeGroup.NodeGroupId - } - replicaAZs := []*string{} 
- - for _, nodeGroupMember := range nodeGroup.NodeGroupMembers { - if nodeGroupMember.CurrentRole != nil && *nodeGroupMember.CurrentRole == "primary" { - nodeGroupConfiguration.PrimaryAvailabilityZone = nodeGroupMember.PreferredAvailabilityZone - } - - // In this case we cannot say what is primary AZ and replica AZ. - if nodeGroupMember.CurrentRole == nil && nodeGroupConfiguration.PrimaryAvailabilityZone == nil { - // We cannot determine the correct AZ so we would use the first node group member as primary - nodeGroupConfiguration.PrimaryAvailabilityZone = nodeGroupMember.PreferredAvailabilityZone - } - - if nodeGroupConfiguration.PrimaryAvailabilityZone != nil || *nodeGroupMember.CurrentRole == "replica" { - replicaAZs = append(replicaAZs, nodeGroupMember.PreferredAvailabilityZone) - } - } - - if len(replicaAZs) > 0 { - nodeGroupConfiguration.ReplicaAvailabilityZones = replicaAZs - } - - replicaCount := int64(len(replicaAZs)) - nodeGroupConfiguration.ReplicaCount = &replicaCount - } - - ko.Spec.NodeGroupConfiguration = nodeGroupConfigurations - } - - if respRG.NodeGroups != nil && ko.Spec.NumNodeGroups != nil { - *ko.Spec.NumNodeGroups = int64(len(respRG.NodeGroups)) - } -} - -//TODO: for all the fields here, reevaluate if the latest observed state should always be populated, -// even if the corresponding field was not specified in desired - -// if ReplicasPerNodeGroup was given in desired.Spec, update ko.Spec with the latest observed value -func setReplicasPerNodeGroup( - respRG *svcsdk.ReplicationGroup, - resource *resource, -) { - ko := resource.ko - if respRG.NodeGroups != nil && ko.Spec.ReplicasPerNodeGroup != nil { - // if ReplicasPerNodeGroup is specified, all node groups should have the same # replicas so use the first - nodeGroup := respRG.NodeGroups[0] - if nodeGroup != nil && nodeGroup.NodeGroupMembers != nil { - if len(nodeGroup.NodeGroupMembers) > 0 { - *ko.Spec.ReplicasPerNodeGroup = int64(len(nodeGroup.NodeGroupMembers) - 1) - } - } - } -} - -// if 
EngineVersion was specified in desired.Spec, update ko.Spec with the latest observed value (if non-nil) -func setEngineVersion( - latestCacheCluster *svcsdk.CacheCluster, - resource *resource, -) { - ko := resource.ko - if ko.Spec.EngineVersion != nil && latestCacheCluster.EngineVersion != nil { - *ko.Spec.EngineVersion = *latestCacheCluster.EngineVersion - } -} - -// update maintenance window (if non-nil in API response) regardless of whether it was specified in desired -func setMaintenanceWindow( - latestCacheCluster *svcsdk.CacheCluster, - resource *resource, -) { - ko := resource.ko - if latestCacheCluster.PreferredMaintenanceWindow != nil { - pmw := *latestCacheCluster.PreferredMaintenanceWindow - ko.Spec.PreferredMaintenanceWindow = &pmw - } -} - -// setCacheParameterGroup updates the cache parameter group associated with the replication group -// -// (if non-nil in API response) regardless of whether it was specified in desired -func setCacheParameterGroup( - latestCacheCluster *svcsdk.CacheCluster, - resource *resource, -) { - ko := resource.ko - if latestCacheCluster.CacheParameterGroup != nil && latestCacheCluster.CacheParameterGroup.CacheParameterGroupName != nil { - cpgName := *latestCacheCluster.CacheParameterGroup.CacheParameterGroupName - ko.Spec.CacheParameterGroupName = &cpgName - } -} diff --git a/pkg/resource/replication_group/sdk.go b/pkg/resource/replication_group/sdk.go index 9bcd3cdf..67f4220f 100644 --- a/pkg/resource/replication_group/sdk.go +++ b/pkg/resource/replication_group/sdk.go @@ -134,21 +134,16 @@ func (rm *resourceManager) sdkFind( } else { ko.Status.ClusterEnabled = nil } - if elem.ClusterMode != "" { - ko.Spec.ClusterMode = aws.String(string(elem.ClusterMode)) - } else { - ko.Spec.ClusterMode = nil - } if elem.ConfigurationEndpoint != nil { - f9 := &svcapitypes.Endpoint{} + f8 := &svcapitypes.Endpoint{} if elem.ConfigurationEndpoint.Address != nil { - f9.Address = elem.ConfigurationEndpoint.Address + f8.Address = 
elem.ConfigurationEndpoint.Address } if elem.ConfigurationEndpoint.Port != nil { portCopy := int64(*elem.ConfigurationEndpoint.Port) - f9.Port = &portCopy + f8.Port = &portCopy } - ko.Status.ConfigurationEndpoint = f9 + ko.Status.ConfigurationEndpoint = f8 } else { ko.Status.ConfigurationEndpoint = nil } @@ -168,61 +163,56 @@ func (rm *resourceManager) sdkFind( ko.Spec.Engine = nil } if elem.GlobalReplicationGroupInfo != nil { - f13 := &svcapitypes.GlobalReplicationGroupInfo{} + f12 := &svcapitypes.GlobalReplicationGroupInfo{} if elem.GlobalReplicationGroupInfo.GlobalReplicationGroupId != nil { - f13.GlobalReplicationGroupID = elem.GlobalReplicationGroupInfo.GlobalReplicationGroupId + f12.GlobalReplicationGroupID = elem.GlobalReplicationGroupInfo.GlobalReplicationGroupId } if elem.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole != nil { - f13.GlobalReplicationGroupMemberRole = elem.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole + f12.GlobalReplicationGroupMemberRole = elem.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole } - ko.Status.GlobalReplicationGroupInfo = f13 + ko.Status.GlobalReplicationGroupInfo = f12 } else { ko.Status.GlobalReplicationGroupInfo = nil } - if elem.IpDiscovery != "" { - ko.Spec.IPDiscovery = aws.String(string(elem.IpDiscovery)) - } else { - ko.Spec.IPDiscovery = nil - } if elem.KmsKeyId != nil { ko.Spec.KMSKeyID = elem.KmsKeyId } else { ko.Spec.KMSKeyID = nil } if elem.LogDeliveryConfigurations != nil { - f16 := []*svcapitypes.LogDeliveryConfigurationRequest{} - for _, f16iter := range elem.LogDeliveryConfigurations { - f16elem := &svcapitypes.LogDeliveryConfigurationRequest{} - if f16iter.DestinationDetails != nil { - f16elemf0 := &svcapitypes.DestinationDetails{} - if f16iter.DestinationDetails.CloudWatchLogsDetails != nil { - f16elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} - if f16iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f16elemf0f0.LogGroup = 
f16iter.DestinationDetails.CloudWatchLogsDetails.LogGroup + f14 := []*svcapitypes.LogDeliveryConfigurationRequest{} + for _, f14iter := range elem.LogDeliveryConfigurations { + f14elem := &svcapitypes.LogDeliveryConfigurationRequest{} + if f14iter.DestinationDetails != nil { + f14elemf0 := &svcapitypes.DestinationDetails{} + if f14iter.DestinationDetails.CloudWatchLogsDetails != nil { + f14elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} + if f14iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f14elemf0f0.LogGroup = f14iter.DestinationDetails.CloudWatchLogsDetails.LogGroup } - f16elemf0.CloudWatchLogsDetails = f16elemf0f0 + f14elemf0.CloudWatchLogsDetails = f14elemf0f0 } - if f16iter.DestinationDetails.KinesisFirehoseDetails != nil { - f16elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} - if f16iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f16elemf0f1.DeliveryStream = f16iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream + if f14iter.DestinationDetails.KinesisFirehoseDetails != nil { + f14elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} + if f14iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f14elemf0f1.DeliveryStream = f14iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream } - f16elemf0.KinesisFirehoseDetails = f16elemf0f1 + f14elemf0.KinesisFirehoseDetails = f14elemf0f1 } - f16elem.DestinationDetails = f16elemf0 + f14elem.DestinationDetails = f14elemf0 } - if f16iter.DestinationType != "" { - f16elem.DestinationType = aws.String(string(f16iter.DestinationType)) + if f14iter.DestinationType != "" { + f14elem.DestinationType = aws.String(string(f14iter.DestinationType)) } - if f16iter.LogFormat != "" { - f16elem.LogFormat = aws.String(string(f16iter.LogFormat)) + if f14iter.LogFormat != "" { + f14elem.LogFormat = aws.String(string(f14iter.LogFormat)) } - if f16iter.LogType != "" { - f16elem.LogType = aws.String(string(f16iter.LogType)) + if 
f14iter.LogType != "" { + f14elem.LogType = aws.String(string(f14iter.LogType)) } - f16 = append(f16, f16elem) + f14 = append(f14, f14elem) } - ko.Spec.LogDeliveryConfigurations = f16 + ko.Spec.LogDeliveryConfigurations = f14 } else { ko.Spec.LogDeliveryConfigurations = nil } @@ -241,163 +231,149 @@ func (rm *resourceManager) sdkFind( } else { ko.Status.MultiAZ = nil } - if elem.NetworkType != "" { - ko.Spec.NetworkType = aws.String(string(elem.NetworkType)) - } else { - ko.Spec.NetworkType = nil - } if elem.NodeGroups != nil { - f21 := []*svcapitypes.NodeGroup{} - for _, f21iter := range elem.NodeGroups { - f21elem := &svcapitypes.NodeGroup{} - if f21iter.NodeGroupId != nil { - f21elem.NodeGroupID = f21iter.NodeGroupId + f18 := []*svcapitypes.NodeGroup{} + for _, f18iter := range elem.NodeGroups { + f18elem := &svcapitypes.NodeGroup{} + if f18iter.NodeGroupId != nil { + f18elem.NodeGroupID = f18iter.NodeGroupId } - if f21iter.NodeGroupMembers != nil { - f21elemf1 := []*svcapitypes.NodeGroupMember{} - for _, f21elemf1iter := range f21iter.NodeGroupMembers { - f21elemf1elem := &svcapitypes.NodeGroupMember{} - if f21elemf1iter.CacheClusterId != nil { - f21elemf1elem.CacheClusterID = f21elemf1iter.CacheClusterId + if f18iter.NodeGroupMembers != nil { + f18elemf1 := []*svcapitypes.NodeGroupMember{} + for _, f18elemf1iter := range f18iter.NodeGroupMembers { + f18elemf1elem := &svcapitypes.NodeGroupMember{} + if f18elemf1iter.CacheClusterId != nil { + f18elemf1elem.CacheClusterID = f18elemf1iter.CacheClusterId } - if f21elemf1iter.CacheNodeId != nil { - f21elemf1elem.CacheNodeID = f21elemf1iter.CacheNodeId + if f18elemf1iter.CacheNodeId != nil { + f18elemf1elem.CacheNodeID = f18elemf1iter.CacheNodeId } - if f21elemf1iter.CurrentRole != nil { - f21elemf1elem.CurrentRole = f21elemf1iter.CurrentRole + if f18elemf1iter.CurrentRole != nil { + f18elemf1elem.CurrentRole = f18elemf1iter.CurrentRole } - if f21elemf1iter.PreferredAvailabilityZone != nil { - 
f21elemf1elem.PreferredAvailabilityZone = f21elemf1iter.PreferredAvailabilityZone + if f18elemf1iter.PreferredAvailabilityZone != nil { + f18elemf1elem.PreferredAvailabilityZone = f18elemf1iter.PreferredAvailabilityZone } - if f21elemf1iter.PreferredOutpostArn != nil { - f21elemf1elem.PreferredOutpostARN = f21elemf1iter.PreferredOutpostArn + if f18elemf1iter.PreferredOutpostArn != nil { + f18elemf1elem.PreferredOutpostARN = f18elemf1iter.PreferredOutpostArn } - if f21elemf1iter.ReadEndpoint != nil { - f21elemf1elemf5 := &svcapitypes.Endpoint{} - if f21elemf1iter.ReadEndpoint.Address != nil { - f21elemf1elemf5.Address = f21elemf1iter.ReadEndpoint.Address + if f18elemf1iter.ReadEndpoint != nil { + f18elemf1elemf5 := &svcapitypes.Endpoint{} + if f18elemf1iter.ReadEndpoint.Address != nil { + f18elemf1elemf5.Address = f18elemf1iter.ReadEndpoint.Address } - if f21elemf1iter.ReadEndpoint.Port != nil { - portCopy := int64(*f21elemf1iter.ReadEndpoint.Port) - f21elemf1elemf5.Port = &portCopy + if f18elemf1iter.ReadEndpoint.Port != nil { + portCopy := int64(*f18elemf1iter.ReadEndpoint.Port) + f18elemf1elemf5.Port = &portCopy } - f21elemf1elem.ReadEndpoint = f21elemf1elemf5 + f18elemf1elem.ReadEndpoint = f18elemf1elemf5 } - f21elemf1 = append(f21elemf1, f21elemf1elem) + f18elemf1 = append(f18elemf1, f18elemf1elem) } - f21elem.NodeGroupMembers = f21elemf1 + f18elem.NodeGroupMembers = f18elemf1 } - if f21iter.PrimaryEndpoint != nil { - f21elemf2 := &svcapitypes.Endpoint{} - if f21iter.PrimaryEndpoint.Address != nil { - f21elemf2.Address = f21iter.PrimaryEndpoint.Address + if f18iter.PrimaryEndpoint != nil { + f18elemf2 := &svcapitypes.Endpoint{} + if f18iter.PrimaryEndpoint.Address != nil { + f18elemf2.Address = f18iter.PrimaryEndpoint.Address } - if f21iter.PrimaryEndpoint.Port != nil { - portCopy := int64(*f21iter.PrimaryEndpoint.Port) - f21elemf2.Port = &portCopy + if f18iter.PrimaryEndpoint.Port != nil { + portCopy := int64(*f18iter.PrimaryEndpoint.Port) + f18elemf2.Port = 
&portCopy } - f21elem.PrimaryEndpoint = f21elemf2 + f18elem.PrimaryEndpoint = f18elemf2 } - if f21iter.ReaderEndpoint != nil { - f21elemf3 := &svcapitypes.Endpoint{} - if f21iter.ReaderEndpoint.Address != nil { - f21elemf3.Address = f21iter.ReaderEndpoint.Address + if f18iter.ReaderEndpoint != nil { + f18elemf3 := &svcapitypes.Endpoint{} + if f18iter.ReaderEndpoint.Address != nil { + f18elemf3.Address = f18iter.ReaderEndpoint.Address } - if f21iter.ReaderEndpoint.Port != nil { - portCopy := int64(*f21iter.ReaderEndpoint.Port) - f21elemf3.Port = &portCopy + if f18iter.ReaderEndpoint.Port != nil { + portCopy := int64(*f18iter.ReaderEndpoint.Port) + f18elemf3.Port = &portCopy } - f21elem.ReaderEndpoint = f21elemf3 + f18elem.ReaderEndpoint = f18elemf3 } - if f21iter.Slots != nil { - f21elem.Slots = f21iter.Slots + if f18iter.Slots != nil { + f18elem.Slots = f18iter.Slots } - if f21iter.Status != nil { - f21elem.Status = f21iter.Status + if f18iter.Status != nil { + f18elem.Status = f18iter.Status } - f21 = append(f21, f21elem) + f18 = append(f18, f18elem) } - ko.Status.NodeGroups = f21 + ko.Status.NodeGroups = f18 } else { ko.Status.NodeGroups = nil } if elem.PendingModifiedValues != nil { - f22 := &svcapitypes.ReplicationGroupPendingModifiedValues{} + f19 := &svcapitypes.ReplicationGroupPendingModifiedValues{} if elem.PendingModifiedValues.AuthTokenStatus != "" { - f22.AuthTokenStatus = aws.String(string(elem.PendingModifiedValues.AuthTokenStatus)) + f19.AuthTokenStatus = aws.String(string(elem.PendingModifiedValues.AuthTokenStatus)) } if elem.PendingModifiedValues.AutomaticFailoverStatus != "" { - f22.AutomaticFailoverStatus = aws.String(string(elem.PendingModifiedValues.AutomaticFailoverStatus)) - } - if elem.PendingModifiedValues.ClusterMode != "" { - f22.ClusterMode = aws.String(string(elem.PendingModifiedValues.ClusterMode)) + f19.AutomaticFailoverStatus = aws.String(string(elem.PendingModifiedValues.AutomaticFailoverStatus)) } if 
elem.PendingModifiedValues.LogDeliveryConfigurations != nil { - f22f3 := []*svcapitypes.PendingLogDeliveryConfiguration{} - for _, f22f3iter := range elem.PendingModifiedValues.LogDeliveryConfigurations { - f22f3elem := &svcapitypes.PendingLogDeliveryConfiguration{} - if f22f3iter.DestinationDetails != nil { - f22f3elemf0 := &svcapitypes.DestinationDetails{} - if f22f3iter.DestinationDetails.CloudWatchLogsDetails != nil { - f22f3elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} - if f22f3iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f22f3elemf0f0.LogGroup = f22f3iter.DestinationDetails.CloudWatchLogsDetails.LogGroup + f19f2 := []*svcapitypes.PendingLogDeliveryConfiguration{} + for _, f19f2iter := range elem.PendingModifiedValues.LogDeliveryConfigurations { + f19f2elem := &svcapitypes.PendingLogDeliveryConfiguration{} + if f19f2iter.DestinationDetails != nil { + f19f2elemf0 := &svcapitypes.DestinationDetails{} + if f19f2iter.DestinationDetails.CloudWatchLogsDetails != nil { + f19f2elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} + if f19f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f19f2elemf0f0.LogGroup = f19f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup } - f22f3elemf0.CloudWatchLogsDetails = f22f3elemf0f0 + f19f2elemf0.CloudWatchLogsDetails = f19f2elemf0f0 } - if f22f3iter.DestinationDetails.KinesisFirehoseDetails != nil { - f22f3elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} - if f22f3iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f22f3elemf0f1.DeliveryStream = f22f3iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream + if f19f2iter.DestinationDetails.KinesisFirehoseDetails != nil { + f19f2elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} + if f19f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f19f2elemf0f1.DeliveryStream = f19f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream } - 
f22f3elemf0.KinesisFirehoseDetails = f22f3elemf0f1 + f19f2elemf0.KinesisFirehoseDetails = f19f2elemf0f1 } - f22f3elem.DestinationDetails = f22f3elemf0 + f19f2elem.DestinationDetails = f19f2elemf0 } - if f22f3iter.DestinationType != "" { - f22f3elem.DestinationType = aws.String(string(f22f3iter.DestinationType)) + if f19f2iter.DestinationType != "" { + f19f2elem.DestinationType = aws.String(string(f19f2iter.DestinationType)) } - if f22f3iter.LogFormat != "" { - f22f3elem.LogFormat = aws.String(string(f22f3iter.LogFormat)) + if f19f2iter.LogFormat != "" { + f19f2elem.LogFormat = aws.String(string(f19f2iter.LogFormat)) } - if f22f3iter.LogType != "" { - f22f3elem.LogType = aws.String(string(f22f3iter.LogType)) + if f19f2iter.LogType != "" { + f19f2elem.LogType = aws.String(string(f19f2iter.LogType)) } - f22f3 = append(f22f3, f22f3elem) + f19f2 = append(f19f2, f19f2elem) } - f22.LogDeliveryConfigurations = f22f3 + f19.LogDeliveryConfigurations = f19f2 } if elem.PendingModifiedValues.PrimaryClusterId != nil { - f22.PrimaryClusterID = elem.PendingModifiedValues.PrimaryClusterId + f19.PrimaryClusterID = elem.PendingModifiedValues.PrimaryClusterId } if elem.PendingModifiedValues.Resharding != nil { - f22f5 := &svcapitypes.ReshardingStatus{} + f19f4 := &svcapitypes.ReshardingStatus{} if elem.PendingModifiedValues.Resharding.SlotMigration != nil { - f22f5f0 := &svcapitypes.SlotMigration{} + f19f4f0 := &svcapitypes.SlotMigration{} if elem.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage != nil { - f22f5f0.ProgressPercentage = elem.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage + f19f4f0.ProgressPercentage = elem.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage } - f22f5.SlotMigration = f22f5f0 + f19f4.SlotMigration = f19f4f0 } - f22.Resharding = f22f5 - } - if elem.PendingModifiedValues.TransitEncryptionEnabled != nil { - f22.TransitEncryptionEnabled = elem.PendingModifiedValues.TransitEncryptionEnabled - } - if 
elem.PendingModifiedValues.TransitEncryptionMode != "" { - f22.TransitEncryptionMode = aws.String(string(elem.PendingModifiedValues.TransitEncryptionMode)) + f19.Resharding = f19f4 } if elem.PendingModifiedValues.UserGroups != nil { - f22f8 := &svcapitypes.UserGroupsUpdateStatus{} + f19f5 := &svcapitypes.UserGroupsUpdateStatus{} if elem.PendingModifiedValues.UserGroups.UserGroupIdsToAdd != nil { - f22f8.UserGroupIDsToAdd = aws.StringSlice(elem.PendingModifiedValues.UserGroups.UserGroupIdsToAdd) + f19f5.UserGroupIDsToAdd = aws.StringSlice(elem.PendingModifiedValues.UserGroups.UserGroupIdsToAdd) } if elem.PendingModifiedValues.UserGroups.UserGroupIdsToRemove != nil { - f22f8.UserGroupIDsToRemove = aws.StringSlice(elem.PendingModifiedValues.UserGroups.UserGroupIdsToRemove) + f19f5.UserGroupIDsToRemove = aws.StringSlice(elem.PendingModifiedValues.UserGroups.UserGroupIdsToRemove) } - f22.UserGroups = f22f8 + f19.UserGroups = f19f5 } - ko.Status.PendingModifiedValues = f22 + ko.Status.PendingModifiedValues = f19 } else { ko.Status.PendingModifiedValues = nil } @@ -437,11 +413,6 @@ func (rm *resourceManager) sdkFind( } else { ko.Spec.TransitEncryptionEnabled = nil } - if elem.TransitEncryptionMode != "" { - ko.Spec.TransitEncryptionMode = aws.String(string(elem.TransitEncryptionMode)) - } else { - ko.Spec.TransitEncryptionMode = nil - } if elem.UserGroupIds != nil { ko.Spec.UserGroupIDs = aws.StringSlice(elem.UserGroupIds) } else { @@ -615,21 +586,16 @@ func (rm *resourceManager) sdkCreate( } else { ko.Status.ClusterEnabled = nil } - if resp.ReplicationGroup.ClusterMode != "" { - ko.Spec.ClusterMode = aws.String(string(resp.ReplicationGroup.ClusterMode)) - } else { - ko.Spec.ClusterMode = nil - } if resp.ReplicationGroup.ConfigurationEndpoint != nil { - f9 := &svcapitypes.Endpoint{} + f8 := &svcapitypes.Endpoint{} if resp.ReplicationGroup.ConfigurationEndpoint.Address != nil { - f9.Address = resp.ReplicationGroup.ConfigurationEndpoint.Address + f8.Address = 
resp.ReplicationGroup.ConfigurationEndpoint.Address } if resp.ReplicationGroup.ConfigurationEndpoint.Port != nil { portCopy := int64(*resp.ReplicationGroup.ConfigurationEndpoint.Port) - f9.Port = &portCopy + f8.Port = &portCopy } - ko.Status.ConfigurationEndpoint = f9 + ko.Status.ConfigurationEndpoint = f8 } else { ko.Status.ConfigurationEndpoint = nil } @@ -649,61 +615,56 @@ func (rm *resourceManager) sdkCreate( ko.Spec.Engine = nil } if resp.ReplicationGroup.GlobalReplicationGroupInfo != nil { - f13 := &svcapitypes.GlobalReplicationGroupInfo{} + f12 := &svcapitypes.GlobalReplicationGroupInfo{} if resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupId != nil { - f13.GlobalReplicationGroupID = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupId + f12.GlobalReplicationGroupID = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupId } if resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole != nil { - f13.GlobalReplicationGroupMemberRole = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole + f12.GlobalReplicationGroupMemberRole = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole } - ko.Status.GlobalReplicationGroupInfo = f13 + ko.Status.GlobalReplicationGroupInfo = f12 } else { ko.Status.GlobalReplicationGroupInfo = nil } - if resp.ReplicationGroup.IpDiscovery != "" { - ko.Spec.IPDiscovery = aws.String(string(resp.ReplicationGroup.IpDiscovery)) - } else { - ko.Spec.IPDiscovery = nil - } if resp.ReplicationGroup.KmsKeyId != nil { ko.Spec.KMSKeyID = resp.ReplicationGroup.KmsKeyId } else { ko.Spec.KMSKeyID = nil } if resp.ReplicationGroup.LogDeliveryConfigurations != nil { - f16 := []*svcapitypes.LogDeliveryConfigurationRequest{} - for _, f16iter := range resp.ReplicationGroup.LogDeliveryConfigurations { - f16elem := &svcapitypes.LogDeliveryConfigurationRequest{} - if f16iter.DestinationDetails != nil { - f16elemf0 := 
&svcapitypes.DestinationDetails{} - if f16iter.DestinationDetails.CloudWatchLogsDetails != nil { - f16elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} - if f16iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f16elemf0f0.LogGroup = f16iter.DestinationDetails.CloudWatchLogsDetails.LogGroup + f14 := []*svcapitypes.LogDeliveryConfigurationRequest{} + for _, f14iter := range resp.ReplicationGroup.LogDeliveryConfigurations { + f14elem := &svcapitypes.LogDeliveryConfigurationRequest{} + if f14iter.DestinationDetails != nil { + f14elemf0 := &svcapitypes.DestinationDetails{} + if f14iter.DestinationDetails.CloudWatchLogsDetails != nil { + f14elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} + if f14iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f14elemf0f0.LogGroup = f14iter.DestinationDetails.CloudWatchLogsDetails.LogGroup } - f16elemf0.CloudWatchLogsDetails = f16elemf0f0 + f14elemf0.CloudWatchLogsDetails = f14elemf0f0 } - if f16iter.DestinationDetails.KinesisFirehoseDetails != nil { - f16elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} - if f16iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f16elemf0f1.DeliveryStream = f16iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream + if f14iter.DestinationDetails.KinesisFirehoseDetails != nil { + f14elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} + if f14iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f14elemf0f1.DeliveryStream = f14iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream } - f16elemf0.KinesisFirehoseDetails = f16elemf0f1 + f14elemf0.KinesisFirehoseDetails = f14elemf0f1 } - f16elem.DestinationDetails = f16elemf0 + f14elem.DestinationDetails = f14elemf0 } - if f16iter.DestinationType != "" { - f16elem.DestinationType = aws.String(string(f16iter.DestinationType)) + if f14iter.DestinationType != "" { + f14elem.DestinationType = aws.String(string(f14iter.DestinationType)) } 
- if f16iter.LogFormat != "" { - f16elem.LogFormat = aws.String(string(f16iter.LogFormat)) + if f14iter.LogFormat != "" { + f14elem.LogFormat = aws.String(string(f14iter.LogFormat)) } - if f16iter.LogType != "" { - f16elem.LogType = aws.String(string(f16iter.LogType)) + if f14iter.LogType != "" { + f14elem.LogType = aws.String(string(f14iter.LogType)) } - f16 = append(f16, f16elem) + f14 = append(f14, f14elem) } - ko.Spec.LogDeliveryConfigurations = f16 + ko.Spec.LogDeliveryConfigurations = f14 } else { ko.Spec.LogDeliveryConfigurations = nil } @@ -722,163 +683,149 @@ func (rm *resourceManager) sdkCreate( } else { ko.Status.MultiAZ = nil } - if resp.ReplicationGroup.NetworkType != "" { - ko.Spec.NetworkType = aws.String(string(resp.ReplicationGroup.NetworkType)) - } else { - ko.Spec.NetworkType = nil - } if resp.ReplicationGroup.NodeGroups != nil { - f21 := []*svcapitypes.NodeGroup{} - for _, f21iter := range resp.ReplicationGroup.NodeGroups { - f21elem := &svcapitypes.NodeGroup{} - if f21iter.NodeGroupId != nil { - f21elem.NodeGroupID = f21iter.NodeGroupId - } - if f21iter.NodeGroupMembers != nil { - f21elemf1 := []*svcapitypes.NodeGroupMember{} - for _, f21elemf1iter := range f21iter.NodeGroupMembers { - f21elemf1elem := &svcapitypes.NodeGroupMember{} - if f21elemf1iter.CacheClusterId != nil { - f21elemf1elem.CacheClusterID = f21elemf1iter.CacheClusterId + f18 := []*svcapitypes.NodeGroup{} + for _, f18iter := range resp.ReplicationGroup.NodeGroups { + f18elem := &svcapitypes.NodeGroup{} + if f18iter.NodeGroupId != nil { + f18elem.NodeGroupID = f18iter.NodeGroupId + } + if f18iter.NodeGroupMembers != nil { + f18elemf1 := []*svcapitypes.NodeGroupMember{} + for _, f18elemf1iter := range f18iter.NodeGroupMembers { + f18elemf1elem := &svcapitypes.NodeGroupMember{} + if f18elemf1iter.CacheClusterId != nil { + f18elemf1elem.CacheClusterID = f18elemf1iter.CacheClusterId } - if f21elemf1iter.CacheNodeId != nil { - f21elemf1elem.CacheNodeID = f21elemf1iter.CacheNodeId + if 
f18elemf1iter.CacheNodeId != nil { + f18elemf1elem.CacheNodeID = f18elemf1iter.CacheNodeId } - if f21elemf1iter.CurrentRole != nil { - f21elemf1elem.CurrentRole = f21elemf1iter.CurrentRole + if f18elemf1iter.CurrentRole != nil { + f18elemf1elem.CurrentRole = f18elemf1iter.CurrentRole } - if f21elemf1iter.PreferredAvailabilityZone != nil { - f21elemf1elem.PreferredAvailabilityZone = f21elemf1iter.PreferredAvailabilityZone + if f18elemf1iter.PreferredAvailabilityZone != nil { + f18elemf1elem.PreferredAvailabilityZone = f18elemf1iter.PreferredAvailabilityZone } - if f21elemf1iter.PreferredOutpostArn != nil { - f21elemf1elem.PreferredOutpostARN = f21elemf1iter.PreferredOutpostArn + if f18elemf1iter.PreferredOutpostArn != nil { + f18elemf1elem.PreferredOutpostARN = f18elemf1iter.PreferredOutpostArn } - if f21elemf1iter.ReadEndpoint != nil { - f21elemf1elemf5 := &svcapitypes.Endpoint{} - if f21elemf1iter.ReadEndpoint.Address != nil { - f21elemf1elemf5.Address = f21elemf1iter.ReadEndpoint.Address + if f18elemf1iter.ReadEndpoint != nil { + f18elemf1elemf5 := &svcapitypes.Endpoint{} + if f18elemf1iter.ReadEndpoint.Address != nil { + f18elemf1elemf5.Address = f18elemf1iter.ReadEndpoint.Address } - if f21elemf1iter.ReadEndpoint.Port != nil { - portCopy := int64(*f21elemf1iter.ReadEndpoint.Port) - f21elemf1elemf5.Port = &portCopy + if f18elemf1iter.ReadEndpoint.Port != nil { + portCopy := int64(*f18elemf1iter.ReadEndpoint.Port) + f18elemf1elemf5.Port = &portCopy } - f21elemf1elem.ReadEndpoint = f21elemf1elemf5 + f18elemf1elem.ReadEndpoint = f18elemf1elemf5 } - f21elemf1 = append(f21elemf1, f21elemf1elem) + f18elemf1 = append(f18elemf1, f18elemf1elem) } - f21elem.NodeGroupMembers = f21elemf1 + f18elem.NodeGroupMembers = f18elemf1 } - if f21iter.PrimaryEndpoint != nil { - f21elemf2 := &svcapitypes.Endpoint{} - if f21iter.PrimaryEndpoint.Address != nil { - f21elemf2.Address = f21iter.PrimaryEndpoint.Address + if f18iter.PrimaryEndpoint != nil { + f18elemf2 := 
&svcapitypes.Endpoint{} + if f18iter.PrimaryEndpoint.Address != nil { + f18elemf2.Address = f18iter.PrimaryEndpoint.Address } - if f21iter.PrimaryEndpoint.Port != nil { - portCopy := int64(*f21iter.PrimaryEndpoint.Port) - f21elemf2.Port = &portCopy + if f18iter.PrimaryEndpoint.Port != nil { + portCopy := int64(*f18iter.PrimaryEndpoint.Port) + f18elemf2.Port = &portCopy } - f21elem.PrimaryEndpoint = f21elemf2 + f18elem.PrimaryEndpoint = f18elemf2 } - if f21iter.ReaderEndpoint != nil { - f21elemf3 := &svcapitypes.Endpoint{} - if f21iter.ReaderEndpoint.Address != nil { - f21elemf3.Address = f21iter.ReaderEndpoint.Address + if f18iter.ReaderEndpoint != nil { + f18elemf3 := &svcapitypes.Endpoint{} + if f18iter.ReaderEndpoint.Address != nil { + f18elemf3.Address = f18iter.ReaderEndpoint.Address } - if f21iter.ReaderEndpoint.Port != nil { - portCopy := int64(*f21iter.ReaderEndpoint.Port) - f21elemf3.Port = &portCopy + if f18iter.ReaderEndpoint.Port != nil { + portCopy := int64(*f18iter.ReaderEndpoint.Port) + f18elemf3.Port = &portCopy } - f21elem.ReaderEndpoint = f21elemf3 + f18elem.ReaderEndpoint = f18elemf3 } - if f21iter.Slots != nil { - f21elem.Slots = f21iter.Slots + if f18iter.Slots != nil { + f18elem.Slots = f18iter.Slots } - if f21iter.Status != nil { - f21elem.Status = f21iter.Status + if f18iter.Status != nil { + f18elem.Status = f18iter.Status } - f21 = append(f21, f21elem) + f18 = append(f18, f18elem) } - ko.Status.NodeGroups = f21 + ko.Status.NodeGroups = f18 } else { ko.Status.NodeGroups = nil } if resp.ReplicationGroup.PendingModifiedValues != nil { - f22 := &svcapitypes.ReplicationGroupPendingModifiedValues{} + f19 := &svcapitypes.ReplicationGroupPendingModifiedValues{} if resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus != "" { - f22.AuthTokenStatus = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus)) + f19.AuthTokenStatus = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus)) } if 
resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus != "" { - f22.AutomaticFailoverStatus = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus)) - } - if resp.ReplicationGroup.PendingModifiedValues.ClusterMode != "" { - f22.ClusterMode = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.ClusterMode)) + f19.AutomaticFailoverStatus = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus)) } if resp.ReplicationGroup.PendingModifiedValues.LogDeliveryConfigurations != nil { - f22f3 := []*svcapitypes.PendingLogDeliveryConfiguration{} - for _, f22f3iter := range resp.ReplicationGroup.PendingModifiedValues.LogDeliveryConfigurations { - f22f3elem := &svcapitypes.PendingLogDeliveryConfiguration{} - if f22f3iter.DestinationDetails != nil { - f22f3elemf0 := &svcapitypes.DestinationDetails{} - if f22f3iter.DestinationDetails.CloudWatchLogsDetails != nil { - f22f3elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} - if f22f3iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f22f3elemf0f0.LogGroup = f22f3iter.DestinationDetails.CloudWatchLogsDetails.LogGroup + f19f2 := []*svcapitypes.PendingLogDeliveryConfiguration{} + for _, f19f2iter := range resp.ReplicationGroup.PendingModifiedValues.LogDeliveryConfigurations { + f19f2elem := &svcapitypes.PendingLogDeliveryConfiguration{} + if f19f2iter.DestinationDetails != nil { + f19f2elemf0 := &svcapitypes.DestinationDetails{} + if f19f2iter.DestinationDetails.CloudWatchLogsDetails != nil { + f19f2elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} + if f19f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f19f2elemf0f0.LogGroup = f19f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup } - f22f3elemf0.CloudWatchLogsDetails = f22f3elemf0f0 + f19f2elemf0.CloudWatchLogsDetails = f19f2elemf0f0 } - if f22f3iter.DestinationDetails.KinesisFirehoseDetails != nil { - f22f3elemf0f1 := 
&svcapitypes.KinesisFirehoseDestinationDetails{} - if f22f3iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f22f3elemf0f1.DeliveryStream = f22f3iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream + if f19f2iter.DestinationDetails.KinesisFirehoseDetails != nil { + f19f2elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} + if f19f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f19f2elemf0f1.DeliveryStream = f19f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream } - f22f3elemf0.KinesisFirehoseDetails = f22f3elemf0f1 + f19f2elemf0.KinesisFirehoseDetails = f19f2elemf0f1 } - f22f3elem.DestinationDetails = f22f3elemf0 + f19f2elem.DestinationDetails = f19f2elemf0 } - if f22f3iter.DestinationType != "" { - f22f3elem.DestinationType = aws.String(string(f22f3iter.DestinationType)) + if f19f2iter.DestinationType != "" { + f19f2elem.DestinationType = aws.String(string(f19f2iter.DestinationType)) } - if f22f3iter.LogFormat != "" { - f22f3elem.LogFormat = aws.String(string(f22f3iter.LogFormat)) + if f19f2iter.LogFormat != "" { + f19f2elem.LogFormat = aws.String(string(f19f2iter.LogFormat)) } - if f22f3iter.LogType != "" { - f22f3elem.LogType = aws.String(string(f22f3iter.LogType)) + if f19f2iter.LogType != "" { + f19f2elem.LogType = aws.String(string(f19f2iter.LogType)) } - f22f3 = append(f22f3, f22f3elem) + f19f2 = append(f19f2, f19f2elem) } - f22.LogDeliveryConfigurations = f22f3 + f19.LogDeliveryConfigurations = f19f2 } if resp.ReplicationGroup.PendingModifiedValues.PrimaryClusterId != nil { - f22.PrimaryClusterID = resp.ReplicationGroup.PendingModifiedValues.PrimaryClusterId + f19.PrimaryClusterID = resp.ReplicationGroup.PendingModifiedValues.PrimaryClusterId } if resp.ReplicationGroup.PendingModifiedValues.Resharding != nil { - f22f5 := &svcapitypes.ReshardingStatus{} + f19f4 := &svcapitypes.ReshardingStatus{} if resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration != nil { - 
f22f5f0 := &svcapitypes.SlotMigration{} + f19f4f0 := &svcapitypes.SlotMigration{} if resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage != nil { - f22f5f0.ProgressPercentage = resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage + f19f4f0.ProgressPercentage = resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage } - f22f5.SlotMigration = f22f5f0 + f19f4.SlotMigration = f19f4f0 } - f22.Resharding = f22f5 - } - if resp.ReplicationGroup.PendingModifiedValues.TransitEncryptionEnabled != nil { - f22.TransitEncryptionEnabled = resp.ReplicationGroup.PendingModifiedValues.TransitEncryptionEnabled - } - if resp.ReplicationGroup.PendingModifiedValues.TransitEncryptionMode != "" { - f22.TransitEncryptionMode = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.TransitEncryptionMode)) + f19.Resharding = f19f4 } if resp.ReplicationGroup.PendingModifiedValues.UserGroups != nil { - f22f8 := &svcapitypes.UserGroupsUpdateStatus{} + f19f5 := &svcapitypes.UserGroupsUpdateStatus{} if resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToAdd != nil { - f22f8.UserGroupIDsToAdd = aws.StringSlice(resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToAdd) + f19f5.UserGroupIDsToAdd = aws.StringSlice(resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToAdd) } if resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToRemove != nil { - f22f8.UserGroupIDsToRemove = aws.StringSlice(resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToRemove) + f19f5.UserGroupIDsToRemove = aws.StringSlice(resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToRemove) } - f22.UserGroups = f22f8 + f19.UserGroups = f19f5 } - ko.Status.PendingModifiedValues = f22 + ko.Status.PendingModifiedValues = f19 } else { ko.Status.PendingModifiedValues = nil } @@ -918,11 +865,6 @@ func (rm *resourceManager) sdkCreate( } 
else { ko.Spec.TransitEncryptionEnabled = nil } - if resp.ReplicationGroup.TransitEncryptionMode != "" { - ko.Spec.TransitEncryptionMode = aws.String(string(resp.ReplicationGroup.TransitEncryptionMode)) - } else { - ko.Spec.TransitEncryptionMode = nil - } if resp.ReplicationGroup.UserGroupIds != nil { ko.Spec.UserGroupIDs = aws.StringSlice(resp.ReplicationGroup.UserGroupIds) } else { @@ -973,9 +915,6 @@ func (rm *resourceManager) newCreateRequestPayload( if r.ko.Spec.CacheSubnetGroupName != nil { res.CacheSubnetGroupName = r.ko.Spec.CacheSubnetGroupName } - if r.ko.Spec.ClusterMode != nil { - res.ClusterMode = svcsdktypes.ClusterMode(*r.ko.Spec.ClusterMode) - } if r.ko.Spec.DataTieringEnabled != nil { res.DataTieringEnabled = r.ko.Spec.DataTieringEnabled } @@ -985,89 +924,83 @@ func (rm *resourceManager) newCreateRequestPayload( if r.ko.Spec.EngineVersion != nil { res.EngineVersion = r.ko.Spec.EngineVersion } - if r.ko.Spec.IPDiscovery != nil { - res.IpDiscovery = svcsdktypes.IpDiscovery(*r.ko.Spec.IPDiscovery) - } if r.ko.Spec.KMSKeyID != nil { res.KmsKeyId = r.ko.Spec.KMSKeyID } if r.ko.Spec.LogDeliveryConfigurations != nil { - f13 := []svcsdktypes.LogDeliveryConfigurationRequest{} - for _, f13iter := range r.ko.Spec.LogDeliveryConfigurations { - f13elem := &svcsdktypes.LogDeliveryConfigurationRequest{} - if f13iter.DestinationDetails != nil { - f13elemf0 := &svcsdktypes.DestinationDetails{} - if f13iter.DestinationDetails.CloudWatchLogsDetails != nil { - f13elemf0f0 := &svcsdktypes.CloudWatchLogsDestinationDetails{} - if f13iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f13elemf0f0.LogGroup = f13iter.DestinationDetails.CloudWatchLogsDetails.LogGroup + f11 := []svcsdktypes.LogDeliveryConfigurationRequest{} + for _, f11iter := range r.ko.Spec.LogDeliveryConfigurations { + f11elem := &svcsdktypes.LogDeliveryConfigurationRequest{} + if f11iter.DestinationDetails != nil { + f11elemf0 := &svcsdktypes.DestinationDetails{} + if 
f11iter.DestinationDetails.CloudWatchLogsDetails != nil { + f11elemf0f0 := &svcsdktypes.CloudWatchLogsDestinationDetails{} + if f11iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f11elemf0f0.LogGroup = f11iter.DestinationDetails.CloudWatchLogsDetails.LogGroup } - f13elemf0.CloudWatchLogsDetails = f13elemf0f0 + f11elemf0.CloudWatchLogsDetails = f11elemf0f0 } - if f13iter.DestinationDetails.KinesisFirehoseDetails != nil { - f13elemf0f1 := &svcsdktypes.KinesisFirehoseDestinationDetails{} - if f13iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f13elemf0f1.DeliveryStream = f13iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream + if f11iter.DestinationDetails.KinesisFirehoseDetails != nil { + f11elemf0f1 := &svcsdktypes.KinesisFirehoseDestinationDetails{} + if f11iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f11elemf0f1.DeliveryStream = f11iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream } - f13elemf0.KinesisFirehoseDetails = f13elemf0f1 + f11elemf0.KinesisFirehoseDetails = f11elemf0f1 } - f13elem.DestinationDetails = f13elemf0 + f11elem.DestinationDetails = f11elemf0 } - if f13iter.DestinationType != nil { - f13elem.DestinationType = svcsdktypes.DestinationType(*f13iter.DestinationType) + if f11iter.DestinationType != nil { + f11elem.DestinationType = svcsdktypes.DestinationType(*f11iter.DestinationType) } - if f13iter.Enabled != nil { - f13elem.Enabled = f13iter.Enabled + if f11iter.Enabled != nil { + f11elem.Enabled = f11iter.Enabled } - if f13iter.LogFormat != nil { - f13elem.LogFormat = svcsdktypes.LogFormat(*f13iter.LogFormat) + if f11iter.LogFormat != nil { + f11elem.LogFormat = svcsdktypes.LogFormat(*f11iter.LogFormat) } - if f13iter.LogType != nil { - f13elem.LogType = svcsdktypes.LogType(*f13iter.LogType) + if f11iter.LogType != nil { + f11elem.LogType = svcsdktypes.LogType(*f11iter.LogType) } - f13 = append(f13, *f13elem) + f11 = append(f11, *f11elem) } - 
res.LogDeliveryConfigurations = f13 + res.LogDeliveryConfigurations = f11 } if r.ko.Spec.MultiAZEnabled != nil { res.MultiAZEnabled = r.ko.Spec.MultiAZEnabled } - if r.ko.Spec.NetworkType != nil { - res.NetworkType = svcsdktypes.NetworkType(*r.ko.Spec.NetworkType) - } if r.ko.Spec.NodeGroupConfiguration != nil { - f16 := []svcsdktypes.NodeGroupConfiguration{} - for _, f16iter := range r.ko.Spec.NodeGroupConfiguration { - f16elem := &svcsdktypes.NodeGroupConfiguration{} - if f16iter.NodeGroupID != nil { - f16elem.NodeGroupId = f16iter.NodeGroupID + f13 := []svcsdktypes.NodeGroupConfiguration{} + for _, f13iter := range r.ko.Spec.NodeGroupConfiguration { + f13elem := &svcsdktypes.NodeGroupConfiguration{} + if f13iter.NodeGroupID != nil { + f13elem.NodeGroupId = f13iter.NodeGroupID } - if f16iter.PrimaryAvailabilityZone != nil { - f16elem.PrimaryAvailabilityZone = f16iter.PrimaryAvailabilityZone + if f13iter.PrimaryAvailabilityZone != nil { + f13elem.PrimaryAvailabilityZone = f13iter.PrimaryAvailabilityZone } - if f16iter.PrimaryOutpostARN != nil { - f16elem.PrimaryOutpostArn = f16iter.PrimaryOutpostARN + if f13iter.PrimaryOutpostARN != nil { + f13elem.PrimaryOutpostArn = f13iter.PrimaryOutpostARN } - if f16iter.ReplicaAvailabilityZones != nil { - f16elem.ReplicaAvailabilityZones = aws.ToStringSlice(f16iter.ReplicaAvailabilityZones) + if f13iter.ReplicaAvailabilityZones != nil { + f13elem.ReplicaAvailabilityZones = aws.ToStringSlice(f13iter.ReplicaAvailabilityZones) } - if f16iter.ReplicaCount != nil { - replicaCountCopy0 := *f16iter.ReplicaCount + if f13iter.ReplicaCount != nil { + replicaCountCopy0 := *f13iter.ReplicaCount if replicaCountCopy0 > math.MaxInt32 || replicaCountCopy0 < math.MinInt32 { return nil, fmt.Errorf("error: field ReplicaCount is of type int32") } replicaCountCopy := int32(replicaCountCopy0) - f16elem.ReplicaCount = &replicaCountCopy + f13elem.ReplicaCount = &replicaCountCopy } - if f16iter.ReplicaOutpostARNs != nil { - f16elem.ReplicaOutpostArns 
= aws.ToStringSlice(f16iter.ReplicaOutpostARNs) + if f13iter.ReplicaOutpostARNs != nil { + f13elem.ReplicaOutpostArns = aws.ToStringSlice(f13iter.ReplicaOutpostARNs) } - if f16iter.Slots != nil { - f16elem.Slots = f16iter.Slots + if f13iter.Slots != nil { + f13elem.Slots = f13iter.Slots } - f16 = append(f16, *f16elem) + f13 = append(f13, *f13elem) } - res.NodeGroupConfiguration = f16 + res.NodeGroupConfiguration = f13 } if r.ko.Spec.NotificationTopicARN != nil { res.NotificationTopicArn = r.ko.Spec.NotificationTopicARN @@ -1114,9 +1047,6 @@ func (rm *resourceManager) newCreateRequestPayload( if r.ko.Spec.SecurityGroupIDs != nil { res.SecurityGroupIds = aws.ToStringSlice(r.ko.Spec.SecurityGroupIDs) } - if r.ko.Spec.ServerlessCacheSnapshotName != nil { - res.ServerlessCacheSnapshotName = r.ko.Spec.ServerlessCacheSnapshotName - } if r.ko.Spec.SnapshotARNs != nil { res.SnapshotArns = aws.ToStringSlice(r.ko.Spec.SnapshotARNs) } @@ -1135,25 +1065,22 @@ func (rm *resourceManager) newCreateRequestPayload( res.SnapshotWindow = r.ko.Spec.SnapshotWindow } if r.ko.Spec.Tags != nil { - f32 := []svcsdktypes.Tag{} - for _, f32iter := range r.ko.Spec.Tags { - f32elem := &svcsdktypes.Tag{} - if f32iter.Key != nil { - f32elem.Key = f32iter.Key + f28 := []svcsdktypes.Tag{} + for _, f28iter := range r.ko.Spec.Tags { + f28elem := &svcsdktypes.Tag{} + if f28iter.Key != nil { + f28elem.Key = f28iter.Key } - if f32iter.Value != nil { - f32elem.Value = f32iter.Value + if f28iter.Value != nil { + f28elem.Value = f28iter.Value } - f32 = append(f32, *f32elem) + f28 = append(f28, *f28elem) } - res.Tags = f32 + res.Tags = f28 } if r.ko.Spec.TransitEncryptionEnabled != nil { res.TransitEncryptionEnabled = r.ko.Spec.TransitEncryptionEnabled } - if r.ko.Spec.TransitEncryptionMode != nil { - res.TransitEncryptionMode = svcsdktypes.TransitEncryptionMode(*r.ko.Spec.TransitEncryptionMode) - } if r.ko.Spec.UserGroupIDs != nil { res.UserGroupIds = aws.ToStringSlice(r.ko.Spec.UserGroupIDs) } @@ -1189,7 
+1116,7 @@ func (rm *resourceManager) sdkUpdate( return nil, err } if !delta.DifferentAt("Spec.LogDeliveryConfigurations") { - input.SetLogDeliveryConfigurations(nil) + input.LogDeliveryConfigurations = nil } if delta.DifferentAt("UserGroupIDs") { for _, diff := range delta.Differences { @@ -1199,7 +1126,7 @@ func (rm *resourceManager) sdkUpdate( // User groups to add { - var userGroupsToAdd []*string + var userGroupsToAdd []string for _, requiredUserGroup := range requiredUserGroups { found := false @@ -1211,16 +1138,18 @@ func (rm *resourceManager) sdkUpdate( } if !found { - userGroupsToAdd = append(userGroupsToAdd, requiredUserGroup) + if requiredUserGroup != nil { + userGroupsToAdd = append(userGroupsToAdd, *requiredUserGroup) + } } } - input.SetUserGroupIdsToAdd(userGroupsToAdd) + input.UserGroupIdsToAdd = userGroupsToAdd } // User groups to remove { - var userGroupsToRemove []*string + var userGroupsToRemove []string for _, existingUserGroup := range existingUserGroups { found := false @@ -1232,11 +1161,13 @@ func (rm *resourceManager) sdkUpdate( } if !found { - userGroupsToRemove = append(userGroupsToRemove, existingUserGroup) + if existingUserGroup != nil { + userGroupsToRemove = append(userGroupsToRemove, *existingUserGroup) + } } } - input.SetUserGroupIdsToRemove(userGroupsToRemove) + input.UserGroupIdsToRemove = userGroupsToRemove } } } @@ -1295,21 +1226,16 @@ func (rm *resourceManager) sdkUpdate( } else { ko.Status.ClusterEnabled = nil } - if resp.ReplicationGroup.ClusterMode != "" { - ko.Spec.ClusterMode = aws.String(string(resp.ReplicationGroup.ClusterMode)) - } else { - ko.Spec.ClusterMode = nil - } if resp.ReplicationGroup.ConfigurationEndpoint != nil { - f9 := &svcapitypes.Endpoint{} + f8 := &svcapitypes.Endpoint{} if resp.ReplicationGroup.ConfigurationEndpoint.Address != nil { - f9.Address = resp.ReplicationGroup.ConfigurationEndpoint.Address + f8.Address = resp.ReplicationGroup.ConfigurationEndpoint.Address } if 
resp.ReplicationGroup.ConfigurationEndpoint.Port != nil { portCopy := int64(*resp.ReplicationGroup.ConfigurationEndpoint.Port) - f9.Port = &portCopy + f8.Port = &portCopy } - ko.Status.ConfigurationEndpoint = f9 + ko.Status.ConfigurationEndpoint = f8 } else { ko.Status.ConfigurationEndpoint = nil } @@ -1329,61 +1255,56 @@ func (rm *resourceManager) sdkUpdate( ko.Spec.Engine = nil } if resp.ReplicationGroup.GlobalReplicationGroupInfo != nil { - f13 := &svcapitypes.GlobalReplicationGroupInfo{} + f12 := &svcapitypes.GlobalReplicationGroupInfo{} if resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupId != nil { - f13.GlobalReplicationGroupID = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupId + f12.GlobalReplicationGroupID = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupId } if resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole != nil { - f13.GlobalReplicationGroupMemberRole = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole + f12.GlobalReplicationGroupMemberRole = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole } - ko.Status.GlobalReplicationGroupInfo = f13 + ko.Status.GlobalReplicationGroupInfo = f12 } else { ko.Status.GlobalReplicationGroupInfo = nil } - if resp.ReplicationGroup.IpDiscovery != "" { - ko.Spec.IPDiscovery = aws.String(string(resp.ReplicationGroup.IpDiscovery)) - } else { - ko.Spec.IPDiscovery = nil - } if resp.ReplicationGroup.KmsKeyId != nil { ko.Spec.KMSKeyID = resp.ReplicationGroup.KmsKeyId } else { ko.Spec.KMSKeyID = nil } if resp.ReplicationGroup.LogDeliveryConfigurations != nil { - f16 := []*svcapitypes.LogDeliveryConfigurationRequest{} - for _, f16iter := range resp.ReplicationGroup.LogDeliveryConfigurations { - f16elem := &svcapitypes.LogDeliveryConfigurationRequest{} - if f16iter.DestinationDetails != nil { - f16elemf0 := &svcapitypes.DestinationDetails{} - if 
f16iter.DestinationDetails.CloudWatchLogsDetails != nil { - f16elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} - if f16iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f16elemf0f0.LogGroup = f16iter.DestinationDetails.CloudWatchLogsDetails.LogGroup + f14 := []*svcapitypes.LogDeliveryConfigurationRequest{} + for _, f14iter := range resp.ReplicationGroup.LogDeliveryConfigurations { + f14elem := &svcapitypes.LogDeliveryConfigurationRequest{} + if f14iter.DestinationDetails != nil { + f14elemf0 := &svcapitypes.DestinationDetails{} + if f14iter.DestinationDetails.CloudWatchLogsDetails != nil { + f14elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} + if f14iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f14elemf0f0.LogGroup = f14iter.DestinationDetails.CloudWatchLogsDetails.LogGroup } - f16elemf0.CloudWatchLogsDetails = f16elemf0f0 + f14elemf0.CloudWatchLogsDetails = f14elemf0f0 } - if f16iter.DestinationDetails.KinesisFirehoseDetails != nil { - f16elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} - if f16iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f16elemf0f1.DeliveryStream = f16iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream + if f14iter.DestinationDetails.KinesisFirehoseDetails != nil { + f14elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} + if f14iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f14elemf0f1.DeliveryStream = f14iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream } - f16elemf0.KinesisFirehoseDetails = f16elemf0f1 + f14elemf0.KinesisFirehoseDetails = f14elemf0f1 } - f16elem.DestinationDetails = f16elemf0 + f14elem.DestinationDetails = f14elemf0 } - if f16iter.DestinationType != "" { - f16elem.DestinationType = aws.String(string(f16iter.DestinationType)) + if f14iter.DestinationType != "" { + f14elem.DestinationType = aws.String(string(f14iter.DestinationType)) } - if f16iter.LogFormat != "" { - 
f16elem.LogFormat = aws.String(string(f16iter.LogFormat)) + if f14iter.LogFormat != "" { + f14elem.LogFormat = aws.String(string(f14iter.LogFormat)) } - if f16iter.LogType != "" { - f16elem.LogType = aws.String(string(f16iter.LogType)) + if f14iter.LogType != "" { + f14elem.LogType = aws.String(string(f14iter.LogType)) } - f16 = append(f16, f16elem) + f14 = append(f14, f14elem) } - ko.Spec.LogDeliveryConfigurations = f16 + ko.Spec.LogDeliveryConfigurations = f14 } else { ko.Spec.LogDeliveryConfigurations = nil } @@ -1402,163 +1323,149 @@ func (rm *resourceManager) sdkUpdate( } else { ko.Status.MultiAZ = nil } - if resp.ReplicationGroup.NetworkType != "" { - ko.Spec.NetworkType = aws.String(string(resp.ReplicationGroup.NetworkType)) - } else { - ko.Spec.NetworkType = nil - } if resp.ReplicationGroup.NodeGroups != nil { - f21 := []*svcapitypes.NodeGroup{} - for _, f21iter := range resp.ReplicationGroup.NodeGroups { - f21elem := &svcapitypes.NodeGroup{} - if f21iter.NodeGroupId != nil { - f21elem.NodeGroupID = f21iter.NodeGroupId - } - if f21iter.NodeGroupMembers != nil { - f21elemf1 := []*svcapitypes.NodeGroupMember{} - for _, f21elemf1iter := range f21iter.NodeGroupMembers { - f21elemf1elem := &svcapitypes.NodeGroupMember{} - if f21elemf1iter.CacheClusterId != nil { - f21elemf1elem.CacheClusterID = f21elemf1iter.CacheClusterId + f18 := []*svcapitypes.NodeGroup{} + for _, f18iter := range resp.ReplicationGroup.NodeGroups { + f18elem := &svcapitypes.NodeGroup{} + if f18iter.NodeGroupId != nil { + f18elem.NodeGroupID = f18iter.NodeGroupId + } + if f18iter.NodeGroupMembers != nil { + f18elemf1 := []*svcapitypes.NodeGroupMember{} + for _, f18elemf1iter := range f18iter.NodeGroupMembers { + f18elemf1elem := &svcapitypes.NodeGroupMember{} + if f18elemf1iter.CacheClusterId != nil { + f18elemf1elem.CacheClusterID = f18elemf1iter.CacheClusterId } - if f21elemf1iter.CacheNodeId != nil { - f21elemf1elem.CacheNodeID = f21elemf1iter.CacheNodeId + if f18elemf1iter.CacheNodeId != 
nil { + f18elemf1elem.CacheNodeID = f18elemf1iter.CacheNodeId } - if f21elemf1iter.CurrentRole != nil { - f21elemf1elem.CurrentRole = f21elemf1iter.CurrentRole + if f18elemf1iter.CurrentRole != nil { + f18elemf1elem.CurrentRole = f18elemf1iter.CurrentRole } - if f21elemf1iter.PreferredAvailabilityZone != nil { - f21elemf1elem.PreferredAvailabilityZone = f21elemf1iter.PreferredAvailabilityZone + if f18elemf1iter.PreferredAvailabilityZone != nil { + f18elemf1elem.PreferredAvailabilityZone = f18elemf1iter.PreferredAvailabilityZone } - if f21elemf1iter.PreferredOutpostArn != nil { - f21elemf1elem.PreferredOutpostARN = f21elemf1iter.PreferredOutpostArn + if f18elemf1iter.PreferredOutpostArn != nil { + f18elemf1elem.PreferredOutpostARN = f18elemf1iter.PreferredOutpostArn } - if f21elemf1iter.ReadEndpoint != nil { - f21elemf1elemf5 := &svcapitypes.Endpoint{} - if f21elemf1iter.ReadEndpoint.Address != nil { - f21elemf1elemf5.Address = f21elemf1iter.ReadEndpoint.Address + if f18elemf1iter.ReadEndpoint != nil { + f18elemf1elemf5 := &svcapitypes.Endpoint{} + if f18elemf1iter.ReadEndpoint.Address != nil { + f18elemf1elemf5.Address = f18elemf1iter.ReadEndpoint.Address } - if f21elemf1iter.ReadEndpoint.Port != nil { - portCopy := int64(*f21elemf1iter.ReadEndpoint.Port) - f21elemf1elemf5.Port = &portCopy + if f18elemf1iter.ReadEndpoint.Port != nil { + portCopy := int64(*f18elemf1iter.ReadEndpoint.Port) + f18elemf1elemf5.Port = &portCopy } - f21elemf1elem.ReadEndpoint = f21elemf1elemf5 + f18elemf1elem.ReadEndpoint = f18elemf1elemf5 } - f21elemf1 = append(f21elemf1, f21elemf1elem) + f18elemf1 = append(f18elemf1, f18elemf1elem) } - f21elem.NodeGroupMembers = f21elemf1 + f18elem.NodeGroupMembers = f18elemf1 } - if f21iter.PrimaryEndpoint != nil { - f21elemf2 := &svcapitypes.Endpoint{} - if f21iter.PrimaryEndpoint.Address != nil { - f21elemf2.Address = f21iter.PrimaryEndpoint.Address + if f18iter.PrimaryEndpoint != nil { + f18elemf2 := &svcapitypes.Endpoint{} + if 
f18iter.PrimaryEndpoint.Address != nil { + f18elemf2.Address = f18iter.PrimaryEndpoint.Address } - if f21iter.PrimaryEndpoint.Port != nil { - portCopy := int64(*f21iter.PrimaryEndpoint.Port) - f21elemf2.Port = &portCopy + if f18iter.PrimaryEndpoint.Port != nil { + portCopy := int64(*f18iter.PrimaryEndpoint.Port) + f18elemf2.Port = &portCopy } - f21elem.PrimaryEndpoint = f21elemf2 + f18elem.PrimaryEndpoint = f18elemf2 } - if f21iter.ReaderEndpoint != nil { - f21elemf3 := &svcapitypes.Endpoint{} - if f21iter.ReaderEndpoint.Address != nil { - f21elemf3.Address = f21iter.ReaderEndpoint.Address + if f18iter.ReaderEndpoint != nil { + f18elemf3 := &svcapitypes.Endpoint{} + if f18iter.ReaderEndpoint.Address != nil { + f18elemf3.Address = f18iter.ReaderEndpoint.Address } - if f21iter.ReaderEndpoint.Port != nil { - portCopy := int64(*f21iter.ReaderEndpoint.Port) - f21elemf3.Port = &portCopy + if f18iter.ReaderEndpoint.Port != nil { + portCopy := int64(*f18iter.ReaderEndpoint.Port) + f18elemf3.Port = &portCopy } - f21elem.ReaderEndpoint = f21elemf3 + f18elem.ReaderEndpoint = f18elemf3 } - if f21iter.Slots != nil { - f21elem.Slots = f21iter.Slots + if f18iter.Slots != nil { + f18elem.Slots = f18iter.Slots } - if f21iter.Status != nil { - f21elem.Status = f21iter.Status + if f18iter.Status != nil { + f18elem.Status = f18iter.Status } - f21 = append(f21, f21elem) + f18 = append(f18, f18elem) } - ko.Status.NodeGroups = f21 + ko.Status.NodeGroups = f18 } else { ko.Status.NodeGroups = nil } if resp.ReplicationGroup.PendingModifiedValues != nil { - f22 := &svcapitypes.ReplicationGroupPendingModifiedValues{} + f19 := &svcapitypes.ReplicationGroupPendingModifiedValues{} if resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus != "" { - f22.AuthTokenStatus = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus)) + f19.AuthTokenStatus = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus)) } if 
resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus != "" { - f22.AutomaticFailoverStatus = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus)) - } - if resp.ReplicationGroup.PendingModifiedValues.ClusterMode != "" { - f22.ClusterMode = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.ClusterMode)) + f19.AutomaticFailoverStatus = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus)) } if resp.ReplicationGroup.PendingModifiedValues.LogDeliveryConfigurations != nil { - f22f3 := []*svcapitypes.PendingLogDeliveryConfiguration{} - for _, f22f3iter := range resp.ReplicationGroup.PendingModifiedValues.LogDeliveryConfigurations { - f22f3elem := &svcapitypes.PendingLogDeliveryConfiguration{} - if f22f3iter.DestinationDetails != nil { - f22f3elemf0 := &svcapitypes.DestinationDetails{} - if f22f3iter.DestinationDetails.CloudWatchLogsDetails != nil { - f22f3elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} - if f22f3iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f22f3elemf0f0.LogGroup = f22f3iter.DestinationDetails.CloudWatchLogsDetails.LogGroup + f19f2 := []*svcapitypes.PendingLogDeliveryConfiguration{} + for _, f19f2iter := range resp.ReplicationGroup.PendingModifiedValues.LogDeliveryConfigurations { + f19f2elem := &svcapitypes.PendingLogDeliveryConfiguration{} + if f19f2iter.DestinationDetails != nil { + f19f2elemf0 := &svcapitypes.DestinationDetails{} + if f19f2iter.DestinationDetails.CloudWatchLogsDetails != nil { + f19f2elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} + if f19f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f19f2elemf0f0.LogGroup = f19f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup } - f22f3elemf0.CloudWatchLogsDetails = f22f3elemf0f0 + f19f2elemf0.CloudWatchLogsDetails = f19f2elemf0f0 } - if f22f3iter.DestinationDetails.KinesisFirehoseDetails != nil { - f22f3elemf0f1 := 
&svcapitypes.KinesisFirehoseDestinationDetails{} - if f22f3iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f22f3elemf0f1.DeliveryStream = f22f3iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream + if f19f2iter.DestinationDetails.KinesisFirehoseDetails != nil { + f19f2elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} + if f19f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f19f2elemf0f1.DeliveryStream = f19f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream } - f22f3elemf0.KinesisFirehoseDetails = f22f3elemf0f1 + f19f2elemf0.KinesisFirehoseDetails = f19f2elemf0f1 } - f22f3elem.DestinationDetails = f22f3elemf0 + f19f2elem.DestinationDetails = f19f2elemf0 } - if f22f3iter.DestinationType != "" { - f22f3elem.DestinationType = aws.String(string(f22f3iter.DestinationType)) + if f19f2iter.DestinationType != "" { + f19f2elem.DestinationType = aws.String(string(f19f2iter.DestinationType)) } - if f22f3iter.LogFormat != "" { - f22f3elem.LogFormat = aws.String(string(f22f3iter.LogFormat)) + if f19f2iter.LogFormat != "" { + f19f2elem.LogFormat = aws.String(string(f19f2iter.LogFormat)) } - if f22f3iter.LogType != "" { - f22f3elem.LogType = aws.String(string(f22f3iter.LogType)) + if f19f2iter.LogType != "" { + f19f2elem.LogType = aws.String(string(f19f2iter.LogType)) } - f22f3 = append(f22f3, f22f3elem) + f19f2 = append(f19f2, f19f2elem) } - f22.LogDeliveryConfigurations = f22f3 + f19.LogDeliveryConfigurations = f19f2 } if resp.ReplicationGroup.PendingModifiedValues.PrimaryClusterId != nil { - f22.PrimaryClusterID = resp.ReplicationGroup.PendingModifiedValues.PrimaryClusterId + f19.PrimaryClusterID = resp.ReplicationGroup.PendingModifiedValues.PrimaryClusterId } if resp.ReplicationGroup.PendingModifiedValues.Resharding != nil { - f22f5 := &svcapitypes.ReshardingStatus{} + f19f4 := &svcapitypes.ReshardingStatus{} if resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration != nil { - 
f22f5f0 := &svcapitypes.SlotMigration{} + f19f4f0 := &svcapitypes.SlotMigration{} if resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage != nil { - f22f5f0.ProgressPercentage = resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage + f19f4f0.ProgressPercentage = resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage } - f22f5.SlotMigration = f22f5f0 + f19f4.SlotMigration = f19f4f0 } - f22.Resharding = f22f5 - } - if resp.ReplicationGroup.PendingModifiedValues.TransitEncryptionEnabled != nil { - f22.TransitEncryptionEnabled = resp.ReplicationGroup.PendingModifiedValues.TransitEncryptionEnabled - } - if resp.ReplicationGroup.PendingModifiedValues.TransitEncryptionMode != "" { - f22.TransitEncryptionMode = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.TransitEncryptionMode)) + f19.Resharding = f19f4 } if resp.ReplicationGroup.PendingModifiedValues.UserGroups != nil { - f22f8 := &svcapitypes.UserGroupsUpdateStatus{} + f19f5 := &svcapitypes.UserGroupsUpdateStatus{} if resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToAdd != nil { - f22f8.UserGroupIDsToAdd = aws.StringSlice(resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToAdd) + f19f5.UserGroupIDsToAdd = aws.StringSlice(resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToAdd) } if resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToRemove != nil { - f22f8.UserGroupIDsToRemove = aws.StringSlice(resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToRemove) + f19f5.UserGroupIDsToRemove = aws.StringSlice(resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToRemove) } - f22.UserGroups = f22f8 + f19.UserGroups = f19f5 } - ko.Status.PendingModifiedValues = f22 + ko.Status.PendingModifiedValues = f19 } else { ko.Status.PendingModifiedValues = nil } @@ -1598,11 +1505,6 @@ func (rm *resourceManager) sdkUpdate( } 
else { ko.Spec.TransitEncryptionEnabled = nil } - if resp.ReplicationGroup.TransitEncryptionMode != "" { - ko.Spec.TransitEncryptionMode = aws.String(string(resp.ReplicationGroup.TransitEncryptionMode)) - } else { - ko.Spec.TransitEncryptionMode = nil - } if resp.ReplicationGroup.UserGroupIds != nil { ko.Spec.UserGroupIDs = aws.StringSlice(resp.ReplicationGroup.UserGroupIds) } else { @@ -1627,7 +1529,7 @@ func (rm *resourceManager) newUpdateRequestPayload( ) (*svcsdk.ModifyReplicationGroupInput, error) { res := &svcsdk.ModifyReplicationGroupInput{} - res.ApplyImmediately = true + res.ApplyImmediately = aws.Bool(true) if r.ko.Spec.AuthToken != nil { tmpSecret, err := rm.rr.SecretValueFromReference(ctx, r.ko.Spec.AuthToken) if err != nil { @@ -1652,15 +1554,9 @@ func (rm *resourceManager) newUpdateRequestPayload( if r.ko.Spec.CacheSecurityGroupNames != nil { res.CacheSecurityGroupNames = aws.ToStringSlice(r.ko.Spec.CacheSecurityGroupNames) } - if r.ko.Spec.ClusterMode != nil { - res.ClusterMode = svcsdktypes.ClusterMode(*r.ko.Spec.ClusterMode) - } if r.ko.Spec.Engine != nil { res.Engine = r.ko.Spec.Engine } - if r.ko.Spec.IPDiscovery != nil { - res.IpDiscovery = svcsdktypes.IpDiscovery(*r.ko.Spec.IPDiscovery) - } if r.ko.Spec.LogDeliveryConfigurations != nil { f11 := []svcsdktypes.LogDeliveryConfigurationRequest{} for _, f11iter := range r.ko.Spec.LogDeliveryConfigurations { @@ -1734,9 +1630,6 @@ func (rm *resourceManager) newUpdateRequestPayload( if r.ko.Spec.TransitEncryptionEnabled != nil { res.TransitEncryptionEnabled = r.ko.Spec.TransitEncryptionEnabled } - if r.ko.Spec.TransitEncryptionMode != nil { - res.TransitEncryptionMode = svcsdktypes.TransitEncryptionMode(*r.ko.Spec.TransitEncryptionMode) - } return res, nil } @@ -1794,7 +1687,7 @@ func (rm *resourceManager) sdkDelete( rm.metrics.RecordAPICall("DELETE", "DeleteReplicationGroup", err) // delete call successful if err == nil { - rp, _ := rm.setReplicationGroupOutput(r, resp.ReplicationGroup) + rp, _ := 
rm.setReplicationGroupOutput(ctx, r, resp.ReplicationGroup) // Setting resource synced condition to false will trigger a requeue of // the resource. ackcondition.SetSynced( @@ -1957,8 +1850,9 @@ func (rm *resourceManager) terminalAWSError(err error) bool { // This method copies the data from given ReplicationGroup by populating it // into copy of supplied resource and returns that. func (rm *resourceManager) setReplicationGroupOutput( + ctx context.Context, r *resource, - obj *svcsdk.ReplicationGroup, + obj *svcsdktypes.ReplicationGroup, ) (*resource, error) { if obj == nil || r == nil || @@ -2012,21 +1906,16 @@ func (rm *resourceManager) setReplicationGroupOutput( } else { ko.Status.ClusterEnabled = nil } - if resp.ReplicationGroup.ClusterMode != "" { - ko.Spec.ClusterMode = aws.String(string(resp.ReplicationGroup.ClusterMode)) - } else { - ko.Spec.ClusterMode = nil - } if resp.ReplicationGroup.ConfigurationEndpoint != nil { - f9 := &svcapitypes.Endpoint{} + f8 := &svcapitypes.Endpoint{} if resp.ReplicationGroup.ConfigurationEndpoint.Address != nil { - f9.Address = resp.ReplicationGroup.ConfigurationEndpoint.Address + f8.Address = resp.ReplicationGroup.ConfigurationEndpoint.Address } if resp.ReplicationGroup.ConfigurationEndpoint.Port != nil { portCopy := int64(*resp.ReplicationGroup.ConfigurationEndpoint.Port) - f9.Port = &portCopy + f8.Port = &portCopy } - ko.Status.ConfigurationEndpoint = f9 + ko.Status.ConfigurationEndpoint = f8 } else { ko.Status.ConfigurationEndpoint = nil } @@ -2046,61 +1935,56 @@ func (rm *resourceManager) setReplicationGroupOutput( ko.Spec.Engine = nil } if resp.ReplicationGroup.GlobalReplicationGroupInfo != nil { - f13 := &svcapitypes.GlobalReplicationGroupInfo{} + f12 := &svcapitypes.GlobalReplicationGroupInfo{} if resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupId != nil { - f13.GlobalReplicationGroupID = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupId + f12.GlobalReplicationGroupID = 
resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupId } if resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole != nil { - f13.GlobalReplicationGroupMemberRole = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole + f12.GlobalReplicationGroupMemberRole = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole } - ko.Status.GlobalReplicationGroupInfo = f13 + ko.Status.GlobalReplicationGroupInfo = f12 } else { ko.Status.GlobalReplicationGroupInfo = nil } - if resp.ReplicationGroup.IpDiscovery != "" { - ko.Spec.IPDiscovery = aws.String(string(resp.ReplicationGroup.IpDiscovery)) - } else { - ko.Spec.IPDiscovery = nil - } if resp.ReplicationGroup.KmsKeyId != nil { ko.Spec.KMSKeyID = resp.ReplicationGroup.KmsKeyId } else { ko.Spec.KMSKeyID = nil } if resp.ReplicationGroup.LogDeliveryConfigurations != nil { - f16 := []*svcapitypes.LogDeliveryConfigurationRequest{} - for _, f16iter := range resp.ReplicationGroup.LogDeliveryConfigurations { - f16elem := &svcapitypes.LogDeliveryConfigurationRequest{} - if f16iter.DestinationDetails != nil { - f16elemf0 := &svcapitypes.DestinationDetails{} - if f16iter.DestinationDetails.CloudWatchLogsDetails != nil { - f16elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} - if f16iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f16elemf0f0.LogGroup = f16iter.DestinationDetails.CloudWatchLogsDetails.LogGroup + f14 := []*svcapitypes.LogDeliveryConfigurationRequest{} + for _, f14iter := range resp.ReplicationGroup.LogDeliveryConfigurations { + f14elem := &svcapitypes.LogDeliveryConfigurationRequest{} + if f14iter.DestinationDetails != nil { + f14elemf0 := &svcapitypes.DestinationDetails{} + if f14iter.DestinationDetails.CloudWatchLogsDetails != nil { + f14elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} + if f14iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f14elemf0f0.LogGroup = 
f14iter.DestinationDetails.CloudWatchLogsDetails.LogGroup } - f16elemf0.CloudWatchLogsDetails = f16elemf0f0 + f14elemf0.CloudWatchLogsDetails = f14elemf0f0 } - if f16iter.DestinationDetails.KinesisFirehoseDetails != nil { - f16elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} - if f16iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f16elemf0f1.DeliveryStream = f16iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream + if f14iter.DestinationDetails.KinesisFirehoseDetails != nil { + f14elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} + if f14iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f14elemf0f1.DeliveryStream = f14iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream } - f16elemf0.KinesisFirehoseDetails = f16elemf0f1 + f14elemf0.KinesisFirehoseDetails = f14elemf0f1 } - f16elem.DestinationDetails = f16elemf0 + f14elem.DestinationDetails = f14elemf0 } - if f16iter.DestinationType != "" { - f16elem.DestinationType = aws.String(string(f16iter.DestinationType)) + if f14iter.DestinationType != "" { + f14elem.DestinationType = aws.String(string(f14iter.DestinationType)) } - if f16iter.LogFormat != "" { - f16elem.LogFormat = aws.String(string(f16iter.LogFormat)) + if f14iter.LogFormat != "" { + f14elem.LogFormat = aws.String(string(f14iter.LogFormat)) } - if f16iter.LogType != "" { - f16elem.LogType = aws.String(string(f16iter.LogType)) + if f14iter.LogType != "" { + f14elem.LogType = aws.String(string(f14iter.LogType)) } - f16 = append(f16, f16elem) + f14 = append(f14, f14elem) } - ko.Spec.LogDeliveryConfigurations = f16 + ko.Spec.LogDeliveryConfigurations = f14 } else { ko.Spec.LogDeliveryConfigurations = nil } @@ -2119,163 +2003,149 @@ func (rm *resourceManager) setReplicationGroupOutput( } else { ko.Status.MultiAZ = nil } - if resp.ReplicationGroup.NetworkType != "" { - ko.Spec.NetworkType = aws.String(string(resp.ReplicationGroup.NetworkType)) - } else { - ko.Spec.NetworkType = 
nil - } if resp.ReplicationGroup.NodeGroups != nil { - f21 := []*svcapitypes.NodeGroup{} - for _, f21iter := range resp.ReplicationGroup.NodeGroups { - f21elem := &svcapitypes.NodeGroup{} - if f21iter.NodeGroupId != nil { - f21elem.NodeGroupID = f21iter.NodeGroupId - } - if f21iter.NodeGroupMembers != nil { - f21elemf1 := []*svcapitypes.NodeGroupMember{} - for _, f21elemf1iter := range f21iter.NodeGroupMembers { - f21elemf1elem := &svcapitypes.NodeGroupMember{} - if f21elemf1iter.CacheClusterId != nil { - f21elemf1elem.CacheClusterID = f21elemf1iter.CacheClusterId + f18 := []*svcapitypes.NodeGroup{} + for _, f18iter := range resp.ReplicationGroup.NodeGroups { + f18elem := &svcapitypes.NodeGroup{} + if f18iter.NodeGroupId != nil { + f18elem.NodeGroupID = f18iter.NodeGroupId + } + if f18iter.NodeGroupMembers != nil { + f18elemf1 := []*svcapitypes.NodeGroupMember{} + for _, f18elemf1iter := range f18iter.NodeGroupMembers { + f18elemf1elem := &svcapitypes.NodeGroupMember{} + if f18elemf1iter.CacheClusterId != nil { + f18elemf1elem.CacheClusterID = f18elemf1iter.CacheClusterId } - if f21elemf1iter.CacheNodeId != nil { - f21elemf1elem.CacheNodeID = f21elemf1iter.CacheNodeId + if f18elemf1iter.CacheNodeId != nil { + f18elemf1elem.CacheNodeID = f18elemf1iter.CacheNodeId } - if f21elemf1iter.CurrentRole != nil { - f21elemf1elem.CurrentRole = f21elemf1iter.CurrentRole + if f18elemf1iter.CurrentRole != nil { + f18elemf1elem.CurrentRole = f18elemf1iter.CurrentRole } - if f21elemf1iter.PreferredAvailabilityZone != nil { - f21elemf1elem.PreferredAvailabilityZone = f21elemf1iter.PreferredAvailabilityZone + if f18elemf1iter.PreferredAvailabilityZone != nil { + f18elemf1elem.PreferredAvailabilityZone = f18elemf1iter.PreferredAvailabilityZone } - if f21elemf1iter.PreferredOutpostArn != nil { - f21elemf1elem.PreferredOutpostARN = f21elemf1iter.PreferredOutpostArn + if f18elemf1iter.PreferredOutpostArn != nil { + f18elemf1elem.PreferredOutpostARN = f18elemf1iter.PreferredOutpostArn } 
- if f21elemf1iter.ReadEndpoint != nil { - f21elemf1elemf5 := &svcapitypes.Endpoint{} - if f21elemf1iter.ReadEndpoint.Address != nil { - f21elemf1elemf5.Address = f21elemf1iter.ReadEndpoint.Address + if f18elemf1iter.ReadEndpoint != nil { + f18elemf1elemf5 := &svcapitypes.Endpoint{} + if f18elemf1iter.ReadEndpoint.Address != nil { + f18elemf1elemf5.Address = f18elemf1iter.ReadEndpoint.Address } - if f21elemf1iter.ReadEndpoint.Port != nil { - portCopy := int64(*f21elemf1iter.ReadEndpoint.Port) - f21elemf1elemf5.Port = &portCopy + if f18elemf1iter.ReadEndpoint.Port != nil { + portCopy := int64(*f18elemf1iter.ReadEndpoint.Port) + f18elemf1elemf5.Port = &portCopy } - f21elemf1elem.ReadEndpoint = f21elemf1elemf5 + f18elemf1elem.ReadEndpoint = f18elemf1elemf5 } - f21elemf1 = append(f21elemf1, f21elemf1elem) + f18elemf1 = append(f18elemf1, f18elemf1elem) } - f21elem.NodeGroupMembers = f21elemf1 + f18elem.NodeGroupMembers = f18elemf1 } - if f21iter.PrimaryEndpoint != nil { - f21elemf2 := &svcapitypes.Endpoint{} - if f21iter.PrimaryEndpoint.Address != nil { - f21elemf2.Address = f21iter.PrimaryEndpoint.Address + if f18iter.PrimaryEndpoint != nil { + f18elemf2 := &svcapitypes.Endpoint{} + if f18iter.PrimaryEndpoint.Address != nil { + f18elemf2.Address = f18iter.PrimaryEndpoint.Address } - if f21iter.PrimaryEndpoint.Port != nil { - portCopy := int64(*f21iter.PrimaryEndpoint.Port) - f21elemf2.Port = &portCopy + if f18iter.PrimaryEndpoint.Port != nil { + portCopy := int64(*f18iter.PrimaryEndpoint.Port) + f18elemf2.Port = &portCopy } - f21elem.PrimaryEndpoint = f21elemf2 + f18elem.PrimaryEndpoint = f18elemf2 } - if f21iter.ReaderEndpoint != nil { - f21elemf3 := &svcapitypes.Endpoint{} - if f21iter.ReaderEndpoint.Address != nil { - f21elemf3.Address = f21iter.ReaderEndpoint.Address + if f18iter.ReaderEndpoint != nil { + f18elemf3 := &svcapitypes.Endpoint{} + if f18iter.ReaderEndpoint.Address != nil { + f18elemf3.Address = f18iter.ReaderEndpoint.Address } - if 
f21iter.ReaderEndpoint.Port != nil { - portCopy := int64(*f21iter.ReaderEndpoint.Port) - f21elemf3.Port = &portCopy + if f18iter.ReaderEndpoint.Port != nil { + portCopy := int64(*f18iter.ReaderEndpoint.Port) + f18elemf3.Port = &portCopy } - f21elem.ReaderEndpoint = f21elemf3 + f18elem.ReaderEndpoint = f18elemf3 } - if f21iter.Slots != nil { - f21elem.Slots = f21iter.Slots + if f18iter.Slots != nil { + f18elem.Slots = f18iter.Slots } - if f21iter.Status != nil { - f21elem.Status = f21iter.Status + if f18iter.Status != nil { + f18elem.Status = f18iter.Status } - f21 = append(f21, f21elem) + f18 = append(f18, f18elem) } - ko.Status.NodeGroups = f21 + ko.Status.NodeGroups = f18 } else { ko.Status.NodeGroups = nil } if resp.ReplicationGroup.PendingModifiedValues != nil { - f22 := &svcapitypes.ReplicationGroupPendingModifiedValues{} + f19 := &svcapitypes.ReplicationGroupPendingModifiedValues{} if resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus != "" { - f22.AuthTokenStatus = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus)) + f19.AuthTokenStatus = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus)) } if resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus != "" { - f22.AutomaticFailoverStatus = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus)) - } - if resp.ReplicationGroup.PendingModifiedValues.ClusterMode != "" { - f22.ClusterMode = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.ClusterMode)) + f19.AutomaticFailoverStatus = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus)) } if resp.ReplicationGroup.PendingModifiedValues.LogDeliveryConfigurations != nil { - f22f3 := []*svcapitypes.PendingLogDeliveryConfiguration{} - for _, f22f3iter := range resp.ReplicationGroup.PendingModifiedValues.LogDeliveryConfigurations { - f22f3elem := &svcapitypes.PendingLogDeliveryConfiguration{} - if 
f22f3iter.DestinationDetails != nil { - f22f3elemf0 := &svcapitypes.DestinationDetails{} - if f22f3iter.DestinationDetails.CloudWatchLogsDetails != nil { - f22f3elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} - if f22f3iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f22f3elemf0f0.LogGroup = f22f3iter.DestinationDetails.CloudWatchLogsDetails.LogGroup + f19f2 := []*svcapitypes.PendingLogDeliveryConfiguration{} + for _, f19f2iter := range resp.ReplicationGroup.PendingModifiedValues.LogDeliveryConfigurations { + f19f2elem := &svcapitypes.PendingLogDeliveryConfiguration{} + if f19f2iter.DestinationDetails != nil { + f19f2elemf0 := &svcapitypes.DestinationDetails{} + if f19f2iter.DestinationDetails.CloudWatchLogsDetails != nil { + f19f2elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} + if f19f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f19f2elemf0f0.LogGroup = f19f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup } - f22f3elemf0.CloudWatchLogsDetails = f22f3elemf0f0 + f19f2elemf0.CloudWatchLogsDetails = f19f2elemf0f0 } - if f22f3iter.DestinationDetails.KinesisFirehoseDetails != nil { - f22f3elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} - if f22f3iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f22f3elemf0f1.DeliveryStream = f22f3iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream + if f19f2iter.DestinationDetails.KinesisFirehoseDetails != nil { + f19f2elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} + if f19f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f19f2elemf0f1.DeliveryStream = f19f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream } - f22f3elemf0.KinesisFirehoseDetails = f22f3elemf0f1 + f19f2elemf0.KinesisFirehoseDetails = f19f2elemf0f1 } - f22f3elem.DestinationDetails = f22f3elemf0 + f19f2elem.DestinationDetails = f19f2elemf0 } - if f22f3iter.DestinationType != "" { - f22f3elem.DestinationType = 
aws.String(string(f22f3iter.DestinationType)) + if f19f2iter.DestinationType != "" { + f19f2elem.DestinationType = aws.String(string(f19f2iter.DestinationType)) } - if f22f3iter.LogFormat != "" { - f22f3elem.LogFormat = aws.String(string(f22f3iter.LogFormat)) + if f19f2iter.LogFormat != "" { + f19f2elem.LogFormat = aws.String(string(f19f2iter.LogFormat)) } - if f22f3iter.LogType != "" { - f22f3elem.LogType = aws.String(string(f22f3iter.LogType)) + if f19f2iter.LogType != "" { + f19f2elem.LogType = aws.String(string(f19f2iter.LogType)) } - f22f3 = append(f22f3, f22f3elem) + f19f2 = append(f19f2, f19f2elem) } - f22.LogDeliveryConfigurations = f22f3 + f19.LogDeliveryConfigurations = f19f2 } if resp.ReplicationGroup.PendingModifiedValues.PrimaryClusterId != nil { - f22.PrimaryClusterID = resp.ReplicationGroup.PendingModifiedValues.PrimaryClusterId + f19.PrimaryClusterID = resp.ReplicationGroup.PendingModifiedValues.PrimaryClusterId } if resp.ReplicationGroup.PendingModifiedValues.Resharding != nil { - f22f5 := &svcapitypes.ReshardingStatus{} + f19f4 := &svcapitypes.ReshardingStatus{} if resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration != nil { - f22f5f0 := &svcapitypes.SlotMigration{} + f19f4f0 := &svcapitypes.SlotMigration{} if resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage != nil { - f22f5f0.ProgressPercentage = resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage + f19f4f0.ProgressPercentage = resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage } - f22f5.SlotMigration = f22f5f0 + f19f4.SlotMigration = f19f4f0 } - f22.Resharding = f22f5 - } - if resp.ReplicationGroup.PendingModifiedValues.TransitEncryptionEnabled != nil { - f22.TransitEncryptionEnabled = resp.ReplicationGroup.PendingModifiedValues.TransitEncryptionEnabled - } - if resp.ReplicationGroup.PendingModifiedValues.TransitEncryptionMode != "" { - f22.TransitEncryptionMode = 
aws.String(string(resp.ReplicationGroup.PendingModifiedValues.TransitEncryptionMode)) + f19.Resharding = f19f4 } if resp.ReplicationGroup.PendingModifiedValues.UserGroups != nil { - f22f8 := &svcapitypes.UserGroupsUpdateStatus{} + f19f5 := &svcapitypes.UserGroupsUpdateStatus{} if resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToAdd != nil { - f22f8.UserGroupIDsToAdd = aws.StringSlice(resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToAdd) + f19f5.UserGroupIDsToAdd = aws.StringSlice(resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToAdd) } if resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToRemove != nil { - f22f8.UserGroupIDsToRemove = aws.StringSlice(resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToRemove) + f19f5.UserGroupIDsToRemove = aws.StringSlice(resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToRemove) } - f22.UserGroups = f22f8 + f19.UserGroups = f19f5 } - ko.Status.PendingModifiedValues = f22 + ko.Status.PendingModifiedValues = f19 } else { ko.Status.PendingModifiedValues = nil } @@ -2315,11 +2185,6 @@ func (rm *resourceManager) setReplicationGroupOutput( } else { ko.Spec.TransitEncryptionEnabled = nil } - if resp.ReplicationGroup.TransitEncryptionMode != "" { - ko.Spec.TransitEncryptionMode = aws.String(string(resp.ReplicationGroup.TransitEncryptionMode)) - } else { - ko.Spec.TransitEncryptionMode = nil - } if resp.ReplicationGroup.UserGroupIds != nil { ko.Spec.UserGroupIDs = aws.StringSlice(resp.ReplicationGroup.UserGroupIds) } else { @@ -2327,6 +2192,6 @@ func (rm *resourceManager) setReplicationGroupOutput( } rm.setStatusDefaults(ko) - rm.customSetOutput(obj, ko) // custom set output from obj + rm.customSetOutput(ctx, *obj, ko) // custom set output from obj return &resource{ko}, nil } diff --git a/pkg/resource/snapshot/custom_set_conditions.go b/pkg/resource/snapshot/custom_set_conditions.go deleted file mode 100644 index 
4fdb00dd..00000000 --- a/pkg/resource/snapshot/custom_set_conditions.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package snapshot - -import ( - svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" - ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" - corev1 "k8s.io/api/core/v1" -) - -// CustomUpdateConditions sets conditions (terminal) on supplied snapshot -// it examines supplied resource to determine conditions. 
-// It returns true if conditions are updated -func (rm *resourceManager) CustomUpdateConditions( - ko *svcapitypes.Snapshot, - r *resource, - err error, -) bool { - snapshotStatus := r.ko.Status.SnapshotStatus - if snapshotStatus == nil || *snapshotStatus != "failed" { - return false - } - // Terminal condition - var terminalCondition *ackv1alpha1.Condition = nil - if ko.Status.Conditions == nil { - ko.Status.Conditions = []*ackv1alpha1.Condition{} - } else { - for _, condition := range ko.Status.Conditions { - if condition.Type == ackv1alpha1.ConditionTypeTerminal { - terminalCondition = condition - break - } - } - if terminalCondition != nil && terminalCondition.Status == corev1.ConditionTrue { - // some other exception already put the resource in terminal condition - return false - } - } - if terminalCondition == nil { - terminalCondition = &ackv1alpha1.Condition{ - Type: ackv1alpha1.ConditionTypeTerminal, - } - ko.Status.Conditions = append(ko.Status.Conditions, terminalCondition) - } - terminalCondition.Status = corev1.ConditionTrue - errorMessage := "Snapshot status: failed" - terminalCondition.Message = &errorMessage - return true -} diff --git a/pkg/resource/snapshot/custom_set_output.go b/pkg/resource/snapshot/custom_set_output.go deleted file mode 100644 index 2875e231..00000000 --- a/pkg/resource/snapshot/custom_set_output.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. 
- -package snapshot - -import ( - "context" - - svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" - ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" - "github.com/aws/aws-sdk-go/service/elasticache" - corev1 "k8s.io/api/core/v1" -) - -func (rm *resourceManager) CustomDescribeSnapshotSetOutput( - ctx context.Context, - r *resource, - resp *elasticache.DescribeSnapshotsOutput, - ko *svcapitypes.Snapshot, -) (*svcapitypes.Snapshot, error) { - if len(resp.Snapshots) == 0 { - return ko, nil - } - elem := resp.Snapshots[0] - rm.customSetOutput(r, elem, ko) - return ko, nil -} - -func (rm *resourceManager) CustomCreateSnapshotSetOutput( - ctx context.Context, - r *resource, - resp *elasticache.CreateSnapshotOutput, - ko *svcapitypes.Snapshot, -) (*svcapitypes.Snapshot, error) { - rm.customSetOutput(r, resp.Snapshot, ko) - return ko, nil -} - -func (rm *resourceManager) CustomCopySnapshotSetOutput( - r *resource, - resp *elasticache.CopySnapshotOutput, - ko *svcapitypes.Snapshot, -) *svcapitypes.Snapshot { - rm.customSetOutput(r, resp.Snapshot, ko) - return ko -} - -func (rm *resourceManager) customSetOutput( - r *resource, - respSnapshot *elasticache.Snapshot, - ko *svcapitypes.Snapshot, -) { - if respSnapshot.ReplicationGroupId != nil { - ko.Spec.ReplicationGroupID = respSnapshot.ReplicationGroupId - } - - if respSnapshot.KmsKeyId != nil { - ko.Spec.KMSKeyID = respSnapshot.KmsKeyId - } - - if respSnapshot.CacheClusterId != nil { - ko.Spec.CacheClusterID = respSnapshot.CacheClusterId - } - - if ko.Status.Conditions == nil { - ko.Status.Conditions = []*ackv1alpha1.Condition{} - } - snapshotStatus := respSnapshot.SnapshotStatus - syncConditionStatus := corev1.ConditionUnknown - if snapshotStatus != nil { - if *snapshotStatus == "available" || - *snapshotStatus == "failed" { - syncConditionStatus = corev1.ConditionTrue - } else { - // resource in "creating", "restoring","exporting" - syncConditionStatus = 
corev1.ConditionFalse - } - } - var resourceSyncedCondition *ackv1alpha1.Condition = nil - for _, condition := range ko.Status.Conditions { - if condition.Type == ackv1alpha1.ConditionTypeResourceSynced { - resourceSyncedCondition = condition - break - } - } - if resourceSyncedCondition == nil { - resourceSyncedCondition = &ackv1alpha1.Condition{ - Type: ackv1alpha1.ConditionTypeResourceSynced, - Status: syncConditionStatus, - } - ko.Status.Conditions = append(ko.Status.Conditions, resourceSyncedCondition) - } else { - resourceSyncedCondition.Status = syncConditionStatus - } -} diff --git a/pkg/resource/snapshot/custom_update_api.go b/pkg/resource/snapshot/custom_update_api.go deleted file mode 100644 index 04492bc7..00000000 --- a/pkg/resource/snapshot/custom_update_api.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. 
- -package snapshot - -import ( - "context" - - ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" -) - -// Snapshot API has no update -func (rm *resourceManager) customUpdateSnapshot( - ctx context.Context, - desired *resource, - latest *resource, - delta *ackcompare.Delta, -) (*resource, error) { - return latest, nil -} diff --git a/pkg/resource/snapshot/custom_create_api.go b/pkg/resource/snapshot/hooks.go similarity index 52% rename from pkg/resource/snapshot/custom_create_api.go rename to pkg/resource/snapshot/hooks.go index bccdc3e8..0278d450 100644 --- a/pkg/resource/snapshot/custom_create_api.go +++ b/pkg/resource/snapshot/hooks.go @@ -18,8 +18,13 @@ import ( svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticache" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + svcsdktypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" "github.com/aws/aws-sdk-go/aws/awserr" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -38,7 +43,7 @@ func (rm *resourceManager) CustomCreateSnapshot( return nil, err } - resp, respErr := rm.sdkapi.CopySnapshot(input) + resp, respErr := rm.sdkapi.CopySnapshot(ctx, input) rm.metrics.RecordAPICall("CREATE", "CopySnapshot", respErr) if respErr != nil { @@ -58,11 +63,12 @@ func (rm *resourceManager) CustomCreateSnapshot( if resp.Snapshot.AutoMinorVersionUpgrade != nil { ko.Status.AutoMinorVersionUpgrade = resp.Snapshot.AutoMinorVersionUpgrade } - if resp.Snapshot.AutomaticFailover != nil { - ko.Status.AutomaticFailover = resp.Snapshot.AutomaticFailover + if resp.Snapshot.AutomaticFailover != "" { + ko.Status.AutomaticFailover = 
aws.String(string(resp.Snapshot.AutomaticFailover)) } if resp.Snapshot.CacheClusterCreateTime != nil { - ko.Status.CacheClusterCreateTime = &metav1.Time{*resp.Snapshot.CacheClusterCreateTime} + cacheClusterCreateTime := metav1.Time{Time: *resp.Snapshot.CacheClusterCreateTime} + ko.Status.CacheClusterCreateTime = &cacheClusterCreateTime } if resp.Snapshot.CacheNodeType != nil { ko.Status.CacheNodeType = resp.Snapshot.CacheNodeType @@ -87,7 +93,7 @@ func (rm *resourceManager) CustomCreateSnapshot( f11elem.CacheClusterID = f11iter.CacheClusterId } if f11iter.CacheNodeCreateTime != nil { - f11elem.CacheNodeCreateTime = &metav1.Time{*f11iter.CacheNodeCreateTime} + f11elem.CacheNodeCreateTime = &metav1.Time{Time: *f11iter.CacheNodeCreateTime} } if f11iter.CacheNodeId != nil { f11elem.CacheNodeID = f11iter.CacheNodeId @@ -106,14 +112,14 @@ func (rm *resourceManager) CustomCreateSnapshot( if f11iter.NodeGroupConfiguration.ReplicaAvailabilityZones != nil { f11elemf4f2 := []*string{} for _, f11elemf4f2iter := range f11iter.NodeGroupConfiguration.ReplicaAvailabilityZones { - var f11elemf4f2elem string - f11elemf4f2elem = *f11elemf4f2iter - f11elemf4f2 = append(f11elemf4f2, &f11elemf4f2elem) + f11elemf4f2iter := f11elemf4f2iter // Create new variable to avoid referencing loop variable + f11elemf4f2 = append(f11elemf4f2, &f11elemf4f2iter) } f11elemf4.ReplicaAvailabilityZones = f11elemf4f2 } if f11iter.NodeGroupConfiguration.ReplicaCount != nil { - f11elemf4.ReplicaCount = f11iter.NodeGroupConfiguration.ReplicaCount + replicaCount := int64(*f11iter.NodeGroupConfiguration.ReplicaCount) + f11elemf4.ReplicaCount = &replicaCount } if f11iter.NodeGroupConfiguration.Slots != nil { f11elemf4.Slots = f11iter.NodeGroupConfiguration.Slots @@ -124,20 +130,23 @@ func (rm *resourceManager) CustomCreateSnapshot( f11elem.NodeGroupID = f11iter.NodeGroupId } if f11iter.SnapshotCreateTime != nil { - f11elem.SnapshotCreateTime = &metav1.Time{*f11iter.SnapshotCreateTime} + 
f11elem.SnapshotCreateTime = &metav1.Time{Time: *f11iter.SnapshotCreateTime} } f11 = append(f11, f11elem) } ko.Status.NodeSnapshots = f11 } if resp.Snapshot.NumCacheNodes != nil { - ko.Status.NumCacheNodes = resp.Snapshot.NumCacheNodes + numNodes := int64(*resp.Snapshot.NumCacheNodes) + ko.Status.NumCacheNodes = &numNodes } if resp.Snapshot.NumNodeGroups != nil { - ko.Status.NumNodeGroups = resp.Snapshot.NumNodeGroups + numNodeGroups := int64(*resp.Snapshot.NumNodeGroups) + ko.Status.NumNodeGroups = &numNodeGroups } if resp.Snapshot.Port != nil { - ko.Status.Port = resp.Snapshot.Port + port := int64(*resp.Snapshot.Port) + ko.Status.Port = &port } if resp.Snapshot.PreferredAvailabilityZone != nil { ko.Status.PreferredAvailabilityZone = resp.Snapshot.PreferredAvailabilityZone @@ -150,7 +159,8 @@ func (rm *resourceManager) CustomCreateSnapshot( } if resp.Snapshot.SnapshotRetentionLimit != nil { - ko.Status.SnapshotRetentionLimit = resp.Snapshot.SnapshotRetentionLimit + retentionLimit := int64(*resp.Snapshot.SnapshotRetentionLimit) + ko.Status.SnapshotRetentionLimit = &retentionLimit } if resp.Snapshot.SnapshotSource != nil { ko.Status.SnapshotSource = resp.Snapshot.SnapshotSource @@ -185,15 +195,146 @@ func (rm *resourceManager) newCopySnapshotPayload( res := &svcsdk.CopySnapshotInput{} if r.ko.Spec.SourceSnapshotName != nil { - res.SetSourceSnapshotName(*r.ko.Spec.SourceSnapshotName) + res.SourceSnapshotName = r.ko.Spec.SourceSnapshotName } if r.ko.Spec.KMSKeyID != nil { - res.SetKmsKeyId(*r.ko.Spec.KMSKeyID) + res.KmsKeyId = r.ko.Spec.KMSKeyID } - if r.ko.Spec.SnapshotName != nil { - res.SetTargetSnapshotName(*r.ko.Spec.SnapshotName) + res.TargetSnapshotName = r.ko.Spec.SnapshotName } return res, nil } + +// CustomUpdateConditions sets conditions (terminal) on supplied snapshot +// it examines supplied resource to determine conditions. 
+// It returns true if conditions are updated +func (rm *resourceManager) CustomUpdateConditions( + ko *svcapitypes.Snapshot, + r *resource, + err error, +) bool { + snapshotStatus := r.ko.Status.SnapshotStatus + if snapshotStatus == nil || *snapshotStatus != "failed" { + return false + } + // Terminal condition + var terminalCondition *ackv1alpha1.Condition = nil + if ko.Status.Conditions == nil { + ko.Status.Conditions = []*ackv1alpha1.Condition{} + } else { + for _, condition := range ko.Status.Conditions { + if condition.Type == ackv1alpha1.ConditionTypeTerminal { + terminalCondition = condition + break + } + } + if terminalCondition != nil && terminalCondition.Status == corev1.ConditionTrue { + // some other exception already put the resource in terminal condition + return false + } + } + if terminalCondition == nil { + terminalCondition = &ackv1alpha1.Condition{ + Type: ackv1alpha1.ConditionTypeTerminal, + } + ko.Status.Conditions = append(ko.Status.Conditions, terminalCondition) + } + terminalCondition.Status = corev1.ConditionTrue + errorMessage := "Snapshot status: failed" + terminalCondition.Message = &errorMessage + return true +} + +func (rm *resourceManager) CustomDescribeSnapshotSetOutput( + ctx context.Context, + r *resource, + resp *elasticache.DescribeSnapshotsOutput, + ko *svcapitypes.Snapshot, +) (*svcapitypes.Snapshot, error) { + if len(resp.Snapshots) == 0 { + return ko, nil + } + elem := resp.Snapshots[0] + rm.customSetOutput(r, &elem, ko) + return ko, nil +} + +func (rm *resourceManager) CustomCreateSnapshotSetOutput( + ctx context.Context, + r *resource, + resp *elasticache.CreateSnapshotOutput, + ko *svcapitypes.Snapshot, +) (*svcapitypes.Snapshot, error) { + rm.customSetOutput(r, resp.Snapshot, ko) + return ko, nil +} + +func (rm *resourceManager) CustomCopySnapshotSetOutput( + r *resource, + resp *elasticache.CopySnapshotOutput, + ko *svcapitypes.Snapshot, +) *svcapitypes.Snapshot { + rm.customSetOutput(r, resp.Snapshot, ko) + return ko 
+} + +func (rm *resourceManager) customSetOutput( + r *resource, + respSnapshot *svcsdktypes.Snapshot, + ko *svcapitypes.Snapshot, +) { + if respSnapshot.ReplicationGroupId != nil { + ko.Spec.ReplicationGroupID = respSnapshot.ReplicationGroupId + } + + if respSnapshot.KmsKeyId != nil { + ko.Spec.KMSKeyID = respSnapshot.KmsKeyId + } + + if respSnapshot.CacheClusterId != nil { + ko.Spec.CacheClusterID = respSnapshot.CacheClusterId + } + + if ko.Status.Conditions == nil { + ko.Status.Conditions = []*ackv1alpha1.Condition{} + } + snapshotStatus := respSnapshot.SnapshotStatus + syncConditionStatus := corev1.ConditionUnknown + if snapshotStatus != nil { + if *snapshotStatus == "available" || + *snapshotStatus == "failed" { + syncConditionStatus = corev1.ConditionTrue + } else { + // resource in "creating", "restoring","exporting" + syncConditionStatus = corev1.ConditionFalse + } + } + var resourceSyncedCondition *ackv1alpha1.Condition = nil + for _, condition := range ko.Status.Conditions { + if condition.Type == ackv1alpha1.ConditionTypeResourceSynced { + resourceSyncedCondition = condition + break + } + } + if resourceSyncedCondition == nil { + resourceSyncedCondition = &ackv1alpha1.Condition{ + Type: ackv1alpha1.ConditionTypeResourceSynced, + Status: syncConditionStatus, + } + ko.Status.Conditions = append(ko.Status.Conditions, resourceSyncedCondition) + } else { + resourceSyncedCondition.Status = syncConditionStatus + } +} + +// Snapshot API has no update +func (rm *resourceManager) customUpdateSnapshot( + ctx context.Context, + desired *resource, + latest *resource, + delta *ackcompare.Delta, +) (*resource, error) { + return latest, nil +} diff --git a/pkg/resource/user/custom_set_output.go b/pkg/resource/user/custom_set_output.go deleted file mode 100644 index 8846f03b..00000000 --- a/pkg/resource/user/custom_set_output.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package user - -import ( - "context" - - svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" -) - -// set the custom Status fields upon creation -func (rm *resourceManager) CustomCreateUserSetOutput( - ctx context.Context, - r *resource, - resp *svcsdk.CreateUserOutput, - ko *svcapitypes.User, -) (*svcapitypes.User, error) { - return rm.CustomSetOutput(r, resp.AccessString, ko) -} - -// precondition: successful ModifyUserWithContext call -// By updating 'latest' Status fields, these changes should be applied to 'desired' -// upon patching -func (rm *resourceManager) CustomModifyUserSetOutput( - ctx context.Context, - r *resource, - resp *svcsdk.ModifyUserOutput, - ko *svcapitypes.User, -) (*svcapitypes.User, error) { - return rm.CustomSetOutput(r, resp.AccessString, ko) -} - -func (rm *resourceManager) CustomSetOutput( - r *resource, - responseAccessString *string, - ko *svcapitypes.User, -) (*svcapitypes.User, error) { - - lastRequested := *r.ko.Spec.AccessString - ko.Status.LastRequestedAccessString = &lastRequested - - expandedAccessStringValue := *responseAccessString - ko.Status.ExpandedAccessString = &expandedAccessStringValue - - return ko, nil -} diff --git a/pkg/resource/user/custom_update.go b/pkg/resource/user/custom_update.go deleted file mode 100644 index c977f252..00000000 --- a/pkg/resource/user/custom_update.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 
Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package user - -import ( - "context" - - "github.com/pkg/errors" - - ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" - "github.com/aws-controllers-k8s/runtime/pkg/requeue" -) - -// currently this function's only purpose is to requeue if the resource is currently unavailable -func (rm *resourceManager) CustomModifyUser( - ctx context.Context, - desired *resource, - latest *resource, - delta *ackcompare.Delta, -) (*resource, error) { - - // requeue if necessary - latestStatus := latest.ko.Status.Status - if latestStatus == nil || *latestStatus != "active" { - return nil, requeue.NeededAfter( - errors.New("User cannot be modified as its status is not 'active'."), - requeue.DefaultRequeueAfterDuration) - } - - return nil, nil -} diff --git a/pkg/resource/user/delta.go b/pkg/resource/user/delta.go index cd2a9791..746c7b38 100644 --- a/pkg/resource/user/delta.go +++ b/pkg/resource/user/delta.go @@ -50,24 +50,6 @@ func newResourceDelta( delta.Add("Spec.AccessString", a.ko.Spec.AccessString, b.ko.Spec.AccessString) } } - if ackcompare.HasNilDifference(a.ko.Spec.AuthenticationMode, b.ko.Spec.AuthenticationMode) { - delta.Add("Spec.AuthenticationMode", a.ko.Spec.AuthenticationMode, b.ko.Spec.AuthenticationMode) - } else if a.ko.Spec.AuthenticationMode != nil && b.ko.Spec.AuthenticationMode != nil { - if len(a.ko.Spec.AuthenticationMode.Passwords) != len(b.ko.Spec.AuthenticationMode.Passwords) { - 
delta.Add("Spec.AuthenticationMode.Passwords", a.ko.Spec.AuthenticationMode.Passwords, b.ko.Spec.AuthenticationMode.Passwords) - } else if len(a.ko.Spec.AuthenticationMode.Passwords) > 0 { - if !ackcompare.SliceStringPEqual(a.ko.Spec.AuthenticationMode.Passwords, b.ko.Spec.AuthenticationMode.Passwords) { - delta.Add("Spec.AuthenticationMode.Passwords", a.ko.Spec.AuthenticationMode.Passwords, b.ko.Spec.AuthenticationMode.Passwords) - } - } - if ackcompare.HasNilDifference(a.ko.Spec.AuthenticationMode.Type, b.ko.Spec.AuthenticationMode.Type) { - delta.Add("Spec.AuthenticationMode.Type", a.ko.Spec.AuthenticationMode.Type, b.ko.Spec.AuthenticationMode.Type) - } else if a.ko.Spec.AuthenticationMode.Type != nil && b.ko.Spec.AuthenticationMode.Type != nil { - if *a.ko.Spec.AuthenticationMode.Type != *b.ko.Spec.AuthenticationMode.Type { - delta.Add("Spec.AuthenticationMode.Type", a.ko.Spec.AuthenticationMode.Type, b.ko.Spec.AuthenticationMode.Type) - } - } - } if ackcompare.HasNilDifference(a.ko.Spec.Engine, b.ko.Spec.Engine) { delta.Add("Spec.Engine", a.ko.Spec.Engine, b.ko.Spec.Engine) } else if a.ko.Spec.Engine != nil && b.ko.Spec.Engine != nil { diff --git a/pkg/resource/user/delta_util.go b/pkg/resource/user/delta_util.go deleted file mode 100644 index 353a7cc2..00000000 --- a/pkg/resource/user/delta_util.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. 
- -package user - -import ( - "github.com/aws-controllers-k8s/elasticache-controller/pkg/common" - ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" -) - -// remove differences which are not meaningful (i.e. ones that don't warrant a call to rm.Update) -func filterDelta( - delta *ackcompare.Delta, - desired *resource, - latest *resource, -) { - // the returned AccessString can be different than the specified one; as long as the last requested AccessString - // matches the currently desired one, remove this difference from the delta - if delta.DifferentAt("Spec.AccessString") { - if desired.ko.Spec.AccessString != nil && - desired.ko.Status.LastRequestedAccessString != nil && - *desired.ko.Spec.AccessString == *desired.ko.Status.LastRequestedAccessString { - - common.RemoveFromDelta(delta, "Spec.AccessString") - } - } -} diff --git a/pkg/resource/user/hooks.go b/pkg/resource/user/hooks.go new file mode 100644 index 00000000..28e2b316 --- /dev/null +++ b/pkg/resource/user/hooks.go @@ -0,0 +1,166 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package user + +import ( + "context" + + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" + "github.com/aws-controllers-k8s/elasticache-controller/pkg/common" + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + "github.com/aws-controllers-k8s/runtime/pkg/requeue" +) + +// set the custom Status fields upon creation +func (rm *resourceManager) CustomCreateUserSetOutput( + ctx context.Context, + r *resource, + resp *svcsdk.CreateUserOutput, + ko *svcapitypes.User, +) (*svcapitypes.User, error) { + return rm.CustomSetOutput(r, resp.AccessString, ko) +} + +// precondition: successful ModifyUserWithContext call +// By updating 'latest' Status fields, these changes should be applied to 'desired' +// upon patching +func (rm *resourceManager) CustomModifyUserSetOutput( + ctx context.Context, + r *resource, + resp *svcsdk.ModifyUserOutput, + ko *svcapitypes.User, +) (*svcapitypes.User, error) { + return rm.CustomSetOutput(r, resp.AccessString, ko) +} + +func (rm *resourceManager) CustomSetOutput( + r *resource, + responseAccessString *string, + ko *svcapitypes.User, +) (*svcapitypes.User, error) { + + lastRequested := *r.ko.Spec.AccessString + ko.Status.LastRequestedAccessString = &lastRequested + + expandedAccessStringValue := *responseAccessString + ko.Status.ExpandedAccessString = &expandedAccessStringValue + + return ko, nil +} + +// currently this function's only purpose is to requeue if the resource is currently unavailable +func (rm *resourceManager) CustomModifyUser( + ctx context.Context, + desired *resource, + latest *resource, + delta *ackcompare.Delta, +) (*resource, error) { + + // requeue if necessary + latestStatus := latest.ko.Status.Status + if latestStatus == nil || *latestStatus != "active" { + return nil, 
requeue.NeededAfter( + errors.New("User cannot be modified as its status is not 'active'."), + requeue.DefaultRequeueAfterDuration) + } + + return nil, nil +} + +// TODO: this should be generated in the future. In general, it doesn't seem like a good idea to add every non-nil +// Spec field in desired.Spec to the payload (i.e. what we do when building most inputs), unless there is +// actually a difference in the Spec field between desired and latest +func (rm *resourceManager) populateUpdatePayload( + input *svcsdk.ModifyUserInput, + r *resource, + delta *ackcompare.Delta, +) { + if delta.DifferentAt("Spec.AccessString") && r.ko.Spec.AccessString != nil { + input.AccessString = r.ko.Spec.AccessString + } + + if delta.DifferentAt("Spec.NoPasswordRequired") && r.ko.Spec.NoPasswordRequired != nil { + input.NoPasswordRequired = r.ko.Spec.NoPasswordRequired + } + + //TODO: add update for passwords field once we have framework-level support + +} + +/* + functions to update the state of the resource where the generated code or the set_output + functions are insufficient +*/ + +// set the ResourceSynced condition based on the User's Status. 
r is a wrapper around the User resource which will +// eventually be returned as "latest" +func (rm *resourceManager) setSyncedCondition( + status *string, + r *resource, +) { + // determine whether the resource can be considered synced + syncedStatus := corev1.ConditionUnknown + if status != nil { + if *status == "active" { + syncedStatus = corev1.ConditionTrue + } else { + syncedStatus = corev1.ConditionFalse + } + + } + + // TODO: add utility function in a common repo to do the below as it's done at least once per resource + + // set existing condition to the above status (or create a new condition with this status) + ko := r.ko + var resourceSyncedCondition *ackv1alpha1.Condition = nil + for _, condition := range ko.Status.Conditions { + if condition.Type == ackv1alpha1.ConditionTypeResourceSynced { + resourceSyncedCondition = condition + break + } + } + if resourceSyncedCondition == nil { + resourceSyncedCondition = &ackv1alpha1.Condition{ + Type: ackv1alpha1.ConditionTypeResourceSynced, + Status: syncedStatus, + } + ko.Status.Conditions = append(ko.Status.Conditions, resourceSyncedCondition) + } else { + resourceSyncedCondition.Status = syncedStatus + } +} + +// remove differences which are not meaningful (i.e. 
ones that don't warrant a call to rm.Update) +func filterDelta( + delta *ackcompare.Delta, + desired *resource, + latest *resource, +) { + // the returned AccessString can be different than the specified one; as long as the last requested AccessString + // matches the currently desired one, remove this difference from the delta + if delta.DifferentAt("Spec.AccessString") { + if desired.ko.Spec.AccessString != nil && + desired.ko.Status.LastRequestedAccessString != nil && + *desired.ko.Spec.AccessString == *desired.ko.Status.LastRequestedAccessString { + + common.RemoveFromDelta(delta, "Spec.AccessString") + } + } +} diff --git a/pkg/resource/user/post_build_request.go b/pkg/resource/user/post_build_request.go deleted file mode 100644 index 487328cb..00000000 --- a/pkg/resource/user/post_build_request.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package user - -import ( - ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" -) - -// TODO: this should be generated in the future. In general, it doesn't seem like a good idea to add every non-nil -// Spec field in desired.Spec to the payload (i.e. 
what we do when building most inputs), unless there is -// actually a difference in the Spec field between desired and latest -func (rm *resourceManager) populateUpdatePayload( - input *svcsdk.ModifyUserInput, - r *resource, - delta *ackcompare.Delta, -) { - if delta.DifferentAt("Spec.AccessString") && r.ko.Spec.AccessString != nil { - input.AccessString = r.ko.Spec.AccessString - } - - if delta.DifferentAt("Spec.NoPasswordRequired") && r.ko.Spec.NoPasswordRequired != nil { - input.NoPasswordRequired = r.ko.Spec.NoPasswordRequired - } - - //TODO: add update for passwords field once we have framework-level support - -} diff --git a/pkg/resource/user/post_set_output.go b/pkg/resource/user/post_set_output.go deleted file mode 100644 index 8c1d576e..00000000 --- a/pkg/resource/user/post_set_output.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package user - -import ( - ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" - corev1 "k8s.io/api/core/v1" -) - -/* - This file contains functions to update the state of the resource where the generated code or the set_output - functions are insufficient -*/ - -// set the ResourceSynced condition based on the User's Status. 
r is a wrapper around the User resource which will -// eventually be returned as "latest" -func (rm *resourceManager) setSyncedCondition( - status *string, - r *resource, -) { - // determine whether the resource can be considered synced - syncedStatus := corev1.ConditionUnknown - if status != nil { - if *status == "active" { - syncedStatus = corev1.ConditionTrue - } else { - syncedStatus = corev1.ConditionFalse - } - - } - - // TODO: add utility function in a common repo to do the below as it's done at least once per resource - - // set existing condition to the above status (or create a new condition with this status) - ko := r.ko - var resourceSyncedCondition *ackv1alpha1.Condition = nil - for _, condition := range ko.Status.Conditions { - if condition.Type == ackv1alpha1.ConditionTypeResourceSynced { - resourceSyncedCondition = condition - break - } - } - if resourceSyncedCondition == nil { - resourceSyncedCondition = &ackv1alpha1.Condition{ - Type: ackv1alpha1.ConditionTypeResourceSynced, - Status: syncedStatus, - } - ko.Status.Conditions = append(ko.Status.Conditions, resourceSyncedCondition) - } else { - resourceSyncedCondition.Status = syncedStatus - } -} diff --git a/pkg/resource/user/sdk.go b/pkg/resource/user/sdk.go index ca1e8fc2..7ebb76f6 100644 --- a/pkg/resource/user/sdk.go +++ b/pkg/resource/user/sdk.go @@ -287,22 +287,6 @@ func (rm *resourceManager) newCreateRequestPayload( if r.ko.Spec.AccessString != nil { res.AccessString = r.ko.Spec.AccessString } - if r.ko.Spec.AuthenticationMode != nil { - f1 := &svcsdktypes.AuthenticationMode{} - if r.ko.Spec.AuthenticationMode.Passwords != nil { - f1f0 := []string{} - for _, f1f0iter := range r.ko.Spec.AuthenticationMode.Passwords { - var f1f0elem string - f1f0elem = f1f0iter - f1f0 = append(f1f0, f1f0elem) - } - f1.Passwords = f1f0 - } - if r.ko.Spec.AuthenticationMode.Type != nil { - f1.Type = svcsdktypes.InputAuthenticationType(*r.ko.Spec.AuthenticationMode.Type) - } - res.AuthenticationMode = f1 - } if 
r.ko.Spec.Engine != nil { res.Engine = r.ko.Spec.Engine } @@ -310,35 +294,35 @@ func (rm *resourceManager) newCreateRequestPayload( res.NoPasswordRequired = r.ko.Spec.NoPasswordRequired } if r.ko.Spec.Passwords != nil { - f4 := []string{} - for _, f4iter := range r.ko.Spec.Passwords { - var f4elem string - if f4iter != nil { - tmpSecret, err := rm.rr.SecretValueFromReference(ctx, f4iter) + f3 := []string{} + for _, f3iter := range r.ko.Spec.Passwords { + var f3elem string + if f3iter != nil { + tmpSecret, err := rm.rr.SecretValueFromReference(ctx, f3iter) if err != nil { return nil, ackrequeue.Needed(err) } if tmpSecret != "" { - f4elem = tmpSecret + f3elem = tmpSecret } } - f4 = append(f4, f4elem) + f3 = append(f3, f3elem) } - res.Passwords = f4 + res.Passwords = f3 } if r.ko.Spec.Tags != nil { - f5 := []svcsdktypes.Tag{} - for _, f5iter := range r.ko.Spec.Tags { - f5elem := &svcsdktypes.Tag{} - if f5iter.Key != nil { - f5elem.Key = f5iter.Key + f4 := []svcsdktypes.Tag{} + for _, f4iter := range r.ko.Spec.Tags { + f4elem := &svcsdktypes.Tag{} + if f4iter.Key != nil { + f4elem.Key = f4iter.Key } - if f5iter.Value != nil { - f5elem.Value = f5iter.Value + if f4iter.Value != nil { + f4elem.Value = f4iter.Value } - f5 = append(f5, *f5elem) + f4 = append(f4, *f4elem) } - res.Tags = f5 + res.Tags = f4 } if r.ko.Spec.UserID != nil { res.UserId = r.ko.Spec.UserID @@ -459,22 +443,6 @@ func (rm *resourceManager) newUpdateRequestPayload( ) (*svcsdk.ModifyUserInput, error) { res := &svcsdk.ModifyUserInput{} - if r.ko.Spec.AuthenticationMode != nil { - f1 := &svcsdktypes.AuthenticationMode{} - if r.ko.Spec.AuthenticationMode.Passwords != nil { - f1f0 := []string{} - for _, f1f0iter := range r.ko.Spec.AuthenticationMode.Passwords { - var f1f0elem string - f1f0elem = f1f0iter - f1f0 = append(f1f0, f1f0elem) - } - f1.Passwords = f1f0 - } - if r.ko.Spec.AuthenticationMode.Type != nil { - f1.Type = svcsdktypes.InputAuthenticationType(*r.ko.Spec.AuthenticationMode.Type) - } - 
res.AuthenticationMode = f1 - } if r.ko.Spec.Engine != nil { res.Engine = r.ko.Spec.Engine } diff --git a/pkg/resource/user_group/custom_set_output.go b/pkg/resource/user_group/custom_set_output.go deleted file mode 100644 index 11fd45f1..00000000 --- a/pkg/resource/user_group/custom_set_output.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package user_group - -import ( - "context" - - svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" - ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" - "github.com/aws/aws-sdk-go/service/elasticache" - corev1 "k8s.io/api/core/v1" -) - -func (rm *resourceManager) CustomDescribeUserGroupsSetOutput( - ctx context.Context, - r *resource, - resp *elasticache.DescribeUserGroupsOutput, - ko *svcapitypes.UserGroup, -) (*svcapitypes.UserGroup, error) { - elem := resp.UserGroups[0] - rm.customSetOutput(elem.UserIds, - elem.Engine, - elem.Status, - ko) - return ko, nil -} - -func (rm *resourceManager) CustomCreateUserGroupSetOutput( - ctx context.Context, - r *resource, - resp *elasticache.CreateUserGroupOutput, - ko *svcapitypes.UserGroup, -) (*svcapitypes.UserGroup, error) { - rm.customSetOutput(resp.UserIds, - resp.Engine, - resp.Status, - ko) - return ko, nil -} - -func (rm *resourceManager) customSetOutput( - userIds []*string, - engine *string, - status *string, - ko *svcapitypes.UserGroup, -) { - if userIds != nil { - 
ko.Spec.UserIDs = userIds - } - - if engine != nil { - ko.Spec.Engine = engine - } - - syncConditionStatus := corev1.ConditionUnknown - if status != nil { - if *status == "active" { - syncConditionStatus = corev1.ConditionTrue - } else { - syncConditionStatus = corev1.ConditionFalse - } - } - var resourceSyncedCondition *ackv1alpha1.Condition = nil - for _, condition := range ko.Status.Conditions { - if condition.Type == ackv1alpha1.ConditionTypeResourceSynced { - resourceSyncedCondition = condition - break - } - } - if resourceSyncedCondition == nil { - resourceSyncedCondition = &ackv1alpha1.Condition{ - Type: ackv1alpha1.ConditionTypeResourceSynced, - Status: syncConditionStatus, - } - ko.Status.Conditions = append(ko.Status.Conditions, resourceSyncedCondition) - } else { - resourceSyncedCondition.Status = syncConditionStatus - } -} diff --git a/pkg/resource/user_group/custom_update_api.go b/pkg/resource/user_group/hooks.go similarity index 63% rename from pkg/resource/user_group/custom_update_api.go rename to pkg/resource/user_group/hooks.go index 3563cd2b..a832dca8 100644 --- a/pkg/resource/user_group/custom_update_api.go +++ b/pkg/resource/user_group/hooks.go @@ -21,7 +21,8 @@ import ( ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" "github.com/aws-controllers-k8s/runtime/pkg/requeue" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + corev1 "k8s.io/api/core/v1" ) // Implements custom logic for UpdateUserGroup @@ -60,31 +61,31 @@ func (rm *resourceManager) customUpdateUserGroup( // User Ids to add { - var userIdsToAdd []*string + var userIdsToAdd []string for userId, include := range requiredUserIdsMap { if include { - userIdsToAdd = append(userIdsToAdd, &userId) + userIdsToAdd = append(userIdsToAdd, userId) } } - input.SetUserIdsToAdd(userIdsToAdd) + input.UserIdsToAdd = userIdsToAdd } // User Ids to 
remove { - var userIdsToRemove []*string + var userIdsToRemove []string for userId, include := range existingUserIdsMap { if include { - userIdsToRemove = append(userIdsToRemove, &userId) + userIdsToRemove = append(userIdsToRemove, userId) } } - input.SetUserIdsToRemove(userIdsToRemove) + input.UserIdsToRemove = userIdsToRemove } - resp, respErr := rm.sdkapi.ModifyUserGroupWithContext(ctx, input) + resp, respErr := rm.sdkapi.ModifyUserGroup(ctx, input) rm.metrics.RecordAPICall("UPDATE", "ModifyUserGroup", respErr) if respErr != nil { return nil, respErr @@ -105,18 +106,14 @@ func (rm *resourceManager) customUpdateUserGroup( if resp.PendingChanges.UserIdsToAdd != nil { f2f0 := []*string{} for _, f2f0iter := range resp.PendingChanges.UserIdsToAdd { - var f2f0elem string - f2f0elem = *f2f0iter - f2f0 = append(f2f0, &f2f0elem) + f2f0 = append(f2f0, &f2f0iter) } f2.UserIDsToAdd = f2f0 } if resp.PendingChanges.UserIdsToRemove != nil { f2f1 := []*string{} for _, f2f1iter := range resp.PendingChanges.UserIdsToRemove { - var f2f1elem string - f2f1elem = *f2f1iter - f2f1 = append(f2f1, &f2f1elem) + f2f1 = append(f2f1, &f2f1iter) } f2.UserIDsToRemove = f2f1 } @@ -127,9 +124,7 @@ func (rm *resourceManager) customUpdateUserGroup( if resp.ReplicationGroups != nil { f3 := []*string{} for _, f3iter := range resp.ReplicationGroups { - var f3elem string - f3elem = *f3iter - f3 = append(f3, &f3elem) + f3 = append(f3, &f3iter) } ko.Status.ReplicationGroups = f3 } else { @@ -142,7 +137,11 @@ func (rm *resourceManager) customUpdateUserGroup( } rm.setStatusDefaults(ko) - rm.customSetOutput(resp.UserIds, resp.Engine, resp.Status, ko) + rm.customSetOutput( + stringSliceToPointers(resp.UserIds), + resp.Engine, + resp.Status, + ko) return &resource{ko}, nil } } @@ -173,8 +172,86 @@ func (rm *resourceManager) newUpdateRequestPayload( res := &svcsdk.ModifyUserGroupInput{} if r.ko.Spec.UserGroupID != nil { - res.SetUserGroupId(*r.ko.Spec.UserGroupID) + res.UserGroupId = r.ko.Spec.UserGroupID } 
return res, nil } + +func (rm *resourceManager) CustomDescribeUserGroupsSetOutput( + ctx context.Context, + r *resource, + resp *svcsdk.DescribeUserGroupsOutput, + ko *svcapitypes.UserGroup, +) (*svcapitypes.UserGroup, error) { + elem := resp.UserGroups[0] + rm.customSetOutput( + stringSliceToPointers(elem.UserIds), + elem.Engine, + elem.Status, + ko) + return ko, nil +} + +func (rm *resourceManager) CustomCreateUserGroupSetOutput( + ctx context.Context, + r *resource, + resp *svcsdk.CreateUserGroupOutput, + ko *svcapitypes.UserGroup, +) (*svcapitypes.UserGroup, error) { + rm.customSetOutput( + stringSliceToPointers(resp.UserIds), + resp.Engine, + resp.Status, + ko) + return ko, nil +} + +func (rm *resourceManager) customSetOutput( + userIds []*string, + engine *string, + status *string, + ko *svcapitypes.UserGroup, +) { + if userIds != nil { + ko.Spec.UserIDs = userIds + } + + if engine != nil { + ko.Spec.Engine = engine + } + + syncConditionStatus := corev1.ConditionUnknown + if status != nil { + if *status == "active" { + syncConditionStatus = corev1.ConditionTrue + } else { + syncConditionStatus = corev1.ConditionFalse + } + } + var resourceSyncedCondition *ackv1alpha1.Condition = nil + for _, condition := range ko.Status.Conditions { + if condition.Type == ackv1alpha1.ConditionTypeResourceSynced { + resourceSyncedCondition = condition + break + } + } + if resourceSyncedCondition == nil { + resourceSyncedCondition = &ackv1alpha1.Condition{ + Type: ackv1alpha1.ConditionTypeResourceSynced, + Status: syncConditionStatus, + } + ko.Status.Conditions = append(ko.Status.Conditions, resourceSyncedCondition) + } else { + resourceSyncedCondition.Status = syncConditionStatus + } +} + +func stringSliceToPointers(slice []string) []*string { + ptrs := make([]*string, len(slice)) + for i, s := range slice { + s := s // Create new variable to avoid referencing loop variable + ptrs[i] = &s + } + return ptrs +} diff --git a/pkg/resource/user_group/sdk.go 
b/pkg/resource/user_group/sdk.go index 32af0446..90dce7c0 100644 --- a/pkg/resource/user_group/sdk.go +++ b/pkg/resource/user_group/sdk.go @@ -125,11 +125,6 @@ func (rm *resourceManager) sdkFind( } else { ko.Status.ReplicationGroups = nil } - if elem.ServerlessCaches != nil { - ko.Status.ServerlessCaches = aws.StringSlice(elem.ServerlessCaches) - } else { - ko.Status.ServerlessCaches = nil - } if elem.Status != nil { ko.Status.Status = elem.Status } else { @@ -247,11 +242,6 @@ func (rm *resourceManager) sdkCreate( } else { ko.Status.ReplicationGroups = nil } - if resp.ServerlessCaches != nil { - ko.Status.ServerlessCaches = aws.StringSlice(resp.ServerlessCaches) - } else { - ko.Status.ServerlessCaches = nil - } if resp.Status != nil { ko.Status.Status = resp.Status } else { diff --git a/pkg/util/tags.go b/pkg/util/tags.go index 9685019b..ac7b40ea 100644 --- a/pkg/util/tags.go +++ b/pkg/util/tags.go @@ -23,8 +23,8 @@ import ( ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + svcsdktypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" ) @@ -37,11 +37,11 @@ var requeueWaitWhileTagUpdated = ackrequeue.NeededAfter( // GetTags retrieves the resource's associated tags. 
func GetTags( ctx context.Context, - sdkapi elasticacheiface.ElastiCacheAPI, + sdkapi *svcsdk.Client, metrics *metrics.Metrics, resourceARN string, ) ([]*svcapitypes.Tag, error) { - resp, err := sdkapi.ListTagsForResourceWithContext( + resp, err := sdkapi.ListTagsForResource( ctx, &svcsdk.ListTagsForResourceInput{ ResourceName: &resourceARN, @@ -85,7 +85,7 @@ func SyncTags( latestTags []*svcapitypes.Tag, latestACKResourceMetadata *ackv1alpha1.ResourceMetadata, toACKTags func(tags []*svcapitypes.Tag) acktags.Tags, - sdkapi elasticacheiface.ElastiCacheAPI, + sdkapi *svcsdk.Client, metrics *metrics.Metrics, ) (err error) { rlog := ackrtlog.FromContext(ctx) @@ -113,17 +113,17 @@ func SyncTags( // so after adding or removing tags, we have to wait for the cache cluster to be available again // process: add tags -> requeue -> remove tags -> requeue -> other update if len(added) > 0 { - toAdd := make([]*svcsdk.Tag, 0, len(added)) + toAdd := make([]svcsdktypes.Tag, 0, len(added)) for key, val := range added { key, val := key, val - toAdd = append(toAdd, &svcsdk.Tag{ + toAdd = append(toAdd, svcsdktypes.Tag{ Key: &key, Value: &val, }) } rlog.Debug("adding tags to cache cluster", "tags", added) - _, err = sdkapi.AddTagsToResourceWithContext( + _, err = sdkapi.AddTagsToResource( ctx, &svcsdk.AddTagsToResourceInput{ ResourceName: arn, @@ -135,13 +135,13 @@ func SyncTags( return err } } else if len(removed) > 0 { - toRemove := make([]*string, 0, len(removed)) + toRemove := make([]string, 0, len(removed)) for key := range removed { key := key - toRemove = append(toRemove, &key) + toRemove = append(toRemove, key) } rlog.Debug("removing tags from cache cluster", "tags", removed) - _, err = sdkapi.RemoveTagsFromResourceWithContext( + _, err = sdkapi.RemoveTagsFromResource( ctx, &svcsdk.RemoveTagsFromResourceInput{ ResourceName: arn, diff --git a/templates/hooks/cache_cluster/sdk_update_post_set_output.go.tpl b/templates/hooks/cache_cluster/sdk_update_post_set_output.go.tpl index 
602943c0..07ffb00b 100644 --- a/templates/hooks/cache_cluster/sdk_update_post_set_output.go.tpl +++ b/templates/hooks/cache_cluster/sdk_update_post_set_output.go.tpl @@ -1,6 +1,6 @@ if pendingModifications := resp.CacheCluster.PendingModifiedValues; pendingModifications != nil { if pendingModifications.NumCacheNodes != nil { - ko.Spec.NumCacheNodes = pendingModifications.NumCacheNodes + ko.Spec.NumCacheNodes = Int64OrNil(pendingModifications.NumCacheNodes) } if pendingModifications.CacheNodeType != nil { ko.Spec.CacheNodeType = pendingModifications.CacheNodeType diff --git a/templates/hooks/replication_group/sdk_delete_post_request.go.tpl b/templates/hooks/replication_group/sdk_delete_post_request.go.tpl index e0ee7bb9..37fcb092 100644 --- a/templates/hooks/replication_group/sdk_delete_post_request.go.tpl +++ b/templates/hooks/replication_group/sdk_delete_post_request.go.tpl @@ -1,6 +1,6 @@ // delete call successful if err == nil { - rp, _ := rm.setReplicationGroupOutput(r, resp.ReplicationGroup) + rp, _ := rm.setReplicationGroupOutput(ctx, r, resp.ReplicationGroup) // Setting resource synced condition to false will trigger a requeue of // the resource. ackcondition.SetSynced( diff --git a/templates/hooks/replication_group/sdk_file_end.go.tpl b/templates/hooks/replication_group/sdk_file_end.go.tpl index bf8e8c80..45595375 100644 --- a/templates/hooks/replication_group/sdk_file_end.go.tpl +++ b/templates/hooks/replication_group/sdk_file_end.go.tpl @@ -1,8 +1,9 @@ // This method copies the data from given {{ .CRD.Names.Camel }} by populating it // into copy of supplied resource and returns that. 
func (rm *resourceManager) set{{ .CRD.Names.Camel }}Output ( + ctx context.Context, r *resource, - obj *svcsdk.{{ .CRD.Names.Camel }}, + obj *svcsdktypes.{{ .CRD.Names.Camel }}, ) (*resource, error) { if obj == nil || r == nil || diff --git a/templates/hooks/replication_group/sdk_file_end_set_output_post_populate.go.tpl b/templates/hooks/replication_group/sdk_file_end_set_output_post_populate.go.tpl new file mode 100644 index 00000000..ae519213 --- /dev/null +++ b/templates/hooks/replication_group/sdk_file_end_set_output_post_populate.go.tpl @@ -0,0 +1 @@ + rm.customSetOutput(ctx, *obj, ko) \ No newline at end of file diff --git a/templates/hooks/replication_group/sdk_update_post_build_request.go.tpl b/templates/hooks/replication_group/sdk_update_post_build_request.go.tpl index d8c6113e..448a2828 100644 --- a/templates/hooks/replication_group/sdk_update_post_build_request.go.tpl +++ b/templates/hooks/replication_group/sdk_update_post_build_request.go.tpl @@ -1,5 +1,5 @@ if !delta.DifferentAt("Spec.LogDeliveryConfigurations") { - input.SetLogDeliveryConfigurations(nil) + input.LogDeliveryConfigurations = nil } if delta.DifferentAt("UserGroupIDs") { for _, diff := range delta.Differences { @@ -9,7 +9,7 @@ // User groups to add { - var userGroupsToAdd []*string + var userGroupsToAdd []string for _, requiredUserGroup := range requiredUserGroups { found := false @@ -21,16 +21,18 @@ } if !found { - userGroupsToAdd = append(userGroupsToAdd, requiredUserGroup) + if requiredUserGroup != nil { + userGroupsToAdd = append(userGroupsToAdd, *requiredUserGroup) + } } } - input.SetUserGroupIdsToAdd(userGroupsToAdd) + input.UserGroupIdsToAdd = userGroupsToAdd } // User groups to remove { - var userGroupsToRemove []*string + var userGroupsToRemove []string for _, existingUserGroup := range existingUserGroups { found := false @@ -42,11 +44,13 @@ } if !found { - userGroupsToRemove = append(userGroupsToRemove, existingUserGroup) + if existingUserGroup != nil { + userGroupsToRemove = 
append(userGroupsToRemove, *existingUserGroup) + } } } - input.SetUserGroupIdsToRemove(userGroupsToRemove) + input.UserGroupIdsToRemove = userGroupsToRemove } } } diff --git a/test/e2e/bootstrap_resources.py b/test/e2e/bootstrap_resources.py index 8b5f1696..5a11e71f 100644 --- a/test/e2e/bootstrap_resources.py +++ b/test/e2e/bootstrap_resources.py @@ -91,4 +91,4 @@ def read_bootstrap_config(config_dir: Path, bootstrap_file_name: str = "bootstra path = config_dir / bootstrap_file_name with open(path, "r") as stream: bootstrap = yaml.safe_load(stream) - return bootstrap + return bootstrap \ No newline at end of file diff --git a/test/e2e/resources/replicationgroup_authtoken.yaml b/test/e2e/resources/replicationgroup_authtoken.yaml deleted file mode 100644 index 47483beb..00000000 --- a/test/e2e/resources/replicationgroup_authtoken.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -metadata: - name: $RG_ID -spec: - engine: redis - replicationGroupID: $RG_ID - description: Auth token test - cacheNodeType: cache.t3.micro - numNodeGroups: 1 - replicasPerNodeGroup: 0 - transitEncryptionEnabled: true - cacheSubnetGroupName: default - authToken: - namespace: default - name: $NAME - key: $KEY \ No newline at end of file diff --git a/test/e2e/resources/replicationgroup_cme_ngc.yaml b/test/e2e/resources/replicationgroup_cme_ngc.yaml deleted file mode 100644 index 6a36f200..00000000 --- a/test/e2e/resources/replicationgroup_cme_ngc.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# CME replication group. 
"NGC" means that the field nodeGroupConfiguration is specified, with a detailed configuration -# for each shard -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -metadata: - name: $RG_ID -spec: - cacheNodeType: cache.t3.micro - engine: redis - nodeGroupConfiguration: - - nodeGroupID: $NGID1 - primaryAvailabilityZone: us-west-2a - replicaAvailabilityZones: - - us-west-2b - - us-west-2c - replicaCount: 2 - - nodeGroupID: $NGID2 - primaryAvailabilityZone: us-west-2b - replicaAvailabilityZones: - - us-west-2c - - us-west-2a - replicaCount: 2 - description: cluster-mode enabled RG - replicationGroupID: $RG_ID \ No newline at end of file diff --git a/test/e2e/resources/replicationgroup_cmd_update.yaml b/test/e2e/resources/replicationgroup_create_delete.yaml similarity index 100% rename from test/e2e/resources/replicationgroup_cmd_update.yaml rename to test/e2e/resources/replicationgroup_create_delete.yaml diff --git a/test/e2e/resources/replicationgroup_input_coverage.yaml b/test/e2e/resources/replicationgroup_input_coverage.yaml deleted file mode 100644 index 544d4d28..00000000 --- a/test/e2e/resources/replicationgroup_input_coverage.yaml +++ /dev/null @@ -1,56 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -metadata: - name: $RG_ID -spec: - atRestEncryptionEnabled: true - autoMinorVersionUpgrade: true - automaticFailoverEnabled: true - cacheNodeType: cache.t3.small - cacheParameterGroupName: default.redis6.x.cluster.on - cacheSubnetGroupName: default - engine: redis - engineVersion: 6.x - kmsKeyID: $KMS_KEY_ID - multiAZEnabled: true - nodeGroupConfiguration: - - nodeGroupID: "1111" - primaryAvailabilityZone: us-west-2a - replicaAvailabilityZones: - - us-west-2b - replicaCount: 1 - slots: 0-5999 - - nodeGroupID: "2222" - primaryAvailabilityZone: us-west-2c - replicaAvailabilityZones: - - us-west-2a - - us-west-2c - - us-west-2b - replicaCount: 3 - slots: 6000-16383 - notificationTopicARN: $SNS_TOPIC_ARN - 
numNodeGroups: 2 - port: 6380 - preferredMaintenanceWindow: sun:23:00-mon:01:30 - description: test replication group for input field coverage - replicationGroupID: $RG_ID - securityGroupIDs: - - $SG_ID - snapshotRetentionLimit: 5 - snapshotWindow: 05:00-06:00 - tags: - - key: service - value: elasticache - - key: region - value: us-west-2 - transitEncryptionEnabled: true - userGroupIDs: - - $USERGROUP_ID - logDeliveryConfigurations: - - - destinationType: cloudwatch-logs - logFormat: json - logType: slow-log - destinationDetails: - cloudWatchLogsDetails: - logGroup: $LOG_GROUP \ No newline at end of file diff --git a/test/e2e/resources/replicationgroup_largecluster.yaml b/test/e2e/resources/replicationgroup_largecluster.yaml deleted file mode 100644 index 2daf10be..00000000 --- a/test/e2e/resources/replicationgroup_largecluster.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -metadata: - name: $RG_ID -spec: - cacheNodeType: cache.t3.micro - engine: redis - numNodeGroups: $NUM_NODE_GROUPS - replicasPerNodeGroup: $REPLICAS_PER_NODE_GROUP - description: large cluster mode enabled RG - replicationGroupID: $RG_ID \ No newline at end of file diff --git a/test/e2e/resources/replicationgroup_rpng.yaml b/test/e2e/resources/replicationgroup_rpng.yaml deleted file mode 100644 index 555e2b00..00000000 --- a/test/e2e/resources/replicationgroup_rpng.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# "RPNG" means that the field replicasPerNodeGroup is set (instead of nodeGroupConfiguration), -# meaning that this replication group has a uniform configuration across all shards. This is one of the more -# basic configurations as very few fields are specified and the resulting RG can either be CME or CMD -# (depending on the number of specified node groups). 
-apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -metadata: - name: $RG_ID -spec: - cacheNodeType: cache.t3.micro - engine: redis - numNodeGroups: $NUM_NODE_GROUPS - replicasPerNodeGroup: $REPLICAS_PER_NODE_GROUP - description: cluster-mode enabled RG - replicationGroupID: $RG_ID \ No newline at end of file diff --git a/test/e2e/resources/replicationgroup_cme_misc.yaml b/test/e2e/resources/replicationgroup_update.yaml similarity index 65% rename from test/e2e/resources/replicationgroup_cme_misc.yaml rename to test/e2e/resources/replicationgroup_update.yaml index f7faa0f4..2761fce4 100644 --- a/test/e2e/resources/replicationgroup_cme_misc.yaml +++ b/test/e2e/resources/replicationgroup_update.yaml @@ -1,5 +1,3 @@ -# CME replication group with some optional fields specified. The modification of these fields is typically quicker -# than other actions (such as scaling) which require provisioning/deletion of nodes. apiVersion: elasticache.services.k8s.aws/v1alpha1 kind: ReplicationGroup metadata: @@ -18,4 +16,4 @@ spec: - key: tag_to_remove value: should_be_removed - key: tag_to_update - value: old_value + value: old_value \ No newline at end of file diff --git a/test/e2e/service_cleanup.py b/test/e2e/service_cleanup.py index c0ea496c..76743a0f 100644 --- a/test/e2e/service_cleanup.py +++ b/test/e2e/service_cleanup.py @@ -148,4 +148,4 @@ def service_cleanup(config: dict): if __name__ == "__main__": bootstrap_config = read_bootstrap_config(bootstrap_directory) - service_cleanup(bootstrap_config) + service_cleanup(bootstrap_config) \ No newline at end of file diff --git a/test/e2e/tests/test_cache_cluster.py b/test/e2e/tests/test_cache_cluster.py index a05aabbb..7057da1f 100644 --- a/test/e2e/tests/test_cache_cluster.py +++ b/test/e2e/tests/test_cache_cluster.py @@ -39,6 +39,8 @@ def wait_for_cache_cluster_available(elasticache_client, cache_cluster_id): + """Wait for cache cluster to reach 'available' state using boto3 waiter. 
+ """ waiter = elasticache_client.get_waiter( 'cache_cluster_available', ) @@ -48,6 +50,8 @@ def wait_for_cache_cluster_available(elasticache_client, cache_cluster_id): def wait_until_deleted(elasticache_client, cache_cluster_id): + """Wait for cache cluster to be fully deleted using boto3 waiter. + """ waiter = elasticache_client.get_waiter( 'cache_cluster_deleted', ) @@ -57,6 +61,8 @@ def wait_until_deleted(elasticache_client, cache_cluster_id): def get_and_assert_status(ref: k8s.CustomResourceReference, expected_status: str, expected_synced: bool): + """Get the cache cluster status and assert it matches the expected status. + """ cr = k8s.get_resource(ref) assert cr is not None assert 'status' in cr diff --git a/test/e2e/tests/test_replicationgroup.py b/test/e2e/tests/test_replicationgroup.py index 252f4796..023d06bc 100644 --- a/test/e2e/tests/test_replicationgroup.py +++ b/test/e2e/tests/test_replicationgroup.py @@ -4,7 +4,7 @@ # not use this file except in compliance with the License. A copy of the # License is located at # -# http://aws.amazon.com/apache2.0/ +# http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. 
This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either @@ -23,8 +23,7 @@ from acktest.k8s import resource as k8s from e2e import service_marker, CRD_GROUP, CRD_VERSION, load_elasticache_resource from e2e.bootstrap_resources import get_bootstrap_resources -from e2e.util import retrieve_cache_cluster, assert_even_shards_replica_count, retrieve_replication_group, \ - assert_recoverable_condition_set, retrieve_replication_group_tags +from e2e.util import retrieve_cache_cluster, retrieve_replication_group, assert_recoverable_condition_set, retrieve_replication_group_tags RESOURCE_PLURAL = "replicationgroups" @@ -37,18 +36,6 @@ def rg_deletion_waiter(): return ec.get_waiter('replication_group_deleted') -# delete the replication group using the provided k8s reference, and use the elasticache deletion waiter -# to wait for server-side deletion -@pytest.fixture(scope="module") -def perform_teardown(rg_deletion_waiter): - def _perform_teardown(reference, rg_id): - k8s.delete_custom_resource(reference) - sleep(DEFAULT_WAIT_SECS) - rg_deletion_waiter.wait(ReplicationGroupId=rg_id) # throws exception if wait fails - - return _perform_teardown - - # retrieve resources created in the bootstrap step @pytest.fixture(scope="module") def bootstrap_resources(): @@ -75,30 +62,14 @@ def _make_replication_group(yaml_name, input_dict, rg_name): reference = k8s.CustomResourceReference( CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL, rg_name, namespace="default") _ = k8s.create_custom_resource(reference, rg) - resource = k8s.wait_resource_consumed_by_controller(reference, wait_periods=15, period_length=20) + resource = k8s.wait_resource_consumed_by_controller( + reference, wait_periods=15, period_length=20) assert resource is not None return (reference, resource) return _make_replication_group -@pytest.fixture(scope="module") -def rg_input_coverage(bootstrap_resources, make_rg_name, make_replication_group, perform_teardown): - input_dict = { - "RG_ID": 
make_rg_name("rg-input-coverage"), - "KMS_KEY_ID": bootstrap_resources.KmsKeyID, - "SNS_TOPIC_ARN": bootstrap_resources.SnsTopic1, - "SG_ID": bootstrap_resources.SecurityGroup1, - "USERGROUP_ID": bootstrap_resources.UserGroup1, - "LOG_GROUP": bootstrap_resources.CWLogGroup1 - } - - (reference, resource) = make_replication_group("replicationgroup_input_coverage", input_dict, input_dict["RG_ID"]) - yield (reference, resource) - - perform_teardown(reference, input_dict['RG_ID']) - - @pytest.fixture(scope="module") def secrets(): secrets = { @@ -107,108 +78,35 @@ def secrets(): "KEY1": "secret1", "KEY2": "secret2" } - k8s.create_opaque_secret("default", secrets['NAME1'], secrets['KEY1'], random_suffix_name("token", 32)) - k8s.create_opaque_secret("default", secrets['NAME2'], secrets['KEY2'], random_suffix_name("token", 32)) + k8s.create_opaque_secret( + "default", secrets['NAME1'], secrets['KEY1'], random_suffix_name("token", 32)) + k8s.create_opaque_secret( + "default", secrets['NAME2'], secrets['KEY2'], random_suffix_name("token", 32)) yield secrets - # teardown k8s.delete_secret("default", secrets['NAME1']) k8s.delete_secret("default", secrets['NAME2']) @pytest.fixture(scope="module") -def rg_auth_token(make_rg_name, make_replication_group, perform_teardown, secrets): - input_dict = { - "RG_ID": make_rg_name("rg-auth-token"), - "NAME": secrets['NAME1'], - "KEY": secrets['KEY1'] - } - (reference, resource) = make_replication_group("replicationgroup_authtoken", input_dict, input_dict["RG_ID"]) - yield (reference, resource) - - perform_teardown(reference, input_dict['RG_ID']) - - -@pytest.fixture(scope="module") -def rg_invalid_primary(make_rg_name, make_replication_group, perform_teardown): - input_dict = { - "RG_ID": make_rg_name("rg-invalid-primary"), - "PRIMARY_NODE": make_rg_name("node-dne") - } - (reference, resource) = make_replication_group("replicationgroup_primary_cluster", input_dict, input_dict['RG_ID']) - yield reference, resource - 
perform_teardown(reference, input_dict['RG_ID']) - - -@pytest.fixture(scope="module") -def rg_cmd_fromsnapshot(bootstrap_resources, make_rg_name, make_replication_group, perform_teardown): +def rg_cmd_fromsnapshot(bootstrap_resources, make_rg_name, make_replication_group, rg_deletion_waiter): input_dict = { "RG_ID": make_rg_name("rg-cmd-fromsnapshot"), "SNAPSHOT_NAME": bootstrap_resources.SnapshotName } - (reference, resource) = make_replication_group("replicationgroup_cmd_fromsnapshot", input_dict, input_dict["RG_ID"]) - yield (reference, resource) - - perform_teardown(reference, input_dict['RG_ID']) - - -@pytest.fixture(scope="module") -def rg_cme_uneven_shards_input(make_rg_name): - return { - "RG_ID": make_rg_name("rg-cme-uneven-shards"), - "NGID1": '"1111"', - "NGID2": '"2222"' - } - - -@pytest.fixture(scope="module") -def rg_cme_uneven_shards(rg_cme_uneven_shards_input, make_replication_group, perform_teardown): - (reference, resource) = make_replication_group("replicationgroup_cme_ngc", rg_cme_uneven_shards_input, - rg_cme_uneven_shards_input['RG_ID']) - yield reference, resource - perform_teardown(reference, rg_cme_uneven_shards_input['RG_ID']) - - -@pytest.fixture(scope="module") -def rg_cme_even_shards_input(make_rg_name): - return { - "RG_ID": make_rg_name("rg-cme-even-shards"), - "NUM_NODE_GROUPS": "2", - "REPLICAS_PER_NODE_GROUP": "2" - } - - -@pytest.fixture(scope="module") -def rg_cme_even_shards(rg_cme_even_shards_input, make_replication_group, perform_teardown): - (reference, resource) = make_replication_group("replicationgroup_rpng", rg_cme_even_shards_input, - rg_cme_even_shards_input['RG_ID']) - yield reference, resource - perform_teardown(reference, rg_cme_even_shards_input['RG_ID']) - - -@pytest.fixture(scope="module") -def rg_upgrade_ev_input(make_rg_name): - return { - "RG_ID": make_rg_name("rg-upgrade-ev"), - "ENGINE_VERSION": "5.0.0", - "NUM_NODE_GROUPS": "1", - "REPLICAS_PER_NODE_GROUP": "1" - } - - -@pytest.fixture(scope="module") -def 
rg_upgrade_ev(rg_upgrade_ev_input, make_replication_group, perform_teardown): - input_dict = rg_upgrade_ev_input - - (reference, resource) = make_replication_group("replicationgroup_cmd_update", input_dict, input_dict["RG_ID"]) + (reference, resource) = make_replication_group( + "replicationgroup_cmd_fromsnapshot", input_dict, input_dict["RG_ID"]) yield (reference, resource) - perform_teardown(reference, input_dict['RG_ID']) + k8s.delete_custom_resource(reference) + sleep(DEFAULT_WAIT_SECS) + # throws exception if wait fails + rg_deletion_waiter.wait(ReplicationGroupId=input_dict['RG_ID']) @pytest.fixture(scope="module") -def rg_update_misc_input(make_rg_name): +def rg_update_input(make_rg_name): return { "RG_ID": make_rg_name("rg-update-misc"), "PMW": "sun:23:00-mon:02:00", @@ -219,23 +117,14 @@ def rg_update_misc_input(make_rg_name): @pytest.fixture(scope="module") -def rg_update_misc(rg_update_misc_input, make_replication_group, perform_teardown): - (reference, resource) = make_replication_group("replicationgroup_cme_misc", rg_update_misc_input, - rg_update_misc_input['RG_ID']) +def rg_update(rg_update_input, make_replication_group, rg_deletion_waiter): + (reference, resource) = make_replication_group( + "replicationgroup_update", rg_update_input, rg_update_input['RG_ID']) yield reference, resource - perform_teardown(reference, rg_update_misc_input['RG_ID']) - - -# for test rg_update_misc: retrieve latest state and assert desired state -def assert_misc_fields(reference, rg_id, pmw, description, srl, sw): - resource = k8s.get_resource(reference) - cc = retrieve_cache_cluster(rg_id) - rg = retrieve_replication_group(rg_id) - assert cc is not None - assert cc['PreferredMaintenanceWindow'] == pmw - assert resource['spec']['description'] == description - assert rg['SnapshotRetentionLimit'] == srl - assert rg['SnapshotWindow'] == sw + k8s.delete_custom_resource(reference) + sleep(DEFAULT_WAIT_SECS) + # throws exception if wait fails + 
rg_deletion_waiter.wait(ReplicationGroupId=rg_update_input['RG_ID']) def assert_spec_tags(rg_id: str, spec_tags: list): @@ -264,299 +153,70 @@ def rg_fault_tolerance_input(make_rg_name): @pytest.fixture(scope="module") -def rg_fault_tolerance(rg_fault_tolerance_input, make_replication_group, perform_teardown): - (reference, resource) = make_replication_group("replicationgroup_fault_tolerance", rg_fault_tolerance_input, - rg_fault_tolerance_input['RG_ID']) - yield reference, resource - perform_teardown(reference, rg_fault_tolerance_input['RG_ID']) - - -@pytest.fixture(scope="module") -def rg_associate_resources_input(make_rg_name): - return { - "RG_ID": make_rg_name("rg-associate-resources"), - "NUM_NODE_GROUPS": "1", - "REPLICAS_PER_NODE_GROUP": "1" - } - - -@pytest.fixture(scope="module") -def rg_associate_resources(rg_associate_resources_input, make_replication_group, perform_teardown): - (reference, resource) = make_replication_group("replicationgroup_rpng", rg_associate_resources_input, - rg_associate_resources_input['RG_ID']) - - yield reference, resource - perform_teardown(reference, rg_associate_resources_input['RG_ID']) - - -# for test rg_associate_resources -def assert_associated_resources(rg_id, sg_list, sns_topic, ug_list): - rg = retrieve_replication_group(rg_id) - cc = retrieve_cache_cluster(rg_id) - assert len(cc['SecurityGroups']) == len(sg_list) - for sg in cc['SecurityGroups']: - assert sg['SecurityGroupId'] in sg_list - assert cc['NotificationConfiguration']['TopicArn'] == sns_topic - for ug_id in rg['UserGroupIds']: - assert ug_id in ug_list - - -@pytest.fixture(scope="module") -def rg_update_cpg_input(make_rg_name): - return { - "RG_ID": make_rg_name("rg-update-cpg"), - "ENGINE_VERSION": "6.x", - "NUM_NODE_GROUPS": "1", - "REPLICAS_PER_NODE_GROUP": "1" - } - - -@pytest.fixture(scope="module") -def rg_update_cpg(rg_update_cpg_input, make_replication_group, perform_teardown): - input_dict = rg_update_cpg_input - - (reference, resource) = 
make_replication_group("replicationgroup_cmd_update", input_dict, input_dict['RG_ID']) - yield reference, resource - - perform_teardown(reference, input_dict['RG_ID']) - - -@pytest.fixture(scope="module") -def rg_scale_vertically_input(make_rg_name): - return { - "RG_ID": make_rg_name("rg-scale-vertically"), - "NUM_NODE_GROUPS": "2", - "REPLICAS_PER_NODE_GROUP": "1" - } - - -@pytest.fixture(scope="module") -def rg_scale_vertically(rg_scale_vertically_input, make_replication_group, perform_teardown): - (reference, resource) = make_replication_group("replicationgroup_rpng", rg_scale_vertically_input, - rg_scale_vertically_input['RG_ID']) - - yield reference, resource - perform_teardown(reference, rg_scale_vertically_input['RG_ID']) - - -@pytest.fixture(scope="module") -def rg_scale_horizontally_input(make_rg_name): - return { - "RG_ID": make_rg_name("rg-scale-horizontally"), - "NUM_NODE_GROUPS": "2", - "REPLICAS_PER_NODE_GROUP": "1" - } - - -@pytest.fixture(scope="module") -def rg_scale_horizontally(rg_scale_horizontally_input, make_replication_group, perform_teardown): - (reference, resource) = make_replication_group("replicationgroup_rpng", rg_scale_horizontally_input, - rg_scale_horizontally_input['RG_ID']) - - yield reference, resource - perform_teardown(reference, rg_scale_horizontally_input['RG_ID']) - - -@pytest.fixture(scope="module") -def rg_log_delivery_input(make_rg_name): - return { - "RG_ID": make_rg_name("rg-log-delivery"), - "NUM_NODE_GROUPS": "1", - "REPLICAS_PER_NODE_GROUP": "1" - } - - -@pytest.fixture(scope="module") -def rg_log_delivery(rg_log_delivery_input, make_replication_group, perform_teardown): - (reference, resource) = make_replication_group("replicationgroup_rpng", rg_log_delivery_input, - rg_log_delivery_input['RG_ID']) +def rg_fault_tolerance(rg_fault_tolerance_input, make_replication_group, rg_deletion_waiter): + (reference, resource) = make_replication_group( + "replicationgroup_fault_tolerance", rg_fault_tolerance_input, 
rg_fault_tolerance_input['RG_ID']) yield reference, resource - perform_teardown(reference, rg_log_delivery_input['RG_ID']) - - -# assert that the latest state of the replication group matches the desired configuration -def assert_log_delivery_config(reference, config): - resource = k8s.get_resource(reference) - - # if log delivery is disabled, logDeliveryConfigurations should be empty or none - if not config['enabled']: - assert 'logDeliveryConfigurations' not in resource['status'] - else: - latest = resource['status']['logDeliveryConfigurations'][0] - assert latest['status'] == "active" - assert latest['destinationDetails'] == config['destinationDetails'] - assert latest['destinationType'] == config['destinationType'] - assert latest['logFormat'] == config['logFormat'] - assert latest['logType'] == config['logType'] - - -@pytest.fixture(scope="module") -def rg_deletion_input(make_rg_name): - return { - "RG_ID": make_rg_name("rg-delete"), - "ENGINE_VERSION": "6.x", - "NUM_NODE_GROUPS": "1", - "REPLICAS_PER_NODE_GROUP": "1" - } - - -@pytest.fixture(scope="module") -def rg_deletion(rg_deletion_input, make_replication_group): - input_dict = rg_deletion_input - - (reference, resource) = make_replication_group("replicationgroup_cmd_update", input_dict, input_dict["RG_ID"]) - return (reference, resource) # no teardown, as the teardown is part of the actual test + k8s.delete_custom_resource(reference) + sleep(DEFAULT_WAIT_SECS) + # throws exception if wait fails + rg_deletion_waiter.wait( + ReplicationGroupId=rg_fault_tolerance_input['RG_ID']) @service_marker class TestReplicationGroup: - - def test_rg_input_coverage(self, rg_input_coverage): - (reference, _) = rg_input_coverage - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) - def test_rg_cmd_fromsnapshot(self, rg_cmd_fromsnapshot): (reference, _) = rg_cmd_fromsnapshot - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) - - # if 
primaryClusterID is a nonexistent node, the recoverable condition should be set - def test_rg_invalid_primary(self, rg_invalid_primary): - (reference, _) = rg_invalid_primary - sleep(DEFAULT_WAIT_SECS) - - resource = k8s.get_resource(reference) - assert_recoverable_condition_set(resource) + assert k8s.wait_on_condition( + reference, "ACK.ResourceSynced", "True", wait_periods=90) - # increase and decrease replica counts per-shard in a CME RG - @pytest.mark.blocked # TODO: remove when passing - def test_rg_cme_uneven_shards(self, rg_cme_uneven_shards, rg_cme_uneven_shards_input): - (reference, _) = rg_cme_uneven_shards - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) - ngid1 = rg_cme_uneven_shards_input['NGID1'][1:-1] # need to strip double quotes off node group ID - ngid2 = rg_cme_uneven_shards_input['NGID2'][1:-1] - - # assert initial state - resource = k8s.get_resource(reference) - assert len(resource['status']['nodeGroups']) == 2 - for ng in resource['status']['nodeGroups']: - if ng['nodeGroupID'] == ngid1: - assert len(ng['nodeGroupMembers']) == 3 - elif ng['nodeGroupID'] == ngid2: - assert len(ng['nodeGroupMembers']) == 3 - else: # node group with unknown ID - assert False - - # increase replica count of first shard, decrease replica count of second, and wait for resource to sync - patch = {"spec": {"nodeGroupConfiguration": [ - { - "nodeGroupID": ngid1, - "primaryAvailabilityZone": "us-west-2a", - "replicaAvailabilityZones": ["us-west-2b", "us-west-2c", "us-west-2a"], - "replicaCount": 3 - }, - { - "nodeGroupID": ngid2, - "primaryAvailabilityZone": "us-west-2b", - "replicaAvailabilityZones": ["us-west-2c"], - "replicaCount": 1 - } - ] - } + def test_rg_invalid_primary(self, make_rg_name, make_replication_group, rg_deletion_waiter): + input_dict = { + "RG_ID": make_rg_name("rg-invalid-primary"), + "PRIMARY_NODE": make_rg_name("node-dne") } - _ = k8s.patch_custom_resource(reference, patch) - sleep(DEFAULT_WAIT_SECS) - 
assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) + (reference, resource) = make_replication_group( + "replicationgroup_primary_cluster", input_dict, input_dict['RG_ID']) - # assert new state - resource = k8s.get_resource(reference) - assert len(resource['status']['nodeGroups']) == 2 - for ng in resource['status']['nodeGroups']: - if ng['nodeGroupID'] == ngid1: - assert len(ng['nodeGroupMembers']) == 4 - elif ng['nodeGroupID'] == ngid2: - assert len(ng['nodeGroupMembers']) == 2 - else: # node group with unknown ID - assert False - - # increase and decrease replica count evenly across all shards in a CME RG - def test_rg_cme_even_shards(self, rg_cme_even_shards, rg_cme_even_shards_input): - (reference, _) = rg_cme_even_shards - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) - nng = int(rg_cme_even_shards_input['NUM_NODE_GROUPS']) - rpng = int(rg_cme_even_shards_input['REPLICAS_PER_NODE_GROUP']) - - # assert initial state - resource = k8s.get_resource(reference) - assert len(resource['status']['nodeGroups']) == nng - assert_even_shards_replica_count(resource, rpng) - - # increase replica count, wait for resource to sync - rpng += 1 - patch = {"spec": {"replicasPerNodeGroup": rpng}} - _ = k8s.patch_custom_resource(reference, patch) - sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) - - # assert replica count has increased - resource = k8s.get_resource(reference) - assert len(resource['status']['nodeGroups']) == nng - assert_even_shards_replica_count(resource, rpng) - - # decrease replica count, wait for resource to sync - rpng -= 2 - patch = {"spec": {"replicasPerNodeGroup": rpng}} - _ = k8s.patch_custom_resource(reference, patch) sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) - - # assert replica count has decreased resource = k8s.get_resource(reference) - 
assert len(resource['status']['nodeGroups']) == nng - assert_even_shards_replica_count(resource, rpng) - - # test update behavior of controller (engine version and replica count) - def test_rg_upgrade_ev(self, rg_upgrade_ev_input, rg_upgrade_ev): - (reference, _) = rg_upgrade_ev - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) - - # assert initial state - cc = retrieve_cache_cluster(rg_upgrade_ev_input['RG_ID']) - assert cc is not None - assert cc['EngineVersion'] == rg_upgrade_ev_input['ENGINE_VERSION'] + assert_recoverable_condition_set(resource) - # upgrade engine version, wait for resource to sync - desired_engine_version = "5.0.6" - patch = {"spec": {"engineVersion": desired_engine_version}} - _ = k8s.patch_custom_resource(reference, patch) + # Cleanup + k8s.delete_custom_resource(reference) sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) - - # assert new state after upgrading engine version - resource = k8s.get_resource(reference) - assert resource['status']['status'] == "available" - assert resource['spec']['engineVersion'] == desired_engine_version - cc = retrieve_cache_cluster(rg_upgrade_ev_input['RG_ID']) - assert cc is not None - assert cc['EngineVersion'] == desired_engine_version + # throws exception if wait fails + rg_deletion_waiter.wait(ReplicationGroupId=input_dict['RG_ID']) # test update of fields that can be changed quickly - def test_rg_update_misc(self, rg_update_misc_input, rg_update_misc): - (reference, _) = rg_update_misc - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) + + def test_rg_update(self, rg_update_input, rg_update): + (reference, _) = rg_update + assert k8s.wait_on_condition( + reference, "ACK.ResourceSynced", "True", wait_periods=90) # desired initial state - pmw = rg_update_misc_input['PMW'] - description = rg_update_misc_input['DESCRIPTION'] - srl = int(rg_update_misc_input['SRL']) - 
sw = rg_update_misc_input['SW'] + pmw = rg_update_input['PMW'] + description = rg_update_input['DESCRIPTION'] + srl = int(rg_update_input['SRL']) + sw = rg_update_input['SW'] tags = [ {"key": "tag_to_remove", "value": "should_be_removed"}, {"key": "tag_to_update", "value": "old_value"} ] # assert initial state - rg_id = rg_update_misc_input['RG_ID'] - assert_misc_fields(reference, rg_id, pmw, description, srl, sw) + rg_id = rg_update_input['RG_ID'] + + resource = k8s.get_resource(reference) + cc = retrieve_cache_cluster(rg_id) + rg = retrieve_replication_group(rg_id) + assert cc is not None + assert cc['PreferredMaintenanceWindow'] == pmw + assert resource['spec']['description'] == description + assert rg['SnapshotRetentionLimit'] == srl + assert rg['SnapshotWindow'] == sw assert_spec_tags(rg_id, tags) # change field values, wait for resource to sync @@ -573,24 +233,31 @@ def test_rg_update_misc(self, rg_update_misc_input, rg_update_misc): "description": description, "snapshotRetentionLimit": srl, "snapshotWindow": sw, - } - } + }} _ = k8s.patch_custom_resource(reference, patch) sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) + assert k8s.wait_on_condition( + reference, "ACK.ResourceSynced", "True", wait_periods=90) - # assert new state - assert_misc_fields(reference, rg_id, pmw, description, srl, sw) + # Assert new state + resource = k8s.get_resource(reference) + cc = retrieve_cache_cluster(rg_id) + rg = retrieve_replication_group(rg_id) + assert cc is not None + assert cc['PreferredMaintenanceWindow'] == pmw + assert resource['spec']['description'] == description + assert rg['SnapshotRetentionLimit'] == srl + assert rg['SnapshotWindow'] == sw patch = {"spec": { - "tags": new_tags - } - } + "tags": new_tags + }} _ = k8s.patch_custom_resource(reference, patch) # patching tags can make cluster unavailable for a while(status: modifying) LONG_WAIT_SECS = 180 sleep(LONG_WAIT_SECS) - assert 
k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) + assert k8s.wait_on_condition( + reference, "ACK.ResourceSynced", "True", wait_periods=90) # assert new tags assert_spec_tags(rg_id, new_tags) @@ -598,7 +265,8 @@ def test_rg_update_misc(self, rg_update_misc_input, rg_update_misc): # test modifying properties related to tolerance: replica promotion, multi AZ, automatic failover def test_rg_fault_tolerance(self, rg_fault_tolerance): (reference, _) = rg_fault_tolerance - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) + assert k8s.wait_on_condition( + reference, "ACK.ResourceSynced", "True", wait_periods=90) # assert initial state resource = k8s.get_resource(reference) @@ -618,10 +286,12 @@ def test_rg_fault_tolerance(self, rg_fault_tolerance): assert node1 is not None and node2 is not None # disable both fields, wait for resource to sync - patch = {"spec": {"automaticFailoverEnabled": False, "multiAZEnabled": False}} + patch = {"spec": {"automaticFailoverEnabled": False, + "multiAZEnabled": False}} _ = k8s.patch_custom_resource(reference, patch) sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) + assert k8s.wait_on_condition( + reference, "ACK.ResourceSynced", "True", wait_periods=90) # assert new state resource = k8s.get_resource(reference) @@ -629,10 +299,12 @@ def test_rg_fault_tolerance(self, rg_fault_tolerance): assert resource['status']['multiAZ'] == "disabled" # promote replica to primary, re-enable both multi AZ and AF - patch = {"spec": {"primaryClusterID": node2, "automaticFailoverEnabled": True, "multiAZEnabled": True}} + patch = {"spec": {"primaryClusterID": node2, + "automaticFailoverEnabled": True, "multiAZEnabled": True}} _ = k8s.patch_custom_resource(reference, patch) sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) + assert k8s.wait_on_condition( + 
reference, "ACK.ResourceSynced", "True", wait_periods=90) # assert roles resource = k8s.get_resource(reference) @@ -650,184 +322,19 @@ def test_rg_fault_tolerance(self, rg_fault_tolerance): assert resource['status']['automaticFailover'] == "enabled" assert resource['status']['multiAZ'] == "enabled" - # test association and disassociation of other resources (VPC security groups, SNS topic, user groups) - @pytest.mark.blocked # TODO: remove when passing - def test_rg_associate_resources(self, rg_associate_resources_input, rg_associate_resources, bootstrap_resources): - (reference, _) = rg_associate_resources - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) - - # associate resources, wait for RG to sync - sg_list = [bootstrap_resources.SecurityGroup1, bootstrap_resources.SecurityGroup2] - sns_topic = bootstrap_resources.SnsTopic1 - ug_list = [bootstrap_resources.UserGroup1] - patch = {"spec": {"securityGroupIDs": sg_list, "notificationTopicARN": sns_topic, "userGroupIDs": ug_list}} - _ = k8s.patch_custom_resource(reference, patch) - sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) - - # assert new state - assert_associated_resources(rg_associate_resources_input['RG_ID'], sg_list, sns_topic, ug_list) - - # change associated resources - sg_list = [bootstrap_resources.SecurityGroup2] - sns_topic = bootstrap_resources.SnsTopic2 - ug_list = [bootstrap_resources.UserGroup2] - patch = {"spec": {"securityGroupIDs": sg_list, "notificationTopicARN": sns_topic, "userGroupIDs": ug_list}} - _ = k8s.patch_custom_resource(reference, patch) - sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) - - # assert new state - assert_associated_resources(rg_associate_resources_input['RG_ID'], sg_list, sns_topic, ug_list) - - def test_rg_update_cpg(self, rg_update_cpg_input, rg_update_cpg, bootstrap_resources): - # wait for 
resource to sync and retrieve initial state - (reference, _) = rg_update_cpg - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) - - # update, wait for resource to sync - patch = {"spec": {"cacheParameterGroupName": bootstrap_resources.CPGName}} - _ = k8s.patch_custom_resource(reference, patch) - sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=5) # should be immediate - - # assert new state - cc = retrieve_cache_cluster(rg_update_cpg_input['RG_ID']) - assert cc['CacheParameterGroup']['CacheParameterGroupName'] == bootstrap_resources.CPGName - - @pytest.mark.blocked # TODO: remove when passing - def test_rg_scale_vertically(self, rg_scale_vertically_input, rg_scale_vertically): - (reference, _) = rg_scale_vertically - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) - - # assert initial state - rg = retrieve_replication_group(rg_scale_vertically_input['RG_ID']) - assert rg['CacheNodeType'] == "cache.t3.micro" - - # scale up - cnt = "cache.t3.medium" - patch = {"spec": {"cacheNodeType": cnt}} - _ = k8s.patch_custom_resource(reference, patch) - sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) - - # assert scale up complete - rg = retrieve_replication_group(rg_scale_vertically_input['RG_ID']) - assert rg['CacheNodeType'] == cnt - - # scale down - cnt = "cache.t3.small" - patch = {"spec": {"cacheNodeType": cnt}} - _ = k8s.patch_custom_resource(reference, patch) - sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) - - # assert scale down complete - rg = retrieve_replication_group(rg_scale_vertically_input['RG_ID']) - assert rg['CacheNodeType'] == cnt - - @pytest.mark.blocked # TODO: remove when passing - def test_rg_scale_horizontally(self, rg_scale_horizontally_input, rg_scale_horizontally): - (reference, _) = 
rg_scale_horizontally - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) - - # assert initial state - rg = retrieve_replication_group(rg_scale_horizontally_input['RG_ID']) - nng = int(rg_scale_horizontally_input['NUM_NODE_GROUPS']) - assert len(rg['NodeGroups']) == nng - - # scale out - nng += 1 - patch = {"spec": {"numNodeGroups": nng}} - _ = k8s.patch_custom_resource(reference, patch) - sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) - - # assert scale out complete - rg = retrieve_replication_group(rg_scale_horizontally_input['RG_ID']) - assert len(rg['NodeGroups']) == nng - - # scale in - nng -= 2 - patch = {"spec": {"numNodeGroups": nng}} - _ = k8s.patch_custom_resource(reference, patch) - sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) - - # assert scale in complete - rg = retrieve_replication_group(rg_scale_horizontally_input['RG_ID']) - assert len(rg['NodeGroups']) == nng - - # add and modify log delivery configuration to replication group - def test_rg_log_delivery(self, rg_log_delivery_input, rg_log_delivery, bootstrap_resources): - (reference, _) = rg_log_delivery - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) - - # add log delivery config - config = { - "destinationDetails": { - "cloudWatchLogsDetails": { - "logGroup": bootstrap_resources.CWLogGroup1 - } - }, - "destinationType": "cloudwatch-logs", - "enabled": True, - "logFormat": "json", - "logType": "slow-log" + def test_rg_creation_deletion(self, make_rg_name, make_replication_group, rg_deletion_waiter): + input_dict = { + "RG_ID": make_rg_name("rg-delete"), + "ENGINE_VERSION": "6.x", + "NUM_NODE_GROUPS": "1", + "REPLICAS_PER_NODE_GROUP": "1" } - patch = {"spec": {"logDeliveryConfigurations": [config]}} - k8s.patch_custom_resource(reference, patch) - sleep(DEFAULT_WAIT_SECS) - assert 
k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) - # assert log delivery added - assert_log_delivery_config(reference, config) - - # change target log group and log format - config['destinationDetails']['cloudWatchLogsDetails']['logGroup'] = bootstrap_resources.CWLogGroup2 - config['logFormat'] = "text" - patch = {"spec": {"logDeliveryConfigurations": [config]}} - k8s.patch_custom_resource(reference, patch) - sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) - - # assert configuration modified - assert_log_delivery_config(reference, config) - - # change to nonexistent log group and ensure error status/message found - config['destinationDetails']['cloudWatchLogsDetails']['logGroup'] = random_suffix_name("dne", 16) - patch = {"spec": {"logDeliveryConfigurations": [config]}} - k8s.patch_custom_resource(reference, patch) - sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) - - # assert error message present - resource = k8s.get_resource(reference) - latest = resource['status']['logDeliveryConfigurations'][0] - assert 'does not exist' in latest['message'] - - # disable log delivery - config = {"logType": "slow-log", "enabled": False} - patch = {"spec": {"logDeliveryConfigurations": [config]}} - k8s.patch_custom_resource(reference, patch) - sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) - - # assert log delivery disabled - assert_log_delivery_config(reference, config) - - def test_rg_auth_token(self, rg_auth_token, secrets): - (reference, _) = rg_auth_token - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) - - patch = {"spec": {"authToken": {"name": secrets['NAME2'], "key": secrets['KEY2']}}} - k8s.patch_custom_resource(reference, patch) - sleep(DEFAULT_WAIT_SECS) - assert k8s.wait_on_condition(reference, 
"ACK.ResourceSynced", "True", wait_periods=90) + (reference, resource) = make_replication_group( + "replicationgroup_create_delete", input_dict, input_dict["RG_ID"]) - def test_rg_deletion(self, rg_deletion_input, rg_deletion, rg_deletion_waiter): - (reference, _) = rg_deletion - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=90) + assert k8s.wait_on_condition( + reference, "ACK.ResourceSynced", "True", wait_periods=90) # assertions after initial creation resource = k8s.get_resource(reference) @@ -839,7 +346,5 @@ def test_rg_deletion(self, rg_deletion_input, rg_deletion, rg_deletion_waiter): resource = k8s.get_resource(reference) assert resource['metadata']['deletionTimestamp'] is not None - # TODO: uncomment when reconciler->cleanup() invokes patchResource() - # assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "False", wait_periods=1) - rg_deletion_waiter.wait(ReplicationGroupId=rg_deletion_input["RG_ID"]) + rg_deletion_waiter.wait(ReplicationGroupId=input_dict["RG_ID"]) diff --git a/test/e2e/tests/test_replicationgroup_largecluster.py b/test/e2e/tests/test_replicationgroup_largecluster.py deleted file mode 100644 index ff056b68..00000000 --- a/test/e2e/tests/test_replicationgroup_largecluster.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"). You may -# not use this file except in compliance with the License. A copy of the -# License is located at -# -# http://aws.amazon.com/apache2.0/ -# -# or in the "license" file accompanying this file. This file is distributed -# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -# express or implied. See the License for the specific language governing -# permissions and limitations under the License. 
- -"""Large cluster test for replication group resource -""" - -import pytest - -from time import sleep -from acktest.k8s import resource as k8s - -from e2e.tests.test_replicationgroup import make_replication_group, rg_deletion_waiter, make_rg_name, DEFAULT_WAIT_SECS -from e2e.util import provide_node_group_configuration - -@pytest.fixture(scope="module") -def rg_largecluster_input(make_rg_name): - return { - "RG_ID": make_rg_name("rg-large-cluster"), - "NUM_NODE_GROUPS": "125", - "REPLICAS_PER_NODE_GROUP": "3" - } - -@pytest.fixture(scope="module") -def rg_largecluster(rg_largecluster_input, make_replication_group, rg_deletion_waiter): - input_dict = rg_largecluster_input - - (reference, resource) = make_replication_group("replicationgroup_largecluster", input_dict, input_dict["RG_ID"]) - yield (reference, resource) - - # teardown - k8s.delete_custom_resource(reference) - sleep(DEFAULT_WAIT_SECS) - rg_deletion_waiter.wait(ReplicationGroupId=input_dict["RG_ID"]) - -class TestReplicationGroupLargeCluster: - - @pytest.mark.slow - def test_rg_largecluster(self, rg_largecluster_input, rg_largecluster): - (reference, _) = rg_largecluster - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=240) - - # assertions after initial creation - desired_node_groups = int(rg_largecluster_input['NUM_NODE_GROUPS']) - desired_replica_count = int(rg_largecluster_input['REPLICAS_PER_NODE_GROUP']) - desired_total_nodes = (desired_node_groups * (1 + desired_replica_count)) - resource = k8s.get_resource(reference) - assert resource['status']['status'] == "available" - assert len(resource['status']['nodeGroups']) == desired_node_groups - assert len(resource['status']['memberClusters']) == desired_total_nodes - - # update, wait for resource to sync - desired_node_groups = desired_node_groups - 10 - desired_total_nodes = (desired_node_groups * (1 + desired_replica_count)) - patch = {"spec": {"numNodeGroups": desired_node_groups, - "nodeGroupConfiguration": 
provide_node_group_configuration(desired_node_groups)}} - _ = k8s.patch_custom_resource(reference, patch) - sleep(DEFAULT_WAIT_SECS) # required as controller has likely not placed the resource in modifying - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=240) - - # assert new state after scaling in - resource = k8s.get_resource(reference) - assert resource['status']['status'] == "available" - assert len(resource['status']['nodeGroups']) == desired_node_groups - assert len(resource['status']['memberClusters']) == desired_total_nodes \ No newline at end of file diff --git a/test/e2e/tests/test_user.py b/test/e2e/tests/test_user.py index f85a1852..b0f5a9e4 100644 --- a/test/e2e/tests/test_user.py +++ b/test/e2e/tests/test_user.py @@ -128,8 +128,6 @@ def test_user_nopass(self, user_nopass, user_nopass_input): resource = k8s.get_resource(reference) assert resource["status"]["lastRequestedAccessString"] == new_access_string - #TODO: add terminal condition checks - # test creation with Passwords specified (as k8s secrets) def test_user_password(self, user_password, user_password_input): (reference, resource) = user_password diff --git a/test/e2e/util.py b/test/e2e/util.py index 9a01988b..cc0cb46d 100644 --- a/test/e2e/util.py +++ b/test/e2e/util.py @@ -91,16 +91,6 @@ def assert_user_deletion(user_id: str): except ec.exceptions.UserNotFoundFault: pass # we only expect this particular exception (if deletion has already completed) - -# given "rg" (the k8s object representing a replication group), assert that: -# 1) there are non-zero amount of node groups -# 2) the number of replicas in every node group equals desired_replica_count -def assert_even_shards_replica_count(rg, desired_replica_count): - assert len(rg['status']['nodeGroups']) != 0 - for ng in rg['status']['nodeGroups']: - assert len(ng['nodeGroupMembers']) == (desired_replica_count + 1) - - # TODO: move to common repository # given the latest state of the resource, assert that 
the terminal condition is set def assert_terminal_condition_set(resource): @@ -150,4 +140,4 @@ def retrieve_replication_group(rg_id: str): def retrieve_replication_group_tags(rg_arn: str): taglist_response = ec.list_tags_for_resource(ResourceName=rg_arn) - return taglist_response['TagList'] + return taglist_response['TagList'] \ No newline at end of file