From be2a839fc45376b72b24ab128d63418f3712c9b7 Mon Sep 17 00:00:00 2001 From: "Maximilian Blatt (external expert on behalf of DB Netz)" Date: Fri, 6 Oct 2023 18:08:36 +0200 Subject: [PATCH] feat: Bump aws-sdk-go to v1.44.334 Update some generated resources as well. Signed-off-by: Maximilian Blatt (external expert on behalf of DB Netz) --- apis/apigateway/v1alpha1/zz_api_key.go | 4 +- apis/apigateway/v1alpha1/zz_domain_name.go | 12 +- .../v1alpha1/zz_generated.deepcopy.go | 10 + apis/apigateway/v1alpha1/zz_rest_api.go | 4 +- apis/apigateway/v1alpha1/zz_types.go | 2 + apis/apigateway/v1alpha1/zz_usage_plan.go | 4 +- apis/apigateway/v1alpha1/zz_vpc_link.go | 4 +- apis/athena/generator-config.yaml | 1 + apis/athena/v1alpha1/zz_enums.go | 19 + apis/athena/v1alpha1/zz_generated.deepcopy.go | 35 + apis/athena/v1alpha1/zz_types.go | 54 +- apis/athena/v1alpha1/zz_work_group.go | 5 +- apis/autoscaling/generator-config.yaml | 3 + .../v1beta1/zz_auto_scaling_group.go | 65 +- apis/autoscaling/v1beta1/zz_enums.go | 32 +- .../v1beta1/zz_generated.deepcopy.go | 50 +- apis/autoscaling/v1beta1/zz_types.go | 13 + apis/batch/v1alpha1/zz_compute_environment.go | 13 +- apis/batch/v1alpha1/zz_enums.go | 12 +- apis/batch/v1alpha1/zz_generated.deepcopy.go | 45 + apis/batch/v1alpha1/zz_types.go | 12 + apis/cloudfront/v1alpha1/zz_enums.go | 7 +- apis/cloudfront/v1alpha1/zz_types.go | 36 +- apis/cloudwatchlogs/v1alpha1/zz_enums.go | 18 + .../v1alpha1/zz_generated.deepcopy.go | 36 + apis/cloudwatchlogs/v1alpha1/zz_types.go | 11 +- .../v1alpha1/zz_enums.go | 12 + .../v1alpha1/zz_generated.deepcopy.go | 40 + .../v1alpha1/zz_group.go | 6 +- .../v1alpha1/zz_identity_provider.go | 6 +- .../v1alpha1/zz_types.go | 19 +- .../v1alpha1/zz_user_pool.go | 41 +- .../v1alpha1/zz_user_pool_client.go | 30 +- .../v1alpha1/zz_generated.deepcopy.go | 15 +- apis/dynamodb/v1alpha1/zz_table.go | 3 + apis/dynamodb/v1alpha1/zz_types.go | 4 +- apis/ec2/generator-config.yaml | 2 + apis/ec2/v1alpha1/zz_enums.go | 256 +- apis/ec2/v1alpha1/zz_flow_log.go | 4 +- apis/ec2/v1alpha1/zz_generated.deepcopy.go | 762 +- .../v1alpha1/zz_launch_template_version.go | 7 + apis/ec2/v1alpha1/zz_types.go | 324 +- apis/ec2/v1alpha1/zz_volume.go | 8 +- apis/ec2/v1alpha1/zz_vpc_endpoint.go | 11 +- apis/ecs/v1alpha1/zz_cluster.go | 16 +- apis/ecs/v1alpha1/zz_enums.go | 7 +- apis/ecs/v1alpha1/zz_generated.deepcopy.go | 11 + apis/ecs/v1alpha1/zz_service.go | 16 +- apis/ecs/v1alpha1/zz_task_definition.go | 6 +- apis/ecs/v1alpha1/zz_types.go | 95 +- apis/efs/v1alpha1/zz_access_point.go | 2 +- apis/efs/v1alpha1/zz_enums.go | 2 + apis/efs/v1alpha1/zz_file_system.go | 4 +- apis/eks/v1alpha1/zz_enums.go | 52 +- .../v1alpha1/zz_cache_parameter_group.go | 2 +- apis/elasticache/v1alpha1/zz_enums.go | 8 + apis/elbv2/v1alpha1/zz_enums.go | 7 + apis/elbv2/v1alpha1/zz_generated.deepcopy.go | 10 + apis/elbv2/v1alpha1/zz_load_balancer.go | 7 +- apis/elbv2/v1alpha1/zz_target_group.go | 4 +- apis/elbv2/v1alpha1/zz_types.go | 2 + .../v1alpha1/zz_generated.deepcopy.go | 55 + apis/emrcontainers/v1alpha1/zz_job_run.go | 2 + apis/emrcontainers/v1alpha1/zz_types.go | 16 +- .../globalaccelerator/v1alpha1/referencers.go | 6 +- apis/glue/v1alpha1/zz_enums.go | 78 +- apis/glue/v1alpha1/zz_generated.deepcopy.go | 1836 ++++- apis/glue/v1alpha1/zz_job.go | 96 +- apis/glue/v1alpha1/zz_types.go | 475 ++ apis/iam/v1alpha1/zz_service_linked_role.go | 2 +- apis/iam/v1alpha1/zz_types.go | 4 +- apis/iot/generator-config.yaml | 2 + apis/iot/v1alpha1/zz_enums.go | 17 + apis/kafka/generator-config.yaml | 1 
+ apis/kafka/v1alpha1/zz_enums.go | 20 + apis/kafka/v1alpha1/zz_generated.deepcopy.go | 383 + apis/kafka/v1alpha1/zz_types.go | 115 + apis/kms/v1alpha1/zz_enums.go | 16 +- apis/kms/v1alpha1/zz_key.go | 49 +- apis/lambda/v1alpha1/zz_enums.go | 33 + .../lambda/v1alpha1/zz_function_url_config.go | 17 +- apis/lambda/v1alpha1/zz_generated.deepcopy.go | 95 + apis/lambda/v1alpha1/zz_types.go | 29 + apis/lambda/v1beta1/zz_enums.go | 33 + apis/lambda/v1beta1/zz_function.go | 17 +- apis/lambda/v1beta1/zz_generated.deepcopy.go | 95 + apis/lambda/v1beta1/zz_types.go | 29 + apis/mq/v1alpha1/zz_broker.go | 4 + apis/mq/v1alpha1/zz_enums.go | 15 + apis/mq/v1alpha1/zz_generated.deepcopy.go | 70 + apis/mq/v1alpha1/zz_types.go | 21 +- apis/mq/v1alpha1/zz_user.go | 4 +- apis/mwaa/v1alpha1/zz_enums.go | 18 +- apis/mwaa/v1alpha1/zz_environment.go | 71 +- apis/mwaa/v1alpha1/zz_generated.deepcopy.go | 65 + apis/mwaa/v1alpha1/zz_types.go | 16 + apis/neptune/v1alpha1/zz_db_cluster.go | 3 + .../neptune/v1alpha1/zz_generated.deepcopy.go | 70 + apis/neptune/v1alpha1/zz_types.go | 26 + apis/opensearchservice/v1alpha1/zz_domain.go | 10 +- apis/opensearchservice/v1alpha1/zz_enums.go | 111 + .../v1alpha1/zz_generated.deepcopy.go | 290 + apis/opensearchservice/v1alpha1/zz_types.go | 100 + apis/ram/generator-config.yaml | 3 + apis/ram/v1alpha1/zz_enums.go | 40 + apis/ram/v1alpha1/zz_generated.deepcopy.go | 135 + apis/ram/v1alpha1/zz_resource_share.go | 8 +- apis/ram/v1alpha1/zz_types.go | 40 + apis/rds/v1alpha1/zz_db_cluster.go | 439 +- apis/rds/v1alpha1/zz_db_instance.go | 665 +- apis/rds/v1alpha1/zz_enums.go | 17 + apis/rds/v1alpha1/zz_generated.deepcopy.go | 275 +- apis/rds/v1alpha1/zz_global_cluster.go | 63 +- apis/rds/v1alpha1/zz_types.go | 91 +- apis/route53resolver/generator-config.yaml | 1 + apis/route53resolver/v1alpha1/zz_enums.go | 57 +- .../v1alpha1/zz_generated.deepcopy.go | 120 + .../v1alpha1/zz_resolver_endpoint.go | 10 + .../v1alpha1/zz_resolver_rule.go | 3 +- apis/route53resolver/v1alpha1/zz_types.go | 36 + apis/s3control/v1alpha1/zz_enums.go | 76 + .../v1alpha1/zz_generated.deepcopy.go | 30 + apis/s3control/v1alpha1/zz_types.go | 9 + apis/secretsmanager/v1beta1/zz_secret.go | 2 +- apis/sesv2/generator-config.yaml | 1 + apis/sesv2/v1alpha1/zz_enums.go | 42 + apis/sesv2/v1alpha1/zz_generated.deepcopy.go | 88 + apis/sesv2/v1alpha1/zz_types.go | 26 + apis/sfn/generator-config.yaml | 2 + apis/sfn/v1alpha1/zz_enums.go | 1 + apis/sfn/v1alpha1/zz_generated.deepcopy.go | 93 + apis/sfn/v1alpha1/zz_state_machine.go | 13 +- apis/sfn/v1alpha1/zz_types.go | 23 + apis/transfer/v1alpha1/zz_enums.go | 9 + .../v1alpha1/zz_generated.deepcopy.go | 27 + apis/transfer/v1alpha1/zz_server.go | 43 +- apis/transfer/v1alpha1/zz_types.go | 52 +- apis/transfer/v1alpha1/zz_user.go | 15 +- go.mod | 4 +- go.sum | 8 +- .../apigateway.aws.crossplane.io_apikeys.yaml | 4 +- ...gateway.aws.crossplane.io_domainnames.yaml | 10 +- ...apigateway.aws.crossplane.io_restapis.yaml | 5 +- ...igateway.aws.crossplane.io_usageplans.yaml | 5 +- ...apigateway.aws.crossplane.io_vpclinks.yaml | 2 +- .../athena.aws.crossplane.io_workgroups.yaml | 36 +- ...g.aws.crossplane.io_autoscalinggroups.yaml | 80 +- ...aws.crossplane.io_computeenvironments.yaml | 11 +- ...front.aws.crossplane.io_cachepolicies.yaml | 58 +- ...tityprovider.aws.crossplane.io_groups.yaml | 6 +- ...r.aws.crossplane.io_identityproviders.yaml | 6 +- ...der.aws.crossplane.io_userpoolclients.yaml | 22 +- ...yprovider.aws.crossplane.io_userpools.yaml | 50 +- 
.../dynamodb.aws.crossplane.io_tables.yaml | 4 + .../crds/ec2.aws.crossplane.io_flowlogs.yaml | 4 +- ...ec2.aws.crossplane.io_launchtemplates.yaml | 34 +- ....crossplane.io_launchtemplateversions.yaml | 82 +- .../crds/ec2.aws.crossplane.io_volumes.yaml | 11 +- .../ec2.aws.crossplane.io_vpcendpoints.yaml | 26 +- ...s.crossplane.io_vpcpeeringconnections.yaml | 12 +- .../crds/ecs.aws.crossplane.io_clusters.yaml | 15 +- .../crds/ecs.aws.crossplane.io_services.yaml | 101 +- ...ecs.aws.crossplane.io_taskdefinitions.yaml | 240 +- .../efs.aws.crossplane.io_accesspoints.yaml | 2 +- .../efs.aws.crossplane.io_filesystems.yaml | 3 +- ...ws.crossplane.io_cacheparametergroups.yaml | 2 +- ...elbv2.aws.crossplane.io_loadbalancers.yaml | 9 +- .../elbv2.aws.crossplane.io_targetgroups.yaml | 4 +- ...rcontainers.aws.crossplane.io_jobruns.yaml | 7 + ...ers.aws.crossplane.io_virtualclusters.yaml | 2 +- .../glue.aws.crossplane.io_databases.yaml | 2 + package/crds/glue.aws.crossplane.io_jobs.yaml | 1075 ++- ...am.aws.crossplane.io_instanceprofiles.yaml | 2 +- ....aws.crossplane.io_servicelinkedroles.yaml | 2 +- package/crds/kms.aws.crossplane.io_keys.yaml | 61 +- .../lambda.aws.crossplane.io_functions.yaml | 34 +- ....aws.crossplane.io_functionurlconfigs.yaml | 13 +- .../crds/mq.aws.crossplane.io_brokers.yaml | 10 +- package/crds/mq.aws.crossplane.io_users.yaml | 2 + .../mwaa.aws.crossplane.io_environments.yaml | 86 +- .../neptune.aws.crossplane.io_dbclusters.yaml | 35 + ...archservice.aws.crossplane.io_domains.yaml | 50 +- .../ram.aws.crossplane.io_resourceshares.yaml | 12 +- .../rds.aws.crossplane.io_dbclusters.yaml | 580 +- .../rds.aws.crossplane.io_dbinstances.yaml | 781 +- .../rds.aws.crossplane.io_globalclusters.yaml | 58 +- ...r.aws.crossplane.io_resolverendpoints.yaml | 14 + ...olver.aws.crossplane.io_resolverrules.yaml | 8 +- ...retsmanager.aws.crossplane.io_secrets.yaml | 3 +- .../sfn.aws.crossplane.io_statemachines.yaml | 15 + .../transfer.aws.crossplane.io_servers.yaml | 54 +- .../transfer.aws.crossplane.io_users.yaml | 21 +- pkg/clients/database/rds_test.go | 18 +- pkg/clients/ec2/tags_test.go | 4 +- .../apigateway/restapi/zz_controller.go | 29 +- .../apigateway/restapi/zz_conversions.go | 29 +- .../athena/workgroup/zz_conversions.go | 66 +- .../autoscalinggroup/zz_conversions.go | 18 +- pkg/controller/docdb/dbcluster/setup_test.go | 18 +- .../docdb/dbcluster/zz_conversions.go | 12 +- pkg/controller/dynamodb/table/hooks.go | 6 +- .../dynamodb/table/zz_controller.go | 285 +- .../dynamodb/table/zz_conversions.go | 525 +- .../ec2/launchtemplate/zz_conversions.go | 24 +- .../launchtemplateversion/zz_controller.go | 27 +- .../launchtemplateversion/zz_conversions.go | 55 +- pkg/controller/ec2/volume/zz_controller.go | 23 +- pkg/controller/ec2/volume/zz_conversions.go | 23 +- pkg/controller/ec2/vpcendpoint/setup_test.go | 4 +- .../ec2/vpcendpoint/zz_controller.go | 3 + .../ec2/vpcendpoint/zz_conversions.go | 77 +- .../setup_test.go | 4 +- .../ec2/vpcpeeringconnection/setup_test.go | 4 +- .../ecs/taskdefinition/zz_controller.go | 487 +- .../ecs/taskdefinition/zz_conversions.go | 974 +-- .../elbv2/loadbalancer/zz_controller.go | 25 +- .../elbv2/loadbalancer/zz_conversions.go | 25 +- .../emrcontainers/jobrun/zz_conversions.go | 40 +- .../globalaccelerator/accelerator/setup.go | 8 +- .../globalaccelerator/endpointgroup/setup.go | 4 +- .../globalaccelerator/listener/setup.go | 8 +- pkg/controller/glue/job/zz_conversions.go | 7026 ++++++++++++----- .../lambda/function/zz_controller.go | 61 +- 
.../lambda/functionurlconfig/zz_controller.go | 5 + .../functionurlconfig/zz_conversions.go | 11 + pkg/controller/mq/broker/zz_conversions.go | 278 +- pkg/controller/mq/user/zz_conversions.go | 11 + .../mwaa/environment/zz_conversions.go | 46 +- .../neptune/dbcluster/zz_controller.go | 89 +- .../neptune/dbcluster/zz_conversions.go | 89 +- .../opensearchservice/domain/zz_controller.go | 112 +- .../domain/zz_conversions.go | 170 +- .../ram/resourceshare/zz_conversions.go | 27 +- pkg/controller/rds/dbcluster/zz_controller.go | 107 +- .../rds/dbcluster/zz_conversions.go | 182 +- .../rds/dbinstance/zz_controller.go | 182 +- .../rds/dbinstance/zz_conversions.go | 290 +- .../rds/globalcluster/zz_controller.go | 6 + .../rds/globalcluster/zz_conversions.go | 6 + .../resolverendpoint/zz_controller.go | 27 +- .../resolverendpoint/zz_conversions.go | 57 +- .../resolverrule/zz_controller.go | 3 + .../resolverrule/zz_conversions.go | 6 + pkg/controller/s3/bucket/policy_test.go | 4 +- .../sfn/statemachine/zz_controller.go | 5 + .../sfn/statemachine/zz_conversions.go | 76 +- .../transfer/server/zz_conversions.go | 194 +- 247 files changed, 18422 insertions(+), 6534 deletions(-) diff --git a/apis/apigateway/v1alpha1/zz_api_key.go b/apis/apigateway/v1alpha1/zz_api_key.go index 2c1306a9fd..657696940d 100644 --- a/apis/apigateway/v1alpha1/zz_api_key.go +++ b/apis/apigateway/v1alpha1/zz_api_key.go @@ -29,8 +29,8 @@ type APIKeyParameters struct { // Region is which region the APIKey will be created. // +kubebuilder:validation:Required Region string `json:"region"` - // An AWS Marketplace customer identifier , when integrating with the AWS SaaS - // Marketplace. + // An Amazon Web Services Marketplace customer identifier, when integrating + // with the Amazon Web Services SaaS Marketplace. CustomerID *string `json:"customerID,omitempty"` // The description of the ApiKey. Description *string `json:"description,omitempty"` diff --git a/apis/apigateway/v1alpha1/zz_domain_name.go b/apis/apigateway/v1alpha1/zz_domain_name.go index fb06ce46a5..4e813c5bbe 100644 --- a/apis/apigateway/v1alpha1/zz_domain_name.go +++ b/apis/apigateway/v1alpha1/zz_domain_name.go @@ -29,9 +29,9 @@ type DomainNameParameters struct { // Region is which region the DomainName will be created. // +kubebuilder:validation:Required Region string `json:"region"` - // The reference to an AWS-managed certificate that will be used by edge-optimized - // endpoint for this domain name. AWS Certificate Manager is the only supported - // source. + // The reference to an Amazon Web Services-managed certificate that will be + // used by edge-optimized endpoint for this domain name. Certificate Manager + // is the only supported source. CertificateARN *string `json:"certificateARN,omitempty"` // [Deprecated] The body of the server certificate that will be used by edge-optimized // endpoint for this domain name provided by your certificate authority. @@ -62,9 +62,9 @@ type DomainNameParameters struct { // your custom domain. Only required when configuring mutual TLS and using an // ACM imported or private CA certificate ARN as the regionalCertificateArn. OwnershipVerificationCertificateARN *string `json:"ownershipVerificationCertificateARN,omitempty"` - // The reference to an AWS-managed certificate that will be used by regional - // endpoint for this domain name. AWS Certificate Manager is the only supported - // source. + // The reference to an Amazon Web Services-managed certificate that will be + // used by regional endpoint for this domain name. 
Certificate Manager is the + // only supported source. RegionalCertificateARN *string `json:"regionalCertificateARN,omitempty"` // The user-friendly name of the certificate that will be used by regional endpoint // for this domain name. diff --git a/apis/apigateway/v1alpha1/zz_generated.deepcopy.go b/apis/apigateway/v1alpha1/zz_generated.deepcopy.go index b73156002d..8db0173b60 100644 --- a/apis/apigateway/v1alpha1/zz_generated.deepcopy.go +++ b/apis/apigateway/v1alpha1/zz_generated.deepcopy.go @@ -4392,6 +4392,11 @@ func (in *RestAPIObservation) DeepCopyInto(out *RestAPIObservation) { *out = new(string) **out = **in } + if in.RootResourceID != nil { + in, out := &in.RootResourceID, &out.RootResourceID + *out = new(string) + **out = **in + } if in.Warnings != nil { in, out := &in.Warnings, &out.Warnings *out = make([]*string, len(*in)) @@ -4594,6 +4599,11 @@ func (in *RestAPI_SDK) DeepCopyInto(out *RestAPI_SDK) { *out = new(string) **out = **in } + if in.RootResourceID != nil { + in, out := &in.RootResourceID, &out.RootResourceID + *out = new(string) + **out = **in + } if in.Tags != nil { in, out := &in.Tags, &out.Tags *out = make(map[string]*string, len(*in)) diff --git a/apis/apigateway/v1alpha1/zz_rest_api.go b/apis/apigateway/v1alpha1/zz_rest_api.go index 22c320c936..6cb131d9b9 100644 --- a/apis/apigateway/v1alpha1/zz_rest_api.go +++ b/apis/apigateway/v1alpha1/zz_rest_api.go @@ -30,7 +30,7 @@ type RestAPIParameters struct { // +kubebuilder:validation:Required Region string `json:"region"` // The source of the API key for metering requests according to a usage plan. - // Valid values are: >HEADER to read the API key from the X-API-Key header of + // Valid values are: HEADER to read the API key from the X-API-Key header of // a request. AUTHORIZER to read the API key from the UsageIdentifierKey from // a custom authorizer. APIKeySource *string `json:"apiKeySource,omitempty"` @@ -83,6 +83,8 @@ type RestAPIObservation struct { // The API's identifier. This identifier is unique across all of your APIs in // API Gateway. ID *string `json:"id,omitempty"` + // The API's root resource ID. + RootResourceID *string `json:"rootResourceID,omitempty"` // The warning messages reported when failonwarnings is turned on during API // import. Warnings []*string `json:"warnings,omitempty"` diff --git a/apis/apigateway/v1alpha1/zz_types.go b/apis/apigateway/v1alpha1/zz_types.go index ba70085611..6607b5ea8b 100644 --- a/apis/apigateway/v1alpha1/zz_types.go +++ b/apis/apigateway/v1alpha1/zz_types.go @@ -324,6 +324,8 @@ type RestAPI_SDK struct { Policy *string `json:"policy,omitempty"` + RootResourceID *string `json:"rootResourceID,omitempty"` + Tags map[string]*string `json:"tags,omitempty"` Version *string `json:"version,omitempty"` diff --git a/apis/apigateway/v1alpha1/zz_usage_plan.go b/apis/apigateway/v1alpha1/zz_usage_plan.go index bda13468bb..450875f3b5 100644 --- a/apis/apigateway/v1alpha1/zz_usage_plan.go +++ b/apis/apigateway/v1alpha1/zz_usage_plan.go @@ -57,8 +57,8 @@ type UsagePlanObservation struct { APIStages []*APIStage `json:"apiStages,omitempty"` // The identifier of a UsagePlan resource. ID *string `json:"id,omitempty"` - // The AWS Markeplace product identifier to associate with the usage plan as - // a SaaS product on AWS Marketplace. + // The Amazon Web Services Marketplace product identifier to associate with + // the usage plan as a SaaS product on the Amazon Web Services Marketplace. 
ProductCode *string `json:"productCode,omitempty"` } diff --git a/apis/apigateway/v1alpha1/zz_vpc_link.go b/apis/apigateway/v1alpha1/zz_vpc_link.go index 028d0da8db..e8656f0db3 100644 --- a/apis/apigateway/v1alpha1/zz_vpc_link.go +++ b/apis/apigateway/v1alpha1/zz_vpc_link.go @@ -39,8 +39,8 @@ type VPCLinkParameters struct { // tag value can be up to 256 characters. Tags map[string]*string `json:"tags,omitempty"` // The ARN of the network load balancer of the VPC targeted by the VPC link. - // The network load balancer must be owned by the same AWS account of the API - // owner. + // The network load balancer must be owned by the same Amazon Web Services account + // of the API owner. // +kubebuilder:validation:Required TargetARNs []*string `json:"targetARNs"` CustomVPCLinkParameters `json:",inline"` diff --git a/apis/athena/generator-config.yaml b/apis/athena/generator-config.yaml index a8f671f42e..d6606df16f 100644 --- a/apis/athena/generator-config.yaml +++ b/apis/athena/generator-config.yaml @@ -6,6 +6,7 @@ ignore: - Notebook - PresignedNotebook - PresignedNotebookUrl + - CapacityReservation field_paths: - CreateWorkGroupInput.Name resources: diff --git a/apis/athena/v1alpha1/zz_enums.go b/apis/athena/v1alpha1/zz_enums.go index 90b2d332a6..10a423a0f3 100644 --- a/apis/athena/v1alpha1/zz_enums.go +++ b/apis/athena/v1alpha1/zz_enums.go @@ -31,6 +31,25 @@ const ( CalculationExecutionState_FAILED CalculationExecutionState = "FAILED" ) +type CapacityAllocationStatus string + +const ( + CapacityAllocationStatus_PENDING CapacityAllocationStatus = "PENDING" + CapacityAllocationStatus_SUCCEEDED CapacityAllocationStatus = "SUCCEEDED" + CapacityAllocationStatus_FAILED CapacityAllocationStatus = "FAILED" +) + +type CapacityReservationStatus string + +const ( + CapacityReservationStatus_PENDING CapacityReservationStatus = "PENDING" + CapacityReservationStatus_ACTIVE CapacityReservationStatus = "ACTIVE" + CapacityReservationStatus_CANCELLING CapacityReservationStatus = "CANCELLING" + CapacityReservationStatus_CANCELLED CapacityReservationStatus = "CANCELLED" + CapacityReservationStatus_FAILED CapacityReservationStatus = "FAILED" + CapacityReservationStatus_UPDATE_PENDING CapacityReservationStatus = "UPDATE_PENDING" +) + type ColumnNullable string const ( diff --git a/apis/athena/v1alpha1/zz_generated.deepcopy.go b/apis/athena/v1alpha1/zz_generated.deepcopy.go index b2063a62f2..86a081f273 100644 --- a/apis/athena/v1alpha1/zz_generated.deepcopy.go +++ b/apis/athena/v1alpha1/zz_generated.deepcopy.go @@ -108,6 +108,26 @@ func (in *CalculationStatus) DeepCopy() *CalculationStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityAllocation) DeepCopyInto(out *CapacityAllocation) { + *out = *in + if in.StatusMessage != nil { + in, out := &in.StatusMessage, &out.StatusMessage + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityAllocation. +func (in *CapacityAllocation) DeepCopy() *CapacityAllocation { + if in == nil { + return nil + } + out := new(CapacityAllocation) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Column) DeepCopyInto(out *Column) { *out = *in @@ -406,6 +426,11 @@ func (in *QueryExecution) DeepCopyInto(out *QueryExecution) { *out = new(ResultConfiguration) (*in).DeepCopyInto(*out) } + if in.SubstatementType != nil { + in, out := &in.SubstatementType, &out.SubstatementType + *out = new(string) + **out = **in + } if in.WorkGroup != nil { in, out := &in.WorkGroup, &out.WorkGroup *out = new(string) @@ -782,6 +807,11 @@ func (in *WorkGroupConfiguration) DeepCopyInto(out *WorkGroupConfiguration) { *out = new(CustomerContentEncryptionConfiguration) (*in).DeepCopyInto(*out) } + if in.EnableMinimumEncryptionConfiguration != nil { + in, out := &in.EnableMinimumEncryptionConfiguration, &out.EnableMinimumEncryptionConfiguration + *out = new(bool) + **out = **in + } if in.EnforceWorkGroupConfiguration != nil { in, out := &in.EnforceWorkGroupConfiguration, &out.EnforceWorkGroupConfiguration *out = new(bool) @@ -842,6 +872,11 @@ func (in *WorkGroupConfigurationUpdates) DeepCopyInto(out *WorkGroupConfiguratio *out = new(CustomerContentEncryptionConfiguration) (*in).DeepCopyInto(*out) } + if in.EnableMinimumEncryptionConfiguration != nil { + in, out := &in.EnableMinimumEncryptionConfiguration, &out.EnableMinimumEncryptionConfiguration + *out = new(bool) + **out = **in + } if in.EnforceWorkGroupConfiguration != nil { in, out := &in.EnforceWorkGroupConfiguration, &out.EnforceWorkGroupConfiguration *out = new(bool) diff --git a/apis/athena/v1alpha1/zz_types.go b/apis/athena/v1alpha1/zz_types.go index 7456a4befd..aa82708018 100644 --- a/apis/athena/v1alpha1/zz_types.go +++ b/apis/athena/v1alpha1/zz_types.go @@ -49,6 +49,11 @@ type CalculationStatus struct { SubmissionDateTime *metav1.Time `json:"submissionDateTime,omitempty"` } +// +kubebuilder:skipversion +type CapacityAllocation struct { + StatusMessage *string `json:"statusMessage,omitempty"` +} + // +kubebuilder:skipversion type Column struct { Name *string `json:"name,omitempty"` @@ -131,12 +136,14 @@ type QueryExecution struct { // The Athena engine version for running queries, or the PySpark engine version // for running sessions. EngineVersion *EngineVersion `json:"engineVersion,omitempty"` - // The location in Amazon S3 where query results are stored and the encryption - // option, if any, used for query results. These are known as "client-side settings". - // If workgroup settings override client-side settings, then the query uses - // the workgroup settings. + // The location in Amazon S3 where query and calculation results are stored + // and the encryption option, if any, used for query and calculation results. + // These are known as "client-side settings". If workgroup settings override + // client-side settings, then the query uses the workgroup settings. ResultConfiguration *ResultConfiguration `json:"resultConfiguration,omitempty"` + SubstatementType *string `json:"substatementType,omitempty"` + WorkGroup *string `json:"workGroup,omitempty"` } @@ -174,8 +181,8 @@ type ResultConfiguration struct { // about S3 Object Ownership, see Object Ownership settings (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html#object-ownership-overview) // in the Amazon S3 User Guide. ACLConfiguration *ACLConfiguration `json:"aclConfiguration,omitempty"` - // If query results are encrypted in Amazon S3, indicates the encryption option - // used (for example, SSE_KMS or CSE_KMS) and key information. 
+ // If query and calculation results are encrypted in Amazon S3, indicates the + // encryption option used (for example, SSE_KMS or CSE_KMS) and key information. EncryptionConfiguration *EncryptionConfiguration `json:"encryptionConfiguration,omitempty"` ExpectedBucketOwner *string `json:"expectedBucketOwner,omitempty"` @@ -191,8 +198,8 @@ type ResultConfigurationUpdates struct { // about S3 Object Ownership, see Object Ownership settings (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html#object-ownership-overview) // in the Amazon S3 User Guide. ACLConfiguration *ACLConfiguration `json:"aclConfiguration,omitempty"` - // If query results are encrypted in Amazon S3, indicates the encryption option - // used (for example, SSE_KMS or CSE_KMS) and key information. + // If query and calculation results are encrypted in Amazon S3, indicates the + // encryption option used (for example, SSE_KMS or CSE_KMS) and key information. EncryptionConfiguration *EncryptionConfiguration `json:"encryptionConfiguration,omitempty"` ExpectedBucketOwner *string `json:"expectedBucketOwner,omitempty"` @@ -210,8 +217,8 @@ type ResultConfigurationUpdates struct { // +kubebuilder:skipversion type SessionConfiguration struct { - // If query results are encrypted in Amazon S3, indicates the encryption option - // used (for example, SSE_KMS or CSE_KMS) and key information. + // If query and calculation results are encrypted in Amazon S3, indicates the + // encryption option used (for example, SSE_KMS or CSE_KMS) and key information. EncryptionConfiguration *EncryptionConfiguration `json:"encryptionConfiguration,omitempty"` ExecutionRole *string `json:"executionRole,omitempty"` @@ -257,8 +264,11 @@ type WorkGroupConfiguration struct { BytesScannedCutoffPerQuery *int64 `json:"bytesScannedCutoffPerQuery,omitempty"` // Specifies the KMS key that is used to encrypt the user's data stores in Athena. + // This setting does not apply to Athena SQL workgroups. CustomerContentEncryptionConfiguration *CustomerContentEncryptionConfiguration `json:"customerContentEncryptionConfiguration,omitempty"` + EnableMinimumEncryptionConfiguration *bool `json:"enableMinimumEncryptionConfiguration,omitempty"` + EnforceWorkGroupConfiguration *bool `json:"enforceWorkGroupConfiguration,omitempty"` // The Athena engine version for running queries, or the PySpark engine version // for running sessions. @@ -269,10 +279,10 @@ type WorkGroupConfiguration struct { PublishCloudWatchMetricsEnabled *bool `json:"publishCloudWatchMetricsEnabled,omitempty"` RequesterPaysEnabled *bool `json:"requesterPaysEnabled,omitempty"` - // The location in Amazon S3 where query results are stored and the encryption - // option, if any, used for query results. These are known as "client-side settings". - // If workgroup settings override client-side settings, then the query uses - // the workgroup settings. + // The location in Amazon S3 where query and calculation results are stored + // and the encryption option, if any, used for query and calculation results. + // These are known as "client-side settings". If workgroup settings override + // client-side settings, then the query uses the workgroup settings. ResultConfiguration *ResultConfiguration `json:"resultConfiguration,omitempty"` } @@ -282,8 +292,11 @@ type WorkGroupConfigurationUpdates struct { BytesScannedCutoffPerQuery *int64 `json:"bytesScannedCutoffPerQuery,omitempty"` // Specifies the KMS key that is used to encrypt the user's data stores in Athena. 
+ // This setting does not apply to Athena SQL workgroups. CustomerContentEncryptionConfiguration *CustomerContentEncryptionConfiguration `json:"customerContentEncryptionConfiguration,omitempty"` + EnableMinimumEncryptionConfiguration *bool `json:"enableMinimumEncryptionConfiguration,omitempty"` + EnforceWorkGroupConfiguration *bool `json:"enforceWorkGroupConfiguration,omitempty"` // The Athena engine version for running queries, or the PySpark engine version // for running sessions. @@ -320,12 +333,13 @@ type WorkGroupSummary struct { // +kubebuilder:skipversion type WorkGroup_SDK struct { // The configuration of the workgroup, which includes the location in Amazon - // S3 where query results are stored, the encryption option, if any, used for - // query results, whether the Amazon CloudWatch Metrics are enabled for the - // workgroup and whether workgroup settings override query settings, and the - // data usage limits for the amount of data scanned per query or per workgroup. - // The workgroup settings override is specified in EnforceWorkGroupConfiguration - // (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration. + // S3 where query and calculation results are stored, the encryption option, + // if any, used for query and calculation results, whether the Amazon CloudWatch + // Metrics are enabled for the workgroup and whether workgroup settings override + // query settings, and the data usage limits for the amount of data scanned + // per query or per workgroup. The workgroup settings override is specified + // in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. + // See WorkGroupConfiguration$EnforceWorkGroupConfiguration. Configuration *WorkGroupConfiguration `json:"configuration,omitempty"` CreationTime *metav1.Time `json:"creationTime,omitempty"` diff --git a/apis/athena/v1alpha1/zz_work_group.go b/apis/athena/v1alpha1/zz_work_group.go index a37c2b2aba..f775616ebe 100644 --- a/apis/athena/v1alpha1/zz_work_group.go +++ b/apis/athena/v1alpha1/zz_work_group.go @@ -29,8 +29,9 @@ type WorkGroupParameters struct { // Region is which region the WorkGroup will be created. // +kubebuilder:validation:Required Region string `json:"region"` - // Contains configuration information for creating an Athena SQL workgroup, - // which includes the location in Amazon S3 where query results are stored, + // Contains configuration information for creating an Athena SQL workgroup or + // Spark enabled Athena workgroup. Athena SQL workgroup configuration includes + // the location in Amazon S3 where query and calculation results are stored, // the encryption configuration, if any, used for encrypting query results, // whether the Amazon CloudWatch Metrics are enabled for the workgroup, the // limit for the amount of bytes scanned (cutoff) per query, if it is specified, diff --git a/apis/autoscaling/generator-config.yaml b/apis/autoscaling/generator-config.yaml index b06cc718f8..68d743a7f3 100644 --- a/apis/autoscaling/generator-config.yaml +++ b/apis/autoscaling/generator-config.yaml @@ -2,6 +2,9 @@ ignore: field_paths: - CreateAutoScalingGroupInput.AutoScalingGroupName - DeleteAutoScalingGroupInput.AutoScalingGroupName + # TODO: Ignore for now because it generates a property that does not match + # the convention (type_). 
+ - CreateAutoScalingGroupInput.TrafficSources resource_names: - LaunchConfiguration resources: diff --git a/apis/autoscaling/v1beta1/zz_auto_scaling_group.go b/apis/autoscaling/v1beta1/zz_auto_scaling_group.go index 797796f036..04d93dffc0 100644 --- a/apis/autoscaling/v1beta1/zz_auto_scaling_group.go +++ b/apis/autoscaling/v1beta1/zz_auto_scaling_group.go @@ -54,23 +54,24 @@ type AutoScalingGroupParameters struct { // // Default: 300 seconds DefaultCooldown *int64 `json:"defaultCooldown,omitempty"` - // The amount of time, in seconds, until a newly launched instance can contribute - // to the Amazon CloudWatch metrics. This delay lets an instance finish initializing - // before Amazon EC2 Auto Scaling aggregates instance metrics, resulting in - // more reliable usage data. Set this value equal to the amount of time that - // it takes for resource consumption to become stable after an instance reaches - // the InService state. For more information, see Set the default instance warmup - // for an Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-default-instance-warmup.html) - // in the Amazon EC2 Auto Scaling User Guide. + // The amount of time, in seconds, until a new instance is considered to have + // finished initializing and resource consumption to become stable after it + // enters the InService state. // - // To manage your warm-up settings at the group level, we recommend that you - // set the default instance warmup, even if its value is set to 0 seconds. This - // also optimizes the performance of scaling policies that scale continuously, - // such as target tracking and step scaling policies. + // During an instance refresh, Amazon EC2 Auto Scaling waits for the warm-up + // period after it replaces an instance before it moves on to replacing the + // next instance. Amazon EC2 Auto Scaling also waits for the warm-up period + // before aggregating the metrics for new instances with existing instances + // in the Amazon CloudWatch metrics that are used for scaling, resulting in + // more reliable usage data. For more information, see Set the default instance + // warmup for an Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-default-instance-warmup.html) + // in the Amazon EC2 Auto Scaling User Guide. // - // If you need to remove a value that you previously set, include the property - // but specify -1 for the value. However, we strongly recommend keeping the - // default instance warmup enabled by specifying a minimum value of 0. + // To manage various warm-up settings at the group level, we recommend that + // you set the default instance warmup, even if it is set to 0 seconds. To remove + // a value that you previously set, include the property but specify -1 for + // the value. However, we strongly recommend keeping the default instance warmup + // enabled by specifying a value of 0 or other nominal value. // // Default: None DefaultInstanceWarmup *int64 `json:"defaultInstanceWarmup,omitempty"` @@ -102,14 +103,14 @@ type AutoScalingGroupParameters struct { // // Default: 0 seconds HealthCheckGracePeriod *int64 `json:"healthCheckGracePeriod,omitempty"` - // Determines whether any additional health checks are performed on the instances - // in this group. Amazon EC2 health checks are always on. 
For more information, - // see Health checks for Auto Scaling instances (https://docs.aws.amazon.com/autoscaling/ec2/userguide/healthcheck.html) + // A comma-separated value string of one or more health check types. + // + // The valid values are EC2, ELB, and VPC_LATTICE. EC2 is the default health + // check and cannot be disabled. For more information, see Health checks for + // Auto Scaling instances (https://docs.aws.amazon.com/autoscaling/ec2/userguide/healthcheck.html) // in the Amazon EC2 Auto Scaling User Guide. // - // The valid values are EC2 (default), ELB, and VPC_LATTICE. The VPC_LATTICE - // health check type is reserved for use with VPC Lattice, which is in preview - // release and is subject to change. + // Only specify EC2 if you must clear a value that was previously set. HealthCheckType *string `json:"healthCheckType,omitempty"` // The ID of the instance used to base the launch configuration on. If specified, // Amazon EC2 Auto Scaling uses the configuration values from the specified @@ -142,7 +143,7 @@ type AutoScalingGroupParameters struct { LifecycleHookSpecificationList []*LifecycleHookSpecification `json:"lifecycleHookSpecificationList,omitempty"` // A list of Classic Load Balancers associated with this Auto Scaling group. // For Application Load Balancers, Network Load Balancers, and Gateway Load - // Balancer, specify the TargetGroupARNs property instead. + // Balancers, specify the TargetGroupARNs property instead. LoadBalancerNames []*string `json:"loadBalancerNames,omitempty"` // The maximum amount of time, in seconds, that an instance can be in service. // The default is null. If specified, the value must be either 0 or a number @@ -214,17 +215,6 @@ type AutoScalingGroupParameters struct { // NewestInstance | OldestInstance | OldestLaunchConfiguration | OldestLaunchTemplate // | arn:aws:lambda:region:account-id:function:my-function:my-alias TerminationPolicies []*string `json:"terminationPolicies,omitempty"` - // Reserved for use with Amazon VPC Lattice, which is in preview release and - // is subject to change. Do not use this parameter for production workloads. - // It is also subject to change. - // - // The unique identifiers of one or more traffic sources. - // - // Currently, you must specify an Amazon Resource Name (ARN) for an existing - // VPC Lattice target group. Amazon EC2 Auto Scaling registers the running instances - // with the attached target groups. The target groups receive incoming traffic - // and route requests to one or more registered targets. - TrafficSources []*TrafficSourceIdentifier `json:"trafficSources,omitempty"` // A comma-separated list of subnet IDs for a virtual private cloud (VPC) where // instances in the Auto Scaling group can be created. If you specify VPCZoneIdentifier // with AvailabilityZones, the subnets that you specify must reside in those @@ -261,12 +251,7 @@ type AutoScalingGroupObservation struct { EnabledMetrics []*EnabledMetric `json:"enabledMetrics,omitempty"` // The duration of the health check grace period, in seconds. HealthCheckGracePeriod *int64 `json:"healthCheckGracePeriod,omitempty"` - // Determines whether any additional health checks are performed on the instances - // in this group. Amazon EC2 health checks are always on. - // - // The valid values are EC2 (default), ELB, and VPC_LATTICE. The VPC_LATTICE - // health check type is reserved for use with VPC Lattice, which is in preview - // release and is subject to change. + // A comma-separated value string of one or more health check types. 
HealthCheckType *string `json:"healthCheckType,omitempty"` // The EC2 instances associated with the group. Instances []*Instance `json:"instances,omitempty"` @@ -307,7 +292,7 @@ type AutoScalingGroupObservation struct { TargetGroupARNs []*string `json:"targetGroupARNs,omitempty"` // The termination policies for the group. TerminationPolicies []*string `json:"terminationPolicies,omitempty"` - // The unique identifiers of the traffic sources. + // The traffic sources associated with this Auto Scaling group. TrafficSources []*TrafficSourceIdentifier `json:"trafficSources,omitempty"` // One or more subnet IDs, if applicable, separated by commas. VPCZoneIdentifier *string `json:"vpcZoneIdentifier,omitempty"` diff --git a/apis/autoscaling/v1beta1/zz_enums.go b/apis/autoscaling/v1beta1/zz_enums.go index a6b072211c..8967e89d4e 100644 --- a/apis/autoscaling/v1beta1/zz_enums.go +++ b/apis/autoscaling/v1beta1/zz_enums.go @@ -95,12 +95,15 @@ const ( type InstanceRefreshStatus string const ( - InstanceRefreshStatus_Pending InstanceRefreshStatus = "Pending" - InstanceRefreshStatus_InProgress InstanceRefreshStatus = "InProgress" - InstanceRefreshStatus_Successful InstanceRefreshStatus = "Successful" - InstanceRefreshStatus_Failed InstanceRefreshStatus = "Failed" - InstanceRefreshStatus_Cancelling InstanceRefreshStatus = "Cancelling" - InstanceRefreshStatus_Cancelled InstanceRefreshStatus = "Cancelled" + InstanceRefreshStatus_Pending InstanceRefreshStatus = "Pending" + InstanceRefreshStatus_InProgress InstanceRefreshStatus = "InProgress" + InstanceRefreshStatus_Successful InstanceRefreshStatus = "Successful" + InstanceRefreshStatus_Failed InstanceRefreshStatus = "Failed" + InstanceRefreshStatus_Cancelling InstanceRefreshStatus = "Cancelling" + InstanceRefreshStatus_Cancelled InstanceRefreshStatus = "Cancelled" + InstanceRefreshStatus_RollbackInProgress InstanceRefreshStatus = "RollbackInProgress" + InstanceRefreshStatus_RollbackFailed InstanceRefreshStatus = "RollbackFailed" + InstanceRefreshStatus_RollbackSuccessful InstanceRefreshStatus = "RollbackSuccessful" ) type LifecycleState string @@ -212,6 +215,14 @@ const ( RefreshStrategy_Rolling RefreshStrategy = "Rolling" ) +type ScaleInProtectedInstances string + +const ( + ScaleInProtectedInstances_Refresh ScaleInProtectedInstances = "Refresh" + ScaleInProtectedInstances_Ignore ScaleInProtectedInstances = "Ignore" + ScaleInProtectedInstances_Wait ScaleInProtectedInstances = "Wait" +) + type ScalingActivityStatusCode string const ( @@ -227,6 +238,15 @@ const ( ScalingActivityStatusCode_Successful ScalingActivityStatusCode = "Successful" ScalingActivityStatusCode_Failed ScalingActivityStatusCode = "Failed" ScalingActivityStatusCode_Cancelled ScalingActivityStatusCode = "Cancelled" + ScalingActivityStatusCode_WaitingForConnectionDraining ScalingActivityStatusCode = "WaitingForConnectionDraining" +) + +type StandbyInstances string + +const ( + StandbyInstances_Terminate StandbyInstances = "Terminate" + StandbyInstances_Ignore StandbyInstances = "Ignore" + StandbyInstances_Wait StandbyInstances = "Wait" ) type WarmPoolState string diff --git a/apis/autoscaling/v1beta1/zz_generated.deepcopy.go b/apis/autoscaling/v1beta1/zz_generated.deepcopy.go index 094ec2a47e..96ac377a59 100644 --- a/apis/autoscaling/v1beta1/zz_generated.deepcopy.go +++ b/apis/autoscaling/v1beta1/zz_generated.deepcopy.go @@ -624,17 +624,6 @@ func (in *AutoScalingGroupParameters) DeepCopyInto(out *AutoScalingGroupParamete } } } - if in.TrafficSources != nil { - in, out := &in.TrafficSources, 
&out.TrafficSources - *out = make([]*TrafficSourceIdentifier, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(TrafficSourceIdentifier) - (*in).DeepCopyInto(*out) - } - } - } if in.VPCZoneIdentifier != nil { in, out := &in.VPCZoneIdentifier, &out.VPCZoneIdentifier *out = new(string) @@ -2157,6 +2146,30 @@ func (in *ProcessType) DeepCopy() *ProcessType { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollbackDetails) DeepCopyInto(out *RollbackDetails) { + *out = *in + if in.RollbackReason != nil { + in, out := &in.RollbackReason, &out.RollbackReason + *out = new(string) + **out = **in + } + if in.RollbackStartTime != nil { + in, out := &in.RollbackStartTime, &out.RollbackStartTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollbackDetails. +func (in *RollbackDetails) DeepCopy() *RollbackDetails { + if in == nil { + return nil + } + out := new(RollbackDetails) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ScalingPolicy) DeepCopyInto(out *ScalingPolicy) { *out = *in @@ -2485,6 +2498,11 @@ func (in *TrafficSourceIdentifier) DeepCopyInto(out *TrafficSourceIdentifier) { *out = new(string) **out = **in } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficSourceIdentifier. @@ -2500,6 +2518,11 @@ func (in *TrafficSourceIdentifier) DeepCopy() *TrafficSourceIdentifier { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TrafficSourceState) DeepCopyInto(out *TrafficSourceState) { *out = *in + if in.Identifier != nil { + in, out := &in.Identifier, &out.Identifier + *out = new(string) + **out = **in + } if in.State != nil { in, out := &in.State, &out.State *out = new(string) @@ -2510,6 +2533,11 @@ func (in *TrafficSourceState) DeepCopyInto(out *TrafficSourceState) { *out = new(string) **out = **in } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficSourceState. 
diff --git a/apis/autoscaling/v1beta1/zz_types.go b/apis/autoscaling/v1beta1/zz_types.go index d7aa9c1beb..7d87690a71 100644 --- a/apis/autoscaling/v1beta1/zz_types.go +++ b/apis/autoscaling/v1beta1/zz_types.go @@ -587,6 +587,13 @@ type ProcessType struct { ProcessName *string `json:"processName,omitempty"` } +// +kubebuilder:skipversion +type RollbackDetails struct { + RollbackReason *string `json:"rollbackReason,omitempty"` + + RollbackStartTime *metav1.Time `json:"rollbackStartTime,omitempty"` +} + // +kubebuilder:skipversion type ScalingPolicy struct { AdjustmentType *string `json:"adjustmentType,omitempty"` @@ -696,13 +703,19 @@ type TotalLocalStorageGBRequest struct { // +kubebuilder:skipversion type TrafficSourceIdentifier struct { Identifier *string `json:"identifier,omitempty"` + + Type *string `json:"type_,omitempty"` } // +kubebuilder:skipversion type TrafficSourceState struct { + Identifier *string `json:"identifier,omitempty"` + State *string `json:"state,omitempty"` TrafficSource *string `json:"trafficSource,omitempty"` + + Type *string `json:"type_,omitempty"` } // +kubebuilder:skipversion diff --git a/apis/batch/v1alpha1/zz_compute_environment.go b/apis/batch/v1alpha1/zz_compute_environment.go index 8c92bef76b..ad736b3dfc 100644 --- a/apis/batch/v1alpha1/zz_compute_environment.go +++ b/apis/batch/v1alpha1/zz_compute_environment.go @@ -87,8 +87,17 @@ type ComputeEnvironmentObservation struct { // If the state is DISABLED, then the Batch scheduler doesn't attempt to place // jobs within the environment. Jobs in a STARTING or RUNNING state continue // to progress normally. Managed compute environments in the DISABLED state - // don't scale out. However, they scale in to minvCpus value after instances - // become idle. + // don't scale out. + // + // Compute environments in a DISABLED state may continue to incur billing charges. + // To prevent additional charges, turn off and then delete the compute environment. + // For more information, see State (https://docs.aws.amazon.com/batch/latest/userguide/compute_environment_parameters.html#compute_environment_state) + // in the Batch User Guide. + // + // When an instance is idle, the instance scales down to the minvCpus value. + // However, the instance size doesn't change. For example, consider a c5.8xlarge + // instance with a minvCpus value of 4 and a desiredvCpus value of 36. This + // instance doesn't scale down to a c5.large instance. State *string `json:"state,omitempty"` // The current status of the compute environment (for example, CREATING or VALID). 
Status *string `json:"status,omitempty"` diff --git a/apis/batch/v1alpha1/zz_enums.go b/apis/batch/v1alpha1/zz_enums.go index d1c2393c49..7c9bc6b047 100644 --- a/apis/batch/v1alpha1/zz_enums.go +++ b/apis/batch/v1alpha1/zz_enums.go @@ -60,9 +60,10 @@ const ( type CRAllocationStrategy string const ( - CRAllocationStrategy_BEST_FIT CRAllocationStrategy = "BEST_FIT" - CRAllocationStrategy_BEST_FIT_PROGRESSIVE CRAllocationStrategy = "BEST_FIT_PROGRESSIVE" - CRAllocationStrategy_SPOT_CAPACITY_OPTIMIZED CRAllocationStrategy = "SPOT_CAPACITY_OPTIMIZED" + CRAllocationStrategy_BEST_FIT CRAllocationStrategy = "BEST_FIT" + CRAllocationStrategy_BEST_FIT_PROGRESSIVE CRAllocationStrategy = "BEST_FIT_PROGRESSIVE" + CRAllocationStrategy_SPOT_CAPACITY_OPTIMIZED CRAllocationStrategy = "SPOT_CAPACITY_OPTIMIZED" + CRAllocationStrategy_SPOT_PRICE_CAPACITY_OPTIMIZED CRAllocationStrategy = "SPOT_PRICE_CAPACITY_OPTIMIZED" ) type CRType string @@ -77,8 +78,9 @@ const ( type CRUpdateAllocationStrategy string const ( - CRUpdateAllocationStrategy_BEST_FIT_PROGRESSIVE CRUpdateAllocationStrategy = "BEST_FIT_PROGRESSIVE" - CRUpdateAllocationStrategy_SPOT_CAPACITY_OPTIMIZED CRUpdateAllocationStrategy = "SPOT_CAPACITY_OPTIMIZED" + CRUpdateAllocationStrategy_BEST_FIT_PROGRESSIVE CRUpdateAllocationStrategy = "BEST_FIT_PROGRESSIVE" + CRUpdateAllocationStrategy_SPOT_CAPACITY_OPTIMIZED CRUpdateAllocationStrategy = "SPOT_CAPACITY_OPTIMIZED" + CRUpdateAllocationStrategy_SPOT_PRICE_CAPACITY_OPTIMIZED CRUpdateAllocationStrategy = "SPOT_PRICE_CAPACITY_OPTIMIZED" ) type DeviceCgroupPermission string diff --git a/apis/batch/v1alpha1/zz_generated.deepcopy.go b/apis/batch/v1alpha1/zz_generated.deepcopy.go index 9d3619b453..ab06a32c90 100644 --- a/apis/batch/v1alpha1/zz_generated.deepcopy.go +++ b/apis/batch/v1alpha1/zz_generated.deepcopy.go @@ -1569,6 +1569,26 @@ func (in *EKSVolume) DeepCopy() *EKSVolume { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralStorage) DeepCopyInto(out *EphemeralStorage) { + *out = *in + if in.SizeInGiB != nil { + in, out := &in.SizeInGiB, &out.SizeInGiB + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralStorage. +func (in *EphemeralStorage) DeepCopy() *EphemeralStorage { + if in == nil { + return nil + } + out := new(EphemeralStorage) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EvaluateOnExit) DeepCopyInto(out *EvaluateOnExit) { *out = *in @@ -2435,6 +2455,31 @@ func (in *RetryStrategy) DeepCopy() *RetryStrategy { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimePlatform) DeepCopyInto(out *RuntimePlatform) { + *out = *in + if in.CPUArchitecture != nil { + in, out := &in.CPUArchitecture, &out.CPUArchitecture + *out = new(string) + **out = **in + } + if in.OperatingSystemFamily != nil { + in, out := &in.OperatingSystemFamily, &out.OperatingSystemFamily + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimePlatform. 
+func (in *RuntimePlatform) DeepCopy() *RuntimePlatform { + if in == nil { + return nil + } + out := new(RuntimePlatform) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SchedulingPolicyDetail) DeepCopyInto(out *SchedulingPolicyDetail) { *out = *in diff --git a/apis/batch/v1alpha1/zz_types.go b/apis/batch/v1alpha1/zz_types.go index 8360ae81ae..f89d2e68b0 100644 --- a/apis/batch/v1alpha1/zz_types.go +++ b/apis/batch/v1alpha1/zz_types.go @@ -386,6 +386,11 @@ type EKSVolume struct { Name *string `json:"name,omitempty"` } +// +kubebuilder:skipversion +type EphemeralStorage struct { + SizeInGiB *int64 `json:"sizeInGiB,omitempty"` +} + // +kubebuilder:skipversion type EvaluateOnExit struct { OnExitCode *string `json:"onExitCode,omitempty"` @@ -583,6 +588,13 @@ type RetryStrategy struct { Attempts *int64 `json:"attempts,omitempty"` } +// +kubebuilder:skipversion +type RuntimePlatform struct { + CPUArchitecture *string `json:"cpuArchitecture,omitempty"` + + OperatingSystemFamily *string `json:"operatingSystemFamily,omitempty"` +} + // +kubebuilder:skipversion type SchedulingPolicyDetail struct { ARN *string `json:"arn,omitempty"` diff --git a/apis/cloudfront/v1alpha1/zz_enums.go b/apis/cloudfront/v1alpha1/zz_enums.go index 09d3a56e35..60c1c433df 100644 --- a/apis/cloudfront/v1alpha1/zz_enums.go +++ b/apis/cloudfront/v1alpha1/zz_enums.go @@ -91,6 +91,7 @@ type FunctionRuntime string const ( FunctionRuntime_cloudfront_js_1_0 FunctionRuntime = "cloudfront-js-1.0" + FunctionRuntime_cloudfront_js_2_0 FunctionRuntime = "cloudfront-js-2.0" ) type FunctionStage string @@ -160,7 +161,8 @@ const ( type OriginAccessControlOriginTypes string const ( - OriginAccessControlOriginTypes_s3 OriginAccessControlOriginTypes = "s3" + OriginAccessControlOriginTypes_s3 OriginAccessControlOriginTypes = "s3" + OriginAccessControlOriginTypes_mediastore OriginAccessControlOriginTypes = "mediastore" ) type OriginAccessControlSigningBehaviors string @@ -191,6 +193,7 @@ const ( OriginRequestPolicyCookieBehavior_none OriginRequestPolicyCookieBehavior = "none" OriginRequestPolicyCookieBehavior_whitelist OriginRequestPolicyCookieBehavior = "whitelist" OriginRequestPolicyCookieBehavior_all OriginRequestPolicyCookieBehavior = "all" + OriginRequestPolicyCookieBehavior_allExcept OriginRequestPolicyCookieBehavior = "allExcept" ) type OriginRequestPolicyHeaderBehavior string @@ -200,6 +203,7 @@ const ( OriginRequestPolicyHeaderBehavior_whitelist OriginRequestPolicyHeaderBehavior = "whitelist" OriginRequestPolicyHeaderBehavior_allViewer OriginRequestPolicyHeaderBehavior = "allViewer" OriginRequestPolicyHeaderBehavior_allViewerAndWhitelistCloudFront OriginRequestPolicyHeaderBehavior = "allViewerAndWhitelistCloudFront" + OriginRequestPolicyHeaderBehavior_allExcept OriginRequestPolicyHeaderBehavior = "allExcept" ) type OriginRequestPolicyQueryStringBehavior string @@ -208,6 +212,7 @@ const ( OriginRequestPolicyQueryStringBehavior_none OriginRequestPolicyQueryStringBehavior = "none" OriginRequestPolicyQueryStringBehavior_whitelist OriginRequestPolicyQueryStringBehavior = "whitelist" OriginRequestPolicyQueryStringBehavior_all OriginRequestPolicyQueryStringBehavior = "all" + OriginRequestPolicyQueryStringBehavior_allExcept OriginRequestPolicyQueryStringBehavior = "allExcept" ) type OriginRequestPolicyType string diff --git a/apis/cloudfront/v1alpha1/zz_types.go b/apis/cloudfront/v1alpha1/zz_types.go index a27c396d78..f66a0a5c41 
100644 --- a/apis/cloudfront/v1alpha1/zz_types.go +++ b/apis/cloudfront/v1alpha1/zz_types.go @@ -179,10 +179,10 @@ type CachePolicyConfig struct { // viewer. // // The headers, cookies, and query strings that are included in the cache key - // are automatically included in requests that CloudFront sends to the origin. - // CloudFront sends a request when it can't find an object in its cache that - // matches the request's cache key. If you want to send values to the origin - // but not include them in the cache key, use OriginRequestPolicy. + // are also included in requests that CloudFront sends to the origin. CloudFront + // sends a request when it can't find an object in its cache that matches the + // request's cache key. If you want to send values to the origin but not include + // them in the cache key, use OriginRequestPolicy. ParametersInCacheKeyAndForwardedToOrigin *ParametersInCacheKeyAndForwardedToOrigin `json:"parametersInCacheKeyAndForwardedToOrigin,omitempty"` } @@ -233,10 +233,10 @@ type CachePolicySummary struct { // want objects to stay in the CloudFront cache. // // The headers, cookies, and query strings that are included in the cache key - // are automatically included in requests that CloudFront sends to the origin. - // CloudFront sends a request when it can't find a valid object in its cache - // that matches the request's cache key. If you want to send values to the origin - // but not include them in the cache key, use OriginRequestPolicy. + // are also included in requests that CloudFront sends to the origin. CloudFront + // sends a request when it can't find a valid object in its cache that matches + // the request's cache key. If you want to send values to the origin but not + // include them in the cache key, use OriginRequestPolicy. CachePolicy *CachePolicy_SDK `json:"cachePolicy,omitempty"` Type *string `json:"type_,omitempty"` @@ -257,10 +257,10 @@ type CachePolicy_SDK struct { // want objects to stay in the CloudFront cache. // // The headers, cookies, and query strings that are included in the cache key - // are automatically included in requests that CloudFront sends to the origin. - // CloudFront sends a request when it can't find a valid object in its cache - // that matches the request's cache key. If you want to send values to the origin - // but not include them in the cache key, use OriginRequestPolicy. + // are also included in requests that CloudFront sends to the origin. CloudFront + // sends a request when it can't find a valid object in its cache that matches + // the request's cache key. If you want to send values to the origin but not + // include them in the cache key, use OriginRequestPolicy. CachePolicyConfig *CachePolicyConfig `json:"cachePolicyConfig,omitempty"` ID *string `json:"id,omitempty"` @@ -1212,20 +1212,20 @@ type Origins struct { // +kubebuilder:skipversion type ParametersInCacheKeyAndForwardedToOrigin struct { // An object that determines whether any cookies in viewer requests (and if - // so, which cookies) are included in the cache key and automatically included - // in requests that CloudFront sends to the origin. + // so, which cookies) are included in the cache key and in requests that CloudFront + // sends to the origin. 
CookiesConfig *CachePolicyCookiesConfig `json:"cookiesConfig,omitempty"` EnableAcceptEncodingBrotli *bool `json:"enableAcceptEncodingBrotli,omitempty"` EnableAcceptEncodingGzip *bool `json:"enableAcceptEncodingGzip,omitempty"` // An object that determines whether any HTTP headers (and if so, which headers) - // are included in the cache key and automatically included in requests that - // CloudFront sends to the origin. + // are included in the cache key and in requests that CloudFront sends to the + // origin. HeadersConfig *CachePolicyHeadersConfig `json:"headersConfig,omitempty"` // An object that determines whether any URL query strings in viewer requests - // (and if so, which query strings) are included in the cache key and automatically - // included in requests that CloudFront sends to the origin. + // (and if so, which query strings) are included in the cache key and in requests + // that CloudFront sends to the origin. QueryStringsConfig *CachePolicyQueryStringsConfig `json:"queryStringsConfig,omitempty"` } diff --git a/apis/cloudwatchlogs/v1alpha1/zz_enums.go b/apis/cloudwatchlogs/v1alpha1/zz_enums.go index 87b67410e2..6fa97c373e 100644 --- a/apis/cloudwatchlogs/v1alpha1/zz_enums.go +++ b/apis/cloudwatchlogs/v1alpha1/zz_enums.go @@ -45,6 +45,12 @@ const ( ExportTaskStatusCode_RUNNING ExportTaskStatusCode = "RUNNING" ) +type InheritedProperty string + +const ( + InheritedProperty_ACCOUNT_DATA_PROTECTION InheritedProperty = "ACCOUNT_DATA_PROTECTION" +) + type OrderBy string const ( @@ -52,6 +58,12 @@ const ( OrderBy_LastEventTime OrderBy = "LastEventTime" ) +type PolicyType string + +const ( + PolicyType_DATA_PROTECTION_POLICY PolicyType = "DATA_PROTECTION_POLICY" +) + type QueryStatus string const ( @@ -64,6 +76,12 @@ const ( QueryStatus_Unknown QueryStatus = "Unknown" ) +type Scope string + +const ( + Scope_ALL Scope = "ALL" +) + type StandardUnit string const ( diff --git a/apis/cloudwatchlogs/v1alpha1/zz_generated.deepcopy.go b/apis/cloudwatchlogs/v1alpha1/zz_generated.deepcopy.go index 953b304489..d7d7a5a325 100644 --- a/apis/cloudwatchlogs/v1alpha1/zz_generated.deepcopy.go +++ b/apis/cloudwatchlogs/v1alpha1/zz_generated.deepcopy.go @@ -26,6 +26,31 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountPolicy) DeepCopyInto(out *AccountPolicy) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.LastUpdatedTime != nil { + in, out := &in.LastUpdatedTime, &out.LastUpdatedTime + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountPolicy. +func (in *AccountPolicy) DeepCopy() *AccountPolicy { + if in == nil { + return nil + } + out := new(AccountPolicy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
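[Editor's note on the CloudFront changes a few hunks above: the new "allExcept" value for the OriginRequestPolicy cookie, header, and query-string behaviors inverts the whitelist semantics, forwarding everything except the listed names. A minimal sketch against aws-sdk-go (the SDK this patch bumps to); the policy name and cookie name are illustrative placeholders.]

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func main() {
	// Forward all cookies to the origin except "session-id";
	// "allExcept" is one of the enum values added in this bump.
	cfg := &cloudfront.OriginRequestPolicyConfig{
		Name: aws.String("example-all-except"), // illustrative name
		CookiesConfig: &cloudfront.OriginRequestPolicyCookiesConfig{
			CookieBehavior: aws.String("allExcept"),
			Cookies: &cloudfront.CookieNames{
				Quantity: aws.Int64(1),
				Items:    []*string{aws.String("session-id")},
			},
		},
		HeadersConfig: &cloudfront.OriginRequestPolicyHeadersConfig{
			HeaderBehavior: aws.String("allViewer"),
		},
		QueryStringsConfig: &cloudfront.OriginRequestPolicyQueryStringsConfig{
			QueryStringBehavior: aws.String("all"),
		},
	}
	_ = cfg
}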
func (in *CustomLogGroupParameters) DeepCopyInto(out *CustomLogGroupParameters) { *out = *in @@ -383,6 +408,17 @@ func (in *LogGroup_SDK) DeepCopyInto(out *LogGroup_SDK) { *out = new(string) **out = **in } + if in.InheritedProperties != nil { + in, out := &in.InheritedProperties, &out.InheritedProperties + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.KMSKeyID != nil { in, out := &in.KMSKeyID, &out.KMSKeyID *out = new(string) diff --git a/apis/cloudwatchlogs/v1alpha1/zz_types.go b/apis/cloudwatchlogs/v1alpha1/zz_types.go index b0369be792..bb21b061a6 100644 --- a/apis/cloudwatchlogs/v1alpha1/zz_types.go +++ b/apis/cloudwatchlogs/v1alpha1/zz_types.go @@ -27,6 +27,13 @@ var ( _ = &metav1.Time{} ) +// +kubebuilder:skipversion +type AccountPolicy struct { + AccountID *string `json:"accountID,omitempty"` + + LastUpdatedTime *int64 `json:"lastUpdatedTime,omitempty"` +} + // +kubebuilder:skipversion type Destination struct { ARN *string `json:"arn,omitempty"` @@ -70,6 +77,8 @@ type LogGroup_SDK struct { DataProtectionStatus *string `json:"dataProtectionStatus,omitempty"` + InheritedProperties []*string `json:"inheritedProperties,omitempty"` + KMSKeyID *string `json:"kmsKeyID,omitempty"` LogGroupName *string `json:"logGroupName,omitempty"` @@ -77,7 +86,7 @@ type LogGroup_SDK struct { MetricFilterCount *int64 `json:"metricFilterCount,omitempty"` // The number of days to retain the log events in the specified log group. Possible // values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, - // 1827, 2192, 2557, 2922, 3288, and 3653. + // 1096, 1827, 2192, 2557, 2922, 3288, and 3653. // // To set a log group so that its log events do not expire, use DeleteRetentionPolicy // (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DeleteRetentionPolicy.html). diff --git a/apis/cognitoidentityprovider/v1alpha1/zz_enums.go b/apis/cognitoidentityprovider/v1alpha1/zz_enums.go index c8fbba37d0..ec8daffca0 100644 --- a/apis/cognitoidentityprovider/v1alpha1/zz_enums.go +++ b/apis/cognitoidentityprovider/v1alpha1/zz_enums.go @@ -173,6 +173,12 @@ const ( EventResponseType_InProgress EventResponseType = "InProgress" ) +type EventSourceName string + +const ( + EventSourceName_userNotification EventSourceName = "userNotification" +) + type EventType string const ( @@ -214,6 +220,12 @@ const ( IdentityProviderTypeType_OIDC IdentityProviderTypeType = "OIDC" ) +type LogLevel string + +const ( + LogLevel_ERROR LogLevel = "ERROR" +) + type MessageActionType string const ( diff --git a/apis/cognitoidentityprovider/v1alpha1/zz_generated.deepcopy.go b/apis/cognitoidentityprovider/v1alpha1/zz_generated.deepcopy.go index 4aa3fae8ea..7662e600d7 100644 --- a/apis/cognitoidentityprovider/v1alpha1/zz_generated.deepcopy.go +++ b/apis/cognitoidentityprovider/v1alpha1/zz_generated.deepcopy.go @@ -186,6 +186,26 @@ func (in *AuthenticationResultType) DeepCopy() *AuthenticationResultType { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudWatchLogsConfigurationType) DeepCopyInto(out *CloudWatchLogsConfigurationType) { + *out = *in + if in.LogGroupARN != nil { + in, out := &in.LogGroupARN, &out.LogGroupARN + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudWatchLogsConfigurationType. 
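[Editor's note: the retention comment above now documents 1096 days as a valid value, but the set remains fixed, so a cheap guard before calling PutRetentionPolicy avoids an API round trip. Illustrative sketch, not part of this patch; the value list is copied from the comment, the log group name is a placeholder.]

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

// validRetentionDays mirrors the documented RetentionInDays values,
// including 1096, which this SDK bump adds to the comment above.
var validRetentionDays = map[int64]bool{
	1: true, 3: true, 5: true, 7: true, 14: true, 30: true, 60: true,
	90: true, 120: true, 150: true, 180: true, 365: true, 400: true,
	545: true, 731: true, 1096: true, 1827: true, 2192: true,
	2557: true, 2922: true, 3288: true, 3653: true,
}

func retentionInput(group string, days int64) (*cloudwatchlogs.PutRetentionPolicyInput, error) {
	if !validRetentionDays[days] {
		return nil, fmt.Errorf("unsupported retention period: %d days", days)
	}
	return &cloudwatchlogs.PutRetentionPolicyInput{
		LogGroupName:    aws.String(group),
		RetentionInDays: aws.Int64(days),
	}, nil
}

func main() {
	in, err := retentionInput("/crossplane/example", 1096)
	fmt.Println(in, err)
}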
+func (in *CloudWatchLogsConfigurationType) DeepCopy() *CloudWatchLogsConfigurationType { + if in == nil { + return nil + } + out := new(CloudWatchLogsConfigurationType) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CodeDeliveryDetailsType) DeepCopyInto(out *CodeDeliveryDetailsType) { *out = *in @@ -1295,6 +1315,26 @@ func (in *LambdaConfigType) DeepCopy() *LambdaConfigType { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogDeliveryConfigurationType) DeepCopyInto(out *LogDeliveryConfigurationType) { + *out = *in + if in.UserPoolID != nil { + in, out := &in.UserPoolID, &out.UserPoolID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogDeliveryConfigurationType. +func (in *LogDeliveryConfigurationType) DeepCopy() *LogDeliveryConfigurationType { + if in == nil { + return nil + } + out := new(LogDeliveryConfigurationType) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MessageTemplateType) DeepCopyInto(out *MessageTemplateType) { *out = *in diff --git a/apis/cognitoidentityprovider/v1alpha1/zz_group.go b/apis/cognitoidentityprovider/v1alpha1/zz_group.go index f1d75877c6..ea094837ee 100644 --- a/apis/cognitoidentityprovider/v1alpha1/zz_group.go +++ b/apis/cognitoidentityprovider/v1alpha1/zz_group.go @@ -58,11 +58,13 @@ type GroupSpec struct { // GroupObservation defines the observed state of Group type GroupObservation struct { - // The date the group was created. + // The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) + // format, when the item was created. CreationDate *metav1.Time `json:"creationDate,omitempty"` // The name of the group. GroupName *string `json:"groupName,omitempty"` - // The date the group was last modified. + // The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) + // format, when the item was modified. LastModifiedDate *metav1.Time `json:"lastModifiedDate,omitempty"` // The role Amazon Resource Name (ARN) for the group. RoleARN *string `json:"roleARN,omitempty"` diff --git a/apis/cognitoidentityprovider/v1alpha1/zz_identity_provider.go b/apis/cognitoidentityprovider/v1alpha1/zz_identity_provider.go index d2bc648890..5e27b50e64 100644 --- a/apis/cognitoidentityprovider/v1alpha1/zz_identity_provider.go +++ b/apis/cognitoidentityprovider/v1alpha1/zz_identity_provider.go @@ -47,9 +47,11 @@ type IdentityProviderSpec struct { // IdentityProviderObservation defines the observed state of IdentityProvider type IdentityProviderObservation struct { - // The date the IdP was created. + // The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) + // format, when the item was created. CreationDate *metav1.Time `json:"creationDate,omitempty"` - // The date the IdP was last modified. + // The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) + // format, when the item was modified. LastModifiedDate *metav1.Time `json:"lastModifiedDate,omitempty"` // The IdP name. 
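[Editor's note: the CloudWatchLogsConfigurationType and LogDeliveryConfigurationType types added here, together with the LogLevel and EventSourceName enums above, back Cognito's log-delivery feature. A sketch assuming the SetLogDeliveryConfiguration request shape in aws-sdk-go v1.44.x; the pool ID and log group ARN are placeholders.]

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	cip "github.com/aws/aws-sdk-go/service/cognitoidentityprovider"
)

func main() {
	// Route user-notification error logs to a CloudWatch Logs group;
	// "userNotification" and "ERROR" match the enum values added above.
	in := &cip.SetLogDeliveryConfigurationInput{
		UserPoolId: aws.String("us-east-1_EXAMPLE"), // placeholder pool ID
		LogConfigurations: []*cip.LogConfigurationType{{
			EventSource: aws.String("userNotification"),
			LogLevel:    aws.String("ERROR"),
			CloudWatchLogsConfiguration: &cip.CloudWatchLogsConfigurationType{
				// placeholder ARN
				LogGroupArn: aws.String("arn:aws:logs:us-east-1:111122223333:log-group:/cognito/example"),
			},
		}},
	}
	_ = in
}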
ProviderName *string `json:"providerName,omitempty"` diff --git a/apis/cognitoidentityprovider/v1alpha1/zz_types.go b/apis/cognitoidentityprovider/v1alpha1/zz_types.go index 3e4a775b84..ea0b95e27e 100644 --- a/apis/cognitoidentityprovider/v1alpha1/zz_types.go +++ b/apis/cognitoidentityprovider/v1alpha1/zz_types.go @@ -71,6 +71,11 @@ type AuthenticationResultType struct { TokenType *string `json:"tokenType,omitempty"` } +// +kubebuilder:skipversion +type CloudWatchLogsConfigurationType struct { + LogGroupARN *string `json:"logGroupARN,omitempty"` +} + // +kubebuilder:skipversion type CodeDeliveryDetailsType struct { Destination *string `json:"destination,omitempty"` @@ -257,6 +262,11 @@ type LambdaConfigType struct { VerifyAuthChallengeResponse *string `json:"verifyAuthChallengeResponse,omitempty"` } +// +kubebuilder:skipversion +type LogDeliveryConfigurationType struct { + UserPoolID *string `json:"userPoolID,omitempty"` +} + // +kubebuilder:skipversion type MessageTemplateType struct { EmailMessage *string `json:"emailMessage,omitempty"` @@ -492,7 +502,7 @@ type UserPoolClientType struct { // The Amazon Pinpoint analytics configuration necessary to collect metrics // for a user pool. // - // In Regions where Amazon Pinpointisn't available, user pools only support + // In Regions where Amazon Pinpoint isn't available, user pools only support // sending events to Amazon Pinpoint projects in us-east-1. In Regions where // Amazon Pinpoint is available, user pools support sending events to Amazon // Pinpoint projects within that same Region. @@ -653,7 +663,12 @@ type UserPoolType struct { // and phone number attributes. For more information, see Verifying updates // to email addresses and phone numbers (https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-email-phone-verification.html#user-pool-settings-verifications-verify-attribute-updates). UserAttributeUpdateSettings *UserAttributeUpdateSettingsType `json:"userAttributeUpdateSettings,omitempty"` - // The user pool add-ons type. + // User pool add-ons. Contains settings for activation of advanced security + // features. To log user security information but take no action, set to AUDIT. + // To configure automatic security responses to risky traffic to your user pool, + // set to ENFORCED. + // + // For more information, see Adding advanced security to a user pool (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-advanced-security.html). UserPoolAddOns *UserPoolAddOnsType `json:"userPoolAddOns,omitempty"` UserPoolTags map[string]*string `json:"userPoolTags,omitempty"` diff --git a/apis/cognitoidentityprovider/v1alpha1/zz_user_pool.go b/apis/cognitoidentityprovider/v1alpha1/zz_user_pool.go index d1cecd8bd2..b86ae11967 100644 --- a/apis/cognitoidentityprovider/v1alpha1/zz_user_pool.go +++ b/apis/cognitoidentityprovider/v1alpha1/zz_user_pool.go @@ -107,8 +107,12 @@ type UserPoolParameters struct { // and phone number attributes. For more information, see Verifying updates // to email addresses and phone numbers (https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-email-phone-verification.html#user-pool-settings-verifications-verify-attribute-updates). UserAttributeUpdateSettings *UserAttributeUpdateSettingsType `json:"userAttributeUpdateSettings,omitempty"` - // Enables advanced security risk detection. Set the key AdvancedSecurityMode - // to the value "AUDIT". + // User pool add-ons. Contains settings for activation of advanced security + // features. 
To log user security information but take no action, set to AUDIT. + // To configure automatic security responses to risky traffic to your user pool, + // set to ENFORCED. + // + // For more information, see Adding advanced security to a user pool (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-advanced-security.html). UserPoolAddOns *UserPoolAddOnsType `json:"userPoolAddOns,omitempty"` // The tag keys and values to assign to the user pool. A tag is a label that // you can use to categorize and manage user pools in different ways, such as @@ -117,10 +121,17 @@ type UserPoolParameters struct { // Specifies whether a user can use an email address or phone number as a username // when they sign up. UsernameAttributes []*string `json:"usernameAttributes,omitempty"` - // Case sensitivity on the username input for the selected sign-in option. For - // example, when case sensitivity is set to False, users can sign in using either - // "username" or "Username". This configuration is immutable once it has been - // set. For more information, see UsernameConfigurationType (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UsernameConfigurationType.html). + // Case sensitivity on the username input for the selected sign-in option. When + // case sensitivity is set to False (case insensitive), users can sign in with + // any combination of capital and lowercase letters. For example, username, + // USERNAME, or UserName, or for email, email@example.com or EMaiL@eXamplE.Com. + // For most use cases, set case sensitivity to False (case insensitive) as a + // best practice. When usernames and email addresses are case insensitive, Amazon + // Cognito treats any variation in case as the same user, and prevents a case + // variation from being assigned to the same attribute for a different user. + // + // This configuration is immutable after you set it. For more information, see + // UsernameConfigurationType (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UsernameConfigurationType.html). UsernameConfiguration *UsernameConfigurationType `json:"usernameConfiguration,omitempty"` // The template for the verification message that the user sees when the app // requests permission to access the user's information. @@ -138,7 +149,8 @@ type UserPoolSpec struct { type UserPoolObservation struct { // The Amazon Resource Name (ARN) for the user pool. ARN *string `json:"arn,omitempty"` - // The date the user pool was created. + // The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) + // format, when the item was created. CreationDate *metav1.Time `json:"creationDate,omitempty"` // A custom domain name that you provide to Amazon Cognito. This parameter applies // only if you use a custom domain to host the sign-up and sign-in pages for @@ -156,11 +168,20 @@ type UserPoolObservation struct { EstimatedNumberOfUsers *int64 `json:"estimatedNumberOfUsers,omitempty"` // The ID of the user pool. ID *string `json:"id,omitempty"` - // The date the user pool was last modified. + // The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) + // format, when the item was modified. LastModifiedDate *metav1.Time `json:"lastModifiedDate,omitempty"` // The name of the user pool. Name *string `json:"name,omitempty"` - // A container with the schema attributes of a user pool. + // A list of the user attributes and their properties in your user pool. 
The + // attribute schema contains standard attributes, custom attributes with a custom: + // prefix, and developer attributes with a dev: prefix. For more information, + // see User pool attributes (https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-attributes.html). + // + // Developer-only attributes are a legacy feature of user pools, are read-only + // to all app clients. You can create and update developer-only attributes only + // with IAM-authenticated API operations. Use app client read/write permissions + // instead. SchemaAttributes []*SchemaAttributeType `json:"schemaAttributes,omitempty"` // The reason why the SMS configuration can't send the messages to your users. // @@ -177,7 +198,7 @@ type UserPoolObservation struct { // // The Amazon Web Services account is in the SNS SMS Sandbox and messages will // only reach verified end users. This parameter won’t get populated with - // SNSSandbox if the IAM user creating the user pool doesn’t have SNS permissions. + // SNSSandbox if the user creating the user pool doesn’t have SNS permissions. // To learn how to move your Amazon Web Services account out of the sandbox, // see Moving out of the SMS sandbox (https://docs.aws.amazon.com/sns/latest/dg/sns-sms-sandbox-moving-to-production.html). SmsConfigurationFailure *string `json:"smsConfigurationFailure,omitempty"` diff --git a/apis/cognitoidentityprovider/v1alpha1/zz_user_pool_client.go b/apis/cognitoidentityprovider/v1alpha1/zz_user_pool_client.go index b68714755c..f5e2943d5c 100644 --- a/apis/cognitoidentityprovider/v1alpha1/zz_user_pool_client.go +++ b/apis/cognitoidentityprovider/v1alpha1/zz_user_pool_client.go @@ -59,8 +59,24 @@ type UserPoolClientParameters struct { // Issue the access token from the /oauth2/token endpoint directly to a non-person // user using a combination of the client ID and client secret. AllowedOAuthFlows []*string `json:"allowedOAuthFlows,omitempty"` - // Set to true if the client is allowed to follow the OAuth protocol when interacting - // with Amazon Cognito user pools. + // Set to true to use OAuth 2.0 features in your user pool app client. + // + // AllowedOAuthFlowsUserPoolClient must be true before you can configure the + // following features in your app client. + // + // * CallBackURLs: Callback URLs. + // + // * LogoutURLs: Sign-out redirect URLs. + // + // * AllowedOAuthScopes: OAuth 2.0 scopes. + // + // * AllowedOAuthFlows: Support for authorization code, implicit, and client + // credentials OAuth 2.0 grants. + // + // To use OAuth 2.0 features, configure one of these features in the Amazon + // Cognito console or set AllowedOAuthFlowsUserPoolClient to true in a CreateUserPoolClient + // or UpdateUserPoolClient API request. If you don't set a value for AllowedOAuthFlowsUserPoolClient + // in a request with the CLI or SDKs, it defaults to false. AllowedOAuthFlowsUserPoolClient *bool `json:"allowedOAuthFlowsUserPoolClient,omitempty"` // The allowed OAuth scopes. Possible values provided by OAuth are phone, email, // openid, and profile. Possible values provided by Amazon Web Services are @@ -175,8 +191,8 @@ type UserPoolClientParameters struct { // hours, your user can authenticate their session with their ID token for 10 // hours. // - // The default time unit for AccessTokenValidity in an API request is hours. - // Valid range is displayed below in seconds. + // The default time unit for IdTokenValidity in an API request is hours. Valid + // range is displayed below in seconds. 
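[Editor's note: the expanded AllowedOAuthFlowsUserPoolClient comment above spells out that CallbackURLs, LogoutURLs, AllowedOAuthScopes, and AllowedOAuthFlows only take effect when the flag is true, and that SDK requests default it to false when unset. A sketch of a consistent aws-sdk-go request; IDs and URLs are placeholders.]

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	cip "github.com/aws/aws-sdk-go/service/cognitoidentityprovider"
)

func main() {
	in := &cip.CreateUserPoolClientInput{
		UserPoolId: aws.String("us-east-1_EXAMPLE"), // placeholder
		ClientName: aws.String("web-client"),        // placeholder
		// Must be true, or the OAuth fields below are rejected; omitting
		// it in an SDK request defaults to false per the comment above.
		AllowedOAuthFlowsUserPoolClient: aws.Bool(true),
		AllowedOAuthFlows:               []*string{aws.String("code")},
		AllowedOAuthScopes:              []*string{aws.String("openid"), aws.String("email")},
		CallbackURLs:                    []*string{aws.String("https://example.com/callback")},
		LogoutURLs:                      []*string{aws.String("https://example.com/logout")},
	}
	_ = in
}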
// // If you don't specify otherwise in the configuration of your app client, your // ID tokens are valid for one hour. @@ -251,9 +267,11 @@ type UserPoolClientObservation struct { ClientID *string `json:"clientID,omitempty"` // The client secret from the user pool request of the client type. ClientSecret *string `json:"clientSecret,omitempty"` - // The date the user pool client was created. + // The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) + // format, when the item was created. CreationDate *metav1.Time `json:"creationDate,omitempty"` - // The date the user pool client was last modified. + // The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) + // format, when the item was modified. LastModifiedDate *metav1.Time `json:"lastModifiedDate,omitempty"` // The user pool ID for the user pool client. UserPoolID *string `json:"userPoolID,omitempty"` diff --git a/apis/dynamodb/v1alpha1/zz_generated.deepcopy.go b/apis/dynamodb/v1alpha1/zz_generated.deepcopy.go index 496f1d0a42..29a5044fd6 100644 --- a/apis/dynamodb/v1alpha1/zz_generated.deepcopy.go +++ b/apis/dynamodb/v1alpha1/zz_generated.deepcopy.go @@ -862,11 +862,6 @@ func (in *Endpoint) DeepCopyInto(out *Endpoint) { *out = new(string) **out = **in } - if in.CachePeriodInMinutes != nil { - in, out := &in.CachePeriodInMinutes, &out.CachePeriodInMinutes - *out = new(int64) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. @@ -2629,6 +2624,11 @@ func (in *TableDescription) DeepCopyInto(out *TableDescription) { in, out := &in.CreationDateTime, &out.CreationDateTime *out = (*in).DeepCopy() } + if in.DeletionProtectionEnabled != nil { + in, out := &in.DeletionProtectionEnabled, &out.DeletionProtectionEnabled + *out = new(bool) + **out = **in + } if in.GlobalSecondaryIndexes != nil { in, out := &in.GlobalSecondaryIndexes, &out.GlobalSecondaryIndexes *out = make([]*GlobalSecondaryIndexDescription, len(*in)) @@ -2906,6 +2906,11 @@ func (in *TableParameters) DeepCopyInto(out *TableParameters) { *out = new(string) **out = **in } + if in.DeletionProtectionEnabled != nil { + in, out := &in.DeletionProtectionEnabled, &out.DeletionProtectionEnabled + *out = new(bool) + **out = **in + } if in.GlobalSecondaryIndexes != nil { in, out := &in.GlobalSecondaryIndexes, &out.GlobalSecondaryIndexes *out = make([]*GlobalSecondaryIndex, len(*in)) diff --git a/apis/dynamodb/v1alpha1/zz_table.go b/apis/dynamodb/v1alpha1/zz_table.go index 70ab291230..e1a9011a38 100644 --- a/apis/dynamodb/v1alpha1/zz_table.go +++ b/apis/dynamodb/v1alpha1/zz_table.go @@ -41,6 +41,9 @@ type TableParameters struct { // * PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable // workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand). BillingMode *string `json:"billingMode,omitempty"` + // Indicates whether deletion protection is to be enabled (true) or disabled + // (false) on the table. + DeletionProtectionEnabled *bool `json:"deletionProtectionEnabled,omitempty"` // One or more global secondary indexes (the maximum is 20) to be created on // the table. 
Each global secondary index in the array includes the following: // diff --git a/apis/dynamodb/v1alpha1/zz_types.go b/apis/dynamodb/v1alpha1/zz_types.go index 9ad6873d11..27d2b69ea7 100644 --- a/apis/dynamodb/v1alpha1/zz_types.go +++ b/apis/dynamodb/v1alpha1/zz_types.go @@ -218,8 +218,6 @@ type DeleteReplicationGroupMemberAction struct { // +kubebuilder:skipversion type Endpoint struct { Address *string `json:"address,omitempty"` - - CachePeriodInMinutes *int64 `json:"cachePeriodInMinutes,omitempty"` } // +kubebuilder:skipversion @@ -721,6 +719,8 @@ type TableDescription struct { CreationDateTime *metav1.Time `json:"creationDateTime,omitempty"` + DeletionProtectionEnabled *bool `json:"deletionProtectionEnabled,omitempty"` + GlobalSecondaryIndexes []*GlobalSecondaryIndexDescription `json:"globalSecondaryIndexes,omitempty"` GlobalTableVersion *string `json:"globalTableVersion,omitempty"` diff --git a/apis/ec2/generator-config.yaml b/apis/ec2/generator-config.yaml index 0749dd5ac2..4542545e7b 100644 --- a/apis/ec2/generator-config.yaml +++ b/apis/ec2/generator-config.yaml @@ -54,7 +54,9 @@ ignore: - VpnConnectionRoute - VpnConnection - VpnGateway + - InstanceConnectEndpoint - Ipam + - IpamResourceDiscovery - IpamScope - IpamPool - NetworkInsightsAccessScope diff --git a/apis/ec2/v1alpha1/zz_enums.go b/apis/ec2/v1alpha1/zz_enums.go index 7510fd45f0..06016fe622 100644 --- a/apis/ec2/v1alpha1/zz_enums.go +++ b/apis/ec2/v1alpha1/zz_enums.go @@ -127,6 +127,13 @@ const ( AllowsMultipleInstanceTypes_off AllowsMultipleInstanceTypes = "off" ) +type AmdSevSnpSpecification string + +const ( + AmdSevSnpSpecification_enabled AmdSevSnpSpecification = "enabled" + AmdSevSnpSpecification_disabled AmdSevSnpSpecification = "disabled" +) + type AnalysisStatus string const ( @@ -262,8 +269,9 @@ const ( type BootModeValues string const ( - BootModeValues_legacy_bios BootModeValues = "legacy-bios" - BootModeValues_uefi BootModeValues = "uefi" + BootModeValues_legacy_bios BootModeValues = "legacy-bios" + BootModeValues_uefi BootModeValues = "uefi" + BootModeValues_uefi_preferred BootModeValues = "uefi-preferred" ) type BundleTaskState string @@ -642,6 +650,17 @@ const ( EBSOptimizedSupport_default EBSOptimizedSupport = "default" ) +type EC2InstanceConnectEndpointState string + +const ( + EC2InstanceConnectEndpointState_create_in_progress EC2InstanceConnectEndpointState = "create-in-progress" + EC2InstanceConnectEndpointState_create_complete EC2InstanceConnectEndpointState = "create-complete" + EC2InstanceConnectEndpointState_create_failed EC2InstanceConnectEndpointState = "create-failed" + EC2InstanceConnectEndpointState_delete_in_progress EC2InstanceConnectEndpointState = "delete-in-progress" + EC2InstanceConnectEndpointState_delete_complete EC2InstanceConnectEndpointState = "delete-complete" + EC2InstanceConnectEndpointState_delete_failed EC2InstanceConnectEndpointState = "delete-failed" +) + type ENASupport string const ( @@ -882,6 +901,13 @@ const ( HTTPTokensState_required HTTPTokensState = "required" ) +type HostMaintenance string + +const ( + HostMaintenance_on HostMaintenance = "on" + HostMaintenance_off HostMaintenance = "off" +) + type HostRecovery string const ( @@ -929,6 +955,13 @@ const ( IPAMAddressHistoryResourceType_instance IPAMAddressHistoryResourceType = "instance" ) +type IPAMAssociatedResourceDiscoveryStatus string + +const ( + IPAMAssociatedResourceDiscoveryStatus_active IPAMAssociatedResourceDiscoveryStatus = "active" + IPAMAssociatedResourceDiscoveryStatus_not_found 
IPAMAssociatedResourceDiscoveryStatus = "not-found" +) + type IPAMComplianceStatus string const ( @@ -938,6 +971,14 @@ const ( IPAMComplianceStatus_ignored IPAMComplianceStatus = "ignored" ) +type IPAMDiscoveryFailureCode string + +const ( + IPAMDiscoveryFailureCode_assume_role_failure IPAMDiscoveryFailureCode = "assume-role-failure" + IPAMDiscoveryFailureCode_throttling_failure IPAMDiscoveryFailureCode = "throttling-failure" + IPAMDiscoveryFailureCode_unauthorized_failure IPAMDiscoveryFailureCode = "unauthorized-failure" +) + type IPAMManagementState string const ( @@ -973,6 +1014,7 @@ type IPAMPoolCIDRFailureCode string const ( IPAMPoolCIDRFailureCode_cidr_not_available IPAMPoolCIDRFailureCode = "cidr-not-available" + IPAMPoolCIDRFailureCode_limit_exceeded IPAMPoolCIDRFailureCode = "limit-exceeded" ) type IPAMPoolCIDRState string @@ -988,6 +1030,13 @@ const ( IPAMPoolCIDRState_failed_import IPAMPoolCIDRState = "failed-import" ) +type IPAMPoolPublicIPSource string + +const ( + IPAMPoolPublicIPSource_amazon IPAMPoolPublicIPSource = "amazon" + IPAMPoolPublicIPSource_byoip IPAMPoolPublicIPSource = "byoip" +) + type IPAMPoolState string const ( @@ -1005,6 +1054,37 @@ const ( IPAMPoolState_restore_in_progress IPAMPoolState = "restore-in-progress" ) +type IPAMResourceDiscoveryAssociationState string + +const ( + IPAMResourceDiscoveryAssociationState_associate_in_progress IPAMResourceDiscoveryAssociationState = "associate-in-progress" + IPAMResourceDiscoveryAssociationState_associate_complete IPAMResourceDiscoveryAssociationState = "associate-complete" + IPAMResourceDiscoveryAssociationState_associate_failed IPAMResourceDiscoveryAssociationState = "associate-failed" + IPAMResourceDiscoveryAssociationState_disassociate_in_progress IPAMResourceDiscoveryAssociationState = "disassociate-in-progress" + IPAMResourceDiscoveryAssociationState_disassociate_complete IPAMResourceDiscoveryAssociationState = "disassociate-complete" + IPAMResourceDiscoveryAssociationState_disassociate_failed IPAMResourceDiscoveryAssociationState = "disassociate-failed" + IPAMResourceDiscoveryAssociationState_isolate_in_progress IPAMResourceDiscoveryAssociationState = "isolate-in-progress" + IPAMResourceDiscoveryAssociationState_isolate_complete IPAMResourceDiscoveryAssociationState = "isolate-complete" + IPAMResourceDiscoveryAssociationState_restore_in_progress IPAMResourceDiscoveryAssociationState = "restore-in-progress" +) + +type IPAMResourceDiscoveryState string + +const ( + IPAMResourceDiscoveryState_create_in_progress IPAMResourceDiscoveryState = "create-in-progress" + IPAMResourceDiscoveryState_create_complete IPAMResourceDiscoveryState = "create-complete" + IPAMResourceDiscoveryState_create_failed IPAMResourceDiscoveryState = "create-failed" + IPAMResourceDiscoveryState_modify_in_progress IPAMResourceDiscoveryState = "modify-in-progress" + IPAMResourceDiscoveryState_modify_complete IPAMResourceDiscoveryState = "modify-complete" + IPAMResourceDiscoveryState_modify_failed IPAMResourceDiscoveryState = "modify-failed" + IPAMResourceDiscoveryState_delete_in_progress IPAMResourceDiscoveryState = "delete-in-progress" + IPAMResourceDiscoveryState_delete_complete IPAMResourceDiscoveryState = "delete-complete" + IPAMResourceDiscoveryState_delete_failed IPAMResourceDiscoveryState = "delete-failed" + IPAMResourceDiscoveryState_isolate_in_progress IPAMResourceDiscoveryState = "isolate-in-progress" + IPAMResourceDiscoveryState_isolate_complete IPAMResourceDiscoveryState = "isolate-complete" + 
IPAMResourceDiscoveryState_restore_in_progress IPAMResourceDiscoveryState = "restore-in-progress" +) + type IPAMResourceType string const ( @@ -1149,6 +1229,13 @@ const ( InstanceAutoRecoveryState_default InstanceAutoRecoveryState = "default" ) +type InstanceBootModeValues string + +const ( + InstanceBootModeValues_legacy_bios InstanceBootModeValues = "legacy-bios" + InstanceBootModeValues_uefi InstanceBootModeValues = "uefi" +) + type InstanceEventWindowState string const ( @@ -1824,6 +1911,128 @@ const ( InstanceType_trn1_2xlarge InstanceType = "trn1.2xlarge" InstanceType_trn1_32xlarge InstanceType = "trn1.32xlarge" InstanceType_hpc6id_32xlarge InstanceType = "hpc6id.32xlarge" + InstanceType_c6in_large InstanceType = "c6in.large" + InstanceType_c6in_xlarge InstanceType = "c6in.xlarge" + InstanceType_c6in_2xlarge InstanceType = "c6in.2xlarge" + InstanceType_c6in_4xlarge InstanceType = "c6in.4xlarge" + InstanceType_c6in_8xlarge InstanceType = "c6in.8xlarge" + InstanceType_c6in_12xlarge InstanceType = "c6in.12xlarge" + InstanceType_c6in_16xlarge InstanceType = "c6in.16xlarge" + InstanceType_c6in_24xlarge InstanceType = "c6in.24xlarge" + InstanceType_c6in_32xlarge InstanceType = "c6in.32xlarge" + InstanceType_m6in_large InstanceType = "m6in.large" + InstanceType_m6in_xlarge InstanceType = "m6in.xlarge" + InstanceType_m6in_2xlarge InstanceType = "m6in.2xlarge" + InstanceType_m6in_4xlarge InstanceType = "m6in.4xlarge" + InstanceType_m6in_8xlarge InstanceType = "m6in.8xlarge" + InstanceType_m6in_12xlarge InstanceType = "m6in.12xlarge" + InstanceType_m6in_16xlarge InstanceType = "m6in.16xlarge" + InstanceType_m6in_24xlarge InstanceType = "m6in.24xlarge" + InstanceType_m6in_32xlarge InstanceType = "m6in.32xlarge" + InstanceType_m6idn_large InstanceType = "m6idn.large" + InstanceType_m6idn_xlarge InstanceType = "m6idn.xlarge" + InstanceType_m6idn_2xlarge InstanceType = "m6idn.2xlarge" + InstanceType_m6idn_4xlarge InstanceType = "m6idn.4xlarge" + InstanceType_m6idn_8xlarge InstanceType = "m6idn.8xlarge" + InstanceType_m6idn_12xlarge InstanceType = "m6idn.12xlarge" + InstanceType_m6idn_16xlarge InstanceType = "m6idn.16xlarge" + InstanceType_m6idn_24xlarge InstanceType = "m6idn.24xlarge" + InstanceType_m6idn_32xlarge InstanceType = "m6idn.32xlarge" + InstanceType_r6in_large InstanceType = "r6in.large" + InstanceType_r6in_xlarge InstanceType = "r6in.xlarge" + InstanceType_r6in_2xlarge InstanceType = "r6in.2xlarge" + InstanceType_r6in_4xlarge InstanceType = "r6in.4xlarge" + InstanceType_r6in_8xlarge InstanceType = "r6in.8xlarge" + InstanceType_r6in_12xlarge InstanceType = "r6in.12xlarge" + InstanceType_r6in_16xlarge InstanceType = "r6in.16xlarge" + InstanceType_r6in_24xlarge InstanceType = "r6in.24xlarge" + InstanceType_r6in_32xlarge InstanceType = "r6in.32xlarge" + InstanceType_r6idn_large InstanceType = "r6idn.large" + InstanceType_r6idn_xlarge InstanceType = "r6idn.xlarge" + InstanceType_r6idn_2xlarge InstanceType = "r6idn.2xlarge" + InstanceType_r6idn_4xlarge InstanceType = "r6idn.4xlarge" + InstanceType_r6idn_8xlarge InstanceType = "r6idn.8xlarge" + InstanceType_r6idn_12xlarge InstanceType = "r6idn.12xlarge" + InstanceType_r6idn_16xlarge InstanceType = "r6idn.16xlarge" + InstanceType_r6idn_24xlarge InstanceType = "r6idn.24xlarge" + InstanceType_r6idn_32xlarge InstanceType = "r6idn.32xlarge" + InstanceType_c7g_metal InstanceType = "c7g.metal" + InstanceType_m7g_medium InstanceType = "m7g.medium" + InstanceType_m7g_large InstanceType = "m7g.large" + InstanceType_m7g_xlarge InstanceType = 
"m7g.xlarge" + InstanceType_m7g_2xlarge InstanceType = "m7g.2xlarge" + InstanceType_m7g_4xlarge InstanceType = "m7g.4xlarge" + InstanceType_m7g_8xlarge InstanceType = "m7g.8xlarge" + InstanceType_m7g_12xlarge InstanceType = "m7g.12xlarge" + InstanceType_m7g_16xlarge InstanceType = "m7g.16xlarge" + InstanceType_m7g_metal InstanceType = "m7g.metal" + InstanceType_r7g_medium InstanceType = "r7g.medium" + InstanceType_r7g_large InstanceType = "r7g.large" + InstanceType_r7g_xlarge InstanceType = "r7g.xlarge" + InstanceType_r7g_2xlarge InstanceType = "r7g.2xlarge" + InstanceType_r7g_4xlarge InstanceType = "r7g.4xlarge" + InstanceType_r7g_8xlarge InstanceType = "r7g.8xlarge" + InstanceType_r7g_12xlarge InstanceType = "r7g.12xlarge" + InstanceType_r7g_16xlarge InstanceType = "r7g.16xlarge" + InstanceType_r7g_metal InstanceType = "r7g.metal" + InstanceType_c6in_metal InstanceType = "c6in.metal" + InstanceType_m6in_metal InstanceType = "m6in.metal" + InstanceType_m6idn_metal InstanceType = "m6idn.metal" + InstanceType_r6in_metal InstanceType = "r6in.metal" + InstanceType_r6idn_metal InstanceType = "r6idn.metal" + InstanceType_inf2_xlarge InstanceType = "inf2.xlarge" + InstanceType_inf2_8xlarge InstanceType = "inf2.8xlarge" + InstanceType_inf2_24xlarge InstanceType = "inf2.24xlarge" + InstanceType_inf2_48xlarge InstanceType = "inf2.48xlarge" + InstanceType_trn1n_32xlarge InstanceType = "trn1n.32xlarge" + InstanceType_i4g_large InstanceType = "i4g.large" + InstanceType_i4g_xlarge InstanceType = "i4g.xlarge" + InstanceType_i4g_2xlarge InstanceType = "i4g.2xlarge" + InstanceType_i4g_4xlarge InstanceType = "i4g.4xlarge" + InstanceType_i4g_8xlarge InstanceType = "i4g.8xlarge" + InstanceType_i4g_16xlarge InstanceType = "i4g.16xlarge" + InstanceType_hpc7g_4xlarge InstanceType = "hpc7g.4xlarge" + InstanceType_hpc7g_8xlarge InstanceType = "hpc7g.8xlarge" + InstanceType_hpc7g_16xlarge InstanceType = "hpc7g.16xlarge" + InstanceType_c7gn_medium InstanceType = "c7gn.medium" + InstanceType_c7gn_large InstanceType = "c7gn.large" + InstanceType_c7gn_xlarge InstanceType = "c7gn.xlarge" + InstanceType_c7gn_2xlarge InstanceType = "c7gn.2xlarge" + InstanceType_c7gn_4xlarge InstanceType = "c7gn.4xlarge" + InstanceType_c7gn_8xlarge InstanceType = "c7gn.8xlarge" + InstanceType_c7gn_12xlarge InstanceType = "c7gn.12xlarge" + InstanceType_c7gn_16xlarge InstanceType = "c7gn.16xlarge" + InstanceType_p5_48xlarge InstanceType = "p5.48xlarge" + InstanceType_m7i_large InstanceType = "m7i.large" + InstanceType_m7i_xlarge InstanceType = "m7i.xlarge" + InstanceType_m7i_2xlarge InstanceType = "m7i.2xlarge" + InstanceType_m7i_4xlarge InstanceType = "m7i.4xlarge" + InstanceType_m7i_8xlarge InstanceType = "m7i.8xlarge" + InstanceType_m7i_12xlarge InstanceType = "m7i.12xlarge" + InstanceType_m7i_16xlarge InstanceType = "m7i.16xlarge" + InstanceType_m7i_24xlarge InstanceType = "m7i.24xlarge" + InstanceType_m7i_48xlarge InstanceType = "m7i.48xlarge" + InstanceType_m7i_flex_large InstanceType = "m7i-flex.large" + InstanceType_m7i_flex_xlarge InstanceType = "m7i-flex.xlarge" + InstanceType_m7i_flex_2xlarge InstanceType = "m7i-flex.2xlarge" + InstanceType_m7i_flex_4xlarge InstanceType = "m7i-flex.4xlarge" + InstanceType_m7i_flex_8xlarge InstanceType = "m7i-flex.8xlarge" + InstanceType_m7a_medium InstanceType = "m7a.medium" + InstanceType_m7a_large InstanceType = "m7a.large" + InstanceType_m7a_xlarge InstanceType = "m7a.xlarge" + InstanceType_m7a_2xlarge InstanceType = "m7a.2xlarge" + InstanceType_m7a_4xlarge InstanceType = "m7a.4xlarge" + 
InstanceType_m7a_8xlarge InstanceType = "m7a.8xlarge" + InstanceType_m7a_12xlarge InstanceType = "m7a.12xlarge" + InstanceType_m7a_16xlarge InstanceType = "m7a.16xlarge" + InstanceType_m7a_24xlarge InstanceType = "m7a.24xlarge" + InstanceType_m7a_32xlarge InstanceType = "m7a.32xlarge" + InstanceType_m7a_48xlarge InstanceType = "m7a.48xlarge" + InstanceType_m7a_metal_48xl InstanceType = "m7a.metal-48xl" + InstanceType_hpc7a_12xlarge InstanceType = "hpc7a.12xlarge" + InstanceType_hpc7a_24xlarge InstanceType = "hpc7a.24xlarge" + InstanceType_hpc7a_48xlarge InstanceType = "hpc7a.48xlarge" + InstanceType_hpc7a_96xlarge InstanceType = "hpc7a.96xlarge" ) type InstanceTypeHypervisor string @@ -2036,6 +2245,17 @@ const ( MulticastSupportValue_disable MulticastSupportValue = "disable" ) +type NATGatewayAddressStatus string + +const ( + NATGatewayAddressStatus_assigning NATGatewayAddressStatus = "assigning" + NATGatewayAddressStatus_unassigning NATGatewayAddressStatus = "unassigning" + NATGatewayAddressStatus_associating NATGatewayAddressStatus = "associating" + NATGatewayAddressStatus_disassociating NATGatewayAddressStatus = "disassociating" + NATGatewayAddressStatus_succeeded NATGatewayAddressStatus = "succeeded" + NATGatewayAddressStatus_failed NATGatewayAddressStatus = "failed" +) + type NATGatewayState string const ( @@ -2104,6 +2324,20 @@ const ( NetworkInterfaceType_aws_codestar_connections_managed NetworkInterfaceType = "aws_codestar_connections_managed" ) +type NitroEnclavesSupport string + +const ( + NitroEnclavesSupport_unsupported NitroEnclavesSupport = "unsupported" + NitroEnclavesSupport_supported NitroEnclavesSupport = "supported" +) + +type NitroTPMSupport string + +const ( + NitroTPMSupport_unsupported NitroTPMSupport = "unsupported" + NitroTPMSupport_supported NitroTPMSupport = "supported" +) + type OfferingClassType string const ( @@ -2427,6 +2661,9 @@ const ( ResourceType_verified_access_trust_provider ResourceType = "verified-access-trust-provider" ResourceType_vpn_connection_device_type ResourceType = "vpn-connection-device-type" ResourceType_vpc_block_public_access_exclusion ResourceType = "vpc-block-public-access-exclusion" + ResourceType_ipam_resource_discovery ResourceType = "ipam-resource-discovery" + ResourceType_ipam_resource_discovery_association ResourceType = "ipam-resource-discovery-association" + ResourceType_instance_connect_endpoint ResourceType = "instance-connect-endpoint" ) type RootDeviceType string @@ -2468,6 +2705,14 @@ const ( RuleAction_deny RuleAction = "deny" ) +type SSEType string + +const ( + SSEType_sse_ebs SSEType = "sse-ebs" + SSEType_sse_kms SSEType = "sse-kms" + SSEType_none SSEType = "none" +) + type Scope string const ( @@ -2557,6 +2802,7 @@ const ( SpotInstanceState_closed SpotInstanceState = "closed" SpotInstanceState_cancelled SpotInstanceState = "cancelled" SpotInstanceState_failed SpotInstanceState = "failed" + SpotInstanceState_disabled SpotInstanceState = "disabled" ) type SpotInstanceType string @@ -2664,6 +2910,12 @@ const ( SummaryStatus_initializing SummaryStatus = "initializing" ) +type SupportedAdditionalProcessorFeature string + +const ( + SupportedAdditionalProcessorFeature_amd_sev_snp SupportedAdditionalProcessorFeature = "amd-sev-snp" +) + type TPMSupportValues string const ( diff --git a/apis/ec2/v1alpha1/zz_flow_log.go b/apis/ec2/v1alpha1/zz_flow_log.go index 6f088c047f..0b8350fc5a 100644 --- a/apis/ec2/v1alpha1/zz_flow_log.go +++ b/apis/ec2/v1alpha1/zz_flow_log.go @@ -63,9 +63,7 @@ type FlowLogParameters struct { // in the 
Amazon VPC User Guide or Transit Gateway Flow Log records (https://docs.aws.amazon.com/vpc/latest/tgw/tgw-flow-logs.html#flow-log-records) // in the Amazon Web Services Transit Gateway Guide. // - // Specify the fields using the ${field-id} format, separated by spaces. For - // the CLI, surround this parameter value with single quotes on Linux or double - // quotes on Windows. + // Specify the fields using the ${field-id} format, separated by spaces. LogFormat *string `json:"logFormat,omitempty"` // The name of a new or existing CloudWatch Logs log group where Amazon EC2 // publishes your flow logs. diff --git a/apis/ec2/v1alpha1/zz_generated.deepcopy.go b/apis/ec2/v1alpha1/zz_generated.deepcopy.go index f6dc6746f6..879c7cf65a 100644 --- a/apis/ec2/v1alpha1/zz_generated.deepcopy.go +++ b/apis/ec2/v1alpha1/zz_generated.deepcopy.go @@ -299,6 +299,11 @@ func (in *AdditionalDetail) DeepCopyInto(out *AdditionalDetail) { *out = new(string) **out = **in } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalDetail. @@ -634,6 +639,11 @@ func (in *AnalysisPacketHeader) DeepCopy() *AnalysisPacketHeader { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AnalysisRouteTableRoute) DeepCopyInto(out *AnalysisRouteTableRoute) { *out = *in + if in.CarrierGatewayID != nil { + in, out := &in.CarrierGatewayID, &out.CarrierGatewayID + *out = new(string) + **out = **in + } if in.DestinationCIDR != nil { in, out := &in.DestinationCIDR, &out.DestinationCIDR *out = new(string) @@ -659,6 +669,11 @@ func (in *AnalysisRouteTableRoute) DeepCopyInto(out *AnalysisRouteTableRoute) { *out = new(string) **out = **in } + if in.LocalGatewayID != nil { + in, out := &in.LocalGatewayID, &out.LocalGatewayID + *out = new(string) + **out = **in + } if in.NATGatewayID != nil { in, out := &in.NATGatewayID, &out.NATGatewayID *out = new(string) @@ -1295,6 +1310,11 @@ func (in *CIDRBlock) DeepCopy() *CIDRBlock { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CPUOptions) DeepCopyInto(out *CPUOptions) { *out = *in + if in.AmdSevSnp != nil { + in, out := &in.AmdSevSnp, &out.AmdSevSnp + *out = new(string) + **out = **in + } if in.CoreCount != nil { in, out := &in.CoreCount, &out.CoreCount *out = new(int64) @@ -1320,6 +1340,11 @@ func (in *CPUOptions) DeepCopy() *CPUOptions { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
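[Editor's note: with the CLI-quoting caveat dropped from the FlowLog comment above, the remaining contract is just space-separated ${field-id} tokens. A sketch assembling a custom format for CreateFlowLogs; the field IDs shown are standard VPC flow-log fields, and the resource ID and log group name are placeholders.]

package main

import (
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Space-separated ${field-id} tokens, per the comment above.
	format := strings.Join([]string{
		"${version}", "${srcaddr}", "${dstaddr}", "${action}", "${flow-direction}",
	}, " ")
	in := &ec2.CreateFlowLogsInput{
		ResourceIds:  []*string{aws.String("vpc-0123456789abcdef0")}, // placeholder
		ResourceType: aws.String("VPC"),
		TrafficType:  aws.String("ALL"),
		LogFormat:    aws.String(format),
		LogGroupName: aws.String("/vpc/flow-logs"), // placeholder
	}
	_ = in
}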
func (in *CPUOptionsRequest) DeepCopyInto(out *CPUOptionsRequest) { *out = *in + if in.AmdSevSnp != nil { + in, out := &in.AmdSevSnp, &out.AmdSevSnp + *out = new(string) + **out = **in + } if in.CoreCount != nil { in, out := &in.CoreCount, &out.CoreCount *out = new(int64) @@ -2774,11 +2799,6 @@ func (in *CreateVerifiedAccessTrustProviderOIDCOptions) DeepCopyInto(out *Create *out = new(string) **out = **in } - if in.ClientSecret != nil { - in, out := &in.ClientSecret, &out.ClientSecret - *out = new(string) - **out = **in - } if in.Issuer != nil { in, out := &in.Issuer, &out.Issuer *out = new(string) @@ -3798,6 +3818,11 @@ func (in *DNSOptions) DeepCopyInto(out *DNSOptions) { *out = new(string) **out = **in } + if in.PrivateDNSOnlyForInboundResolverEndpoint != nil { + in, out := &in.PrivateDNSOnlyForInboundResolverEndpoint, &out.PrivateDNSOnlyForInboundResolverEndpoint + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSOptions. @@ -3818,6 +3843,11 @@ func (in *DNSOptionsSpecification) DeepCopyInto(out *DNSOptionsSpecification) { *out = new(string) **out = **in } + if in.PrivateDNSOnlyForInboundResolverEndpoint != nil { + in, out := &in.PrivateDNSOnlyForInboundResolverEndpoint, &out.PrivateDNSOnlyForInboundResolverEndpoint + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSOptionsSpecification. @@ -4458,11 +4488,6 @@ func (in *DiskImageDescription) DeepCopyInto(out *DiskImageDescription) { *out = new(string) **out = **in } - if in.ImportManifestURL != nil { - in, out := &in.ImportManifestURL, &out.ImportManifestURL - *out = new(string) - **out = **in - } if in.Size != nil { in, out := &in.Size, &out.Size *out = new(int64) @@ -4488,11 +4513,6 @@ func (in *DiskImageDetail) DeepCopyInto(out *DiskImageDetail) { *out = new(int64) **out = **in } - if in.ImportManifestURL != nil { - in, out := &in.ImportManifestURL, &out.ImportManifestURL - *out = new(string) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskImageDetail. @@ -4644,6 +4664,71 @@ func (in *EBSInstanceBlockDeviceSpecification) DeepCopy() *EBSInstanceBlockDevic return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
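[Editor's note: the AmdSevSnp field added to the CPU options types here carries the AmdSevSnpSpecification enum ("enabled"/"disabled") from the EC2 section above. A launch-template sketch assuming aws-sdk-go's LaunchTemplateCpuOptionsRequest field; SEV-SNP is only valid on supported AMD instance types, and m6a.large is an assumed example to verify for your region.]

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	data := &ec2.RequestLaunchTemplateData{
		InstanceType: aws.String("m6a.large"), // assumed SEV-SNP-capable type
		CpuOptions: &ec2.LaunchTemplateCpuOptionsRequest{
			AmdSevSnp: aws.String("enabled"), // matches AmdSevSnpSpecification_enabled
		},
	}
	in := &ec2.CreateLaunchTemplateInput{
		LaunchTemplateName: aws.String("sev-snp-example"), // placeholder
		LaunchTemplateData: data,
	}
	_ = in
}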
+func (in *EC2InstanceConnectEndpoint) DeepCopyInto(out *EC2InstanceConnectEndpoint) { + *out = *in + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = (*in).DeepCopy() + } + if in.DNSName != nil { + in, out := &in.DNSName, &out.DNSName + *out = new(string) + **out = **in + } + if in.FipsDNSName != nil { + in, out := &in.FipsDNSName, &out.FipsDNSName + *out = new(string) + **out = **in + } + if in.OwnerID != nil { + in, out := &in.OwnerID, &out.OwnerID + *out = new(string) + **out = **in + } + if in.PreserveClientIP != nil { + in, out := &in.PreserveClientIP, &out.PreserveClientIP + *out = new(bool) + **out = **in + } + if in.StateMessage != nil { + in, out := &in.StateMessage, &out.StateMessage + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]*Tag, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Tag) + (*in).DeepCopyInto(*out) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EC2InstanceConnectEndpoint. +func (in *EC2InstanceConnectEndpoint) DeepCopy() *EC2InstanceConnectEndpoint { + if in == nil { + return nil + } + out := new(EC2InstanceConnectEndpoint) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ENASrdSpecification) DeepCopyInto(out *ENASrdSpecification) { *out = *in @@ -5669,6 +5754,100 @@ func (in *Filter) DeepCopy() *Filter { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirewallStatefulRule) DeepCopyInto(out *FirewallStatefulRule) { + *out = *in + if in.Destinations != nil { + in, out := &in.Destinations, &out.Destinations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Direction != nil { + in, out := &in.Direction, &out.Direction + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.RuleAction != nil { + in, out := &in.RuleAction, &out.RuleAction + *out = new(string) + **out = **in + } + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallStatefulRule. +func (in *FirewallStatefulRule) DeepCopy() *FirewallStatefulRule { + if in == nil { + return nil + } + out := new(FirewallStatefulRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FirewallStatelessRule) DeepCopyInto(out *FirewallStatelessRule) { + *out = *in + if in.Destinations != nil { + in, out := &in.Destinations, &out.Destinations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RuleAction != nil { + in, out := &in.RuleAction, &out.RuleAction + *out = new(string) + **out = **in + } + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallStatelessRule. +func (in *FirewallStatelessRule) DeepCopy() *FirewallStatelessRule { + if in == nil { + return nil + } + out := new(FirewallStatelessRule) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FleetCapacityReservation) DeepCopyInto(out *FleetCapacityReservation) { *out = *in @@ -6837,6 +7016,11 @@ func (in *IPAM) DeepCopyInto(out *IPAM) { *out = new(string) **out = **in } + if in.ResourceDiscoveryAssociationCount != nil { + in, out := &in.ResourceDiscoveryAssociationCount, &out.ResourceDiscoveryAssociationCount + *out = new(int64) + **out = **in + } if in.ScopeCount != nil { in, out := &in.ScopeCount, &out.ScopeCount *out = new(int64) @@ -6943,6 +7127,103 @@ func (in *IPAMCIDRAuthorizationContext) DeepCopy() *IPAMCIDRAuthorizationContext return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAMDiscoveredAccount) DeepCopyInto(out *IPAMDiscoveredAccount) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.DiscoveryRegion != nil { + in, out := &in.DiscoveryRegion, &out.DiscoveryRegion + *out = new(string) + **out = **in + } + if in.LastAttemptedDiscoveryTime != nil { + in, out := &in.LastAttemptedDiscoveryTime, &out.LastAttemptedDiscoveryTime + *out = (*in).DeepCopy() + } + if in.LastSuccessfulDiscoveryTime != nil { + in, out := &in.LastSuccessfulDiscoveryTime, &out.LastSuccessfulDiscoveryTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMDiscoveredAccount. +func (in *IPAMDiscoveredAccount) DeepCopy() *IPAMDiscoveredAccount { + if in == nil { + return nil + } + out := new(IPAMDiscoveredAccount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPAMDiscoveredResourceCIDR) DeepCopyInto(out *IPAMDiscoveredResourceCIDR) { + *out = *in + if in.ResourceCIDR != nil { + in, out := &in.ResourceCIDR, &out.ResourceCIDR + *out = new(string) + **out = **in + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceOwnerID != nil { + in, out := &in.ResourceOwnerID, &out.ResourceOwnerID + *out = new(string) + **out = **in + } + if in.ResourceRegion != nil { + in, out := &in.ResourceRegion, &out.ResourceRegion + *out = new(string) + **out = **in + } + if in.SampleTime != nil { + in, out := &in.SampleTime, &out.SampleTime + *out = (*in).DeepCopy() + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMDiscoveredResourceCIDR. +func (in *IPAMDiscoveredResourceCIDR) DeepCopy() *IPAMDiscoveredResourceCIDR { + if in == nil { + return nil + } + out := new(IPAMDiscoveredResourceCIDR) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAMDiscoveryFailureReason) DeepCopyInto(out *IPAMDiscoveryFailureReason) { + *out = *in + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMDiscoveryFailureReason. +func (in *IPAMDiscoveryFailureReason) DeepCopy() *IPAMDiscoveryFailureReason { + if in == nil { + return nil + } + out := new(IPAMDiscoveryFailureReason) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IPAMOperatingRegion) DeepCopyInto(out *IPAMOperatingRegion) { *out = *in @@ -7077,6 +7358,11 @@ func (in *IPAMPoolCIDR) DeepCopyInto(out *IPAMPoolCIDR) { *out = new(string) **out = **in } + if in.NetmaskLength != nil { + in, out := &in.NetmaskLength, &out.NetmaskLength + *out = new(int64) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMPoolCIDR. @@ -7127,29 +7413,126 @@ func (in *IPAMResourceCIDR) DeepCopyInto(out *IPAMResourceCIDR) { *out = new(string) **out = **in } - if in.ResourceOwnerID != nil { - in, out := &in.ResourceOwnerID, &out.ResourceOwnerID + if in.ResourceOwnerID != nil { + in, out := &in.ResourceOwnerID, &out.ResourceOwnerID + *out = new(string) + **out = **in + } + if in.ResourceRegion != nil { + in, out := &in.ResourceRegion, &out.ResourceRegion + *out = new(string) + **out = **in + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMResourceCIDR. +func (in *IPAMResourceCIDR) DeepCopy() *IPAMResourceCIDR { + if in == nil { + return nil + } + out := new(IPAMResourceCIDR) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPAMResourceDiscovery) DeepCopyInto(out *IPAMResourceDiscovery) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IPAMResourceDiscoveryARN != nil { + in, out := &in.IPAMResourceDiscoveryARN, &out.IPAMResourceDiscoveryARN + *out = new(string) + **out = **in + } + if in.IPAMResourceDiscoveryRegion != nil { + in, out := &in.IPAMResourceDiscoveryRegion, &out.IPAMResourceDiscoveryRegion + *out = new(string) + **out = **in + } + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + if in.OwnerID != nil { + in, out := &in.OwnerID, &out.OwnerID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]*Tag, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Tag) + (*in).DeepCopyInto(*out) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMResourceDiscovery. +func (in *IPAMResourceDiscovery) DeepCopy() *IPAMResourceDiscovery { + if in == nil { + return nil + } + out := new(IPAMResourceDiscovery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAMResourceDiscoveryAssociation) DeepCopyInto(out *IPAMResourceDiscoveryAssociation) { + *out = *in + if in.IPAMRegion != nil { + in, out := &in.IPAMRegion, &out.IPAMRegion + *out = new(string) + **out = **in + } + if in.IPAMResourceDiscoveryAssociationARN != nil { + in, out := &in.IPAMResourceDiscoveryAssociationARN, &out.IPAMResourceDiscoveryAssociationARN *out = new(string) **out = **in } - if in.ResourceRegion != nil { - in, out := &in.ResourceRegion, &out.ResourceRegion - *out = new(string) + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) **out = **in } - if in.VPCID != nil { - in, out := &in.VPCID, &out.VPCID + if in.OwnerID != nil { + in, out := &in.OwnerID, &out.OwnerID *out = new(string) **out = **in } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]*Tag, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Tag) + (*in).DeepCopyInto(*out) + } + } + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMResourceCIDR. -func (in *IPAMResourceCIDR) DeepCopy() *IPAMResourceCIDR { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMResourceDiscoveryAssociation. +func (in *IPAMResourceDiscoveryAssociation) DeepCopy() *IPAMResourceDiscoveryAssociation { if in == nil { return nil } - out := new(IPAMResourceCIDR) + out := new(IPAMResourceDiscoveryAssociation) in.DeepCopyInto(out) return out } @@ -7640,11 +8023,6 @@ func (in *ImageDiskContainer) DeepCopyInto(out *ImageDiskContainer) { *out = new(string) **out = **in } - if in.URL != nil { - in, out := &in.URL, &out.URL - *out = new(string) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageDiskContainer. 
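[Editor's note: the IPAMResourceDiscovery and IPAMResourceDiscoveryAssociation types above, plus the state enums earlier in this patch, track EC2's resource-discovery flow: create a discovery, associate it with an IPAM, then watch the association move from associate-in-progress to associate-complete. A sketch of the aws-sdk-go request shapes; the IDs and region are placeholders.]

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	create := &ec2.CreateIpamResourceDiscoveryInput{
		Description: aws.String("org-wide discovery"), // illustrative
		OperatingRegions: []*ec2.AddIpamOperatingRegion{
			{RegionName: aws.String("eu-central-1")},
		},
	}
	associate := &ec2.AssociateIpamResourceDiscoveryInput{
		IpamId:                  aws.String("ipam-0123456789abcdef0"),           // placeholder
		IpamResourceDiscoveryId: aws.String("ipam-res-disco-0123456789abcdef0"), // placeholder
	}
	_, _ = create, associate
}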
@@ -8346,6 +8724,11 @@ func (in *InstanceIPv6Address) DeepCopyInto(out *InstanceIPv6Address) { *out = new(string) **out = **in } + if in.IsPrimaryIPv6 != nil { + in, out := &in.IsPrimaryIPv6, &out.IsPrimaryIPv6 + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceIPv6Address. @@ -8735,6 +9118,11 @@ func (in *InstanceNetworkInterfaceSpecification) DeepCopyInto(out *InstanceNetwo *out = new(string) **out = **in } + if in.PrimaryIPv6 != nil { + in, out := &in.PrimaryIPv6, &out.PrimaryIPv6 + *out = new(bool) + **out = **in + } if in.PrivateIPAddress != nil { in, out := &in.PrivateIPAddress, &out.PrivateIPAddress *out = new(string) @@ -9757,6 +10145,11 @@ func (in *LaunchTemplateBlockDeviceMappingRequest) DeepCopy() *LaunchTemplateBlo // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LaunchTemplateCPUOptions) DeepCopyInto(out *LaunchTemplateCPUOptions) { *out = *in + if in.AmdSevSnp != nil { + in, out := &in.AmdSevSnp, &out.AmdSevSnp + *out = new(string) + **out = **in + } if in.CoreCount != nil { in, out := &in.CoreCount, &out.CoreCount *out = new(int64) @@ -9782,6 +10175,11 @@ func (in *LaunchTemplateCPUOptions) DeepCopy() *LaunchTemplateCPUOptions { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LaunchTemplateCPUOptionsRequest) DeepCopyInto(out *LaunchTemplateCPUOptionsRequest) { *out = *in + if in.AmdSevSnp != nil { + in, out := &in.AmdSevSnp, &out.AmdSevSnp + *out = new(string) + **out = **in + } if in.CoreCount != nil { in, out := &in.CoreCount, &out.CoreCount *out = new(int64) @@ -10421,6 +10819,11 @@ func (in *LaunchTemplateInstanceNetworkInterfaceSpecification) DeepCopyInto(out *out = new(string) **out = **in } + if in.PrimaryIPv6 != nil { + in, out := &in.PrimaryIPv6, &out.PrimaryIPv6 + *out = new(bool) + **out = **in + } if in.PrivateIPAddress != nil { in, out := &in.PrivateIPAddress, &out.PrivateIPAddress *out = new(string) @@ -10561,6 +10964,11 @@ func (in *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) DeepCopyIn *out = new(string) **out = **in } + if in.PrimaryIPv6 != nil { + in, out := &in.PrimaryIPv6, &out.PrimaryIPv6 + *out = new(bool) + **out = **in + } if in.PrivateIPAddress != nil { in, out := &in.PrivateIPAddress, &out.PrivateIPAddress *out = new(string) @@ -11264,6 +11672,11 @@ func (in *LaunchTemplateVersionParameters) DeepCopyInto(out *LaunchTemplateVersi *out = new(RequestLaunchTemplateData) (*in).DeepCopyInto(*out) } + if in.ResolveAlias != nil { + in, out := &in.ResolveAlias, &out.ResolveAlias + *out = new(bool) + **out = **in + } if in.SourceVersion != nil { in, out := &in.SourceVersion, &out.SourceVersion *out = new(string) @@ -11604,6 +12017,11 @@ func (in *LocalGatewayRoute) DeepCopyInto(out *LocalGatewayRoute) { *out = new(string) **out = **in } + if in.DestinationPrefixListID != nil { + in, out := &in.DestinationPrefixListID, &out.DestinationPrefixListID + *out = new(string) + **out = **in + } if in.NetworkInterfaceID != nil { in, out := &in.NetworkInterfaceID, &out.NetworkInterfaceID *out = new(string) @@ -11876,6 +12294,34 @@ func (in *LocalGatewayVirtualInterfaceGroup) DeepCopy() *LocalGatewayVirtualInte return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MaintenanceDetails) DeepCopyInto(out *MaintenanceDetails) { + *out = *in + if in.LastMaintenanceApplied != nil { + in, out := &in.LastMaintenanceApplied, &out.LastMaintenanceApplied + *out = (*in).DeepCopy() + } + if in.MaintenanceAutoAppliedAfter != nil { + in, out := &in.MaintenanceAutoAppliedAfter, &out.MaintenanceAutoAppliedAfter + *out = (*in).DeepCopy() + } + if in.PendingMaintenance != nil { + in, out := &in.PendingMaintenance, &out.PendingMaintenance + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceDetails. +func (in *MaintenanceDetails) DeepCopy() *MaintenanceDetails { + if in == nil { + return nil + } + out := new(MaintenanceDetails) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ManagedPrefixList) DeepCopyInto(out *ManagedPrefixList) { *out = *in @@ -12153,6 +12599,11 @@ func (in *ModifyVPNTunnelOptionsSpecification) DeepCopyInto(out *ModifyVPNTunnel *out = new(int64) **out = **in } + if in.EnableTunnelLifecycleControl != nil { + in, out := &in.EnableTunnelLifecycleControl, &out.EnableTunnelLifecycleControl + *out = new(bool) + **out = **in + } if in.Phase1LifetimeSeconds != nil { in, out := &in.Phase1LifetimeSeconds, &out.Phase1LifetimeSeconds *out = new(int64) @@ -12163,11 +12614,6 @@ func (in *ModifyVPNTunnelOptionsSpecification) DeepCopyInto(out *ModifyVPNTunnel *out = new(int64) **out = **in } - if in.PreSharedKey != nil { - in, out := &in.PreSharedKey, &out.PreSharedKey - *out = new(string) - **out = **in - } if in.RekeyFuzzPercentage != nil { in, out := &in.RekeyFuzzPercentage, &out.RekeyFuzzPercentage *out = new(int64) @@ -12213,11 +12659,36 @@ func (in *ModifyVPNTunnelOptionsSpecification) DeepCopy() *ModifyVPNTunnelOption // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ModifyVerifiedAccessTrustProviderOIDCOptions) DeepCopyInto(out *ModifyVerifiedAccessTrustProviderOIDCOptions) { *out = *in + if in.AuthorizationEndpoint != nil { + in, out := &in.AuthorizationEndpoint, &out.AuthorizationEndpoint + *out = new(string) + **out = **in + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } if in.Scope != nil { in, out := &in.Scope, &out.Scope *out = new(string) **out = **in } + if in.TokenEndpoint != nil { + in, out := &in.TokenEndpoint, &out.TokenEndpoint + *out = new(string) + **out = **in + } + if in.UserInfoEndpoint != nil { + in, out := &in.UserInfoEndpoint, &out.UserInfoEndpoint + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModifyVerifiedAccessTrustProviderOIDCOptions. 
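The MaintenanceDetails functions above show the two idioms the generator emits: plain scalar pointers are cloned via new(T) plus a dereferencing assignment, while metav1.Time fields delegate to that type's own DeepCopy because they wrap a struct. A hand-written equivalent for one field of each kind, as it would read inside the v1alpha1 package (the helper name is illustrative, not generated):

// copyMaintenanceDetails hand-rolls the generated DeepCopyInto for two
// representative fields of MaintenanceDetails.
func copyMaintenanceDetails(in, out *MaintenanceDetails) {
	if in.PendingMaintenance != nil {
		// Scalar pointer: allocate, then copy the value.
		out.PendingMaintenance = new(string)
		*out.PendingMaintenance = *in.PendingMaintenance
	}
	if in.LastMaintenanceApplied != nil {
		// metav1.Time wraps time.Time, so the generator calls its
		// DeepCopy instead of copying the pointed-to value directly.
		out.LastMaintenanceApplied = in.LastMaintenanceApplied.DeepCopy()
	}
}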
@@ -12317,6 +12788,21 @@ func (in *NATGatewayAddress) DeepCopyInto(out *NATGatewayAddress) { *out = new(string) **out = **in } + if in.AssociationID != nil { + in, out := &in.AssociationID, &out.AssociationID + *out = new(string) + **out = **in + } + if in.FailureMessage != nil { + in, out := &in.FailureMessage, &out.FailureMessage + *out = new(string) + **out = **in + } + if in.IsPrimary != nil { + in, out := &in.IsPrimary, &out.IsPrimary + *out = new(bool) + **out = **in + } if in.NetworkInterfaceID != nil { in, out := &in.NetworkInterfaceID, &out.NetworkInterfaceID *out = new(string) @@ -12988,6 +13474,11 @@ func (in *NetworkInterfaceIPv6Address) DeepCopyInto(out *NetworkInterfaceIPv6Add *out = new(string) **out = **in } + if in.IsPrimaryIPv6 != nil { + in, out := &in.IsPrimaryIPv6, &out.IsPrimaryIPv6 + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceIPv6Address. @@ -13129,11 +13620,6 @@ func (in *OIDCOptions) DeepCopyInto(out *OIDCOptions) { *out = new(string) **out = **in } - if in.ClientSecret != nil { - in, out := &in.ClientSecret, &out.ClientSecret - *out = new(string) - **out = **in - } if in.Issuer != nil { in, out := &in.Issuer, &out.Issuer *out = new(string) @@ -13406,6 +13892,11 @@ func (in *PathComponent) DeepCopyInto(out *PathComponent) { *out = new(int64) **out = **in } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathComponent. @@ -16190,6 +16681,46 @@ func (in *Route_SDK) DeepCopy() *Route_SDK { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleGroupTypePair) DeepCopyInto(out *RuleGroupTypePair) { + *out = *in + if in.RuleGroupType != nil { + in, out := &in.RuleGroupType, &out.RuleGroupType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroupTypePair. +func (in *RuleGroupTypePair) DeepCopy() *RuleGroupTypePair { + if in == nil { + return nil + } + out := new(RuleGroupTypePair) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleOption) DeepCopyInto(out *RuleOption) { + *out = *in + if in.Keyword != nil { + in, out := &in.Keyword, &out.Keyword + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleOption. +func (in *RuleOption) DeepCopy() *RuleOption { + if in == nil { + return nil + } + out := new(RuleOption) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RunInstancesMonitoringEnabled) DeepCopyInto(out *RunInstancesMonitoringEnabled) { *out = *in @@ -16253,11 +16784,6 @@ func (in *S3Storage) DeepCopyInto(out *S3Storage) { *out = new(string) **out = **in } - if in.UploadPolicySignature != nil { - in, out := &in.UploadPolicySignature, &out.UploadPolicySignature - *out = new(string) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Storage. 
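Slices of pointers, such as the Tags fields copied earlier, get a third idiom: a new backing slice is allocated and each non-nil element is deep-copied individually, so neither the slice header nor its elements alias the source. The same loop, extracted into a standalone in-package helper (illustrative only, not part of the generated code):

// copyTags mirrors the loop the generator emits for []*Tag fields.
func copyTags(in []*Tag) []*Tag {
	if in == nil {
		return nil // preserve the nil vs. empty-slice distinction
	}
	out := make([]*Tag, len(in))
	for i := range in {
		if in[i] != nil {
			out[i] = new(Tag)
			in[i].DeepCopyInto(out[i])
		}
	}
	return out
}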
@@ -17321,11 +17847,6 @@ func (in *SnapshotDetail) DeepCopyInto(out *SnapshotDetail) { *out = new(string) **out = **in } - if in.URL != nil { - in, out := &in.URL, &out.URL - *out = new(string) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotDetail. @@ -17351,11 +17872,6 @@ func (in *SnapshotDiskContainer) DeepCopyInto(out *SnapshotDiskContainer) { *out = new(string) **out = **in } - if in.URL != nil { - in, out := &in.URL, &out.URL - *out = new(string) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotDiskContainer. @@ -17401,6 +17917,11 @@ func (in *SnapshotInfo) DeepCopyInto(out *SnapshotInfo) { *out = new(string) **out = **in } + if in.SSEType != nil { + in, out := &in.SSEType, &out.SSEType + *out = new(string) + **out = **in + } if in.StartTime != nil { in, out := &in.StartTime, &out.StartTime *out = (*in).DeepCopy() @@ -17524,11 +18045,6 @@ func (in *SnapshotTaskDetail) DeepCopyInto(out *SnapshotTaskDetail) { *out = new(string) **out = **in } - if in.URL != nil { - in, out := &in.URL, &out.URL - *out = new(string) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotTaskDetail. @@ -18620,6 +19136,36 @@ func (in *SubnetCIDRReservation) DeepCopy() *SubnetCIDRReservation { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubnetConfiguration) DeepCopyInto(out *SubnetConfiguration) { + *out = *in + if in.IPv4 != nil { + in, out := &in.IPv4, &out.IPv4 + *out = new(string) + **out = **in + } + if in.IPv6 != nil { + in, out := &in.IPv6, &out.IPv6 + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetConfiguration. +func (in *SubnetConfiguration) DeepCopy() *SubnetConfiguration { + if in == nil { + return nil + } + out := new(SubnetConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *SubnetIPv6CIDRBlockAssociation) DeepCopyInto(out *SubnetIPv6CIDRBlockAssociation) { *out = *in @@ -21618,6 +22164,11 @@ func (in *TunnelOption) DeepCopyInto(out *TunnelOption) { *out = new(int64) **out = **in } + if in.EnableTunnelLifecycleControl != nil { + in, out := &in.EnableTunnelLifecycleControl, &out.EnableTunnelLifecycleControl + *out = new(bool) + **out = **in + } if in.OutsideIPAddress != nil { in, out := &in.OutsideIPAddress, &out.OutsideIPAddress *out = new(string) @@ -21633,11 +22184,6 @@ func (in *TunnelOption) DeepCopyInto(out *TunnelOption) { *out = new(int64) **out = **in } - if in.PreSharedKey != nil { - in, out := &in.PreSharedKey, &out.PreSharedKey - *out = new(string) - **out = **in - } if in.RekeyFuzzPercentage != nil { in, out := &in.RekeyFuzzPercentage, &out.RekeyFuzzPercentage *out = new(int64) @@ -22426,6 +22972,17 @@ func (in *VPCEndpointParameters) DeepCopyInto(out *VPCEndpointParameters) { *out = new(string) **out = **in } + if in.SubnetConfigurations != nil { + in, out := &in.SubnetConfigurations, &out.SubnetConfigurations + *out = make([]*SubnetConfiguration, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(SubnetConfiguration) + (*in).DeepCopyInto(*out) + } + } + } if in.TagSpecifications != nil { in, out := &in.TagSpecifications, &out.TagSpecifications *out = make([]*TagSpecification, len(*in)) @@ -23200,11 +23757,6 @@ func (in *VPNConnection) DeepCopyInto(out *VPNConnection) { *out = new(string) **out = **in } - if in.CustomerGatewayConfiguration != nil { - in, out := &in.CustomerGatewayConfiguration, &out.CustomerGatewayConfiguration - *out = new(string) - **out = **in - } if in.CustomerGatewayID != nil { in, out := &in.CustomerGatewayID, &out.CustomerGatewayID *out = new(string) @@ -23467,6 +24019,11 @@ func (in *VPNTunnelOptionsSpecification) DeepCopyInto(out *VPNTunnelOptionsSpeci *out = new(int64) **out = **in } + if in.EnableTunnelLifecycleControl != nil { + in, out := &in.EnableTunnelLifecycleControl, &out.EnableTunnelLifecycleControl + *out = new(bool) + **out = **in + } if in.Phase1LifetimeSeconds != nil { in, out := &in.Phase1LifetimeSeconds, &out.Phase1LifetimeSeconds *out = new(int64) @@ -23477,11 +24034,6 @@ func (in *VPNTunnelOptionsSpecification) DeepCopyInto(out *VPNTunnelOptionsSpeci *out = new(int64) **out = **in } - if in.PreSharedKey != nil { - in, out := &in.PreSharedKey, &out.PreSharedKey - *out = new(string) - **out = **in - } if in.RekeyFuzzPercentage != nil { in, out := &in.RekeyFuzzPercentage, &out.RekeyFuzzPercentage *out = new(int64) @@ -23968,6 +24520,31 @@ func (in *VerifiedAccessLogKinesisDataFirehoseDestinationOptions) DeepCopy() *Ve return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VerifiedAccessLogOptions) DeepCopyInto(out *VerifiedAccessLogOptions) { + *out = *in + if in.IncludeTrustContext != nil { + in, out := &in.IncludeTrustContext, &out.IncludeTrustContext + *out = new(bool) + **out = **in + } + if in.LogVersion != nil { + in, out := &in.LogVersion, &out.LogVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerifiedAccessLogOptions. 
+func (in *VerifiedAccessLogOptions) DeepCopy() *VerifiedAccessLogOptions { + if in == nil { + return nil + } + out := new(VerifiedAccessLogOptions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VerifiedAccessLogS3Destination) DeepCopyInto(out *VerifiedAccessLogS3Destination) { *out = *in @@ -24038,6 +24615,31 @@ func (in *VerifiedAccessLogS3DestinationOptions) DeepCopy() *VerifiedAccessLogS3 return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VerifiedAccessLogs) DeepCopyInto(out *VerifiedAccessLogs) { + *out = *in + if in.IncludeTrustContext != nil { + in, out := &in.IncludeTrustContext, &out.IncludeTrustContext + *out = new(bool) + **out = **in + } + if in.LogVersion != nil { + in, out := &in.LogVersion, &out.LogVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerifiedAccessLogs. +func (in *VerifiedAccessLogs) DeepCopy() *VerifiedAccessLogs { + if in == nil { + return nil + } + out := new(VerifiedAccessLogs) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VerifiedAccessTrustProvider) DeepCopyInto(out *VerifiedAccessTrustProvider) { *out = *in @@ -24358,6 +24960,11 @@ func (in *VolumeObservation) DeepCopyInto(out *VolumeObservation) { *out = new(string) **out = **in } + if in.SSEType != nil { + in, out := &in.SSEType, &out.SSEType + *out = new(string) + **out = **in + } if in.State != nil { in, out := &in.State, &out.State *out = new(string) @@ -24713,6 +25320,11 @@ func (in *Volume_SDK) DeepCopyInto(out *Volume_SDK) { *out = new(string) **out = **in } + if in.SSEType != nil { + in, out := &in.SSEType, &out.SSEType + *out = new(string) + **out = **in + } if in.State != nil { in, out := &in.State, &out.State *out = new(string) diff --git a/apis/ec2/v1alpha1/zz_launch_template_version.go b/apis/ec2/v1alpha1/zz_launch_template_version.go index 7edf5dae64..d6db37763e 100644 --- a/apis/ec2/v1alpha1/zz_launch_template_version.go +++ b/apis/ec2/v1alpha1/zz_launch_template_version.go @@ -32,6 +32,13 @@ type LaunchTemplateVersionParameters struct { // The information for the launch template. // +kubebuilder:validation:Required LaunchTemplateData *RequestLaunchTemplateData `json:"launchTemplateData"` + // If true, and if a Systems Manager parameter is specified for ImageId, the + // AMI ID is displayed in the response for imageID. For more information, see + // Use a Systems Manager parameter instead of an AMI ID (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#use-an-ssm-parameter-instead-of-an-ami-id) + // in the Amazon Elastic Compute Cloud User Guide. + // + // Default: false + ResolveAlias *bool `json:"resolveAlias,omitempty"` // The version number of the launch template version on which to base the new // version. The new version inherits the same launch parameters as the source // version, except for parameters that you specify in LaunchTemplateData. 
Snapshots diff --git a/apis/ec2/v1alpha1/zz_types.go b/apis/ec2/v1alpha1/zz_types.go index 883f2f025c..886391d414 100644 --- a/apis/ec2/v1alpha1/zz_types.go +++ b/apis/ec2/v1alpha1/zz_types.go @@ -103,6 +103,8 @@ type AddedPrincipal struct { // +kubebuilder:skipversion type AdditionalDetail struct { AdditionalDetailType *string `json:"additionalDetailType,omitempty"` + + ServiceName *string `json:"serviceName,omitempty"` } // +kubebuilder:skipversion @@ -204,6 +206,8 @@ type AnalysisPacketHeader struct { // +kubebuilder:skipversion type AnalysisRouteTableRoute struct { + CarrierGatewayID *string `json:"carrierGatewayID,omitempty"` + DestinationCIDR *string `json:"destinationCIDR,omitempty"` DestinationPrefixListID *string `json:"destinationPrefixListID,omitempty"` @@ -214,6 +218,8 @@ type AnalysisRouteTableRoute struct { InstanceID *string `json:"instanceID,omitempty"` + LocalGatewayID *string `json:"localGatewayID,omitempty"` + NATGatewayID *string `json:"natGatewayID,omitempty"` NetworkInterfaceID *string `json:"networkInterfaceID,omitempty"` @@ -401,6 +407,8 @@ type CIDRBlock struct { // +kubebuilder:skipversion type CPUOptions struct { + AmdSevSnp *string `json:"amdSevSnp,omitempty"` + CoreCount *int64 `json:"coreCount,omitempty"` ThreadsPerCore *int64 `json:"threadsPerCore,omitempty"` @@ -408,6 +416,8 @@ type CPUOptions struct { // +kubebuilder:skipversion type CPUOptionsRequest struct { + AmdSevSnp *string `json:"amdSevSnp,omitempty"` + CoreCount *int64 `json:"coreCount,omitempty"` ThreadsPerCore *int64 `json:"threadsPerCore,omitempty"` @@ -825,8 +835,6 @@ type CreateVerifiedAccessTrustProviderOIDCOptions struct { ClientID *string `json:"clientID,omitempty"` - ClientSecret *string `json:"clientSecret,omitempty"` - Issuer *string `json:"issuer,omitempty"` Scope *string `json:"scope,omitempty"` @@ -894,11 +902,15 @@ type DNSEntry struct { // +kubebuilder:skipversion type DNSOptions struct { DNSRecordIPType *string `json:"dnsRecordIPType,omitempty"` + + PrivateDNSOnlyForInboundResolverEndpoint *bool `json:"privateDNSOnlyForInboundResolverEndpoint,omitempty"` } // +kubebuilder:skipversion type DNSOptionsSpecification struct { DNSRecordIPType *string `json:"dnsRecordIPType,omitempty"` + + PrivateDNSOnlyForInboundResolverEndpoint *bool `json:"privateDNSOnlyForInboundResolverEndpoint,omitempty"` } // +kubebuilder:skipversion @@ -1089,16 +1101,12 @@ type DiskImage struct { type DiskImageDescription struct { Checksum *string `json:"checksum,omitempty"` - ImportManifestURL *string `json:"importManifestURL,omitempty"` - Size *int64 `json:"size,omitempty"` } // +kubebuilder:skipversion type DiskImageDetail struct { Bytes *int64 `json:"bytes,omitempty"` - - ImportManifestURL *string `json:"importManifestURL,omitempty"` } // +kubebuilder:skipversion @@ -1145,6 +1153,27 @@ type EBSInstanceBlockDeviceSpecification struct { VolumeID *string `json:"volumeID,omitempty"` } +// +kubebuilder:skipversion +type EC2InstanceConnectEndpoint struct { + AvailabilityZone *string `json:"availabilityZone,omitempty"` + + CreatedAt *metav1.Time `json:"createdAt,omitempty"` + + DNSName *string `json:"dnsName,omitempty"` + + FipsDNSName *string `json:"fipsDNSName,omitempty"` + + OwnerID *string `json:"ownerID,omitempty"` + + PreserveClientIP *bool `json:"preserveClientIP,omitempty"` + + StateMessage *string `json:"stateMessage,omitempty"` + + SubnetID *string `json:"subnetID,omitempty"` + + Tags []*Tag `json:"tags,omitempty"` +} + // +kubebuilder:skipversion type ENASrdSpecification struct { ENASrdEnabled *bool 
`json:"enaSrdEnabled,omitempty"` @@ -1437,6 +1466,28 @@ type Filter struct { Values []*string `json:"values,omitempty"` } +// +kubebuilder:skipversion +type FirewallStatefulRule struct { + Destinations []*string `json:"destinations,omitempty"` + + Direction *string `json:"direction,omitempty"` + + Protocol *string `json:"protocol,omitempty"` + + RuleAction *string `json:"ruleAction,omitempty"` + + Sources []*string `json:"sources,omitempty"` +} + +// +kubebuilder:skipversion +type FirewallStatelessRule struct { + Destinations []*string `json:"destinations,omitempty"` + + RuleAction *string `json:"ruleAction,omitempty"` + + Sources []*string `json:"sources,omitempty"` +} + // +kubebuilder:skipversion type FleetCapacityReservation struct { AvailabilityZone *string `json:"availabilityZone,omitempty"` @@ -1487,6 +1538,9 @@ type FleetLaunchTemplateOverrides struct { // The attributes for the instance types. When you specify instance attributes, // Amazon EC2 will identify instance types with these attributes. // + // You must specify VCpuCount and MemoryMiB. All other attributes are optional. + // Any unspecified optional attribute is set to its default. + // // When you specify multiple attributes, you get instance types that satisfy // all of the specified attributes. If you specify multiple values for an attribute, // you get instance types that satisfy any of the specified values. @@ -1501,8 +1555,13 @@ type FleetLaunchTemplateOverrides struct { // * ExcludedInstanceTypes - The instance types to exclude from the list, // even if they match your specified attributes. // - // You must specify VCpuCount and MemoryMiB. All other attributes are optional. - // Any unspecified optional attribute is set to its default. + // If you specify InstanceRequirements, you can't specify InstanceType. + // + // Attribute-based instance type selection is only supported when using Auto + // Scaling groups, EC2 Fleet, and Spot Fleet to launch instances. If you plan + // to use the launch template in the launch instance wizard (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-instance-wizard.html) + // or with the RunInstances API (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html), + // you can't specify InstanceRequirements. // // For more information, see Attribute-based instance type selection for EC2 // Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-attribute-based-instance-type-selection.html), @@ -1530,6 +1589,9 @@ type FleetLaunchTemplateOverridesRequest struct { // The attributes for the instance types. When you specify instance attributes, // Amazon EC2 will identify instance types with these attributes. // + // You must specify VCpuCount and MemoryMiB. All other attributes are optional. + // Any unspecified optional attribute is set to its default. + // // When you specify multiple attributes, you get instance types that satisfy // all of the specified attributes. If you specify multiple values for an attribute, // you get instance types that satisfy any of the specified values. @@ -1544,8 +1606,14 @@ type FleetLaunchTemplateOverridesRequest struct { // * ExcludedInstanceTypes - The instance types to exclude from the list, // even if they match your specified attributes. // - // You must specify VCpuCount and MemoryMiB. All other attributes are optional. - // Any unspecified optional attribute is set to its default. + // If you specify InstanceRequirements, you can't specify InstanceType. 
+ // + // Attribute-based instance type selection is only supported when using Auto + // Scaling groups, EC2 Fleet, and Spot Fleet to launch instances. If you plan + // to use the launch template in the launch instance wizard (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-instance-wizard.html), + // or with the RunInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) + // API or AWS::EC2::Instance (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-instance.html) + // Amazon Web Services CloudFormation resource, you can't specify InstanceRequirements. // // For more information, see Attribute-based instance type selection for EC2 // Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-attribute-based-instance-type-selection.html), @@ -1789,6 +1857,8 @@ type IPAM struct { OwnerID *string `json:"ownerID,omitempty"` + ResourceDiscoveryAssociationCount *int64 `json:"resourceDiscoveryAssociationCount,omitempty"` + ScopeCount *int64 `json:"scopeCount,omitempty"` Tags []*Tag `json:"tags,omitempty"` @@ -1820,6 +1890,37 @@ type IPAMCIDRAuthorizationContext struct { Signature *string `json:"signature,omitempty"` } +// +kubebuilder:skipversion +type IPAMDiscoveredAccount struct { + AccountID *string `json:"accountID,omitempty"` + + DiscoveryRegion *string `json:"discoveryRegion,omitempty"` + + LastAttemptedDiscoveryTime *metav1.Time `json:"lastAttemptedDiscoveryTime,omitempty"` + + LastSuccessfulDiscoveryTime *metav1.Time `json:"lastSuccessfulDiscoveryTime,omitempty"` +} + +// +kubebuilder:skipversion +type IPAMDiscoveredResourceCIDR struct { + ResourceCIDR *string `json:"resourceCIDR,omitempty"` + + ResourceID *string `json:"resourceID,omitempty"` + + ResourceOwnerID *string `json:"resourceOwnerID,omitempty"` + + ResourceRegion *string `json:"resourceRegion,omitempty"` + + SampleTime *metav1.Time `json:"sampleTime,omitempty"` + + VPCID *string `json:"vpcID,omitempty"` +} + +// +kubebuilder:skipversion +type IPAMDiscoveryFailureReason struct { + Message *string `json:"message,omitempty"` +} + // +kubebuilder:skipversion type IPAMOperatingRegion struct { RegionName *string `json:"regionName,omitempty"` @@ -1862,6 +1963,8 @@ type IPAMPoolAllocation struct { // +kubebuilder:skipversion type IPAMPoolCIDR struct { CIDR *string `json:"cidr,omitempty"` + + NetmaskLength *int64 `json:"netmaskLength,omitempty"` } // +kubebuilder:skipversion @@ -1884,6 +1987,34 @@ type IPAMResourceCIDR struct { VPCID *string `json:"vpcID,omitempty"` } +// +kubebuilder:skipversion +type IPAMResourceDiscovery struct { + Description *string `json:"description,omitempty"` + + IPAMResourceDiscoveryARN *string `json:"ipamResourceDiscoveryARN,omitempty"` + + IPAMResourceDiscoveryRegion *string `json:"ipamResourceDiscoveryRegion,omitempty"` + + IsDefault *bool `json:"isDefault,omitempty"` + + OwnerID *string `json:"ownerID,omitempty"` + + Tags []*Tag `json:"tags,omitempty"` +} + +// +kubebuilder:skipversion +type IPAMResourceDiscoveryAssociation struct { + IPAMRegion *string `json:"ipamRegion,omitempty"` + + IPAMResourceDiscoveryAssociationARN *string `json:"ipamResourceDiscoveryAssociationARN,omitempty"` + + IsDefault *bool `json:"isDefault,omitempty"` + + OwnerID *string `json:"ownerID,omitempty"` + + Tags []*Tag `json:"tags,omitempty"` +} + // +kubebuilder:skipversion type IPAMResourceTag struct { Key *string `json:"key,omitempty"` @@ -2026,8 +2157,6 @@ type ImageDiskContainer struct { Format *string `json:"format,omitempty"` 
SnapshotID *string `json:"snapshotID,omitempty"` - - URL *string `json:"url,omitempty"` } // +kubebuilder:skipversion @@ -2221,6 +2350,8 @@ type InstanceIPv4Prefix struct { // +kubebuilder:skipversion type InstanceIPv6Address struct { IPv6Address *string `json:"ipv6Address,omitempty"` + + IsPrimaryIPv6 *bool `json:"isPrimaryIPv6,omitempty"` } // +kubebuilder:skipversion @@ -2336,6 +2467,8 @@ type InstanceNetworkInterfaceSpecification struct { NetworkInterfaceID *string `json:"networkInterfaceID,omitempty"` + PrimaryIPv6 *bool `json:"primaryIPv6,omitempty"` + PrivateIPAddress *string `json:"privateIPAddress,omitempty"` PrivateIPAddresses []*PrivateIPAddressSpecification `json:"privateIPAddresses,omitempty"` @@ -2482,6 +2615,9 @@ type InstanceRequirementsWithMetadataRequest struct { // The attributes for the instance types. When you specify instance attributes, // Amazon EC2 will identify instance types with these attributes. // + // You must specify VCpuCount and MemoryMiB. All other attributes are optional. + // Any unspecified optional attribute is set to its default. + // // When you specify multiple attributes, you get instance types that satisfy // all of the specified attributes. If you specify multiple values for an attribute, // you get instance types that satisfy any of the specified values. @@ -2496,8 +2632,14 @@ type InstanceRequirementsWithMetadataRequest struct { // * ExcludedInstanceTypes - The instance types to exclude from the list, // even if they match your specified attributes. // - // You must specify VCpuCount and MemoryMiB. All other attributes are optional. - // Any unspecified optional attribute is set to its default. + // If you specify InstanceRequirements, you can't specify InstanceType. + // + // Attribute-based instance type selection is only supported when using Auto + // Scaling groups, EC2 Fleet, and Spot Fleet to launch instances. If you plan + // to use the launch template in the launch instance wizard (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-instance-wizard.html), + // or with the RunInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) + // API or AWS::EC2::Instance (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-instance.html) + // Amazon Web Services CloudFormation resource, you can't specify InstanceRequirements. 
// // For more information, see Attribute-based instance type selection for EC2 // Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-attribute-based-instance-type-selection.html), @@ -2666,6 +2808,8 @@ type LaunchTemplateBlockDeviceMappingRequest struct { // +kubebuilder:skipversion type LaunchTemplateCPUOptions struct { + AmdSevSnp *string `json:"amdSevSnp,omitempty"` + CoreCount *int64 `json:"coreCount,omitempty"` ThreadsPerCore *int64 `json:"threadsPerCore,omitempty"` @@ -2673,6 +2817,8 @@ type LaunchTemplateCPUOptions struct { // +kubebuilder:skipversion type LaunchTemplateCPUOptionsRequest struct { + AmdSevSnp *string `json:"amdSevSnp,omitempty"` + CoreCount *int64 `json:"coreCount,omitempty"` ThreadsPerCore *int64 `json:"threadsPerCore,omitempty"` @@ -2862,6 +3008,8 @@ type LaunchTemplateInstanceNetworkInterfaceSpecification struct { NetworkInterfaceID *string `json:"networkInterfaceID,omitempty"` + PrimaryIPv6 *bool `json:"primaryIPv6,omitempty"` + PrivateIPAddress *string `json:"privateIPAddress,omitempty"` PrivateIPAddresses []*PrivateIPAddressSpecification `json:"privateIPAddresses,omitempty"` @@ -2903,6 +3051,8 @@ type LaunchTemplateInstanceNetworkInterfaceSpecificationRequest struct { NetworkInterfaceID *string `json:"networkInterfaceID,omitempty"` + PrimaryIPv6 *bool `json:"primaryIPv6,omitempty"` + PrivateIPAddress *string `json:"privateIPAddress,omitempty"` PrivateIPAddresses []*PrivateIPAddressSpecification `json:"privateIPAddresses,omitempty"` @@ -2928,6 +3078,9 @@ type LaunchTemplateOverrides struct { // The attributes for the instance types. When you specify instance attributes, // Amazon EC2 will identify instance types with these attributes. // + // You must specify VCpuCount and MemoryMiB. All other attributes are optional. + // Any unspecified optional attribute is set to its default. + // // When you specify multiple attributes, you get instance types that satisfy // all of the specified attributes. If you specify multiple values for an attribute, // you get instance types that satisfy any of the specified values. @@ -2942,8 +3095,13 @@ type LaunchTemplateOverrides struct { // * ExcludedInstanceTypes - The instance types to exclude from the list, // even if they match your specified attributes. // - // You must specify VCpuCount and MemoryMiB. All other attributes are optional. - // Any unspecified optional attribute is set to its default. + // If you specify InstanceRequirements, you can't specify InstanceType. + // + // Attribute-based instance type selection is only supported when using Auto + // Scaling groups, EC2 Fleet, and Spot Fleet to launch instances. If you plan + // to use the launch template in the launch instance wizard (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-instance-wizard.html) + // or with the RunInstances API (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html), + // you can't specify InstanceRequirements. 
// // For more information, see Attribute-based instance type selection for EC2 // Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-attribute-based-instance-type-selection.html), @@ -3155,6 +3313,8 @@ type LocalGateway struct { type LocalGatewayRoute struct { DestinationCIDRBlock *string `json:"destinationCIDRBlock,omitempty"` + DestinationPrefixListID *string `json:"destinationPrefixListID,omitempty"` + NetworkInterfaceID *string `json:"networkInterfaceID,omitempty"` OwnerID *string `json:"ownerID,omitempty"` @@ -3233,6 +3393,15 @@ type LocalGatewayVirtualInterfaceGroup struct { Tags []*Tag `json:"tags,omitempty"` } +// +kubebuilder:skipversion +type MaintenanceDetails struct { + LastMaintenanceApplied *metav1.Time `json:"lastMaintenanceApplied,omitempty"` + + MaintenanceAutoAppliedAfter *metav1.Time `json:"maintenanceAutoAppliedAfter,omitempty"` + + PendingMaintenance *string `json:"pendingMaintenance,omitempty"` +} + // +kubebuilder:skipversion type ManagedPrefixList struct { AddressFamily *string `json:"addressFamily,omitempty"` @@ -3319,12 +3488,12 @@ type ModifyVPNTunnelOptionsSpecification struct { DPDTimeoutSeconds *int64 `json:"dpdTimeoutSeconds,omitempty"` + EnableTunnelLifecycleControl *bool `json:"enableTunnelLifecycleControl,omitempty"` + Phase1LifetimeSeconds *int64 `json:"phase1LifetimeSeconds,omitempty"` Phase2LifetimeSeconds *int64 `json:"phase2LifetimeSeconds,omitempty"` - PreSharedKey *string `json:"preSharedKey,omitempty"` - RekeyFuzzPercentage *int64 `json:"rekeyFuzzPercentage,omitempty"` RekeyMarginTimeSeconds *int64 `json:"rekeyMarginTimeSeconds,omitempty"` @@ -3340,7 +3509,17 @@ type ModifyVPNTunnelOptionsSpecification struct { // +kubebuilder:skipversion type ModifyVerifiedAccessTrustProviderOIDCOptions struct { + AuthorizationEndpoint *string `json:"authorizationEndpoint,omitempty"` + + ClientID *string `json:"clientID,omitempty"` + + Issuer *string `json:"issuer,omitempty"` + Scope *string `json:"scope,omitempty"` + + TokenEndpoint *string `json:"tokenEndpoint,omitempty"` + + UserInfoEndpoint *string `json:"userInfoEndpoint,omitempty"` } // +kubebuilder:skipversion @@ -3371,6 +3550,12 @@ type NATGateway struct { type NATGatewayAddress struct { AllocationID *string `json:"allocationID,omitempty"` + AssociationID *string `json:"associationID,omitempty"` + + FailureMessage *string `json:"failureMessage,omitempty"` + + IsPrimary *bool `json:"isPrimary,omitempty"` + NetworkInterfaceID *string `json:"networkInterfaceID,omitempty"` PrivateIP *string `json:"privateIP,omitempty"` @@ -3574,6 +3759,8 @@ type NetworkInterfaceCountRequest struct { // +kubebuilder:skipversion type NetworkInterfaceIPv6Address struct { IPv6Address *string `json:"ipv6Address,omitempty"` + + IsPrimaryIPv6 *bool `json:"isPrimaryIPv6,omitempty"` } // +kubebuilder:skipversion @@ -3614,8 +3801,6 @@ type OIDCOptions struct { ClientID *string `json:"clientID,omitempty"` - ClientSecret *string `json:"clientSecret,omitempty"` - Issuer *string `json:"issuer,omitempty"` Scope *string `json:"scope,omitempty"` @@ -3680,6 +3865,8 @@ type PacketHeaderStatementRequest struct { // +kubebuilder:skipversion type PathComponent struct { SequenceNumber *int64 `json:"sequenceNumber,omitempty"` + + ServiceName *string `json:"serviceName,omitempty"` } // +kubebuilder:skipversion @@ -4133,6 +4320,9 @@ type RequestLaunchTemplateData struct { // The attributes for the instance types. When you specify instance attributes, // Amazon EC2 will identify instance types with these attributes. 
// + // You must specify VCpuCount and MemoryMiB. All other attributes are optional. + // Any unspecified optional attribute is set to its default. + // // When you specify multiple attributes, you get instance types that satisfy // all of the specified attributes. If you specify multiple values for an attribute, // you get instance types that satisfy any of the specified values. @@ -4147,8 +4337,14 @@ type RequestLaunchTemplateData struct { // * ExcludedInstanceTypes - The instance types to exclude from the list, // even if they match your specified attributes. // - // You must specify VCpuCount and MemoryMiB. All other attributes are optional. - // Any unspecified optional attribute is set to its default. + // If you specify InstanceRequirements, you can't specify InstanceType. + // + // Attribute-based instance type selection is only supported when using Auto + // Scaling groups, EC2 Fleet, and Spot Fleet to launch instances. If you plan + // to use the launch template in the launch instance wizard (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-instance-wizard.html), + // or with the RunInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) + // API or AWS::EC2::Instance (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-instance.html) + // Amazon Web Services CloudFormation resource, you can't specify InstanceRequirements. // // For more information, see Attribute-based instance type selection for EC2 // Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-attribute-based-instance-type-selection.html), @@ -4385,6 +4581,9 @@ type ResponseLaunchTemplateData struct { // The attributes for the instance types. When you specify instance attributes, // Amazon EC2 will identify instance types with these attributes. // + // You must specify VCpuCount and MemoryMiB. All other attributes are optional. + // Any unspecified optional attribute is set to its default. + // // When you specify multiple attributes, you get instance types that satisfy // all of the specified attributes. If you specify multiple values for an attribute, // you get instance types that satisfy any of the specified values. @@ -4399,8 +4598,13 @@ type ResponseLaunchTemplateData struct { // * ExcludedInstanceTypes - The instance types to exclude from the list, // even if they match your specified attributes. // - // You must specify VCpuCount and MemoryMiB. All other attributes are optional. - // Any unspecified optional attribute is set to its default. + // If you specify InstanceRequirements, you can't specify InstanceType. + // + // Attribute-based instance type selection is only supported when using Auto + // Scaling groups, EC2 Fleet, and Spot Fleet to launch instances. If you plan + // to use the launch template in the launch instance wizard (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-instance-wizard.html) + // or with the RunInstances API (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html), + // you can't specify InstanceRequirements. 
// // For more information, see Attribute-based instance type selection for EC2 // Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-attribute-based-instance-type-selection.html), @@ -4502,6 +4706,16 @@ type Route_SDK struct { VPCPeeringConnectionID *string `json:"vpcPeeringConnectionID,omitempty"` } +// +kubebuilder:skipversion +type RuleGroupTypePair struct { + RuleGroupType *string `json:"ruleGroupType,omitempty"` +} + +// +kubebuilder:skipversion +type RuleOption struct { + Keyword *string `json:"keyword,omitempty"` +} + // +kubebuilder:skipversion type RunInstancesMonitoringEnabled struct { Enabled *bool `json:"enabled,omitempty"` @@ -4521,8 +4735,6 @@ type S3Storage struct { Bucket *string `json:"bucket,omitempty"` Prefix *string `json:"prefix,omitempty"` - - UploadPolicySignature *string `json:"uploadPolicySignature,omitempty"` } // +kubebuilder:skipversion @@ -4852,8 +5064,6 @@ type SnapshotDetail struct { Status *string `json:"status,omitempty"` StatusMessage *string `json:"statusMessage,omitempty"` - - URL *string `json:"url,omitempty"` } // +kubebuilder:skipversion @@ -4861,8 +5071,6 @@ type SnapshotDiskContainer struct { Description *string `json:"description,omitempty"` Format *string `json:"format,omitempty"` - - URL *string `json:"url,omitempty"` } // +kubebuilder:skipversion @@ -4879,6 +5087,8 @@ type SnapshotInfo struct { SnapshotID *string `json:"snapshotID,omitempty"` + SSEType *string `json:"sseType,omitempty"` + StartTime *metav1.Time `json:"startTime,omitempty"` Tags []*Tag `json:"tags,omitempty"` @@ -4920,8 +5130,6 @@ type SnapshotTaskDetail struct { Status *string `json:"status,omitempty"` StatusMessage *string `json:"statusMessage,omitempty"` - - URL *string `json:"url,omitempty"` } // +kubebuilder:skipversion @@ -4969,6 +5177,9 @@ type SpotFleetLaunchSpecification struct { // The attributes for the instance types. When you specify instance attributes, // Amazon EC2 will identify instance types with these attributes. // + // You must specify VCpuCount and MemoryMiB. All other attributes are optional. + // Any unspecified optional attribute is set to its default. + // // When you specify multiple attributes, you get instance types that satisfy // all of the specified attributes. If you specify multiple values for an attribute, // you get instance types that satisfy any of the specified values. @@ -4983,8 +5194,13 @@ type SpotFleetLaunchSpecification struct { // * ExcludedInstanceTypes - The instance types to exclude from the list, // even if they match your specified attributes. // - // You must specify VCpuCount and MemoryMiB. All other attributes are optional. - // Any unspecified optional attribute is set to its default. + // If you specify InstanceRequirements, you can't specify InstanceType. + // + // Attribute-based instance type selection is only supported when using Auto + // Scaling groups, EC2 Fleet, and Spot Fleet to launch instances. If you plan + // to use the launch template in the launch instance wizard (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-instance-wizard.html) + // or with the RunInstances API (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html), + // you can't specify InstanceRequirements. 
// // For more information, see Attribute-based instance type selection for EC2 // Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-attribute-based-instance-type-selection.html), @@ -5290,6 +5506,15 @@ type SubnetCIDRReservation struct { Tags []*Tag `json:"tags,omitempty"` } +// +kubebuilder:skipversion +type SubnetConfiguration struct { + IPv4 *string `json:"ipv4,omitempty"` + + IPv6 *string `json:"ipv6,omitempty"` + + SubnetID *string `json:"subnetID,omitempty"` +} + // +kubebuilder:skipversion type SubnetIPv6CIDRBlockAssociation struct { IPv6CIDRBlock *string `json:"ipv6CIDRBlock,omitempty"` @@ -5969,14 +6194,14 @@ type TunnelOption struct { DPDTimeoutSeconds *int64 `json:"dpdTimeoutSeconds,omitempty"` + EnableTunnelLifecycleControl *bool `json:"enableTunnelLifecycleControl,omitempty"` + OutsideIPAddress *string `json:"outsideIPAddress,omitempty"` Phase1LifetimeSeconds *int64 `json:"phase1LifetimeSeconds,omitempty"` Phase2LifetimeSeconds *int64 `json:"phase2LifetimeSeconds,omitempty"` - PreSharedKey *string `json:"preSharedKey,omitempty"` - RekeyFuzzPercentage *int64 `json:"rekeyFuzzPercentage,omitempty"` RekeyMarginTimeSeconds *int64 `json:"rekeyMarginTimeSeconds,omitempty"` @@ -6220,11 +6445,6 @@ type VPCPeeringConnectionVPCInfo struct { IPv6CIDRBlockSet []*IPv6CIDRBlock `json:"ipv6CIDRBlockSet,omitempty"` OwnerID *string `json:"ownerID,omitempty"` - // - // We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic - // to a VPC. For more information, see Migrate from EC2-Classic to a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-migrate.html) - // in the Amazon Elastic Compute Cloud User Guide. - // // Describes the VPC peering connection options. PeeringOptions *VPCPeeringConnectionOptionsDescription `json:"peeringOptions,omitempty"` @@ -6257,8 +6477,6 @@ type VPNConnection struct { CoreNetworkAttachmentARN *string `json:"coreNetworkAttachmentARN,omitempty"` - CustomerGatewayConfiguration *string `json:"customerGatewayConfiguration,omitempty"` - CustomerGatewayID *string `json:"customerGatewayID,omitempty"` Tags []*Tag `json:"tags,omitempty"` @@ -6341,12 +6559,12 @@ type VPNTunnelOptionsSpecification struct { DPDTimeoutSeconds *int64 `json:"dpdTimeoutSeconds,omitempty"` + EnableTunnelLifecycleControl *bool `json:"enableTunnelLifecycleControl,omitempty"` + Phase1LifetimeSeconds *int64 `json:"phase1LifetimeSeconds,omitempty"` Phase2LifetimeSeconds *int64 `json:"phase2LifetimeSeconds,omitempty"` - PreSharedKey *string `json:"preSharedKey,omitempty"` - RekeyFuzzPercentage *int64 `json:"rekeyFuzzPercentage,omitempty"` RekeyMarginTimeSeconds *int64 `json:"rekeyMarginTimeSeconds,omitempty"` @@ -6486,6 +6704,13 @@ type VerifiedAccessLogKinesisDataFirehoseDestinationOptions struct { Enabled *bool `json:"enabled,omitempty"` } +// +kubebuilder:skipversion +type VerifiedAccessLogOptions struct { + IncludeTrustContext *bool `json:"includeTrustContext,omitempty"` + + LogVersion *string `json:"logVersion,omitempty"` +} + // +kubebuilder:skipversion type VerifiedAccessLogS3Destination struct { BucketName *string `json:"bucketName,omitempty"` @@ -6508,6 +6733,13 @@ type VerifiedAccessLogS3DestinationOptions struct { Prefix *string `json:"prefix,omitempty"` } +// +kubebuilder:skipversion +type VerifiedAccessLogs struct { + IncludeTrustContext *bool `json:"includeTrustContext,omitempty"` + + LogVersion *string `json:"logVersion,omitempty"` +} + // +kubebuilder:skipversion type VerifiedAccessTrustProvider struct { CreationTime *string 
`json:"creationTime,omitempty"` @@ -6656,6 +6888,8 @@ type Volume_SDK struct { SnapshotID *string `json:"snapshotID,omitempty"` + SSEType *string `json:"sseType,omitempty"` + State *string `json:"state,omitempty"` Tags []*Tag `json:"tags,omitempty"` diff --git a/apis/ec2/v1alpha1/zz_volume.go b/apis/ec2/v1alpha1/zz_volume.go index b3d2ec9221..368c22f6cd 100644 --- a/apis/ec2/v1alpha1/zz_volume.go +++ b/apis/ec2/v1alpha1/zz_volume.go @@ -29,7 +29,8 @@ type VolumeParameters struct { // Region is which region the Volume will be created. // +kubebuilder:validation:Required Region string `json:"region"` - // The Availability Zone in which to create the volume. + // The ID of the Availability Zone in which to create the volume. For example, + // us-east-1a. // +kubebuilder:validation:Required AvailabilityZone *string `json:"availabilityZone"` // Indicates whether the volume should be encrypted. The effect of setting the @@ -109,6 +110,9 @@ type VolumeParameters struct { // // * Magnetic: standard // + // Throughput Optimized HDD (st1) and Cold HDD (sc1) volumes can't be used as + // boot volumes. + // // For more information, see Amazon EBS volume types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) // in the Amazon Elastic Compute Cloud User Guide. // @@ -134,6 +138,8 @@ type VolumeObservation struct { // The Amazon Resource Name (ARN) of the Key Management Service (KMS) KMS key // that was used to protect the volume encryption key for the volume. KMSKeyID *string `json:"kmsKeyID,omitempty"` + // Reserved for future use. + SSEType *string `json:"sseType,omitempty"` // The volume state. State *string `json:"state,omitempty"` // Any tags assigned to the volume. diff --git a/apis/ec2/v1alpha1/zz_vpc_endpoint.go b/apis/ec2/v1alpha1/zz_vpc_endpoint.go index 94989bfd71..76dff6c3a7 100644 --- a/apis/ec2/v1alpha1/zz_vpc_endpoint.go +++ b/apis/ec2/v1alpha1/zz_vpc_endpoint.go @@ -52,10 +52,11 @@ type VPCEndpointParameters struct { // // Default: true PrivateDNSEnabled *bool `json:"privateDNSEnabled,omitempty"` - // The service name. To get a list of available services, use the DescribeVpcEndpointServices - // request, or get the name from the service provider. + // The name of the endpoint service. // +kubebuilder:validation:Required ServiceName *string `json:"serviceName"` + // The subnet configurations for the endpoint. + SubnetConfigurations []*SubnetConfiguration `json:"subnetConfigurations,omitempty"` // The tags to associate with the endpoint. TagSpecifications []*TagSpecification `json:"tagSpecifications,omitempty"` // The type of endpoint. @@ -82,19 +83,19 @@ type VPCEndpointObservation struct { Groups []*SecurityGroupIdentifier `json:"groups,omitempty"` // The last error that occurred for endpoint. LastError *LastError `json:"lastError,omitempty"` - // (Interface endpoint) One or more network interfaces for the endpoint. + // (Interface endpoint) The network interfaces for the endpoint. NetworkInterfaceIDs []*string `json:"networkInterfaceIDs,omitempty"` // The ID of the Amazon Web Services account that owns the endpoint. OwnerID *string `json:"ownerID,omitempty"` // Indicates whether the endpoint is being managed by its service. RequesterManaged *bool `json:"requesterManaged,omitempty"` - // (Gateway endpoint) One or more route tables associated with the endpoint. + // (Gateway endpoint) The IDs of the route tables associated with the endpoint. RouteTableIDs []*string `json:"routeTableIDs,omitempty"` // The state of the endpoint. 
State *string `json:"state,omitempty"` // (Interface endpoint) The subnets for the endpoint. SubnetIDs []*string `json:"subnetIDs,omitempty"` - // Any tags assigned to the endpoint. + // The tags assigned to the endpoint. Tags []*Tag `json:"tags,omitempty"` // The ID of the endpoint. VPCEndpointID *string `json:"vpcEndpointID,omitempty"` diff --git a/apis/ecs/v1alpha1/zz_cluster.go b/apis/ecs/v1alpha1/zz_cluster.go index 4e5f61d7ea..3687b8af5f 100644 --- a/apis/ecs/v1alpha1/zz_cluster.go +++ b/apis/ecs/v1alpha1/zz_cluster.go @@ -32,20 +32,23 @@ type ClusterParameters struct { // The short name of one or more capacity providers to associate with the cluster. // A capacity provider must be associated with a cluster before it can be included // as part of the default capacity provider strategy of the cluster or used - // in a capacity provider strategy when calling the CreateService or RunTask + // in a capacity provider strategy when calling the CreateService (https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateService.html) + // or RunTask (https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RunTask.html) // actions. // // If specifying a capacity provider that uses an Auto Scaling group, the capacity // provider must be created but not associated with another cluster. New Auto // Scaling group capacity providers can be created with the CreateCapacityProvider + // (https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateCapacityProvider.html) // API operation. // // To use a Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT // capacity providers. The Fargate capacity providers are available to all accounts // and only need to be associated with a cluster to be used. // - // The PutClusterCapacityProviders API operation is used to update the list - // of available capacity providers for a cluster after the cluster is created. + // The PutCapacityProvider (https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutCapacityProvider.html) + // API operation is used to update the list of available capacity providers + // for a cluster after the cluster is created. CapacityProviders []*string `json:"capacityProviders,omitempty"` // The name of your cluster. If you don't specify a name for your cluster, you // create a cluster that's named default. Up to 255 letters (uppercase and lowercase), @@ -55,9 +58,10 @@ type ClusterParameters struct { Configuration *ClusterConfiguration `json:"configuration,omitempty"` // The capacity provider strategy to set as the default for the cluster. After // a default capacity provider strategy is set for a cluster, when you call - // the RunTask or CreateService APIs with no capacity provider strategy or launch - // type specified, the default capacity provider strategy for the cluster is - // used. + // the CreateService (https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateService.html) + // or RunTask (https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RunTask.html) + // APIs with no capacity provider strategy or launch type specified, the default + // capacity provider strategy for the cluster is used. 
// // If a default capacity provider strategy isn't defined for a cluster when // it was created, it can be defined later with the PutClusterCapacityProviders diff --git a/apis/ecs/v1alpha1/zz_enums.go b/apis/ecs/v1alpha1/zz_enums.go index 2785b9a367..48829fdf40 100644 --- a/apis/ecs/v1alpha1/zz_enums.go +++ b/apis/ecs/v1alpha1/zz_enums.go @@ -376,6 +376,8 @@ const ( SettingName_containerInstanceLongArnFormat SettingName = "containerInstanceLongArnFormat" SettingName_awsvpcTrunking SettingName = "awsvpcTrunking" SettingName_containerInsights SettingName = "containerInsights" + SettingName_fargateFIPSMode SettingName = "fargateFIPSMode" + SettingName_tagResourceAuthorization SettingName = "tagResourceAuthorization" ) type SortOrder string @@ -421,8 +423,9 @@ const ( type TaskDefinitionStatus_SDK string const ( - TaskDefinitionStatus_SDK_ACTIVE TaskDefinitionStatus_SDK = "ACTIVE" - TaskDefinitionStatus_SDK_INACTIVE TaskDefinitionStatus_SDK = "INACTIVE" + TaskDefinitionStatus_SDK_ACTIVE TaskDefinitionStatus_SDK = "ACTIVE" + TaskDefinitionStatus_SDK_INACTIVE TaskDefinitionStatus_SDK = "INACTIVE" + TaskDefinitionStatus_SDK_DELETE_IN_PROGRESS TaskDefinitionStatus_SDK = "DELETE_IN_PROGRESS" ) type TaskField string diff --git a/apis/ecs/v1alpha1/zz_generated.deepcopy.go b/apis/ecs/v1alpha1/zz_generated.deepcopy.go index 83a8c46e7e..b2ef823e66 100644 --- a/apis/ecs/v1alpha1/zz_generated.deepcopy.go +++ b/apis/ecs/v1alpha1/zz_generated.deepcopy.go @@ -810,6 +810,17 @@ func (in *ContainerDefinition) DeepCopyInto(out *ContainerDefinition) { *out = new(int64) **out = **in } + if in.CredentialSpecs != nil { + in, out := &in.CredentialSpecs, &out.CredentialSpecs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.DependsOn != nil { in, out := &in.DependsOn, &out.DependsOn *out = make([]*ContainerDependency, len(*in)) diff --git a/apis/ecs/v1alpha1/zz_service.go b/apis/ecs/v1alpha1/zz_service.go index 9c001c12d8..aa13c36b42 100644 --- a/apis/ecs/v1alpha1/zz_service.go +++ b/apis/ecs/v1alpha1/zz_service.go @@ -44,7 +44,7 @@ type ServiceParameters struct { // is specified, the default value of ECS is used. DeploymentController *DeploymentController `json:"deploymentController,omitempty"` // The number of instantiations of the specified task definition to place and - // keep running on your cluster. + // keep running in your service. // // This is required if schedulingStrategy is REPLICA or isn't specified. If // schedulingStrategy is DAEMON then this isn't required. @@ -53,10 +53,13 @@ type ServiceParameters struct { // the service. For more information, see Tagging your Amazon ECS resources // (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html) // in the Amazon Elastic Container Service Developer Guide. + // + // When you use Amazon ECS managed tags, you need to set the propagateTags request + // parameter. EnableECSManagedTags *bool `json:"enableECSManagedTags,omitempty"` - // Determines whether the execute command functionality is enabled for the service. - // If true, this enables execute command functionality on all containers in - // the service tasks. + // Determines whether the execute command functionality is turned on for the + // service. If true, this enables execute command functionality on all containers + // in the service tasks. 
EnableExecuteCommand *bool `json:"enableExecuteCommand,omitempty"` // The period of time, in seconds, that the Amazon ECS service scheduler ignores // unhealthy Elastic Load Balancing target health checks after a task has first @@ -112,7 +115,10 @@ type ServiceParameters struct { // Specifies whether to propagate the tags from the task definition to the task. // If no value is specified, the tags aren't propagated. Tags can only be propagated // to the task during task creation. To add tags to a task after task creation, - // use the TagResource API action. + // use the TagResource (https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_TagResource.html) + // API action. + // + // The default is NONE. PropagateTags *string `json:"propagateTags,omitempty"` // The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon // ECS to make calls to your load balancer on your behalf. This parameter is diff --git a/apis/ecs/v1alpha1/zz_task_definition.go b/apis/ecs/v1alpha1/zz_task_definition.go index d3536fe073..150a0d320d 100644 --- a/apis/ecs/v1alpha1/zz_task_definition.go +++ b/apis/ecs/v1alpha1/zz_task_definition.go @@ -79,10 +79,12 @@ type TaskDefinitionParameters struct { // Fargate task storage (https://docs.aws.amazon.com/AmazonECS/latest/userguide/using_data_volumes.html) // in the Amazon ECS User Guide for Fargate. // - // This parameter is only supported for tasks hosted on Fargate using the following - // platform versions: + // For tasks using the Fargate launch type, the task requires the following + // platforms: // // * Linux platform version 1.4.0 or later. + // + // * Windows platform version 1.0.0 or later. EphemeralStorage *EphemeralStorage `json:"ephemeralStorage,omitempty"` // You must specify a family for a task definition. You can use it to track multiple // versions of the same task definition. The family is used as a name for your diff --git a/apis/ecs/v1alpha1/zz_types.go b/apis/ecs/v1alpha1/zz_types.go index b74c71c926..d8a92d8ccc 100644 --- a/apis/ecs/v1alpha1/zz_types.go +++ b/apis/ecs/v1alpha1/zz_types.go @@ -195,6 +195,8 @@ type ContainerDefinition struct { CPU *int64 `json:"cpu,omitempty"` + CredentialSpecs []*string `json:"credentialSpecs,omitempty"` + DependsOn []*ContainerDependency `json:"dependsOn,omitempty"` DisableNetworking *bool `json:"disableNetworking,omitempty"` @@ -224,7 +226,8 @@ type ContainerDefinition struct { // An object representing a container health check. Health check parameters // that are specified in a container definition override any Docker health checks // that exist in the container image (such as those specified in a parent image - // or from the image's Dockerfile). + // or from the image's Dockerfile). This configuration maps to the HEALTHCHECK + // parameter of docker run (https://docs.docker.com/engine/reference/run/). // // The Amazon ECS container agent only monitors and reports on the health checks // specified in the task definition. Amazon ECS does not monitor Docker health @@ -235,6 +238,9 @@ type ContainerDefinition struct { // You can view the health status of both individual containers and a task with // the DescribeTasks API operation or when viewing the task details in the console. // + // The health check is designed to make sure that your containers survive agent + // restarts, upgrades, or temporary unavailability. + // // The following describes the possible healthStatus values for a container: // // * HEALTHY-The container health check has passed successfully.
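The health-check semantics described in the comment above are easier to see in code. Below is a minimal sketch, assuming hypothetical, trimmed-down mirrors of the generated ContainerDefinition and HealthCheck types; the real pointer-heavy definitions live in apis/ecs/v1alpha1 and are only partially visible in this hunk, so the field names here follow the AWS ECS shape the comment refers to and are not taken from this diff:

    package main

    import "fmt"

    // Hypothetical, trimmed-down mirrors of the generated types.
    type HealthCheck struct {
        Command  []*string // e.g. "CMD-SHELL" plus the shell command to run
        Interval *int64    // seconds between checks
        Retries  *int64    // consecutive failures before the container is UNHEALTHY
    }

    type ContainerDefinition struct {
        Name        *string
        Essential   *bool
        HealthCheck *HealthCheck
    }

    func strp(s string) *string { return &s }
    func i64p(i int64) *int64   { return &i }
    func boolp(b bool) *bool    { return &b }

    func main() {
        // Only the check declared here is monitored by the ECS agent; a
        // HEALTHCHECK baked into the image is ignored, per the comment above.
        app := ContainerDefinition{
            Name:      strp("app"),
            Essential: boolp(true),
            HealthCheck: &HealthCheck{
                Command:  []*string{strp("CMD-SHELL"), strp("curl -f http://localhost/ || exit 1")},
                Interval: i64p(30),
                Retries:  i64p(3),
            },
        }
        fmt.Printf("%s retries before UNHEALTHY: %d\n", *app.Name, *app.HealthCheck.Retries)
    }

Every scalar being a pointer is what lets an unset field be told apart from an explicit zero, which matters for the healthStatus rules above: a task whose containers define no health checks reports UNKNOWN rather than UNHEALTHY.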
@@ -245,8 +251,8 @@ type ContainerDefinition struct { // container health check defined. // // The following describes the possible healthStatus values for a task. The - // container health check status of nonessential containers only affects the - // health status of a task if no essential containers have health checks defined. + // container health check status of nonessential containers doesn't have an effect + // on the health status of a task. // // * HEALTHY-All essential containers within the task have passed their health // checks. @@ -255,23 +261,23 @@ type ContainerDefinition struct { // check. // // * UNKNOWN-The essential containers within the task are still having their - // health checks evaluated or there are only nonessential containers with - // health checks defined. + // health checks evaluated, there are only nonessential containers with health + // checks defined, or there are no container health checks defined. // // If a task is run manually, and not as part of a service, the task will continue // its lifecycle regardless of its health status. For tasks that are part of // a service, if the task reports as unhealthy then the task will be stopped // and the service scheduler will replace it. // - // For tasks that are a part of a service and the service uses the ECS rolling - // deployment type, the deployment is paused while the new tasks have the UNKNOWN - // task health check status. For example, tasks that define health checks for - // nonessential containers when no essential containers have health checks will - // have the UNKNOWN health check status indefinitely which prevents the deployment - // from completing. - // // The following are notes about container health check support: // + // * When the Amazon ECS agent cannot connect to the Amazon ECS service, + // the service reports the container as UNHEALTHY. + // + // * The health check statuses are the "last heard from" response from the + // Amazon ECS agent. There are no assumptions made about the status of the + // container health checks. + // // * Container health checks require version 1.17.0 or greater of the Amazon // ECS container agent. For more information, see Updating the Amazon ECS // container agent (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html). @@ -291,7 +297,8 @@ type ContainerDefinition struct { Interactive *bool `json:"interactive,omitempty"` Links []*string `json:"links,omitempty"` - // Linux-specific options that are applied to the container, such as Linux KernelCapabilities. + // The Linux-specific options that are applied to the container, such as Linux + // KernelCapabilities (https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_KernelCapabilities.html). LinuxParameters *LinuxParameters `json:"linuxParameters,omitempty"` // The log configuration for the container. This parameter maps to LogConfig // in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) @@ -308,9 +315,11 @@ type ContainerDefinition struct { // Understand the following when specifying a log configuration for your containers. // // * Amazon ECS currently supports a subset of the logging drivers available - // to the Docker daemon (shown in the valid values below). Additional log - // drivers may be available in future releases of the Amazon ECS container - // agent. + // to the Docker daemon. Additional log drivers may be available in future + // releases of the Amazon ECS container agent.
For tasks on Fargate, the + // supported log drivers are awslogs, splunk, and awsfirelens. For tasks + // hosted on Amazon EC2 instances, the supported log drivers are awslogs, + // fluentd, gelf, json-file, journald, logentries, syslog, splunk, and awsfirelens. // // * This parameter requires version 1.18 of the Docker Remote API or greater // on your container instance. @@ -445,7 +454,7 @@ type Deployment struct { ID *string `json:"id,omitempty"` LaunchType *string `json:"launchType,omitempty"` - // An object representing the network configuration for a task or service. + // The network configuration for a task or service. NetworkConfiguration *NetworkConfiguration `json:"networkConfiguration,omitempty"` PendingCount *int64 `json:"pendingCount,omitempty"` @@ -515,13 +524,16 @@ type DeploymentConfiguration struct { Alarms *DeploymentAlarms `json:"alarms,omitempty"` // // The deployment circuit breaker can only be used for services using the rolling - // update (ECS) deployment type that aren't behind a Classic Load Balancer. + // update (ECS) deployment type. // // The deployment circuit breaker determines whether a service deployment will - // fail if the service can't reach a steady state. If enabled, a service deployment - // will transition to a failed state and stop launching new tasks. You can also - // configure Amazon ECS to roll back your service to the last completed deployment - // after a failure. For more information, see Rolling update (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-type-ecs.html) + // fail if the service can't reach a steady state. If it is turned on, a service + // deployment will transition to a failed state and stop launching new tasks. + // You can also configure Amazon ECS to roll back your service to the last completed + // deployment after a failure. For more information, see Rolling update (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-type-ecs.html) + // in the Amazon Elastic Container Service Developer Guide. + // + // For more information about API failure reasons, see API failure reasons (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/api_failures_messages.html) // in the Amazon Elastic Container Service Developer Guide. DeploymentCircuitBreaker *DeploymentCircuitBreaker `json:"deploymentCircuitBreaker,omitempty"` @@ -936,9 +948,11 @@ type ServiceConnectConfiguration struct { // Understand the following when specifying a log configuration for your containers. // // * Amazon ECS currently supports a subset of the logging drivers available - // to the Docker daemon (shown in the valid values below). Additional log - // drivers may be available in future releases of the Amazon ECS container - // agent. + // to the Docker daemon. Additional log drivers may be available in future + // releases of the Amazon ECS container agent. For tasks on Fargate, the + // supported log drivers are awslogs, splunk, and awsfirelens. For tasks + // hosted on Amazon EC2 instances, the supported log drivers are awslogs, + // fluentd, gelf, json-file, journald, logentries, syslog, splunk, and awsfirelens. // // * This parameter requires version 1.18 of the Docker Remote API or greater // on your container instance. @@ -1032,7 +1046,7 @@ type Service_SDK struct { LaunchType *string `json:"launchType,omitempty"` LoadBalancers []*LoadBalancer `json:"loadBalancers,omitempty"` - // An object representing the network configuration for a task or service. + // The network configuration for a task or service.
NetworkConfiguration *NetworkConfiguration `json:"networkConfiguration,omitempty"` PendingCount *int64 `json:"pendingCount,omitempty"` @@ -1123,9 +1137,12 @@ type Task struct { // Fargate task storage (https://docs.aws.amazon.com/AmazonECS/latest/userguide/using_data_volumes.html) // in the Amazon ECS User Guide for Fargate. // - // This parameter is only supported for tasks hosted on Fargate using Linux - // platform version 1.4.0 or later. This parameter is not supported for Windows - // containers on Fargate. + // For tasks using the Fargate launch type, the task requires the following + // platforms: + // + // * Linux platform version 1.4.0 or later. + // + // * Windows platform version 1.0.0 or later. EphemeralStorage *EphemeralStorage `json:"ephemeralStorage,omitempty"` ExecutionStoppedAt *metav1.Time `json:"executionStoppedAt,omitempty"` @@ -1187,9 +1204,12 @@ type TaskDefinition_SDK struct { // Fargate task storage (https://docs.aws.amazon.com/AmazonECS/latest/userguide/using_data_volumes.html) // in the Amazon ECS User Guide for Fargate. // - // This parameter is only supported for tasks hosted on Fargate using Linux - // platform version 1.4.0 or later. This parameter is not supported for Windows - // containers on Fargate. + // For tasks using the Fargate launch type, the task requires the following + // platforms: + // + // * Linux platform version 1.4.0 or later. + // + // * Windows platform version 1.0.0 or later. EphemeralStorage *EphemeralStorage `json:"ephemeralStorage,omitempty"` ExecutionRoleARN *string `json:"executionRoleARN,omitempty"` @@ -1250,9 +1270,12 @@ type TaskOverride struct { // Fargate task storage (https://docs.aws.amazon.com/AmazonECS/latest/userguide/using_data_volumes.html) // in the Amazon ECS User Guide for Fargate. // - // This parameter is only supported for tasks hosted on Fargate using Linux - // platform version 1.4.0 or later. This parameter is not supported for Windows - // containers on Fargate. + // For tasks using the Fargate launch type, the task requires the following + // platforms: + // + // * Linux platform version 1.4.0 or later. + // + // * Windows platform version 1.0.0 or later. EphemeralStorage *EphemeralStorage `json:"ephemeralStorage,omitempty"` ExecutionRoleARN *string `json:"executionRoleARN,omitempty"` @@ -1279,7 +1302,7 @@ type TaskSet struct { LaunchType *string `json:"launchType,omitempty"` LoadBalancers []*LoadBalancer `json:"loadBalancers,omitempty"` - // An object representing the network configuration for a task or service. + // The network configuration for a task or service. NetworkConfiguration *NetworkConfiguration `json:"networkConfiguration,omitempty"` PendingCount *int64 `json:"pendingCount,omitempty"` diff --git a/apis/efs/v1alpha1/zz_access_point.go b/apis/efs/v1alpha1/zz_access_point.go index 3372ce056a..94651b676b 100644 --- a/apis/efs/v1alpha1/zz_access_point.go +++ b/apis/efs/v1alpha1/zz_access_point.go @@ -73,7 +73,7 @@ type AccessPointObservation struct { LifeCycleState *string `json:"lifeCycleState,omitempty"` // The name of the access point. This is the value of the Name tag. Name *string `json:"name,omitempty"` - // Identified the Amazon Web Services account that owns the access point resource. + // Identifies the Amazon Web Services account that owns the access point resource. 
OwnerID *string `json:"ownerID,omitempty"` } diff --git a/apis/efs/v1alpha1/zz_enums.go b/apis/efs/v1alpha1/zz_enums.go index baeb92302b..b39d338e37 100644 --- a/apis/efs/v1alpha1/zz_enums.go +++ b/apis/efs/v1alpha1/zz_enums.go @@ -43,6 +43,8 @@ const ( ReplicationStatus_ENABLING ReplicationStatus = "ENABLING" ReplicationStatus_DELETING ReplicationStatus = "DELETING" ReplicationStatus_ERROR ReplicationStatus = "ERROR" + ReplicationStatus_PAUSED ReplicationStatus = "PAUSED" + ReplicationStatus_PAUSING ReplicationStatus = "PAUSING" ) type Resource string diff --git a/apis/efs/v1alpha1/zz_file_system.go b/apis/efs/v1alpha1/zz_file_system.go index 588aae3b69..9009cb98fe 100644 --- a/apis/efs/v1alpha1/zz_file_system.go +++ b/apis/efs/v1alpha1/zz_file_system.go @@ -134,9 +134,7 @@ type FileSystemObservation struct { // The current number of mount targets that the file system has. For more information, // see CreateMountTarget. NumberOfMountTargets *int64 `json:"numberOfMountTargets,omitempty"` - // The Amazon Web Services account that created the file system. If the file - // system was created by an IAM user, the parent account to which the user belongs - // is the owner. + // The Amazon Web Services account that created the file system. OwnerID *string `json:"ownerID,omitempty"` // The latest known metered size (in bytes) of data stored in the file system, // in its Value field, and the time at which that size was determined in its diff --git a/apis/eks/v1alpha1/zz_enums.go b/apis/eks/v1alpha1/zz_enums.go index f5618c15fd..e761bbe367 100644 --- a/apis/eks/v1alpha1/zz_enums.go +++ b/apis/eks/v1alpha1/zz_enums.go @@ -164,25 +164,39 @@ const ( type NodegroupIssueCode string const ( - NodegroupIssueCode_AutoScalingGroupNotFound NodegroupIssueCode = "AutoScalingGroupNotFound" - NodegroupIssueCode_AutoScalingGroupInvalidConfiguration NodegroupIssueCode = "AutoScalingGroupInvalidConfiguration" - NodegroupIssueCode_Ec2SecurityGroupNotFound NodegroupIssueCode = "Ec2SecurityGroupNotFound" - NodegroupIssueCode_Ec2SecurityGroupDeletionFailure NodegroupIssueCode = "Ec2SecurityGroupDeletionFailure" - NodegroupIssueCode_Ec2LaunchTemplateNotFound NodegroupIssueCode = "Ec2LaunchTemplateNotFound" - NodegroupIssueCode_Ec2LaunchTemplateVersionMismatch NodegroupIssueCode = "Ec2LaunchTemplateVersionMismatch" - NodegroupIssueCode_Ec2SubnetNotFound NodegroupIssueCode = "Ec2SubnetNotFound" - NodegroupIssueCode_Ec2SubnetInvalidConfiguration NodegroupIssueCode = "Ec2SubnetInvalidConfiguration" - NodegroupIssueCode_IamInstanceProfileNotFound NodegroupIssueCode = "IamInstanceProfileNotFound" - NodegroupIssueCode_IamLimitExceeded NodegroupIssueCode = "IamLimitExceeded" - NodegroupIssueCode_IamNodeRoleNotFound NodegroupIssueCode = "IamNodeRoleNotFound" - NodegroupIssueCode_NodeCreationFailure NodegroupIssueCode = "NodeCreationFailure" - NodegroupIssueCode_AsgInstanceLaunchFailures NodegroupIssueCode = "AsgInstanceLaunchFailures" - NodegroupIssueCode_InstanceLimitExceeded NodegroupIssueCode = "InstanceLimitExceeded" - NodegroupIssueCode_InsufficientFreeAddresses NodegroupIssueCode = "InsufficientFreeAddresses" - NodegroupIssueCode_AccessDenied NodegroupIssueCode = "AccessDenied" - NodegroupIssueCode_InternalFailure NodegroupIssueCode = "InternalFailure" - NodegroupIssueCode_ClusterUnreachable NodegroupIssueCode = "ClusterUnreachable" - NodegroupIssueCode_Ec2SubnetMissingIpv6Assignment NodegroupIssueCode = "Ec2SubnetMissingIpv6Assignment" + NodegroupIssueCode_AutoScalingGroupNotFound NodegroupIssueCode = 
"AutoScalingGroupNotFound" + NodegroupIssueCode_AutoScalingGroupInvalidConfiguration NodegroupIssueCode = "AutoScalingGroupInvalidConfiguration" + NodegroupIssueCode_Ec2SecurityGroupNotFound NodegroupIssueCode = "Ec2SecurityGroupNotFound" + NodegroupIssueCode_Ec2SecurityGroupDeletionFailure NodegroupIssueCode = "Ec2SecurityGroupDeletionFailure" + NodegroupIssueCode_Ec2LaunchTemplateNotFound NodegroupIssueCode = "Ec2LaunchTemplateNotFound" + NodegroupIssueCode_Ec2LaunchTemplateVersionMismatch NodegroupIssueCode = "Ec2LaunchTemplateVersionMismatch" + NodegroupIssueCode_Ec2SubnetNotFound NodegroupIssueCode = "Ec2SubnetNotFound" + NodegroupIssueCode_Ec2SubnetInvalidConfiguration NodegroupIssueCode = "Ec2SubnetInvalidConfiguration" + NodegroupIssueCode_IamInstanceProfileNotFound NodegroupIssueCode = "IamInstanceProfileNotFound" + NodegroupIssueCode_Ec2SubnetMissingIpv6Assignment NodegroupIssueCode = "Ec2SubnetMissingIpv6Assignment" + NodegroupIssueCode_IamLimitExceeded NodegroupIssueCode = "IamLimitExceeded" + NodegroupIssueCode_IamNodeRoleNotFound NodegroupIssueCode = "IamNodeRoleNotFound" + NodegroupIssueCode_NodeCreationFailure NodegroupIssueCode = "NodeCreationFailure" + NodegroupIssueCode_AsgInstanceLaunchFailures NodegroupIssueCode = "AsgInstanceLaunchFailures" + NodegroupIssueCode_InstanceLimitExceeded NodegroupIssueCode = "InstanceLimitExceeded" + NodegroupIssueCode_InsufficientFreeAddresses NodegroupIssueCode = "InsufficientFreeAddresses" + NodegroupIssueCode_AccessDenied NodegroupIssueCode = "AccessDenied" + NodegroupIssueCode_InternalFailure NodegroupIssueCode = "InternalFailure" + NodegroupIssueCode_ClusterUnreachable NodegroupIssueCode = "ClusterUnreachable" + NodegroupIssueCode_AmiIdNotFound NodegroupIssueCode = "AmiIdNotFound" + NodegroupIssueCode_AutoScalingGroupOptInRequired NodegroupIssueCode = "AutoScalingGroupOptInRequired" + NodegroupIssueCode_AutoScalingGroupRateLimitExceeded NodegroupIssueCode = "AutoScalingGroupRateLimitExceeded" + NodegroupIssueCode_Ec2LaunchTemplateDeletionFailure NodegroupIssueCode = "Ec2LaunchTemplateDeletionFailure" + NodegroupIssueCode_Ec2LaunchTemplateInvalidConfiguration NodegroupIssueCode = "Ec2LaunchTemplateInvalidConfiguration" + NodegroupIssueCode_Ec2LaunchTemplateMaxLimitExceeded NodegroupIssueCode = "Ec2LaunchTemplateMaxLimitExceeded" + NodegroupIssueCode_Ec2SubnetListTooLong NodegroupIssueCode = "Ec2SubnetListTooLong" + NodegroupIssueCode_IamThrottling NodegroupIssueCode = "IamThrottling" + NodegroupIssueCode_NodeTerminationFailure NodegroupIssueCode = "NodeTerminationFailure" + NodegroupIssueCode_PodEvictionFailure NodegroupIssueCode = "PodEvictionFailure" + NodegroupIssueCode_SourceEc2LaunchTemplateNotFound NodegroupIssueCode = "SourceEc2LaunchTemplateNotFound" + NodegroupIssueCode_LimitExceeded NodegroupIssueCode = "LimitExceeded" + NodegroupIssueCode_Unknown NodegroupIssueCode = "Unknown" + NodegroupIssueCode_AutoScalingGroupInstanceRefreshActive NodegroupIssueCode = "AutoScalingGroupInstanceRefreshActive" ) type NodegroupStatus string diff --git a/apis/elasticache/v1alpha1/zz_cache_parameter_group.go b/apis/elasticache/v1alpha1/zz_cache_parameter_group.go index fd4a97caf7..d8d8e15643 100644 --- a/apis/elasticache/v1alpha1/zz_cache_parameter_group.go +++ b/apis/elasticache/v1alpha1/zz_cache_parameter_group.go @@ -33,7 +33,7 @@ type CacheParameterGroupParameters struct { // can be used with. 
// // Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | - // redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x + // redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x | redis7 // +kubebuilder:validation:Required CacheParameterGroupFamily *string `json:"cacheParameterGroupFamily"` // A user-specified description for the cache parameter group. diff --git a/apis/elasticache/v1alpha1/zz_enums.go b/apis/elasticache/v1alpha1/zz_enums.go index bbbe6f8eb4..08c51da575 100644 --- a/apis/elasticache/v1alpha1/zz_enums.go +++ b/apis/elasticache/v1alpha1/zz_enums.go @@ -64,6 +64,14 @@ const ( ChangeType_requires_reboot ChangeType = "requires-reboot" ) +type ClusterMode string + +const ( + ClusterMode_enabled ClusterMode = "enabled" + ClusterMode_disabled ClusterMode = "disabled" + ClusterMode_compatible ClusterMode = "compatible" +) + type DataTieringStatus string const ( diff --git a/apis/elbv2/v1alpha1/zz_enums.go b/apis/elbv2/v1alpha1/zz_enums.go index 45d7caf8a4..75e2629174 100644 --- a/apis/elbv2/v1alpha1/zz_enums.go +++ b/apis/elbv2/v1alpha1/zz_enums.go @@ -44,6 +44,13 @@ const ( AuthenticateOIDCActionConditionalBehaviorEnum_authenticate AuthenticateOIDCActionConditionalBehaviorEnum = "authenticate" ) +type EnforceSecurityGroupInboundRulesOnPrivateLinkTrafficEnum string + +const ( + EnforceSecurityGroupInboundRulesOnPrivateLinkTrafficEnum_on EnforceSecurityGroupInboundRulesOnPrivateLinkTrafficEnum = "on" + EnforceSecurityGroupInboundRulesOnPrivateLinkTrafficEnum_off EnforceSecurityGroupInboundRulesOnPrivateLinkTrafficEnum = "off" +) + type IPAddressType string const ( diff --git a/apis/elbv2/v1alpha1/zz_generated.deepcopy.go b/apis/elbv2/v1alpha1/zz_generated.deepcopy.go index e08340e49f..71cbaf38f4 100644 --- a/apis/elbv2/v1alpha1/zz_generated.deepcopy.go +++ b/apis/elbv2/v1alpha1/zz_generated.deepcopy.go @@ -1019,6 +1019,11 @@ func (in *LoadBalancerObservation) DeepCopyInto(out *LoadBalancerObservation) { *out = new(string) **out = **in } + if in.EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic != nil { + in, out := &in.EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic, &out.EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic + *out = new(string) + **out = **in + } if in.LoadBalancerARN != nil { in, out := &in.LoadBalancerARN, &out.LoadBalancerARN *out = new(string) @@ -1228,6 +1233,11 @@ func (in *LoadBalancer_SDK) DeepCopyInto(out *LoadBalancer_SDK) { *out = new(string) **out = **in } + if in.EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic != nil { + in, out := &in.EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic, &out.EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic + *out = new(string) + **out = **in + } if in.IPAddressType != nil { in, out := &in.IPAddressType, &out.IPAddressType *out = new(string) diff --git a/apis/elbv2/v1alpha1/zz_load_balancer.go b/apis/elbv2/v1alpha1/zz_load_balancer.go index 0ba4ea8a09..6afe785497 100644 --- a/apis/elbv2/v1alpha1/zz_load_balancer.go +++ b/apis/elbv2/v1alpha1/zz_load_balancer.go @@ -57,8 +57,8 @@ type LoadBalancerParameters struct { // // You cannot specify a scheme for a Gateway Load Balancer. Scheme *string `json:"scheme,omitempty"` - // [Application Load Balancers] The IDs of the security groups for the load - // balancer. + // [Application Load Balancers and Network Load Balancers] The IDs of the security + // groups for the load balancer. SecurityGroups []*string `json:"securityGroups,omitempty"` // The IDs of the public subnets. You can specify only one subnet per Availability // Zone. 
You must specify either subnets or subnet mappings, but not both. @@ -120,6 +120,9 @@ type LoadBalancerObservation struct { CreatedTime *metav1.Time `json:"createdTime,omitempty"` // The public DNS name of the load balancer. DNSName *string `json:"dnsName,omitempty"` + // Indicates whether to evaluate inbound security group rules for traffic sent + // to a Network Load Balancer through Amazon Web Services PrivateLink. + EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic *string `json:"enforceSecurityGroupInboundRulesOnPrivateLinkTraffic,omitempty"` // The Amazon Resource Name (ARN) of the load balancer. LoadBalancerARN *string `json:"loadBalancerARN,omitempty"` // The name of the load balancer. diff --git a/apis/elbv2/v1alpha1/zz_target_group.go b/apis/elbv2/v1alpha1/zz_target_group.go index 2acab13521..d5913d3570 100644 --- a/apis/elbv2/v1alpha1/zz_target_group.go +++ b/apis/elbv2/v1alpha1/zz_target_group.go @@ -67,7 +67,7 @@ type TargetGroupParameters struct { // The number of consecutive health check successes required before considering // a target healthy. The range is 2-10. If the target group protocol is TCP, // TCP_UDP, UDP, TLS, HTTP or HTTPS, the default is 5. For target groups with - // a protocol of GENEVE, the default is 3. If the target type is lambda, the + // a protocol of GENEVE, the default is 5. If the target type is lambda, the // default is 5. HealthyThresholdCount *int64 `json:"healthyThresholdCount,omitempty"` // The type of IP address used for this target group. The possible values are @@ -124,7 +124,7 @@ type TargetGroupParameters struct { // The number of consecutive health check failures required before considering // a target unhealthy. The range is 2-10. If the target group protocol is TCP, // TCP_UDP, UDP, TLS, HTTP or HTTPS, the default is 2. For target groups with - // a protocol of GENEVE, the default is 3. If the target type is lambda, the + // a protocol of GENEVE, the default is 2. If the target type is lambda, the // default is 5. UnhealthyThresholdCount *int64 `json:"unhealthyThresholdCount,omitempty"` // The identifier of the virtual private cloud (VPC). 
If the target is a Lambda diff --git a/apis/elbv2/v1alpha1/zz_types.go b/apis/elbv2/v1alpha1/zz_types.go index 2199450a9e..f8ef34dfaa 100644 --- a/apis/elbv2/v1alpha1/zz_types.go +++ b/apis/elbv2/v1alpha1/zz_types.go @@ -199,6 +199,8 @@ type LoadBalancer_SDK struct { DNSName *string `json:"dnsName,omitempty"` + EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic *string `json:"enforceSecurityGroupInboundRulesOnPrivateLinkTraffic,omitempty"` + IPAddressType *string `json:"ipAddressType,omitempty"` LoadBalancerARN *string `json:"loadBalancerARN,omitempty"` diff --git a/apis/emrcontainers/v1alpha1/zz_generated.deepcopy.go b/apis/emrcontainers/v1alpha1/zz_generated.deepcopy.go index 68110c3977..e68e3bc4eb 100644 --- a/apis/emrcontainers/v1alpha1/zz_generated.deepcopy.go +++ b/apis/emrcontainers/v1alpha1/zz_generated.deepcopy.go @@ -432,6 +432,11 @@ func (in *JobRunParameters) DeepCopyInto(out *JobRunParameters) { *out = new(string) **out = **in } + if in.RetryPolicyConfiguration != nil { + in, out := &in.RetryPolicyConfiguration, &out.RetryPolicyConfiguration + *out = new(RetryPolicyConfiguration) + (*in).DeepCopyInto(*out) + } if in.Tags != nil { in, out := &in.Tags, &out.Tags *out = make(map[string]*string, len(*in)) @@ -550,6 +555,16 @@ func (in *JobRun_SDK) DeepCopyInto(out *JobRun_SDK) { *out = new(string) **out = **in } + if in.RetryPolicyConfiguration != nil { + in, out := &in.RetryPolicyConfiguration, &out.RetryPolicyConfiguration + *out = new(RetryPolicyConfiguration) + (*in).DeepCopyInto(*out) + } + if in.RetryPolicyExecution != nil { + in, out := &in.RetryPolicyExecution, &out.RetryPolicyExecution + *out = new(RetryPolicyExecution) + (*in).DeepCopyInto(*out) + } if in.State != nil { in, out := &in.State, &out.State *out = new(string) @@ -696,6 +711,46 @@ func (in *ParametricCloudWatchMonitoringConfiguration) DeepCopy() *ParametricClo return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetryPolicyConfiguration) DeepCopyInto(out *RetryPolicyConfiguration) { + *out = *in + if in.MaxAttempts != nil { + in, out := &in.MaxAttempts, &out.MaxAttempts + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryPolicyConfiguration. +func (in *RetryPolicyConfiguration) DeepCopy() *RetryPolicyConfiguration { + if in == nil { + return nil + } + out := new(RetryPolicyConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetryPolicyExecution) DeepCopyInto(out *RetryPolicyExecution) { + *out = *in + if in.CurrentAttemptCount != nil { + in, out := &in.CurrentAttemptCount, &out.CurrentAttemptCount + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryPolicyExecution. +func (in *RetryPolicyExecution) DeepCopy() *RetryPolicyExecution { + if in == nil { + return nil + } + out := new(RetryPolicyExecution) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *SparkSQLJobDriver) DeepCopyInto(out *SparkSQLJobDriver) { *out = *in diff --git a/apis/emrcontainers/v1alpha1/zz_job_run.go b/apis/emrcontainers/v1alpha1/zz_job_run.go index de8d328e39..3c81176b6b 100644 --- a/apis/emrcontainers/v1alpha1/zz_job_run.go +++ b/apis/emrcontainers/v1alpha1/zz_job_run.go @@ -41,6 +41,8 @@ type JobRunParameters struct { JobTemplateParameters map[string]*string `json:"jobTemplateParameters,omitempty"` // The Amazon EMR release version to use for the job run. ReleaseLabel *string `json:"releaseLabel,omitempty"` + // The retry policy configuration for the job run. + RetryPolicyConfiguration *RetryPolicyConfiguration `json:"retryPolicyConfiguration,omitempty"` // The tags assigned to job runs. Tags map[string]*string `json:"tags,omitempty"` CustomJobRunParameters `json:",inline"` diff --git a/apis/emrcontainers/v1alpha1/zz_types.go b/apis/emrcontainers/v1alpha1/zz_types.go index 8b9dbfe941..0615948c1f 100644 --- a/apis/emrcontainers/v1alpha1/zz_types.go +++ b/apis/emrcontainers/v1alpha1/zz_types.go @@ -39,7 +39,7 @@ type Configuration struct { // +kubebuilder:skipversion type ContainerInfo struct { - // The information about the EKS cluster. + // The information about the Amazon EKS cluster. EKSInfo *EKSInfo `json:"eksInfo,omitempty"` } @@ -112,6 +112,10 @@ type JobRun_SDK struct { Name *string `json:"name,omitempty"` ReleaseLabel *string `json:"releaseLabel,omitempty"` + // The retry policy configuration for the job run. + RetryPolicyConfiguration *RetryPolicyConfiguration `json:"retryPolicyConfiguration,omitempty"` + // The current status of the retry policy execution for the job. + RetryPolicyExecution *RetryPolicyExecution `json:"retryPolicyExecution,omitempty"` State *string `json:"state,omitempty"` @@ -149,6 +153,16 @@ type ParametricCloudWatchMonitoringConfiguration struct { LogStreamNamePrefix *string `json:"logStreamNamePrefix,omitempty"` } +// +kubebuilder:skipversion +type RetryPolicyConfiguration struct { + MaxAttempts *int64 `json:"maxAttempts,omitempty"` +} + +// +kubebuilder:skipversion +type RetryPolicyExecution struct { + CurrentAttemptCount *int64 `json:"currentAttemptCount,omitempty"` +} + // +kubebuilder:skipversion type SparkSQLJobDriver struct { EntryPoint *string `json:"entryPoint,omitempty"` diff --git a/apis/globalaccelerator/v1alpha1/referencers.go b/apis/globalaccelerator/v1alpha1/referencers.go index bb5ff9171a..5167230080 100644 --- a/apis/globalaccelerator/v1alpha1/referencers.go +++ b/apis/globalaccelerator/v1alpha1/referencers.go @@ -3,7 +3,7 @@ package v1alpha1 import ( "github.com/crossplane/crossplane-runtime/pkg/reference" "github.com/crossplane/crossplane-runtime/pkg/resource" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) // AcceleratorARN returns the status.atProvider.ARN of an Accelerator @@ -14,7 +14,7 @@ func AcceleratorARN() reference.ExtractValueFn { return "" } - return pointer.StringDeref(r.Status.AtProvider.AcceleratorARN, "") + return ptr.Deref(r.Status.AtProvider.AcceleratorARN, "") } } @@ -26,6 +26,6 @@ func ListenerARN() reference.ExtractValueFn { return "" } - return pointer.StringDeref(r.Status.AtProvider.ListenerARN, "") + return ptr.Deref(r.Status.AtProvider.ListenerARN, "") } } diff --git a/apis/glue/v1alpha1/zz_enums.go b/apis/glue/v1alpha1/zz_enums.go index c5d72e5a1d..15ba557069 100644 --- a/apis/glue/v1alpha1/zz_enums.go +++ b/apis/glue/v1alpha1/zz_enums.go @@ -18,6 +18,12 @@ limitations under the License.
package v1alpha1 +type AdditionalOptionKeys string + +const ( + AdditionalOptionKeys_performanceTuning_caching AdditionalOptionKeys = "performanceTuning.caching" +) + type AggFunction string const ( @@ -155,6 +161,15 @@ const ( ConnectionPropertyKey_CONNECTOR_URL ConnectionPropertyKey = "CONNECTOR_URL" ConnectionPropertyKey_CONNECTOR_TYPE ConnectionPropertyKey = "CONNECTOR_TYPE" ConnectionPropertyKey_CONNECTOR_CLASS_NAME ConnectionPropertyKey = "CONNECTOR_CLASS_NAME" + ConnectionPropertyKey_KAFKA_SASL_MECHANISM ConnectionPropertyKey = "KAFKA_SASL_MECHANISM" + ConnectionPropertyKey_KAFKA_SASL_SCRAM_USERNAME ConnectionPropertyKey = "KAFKA_SASL_SCRAM_USERNAME" + ConnectionPropertyKey_KAFKA_SASL_SCRAM_PASSWORD ConnectionPropertyKey = "KAFKA_SASL_SCRAM_PASSWORD" + ConnectionPropertyKey_KAFKA_SASL_SCRAM_SECRETS_ARN ConnectionPropertyKey = "KAFKA_SASL_SCRAM_SECRETS_ARN" + ConnectionPropertyKey_ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD ConnectionPropertyKey = "ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD" + ConnectionPropertyKey_KAFKA_SASL_GSSAPI_KEYTAB ConnectionPropertyKey = "KAFKA_SASL_GSSAPI_KEYTAB" + ConnectionPropertyKey_KAFKA_SASL_GSSAPI_KRB5_CONF ConnectionPropertyKey = "KAFKA_SASL_GSSAPI_KRB5_CONF" + ConnectionPropertyKey_KAFKA_SASL_GSSAPI_SERVICE ConnectionPropertyKey = "KAFKA_SASL_GSSAPI_SERVICE" + ConnectionPropertyKey_KAFKA_SASL_GSSAPI_PRINCIPAL ConnectionPropertyKey = "KAFKA_SASL_GSSAPI_PRINCIPAL" ) type ConnectionType string @@ -212,6 +227,14 @@ const ( CsvHeaderOption_ABSENT CsvHeaderOption = "ABSENT" ) +type CsvSerdeOption string + +const ( + CsvSerdeOption_OpenCSVSerDe CsvSerdeOption = "OpenCSVSerDe" + CsvSerdeOption_LazySimpleSerDe CsvSerdeOption = "LazySimpleSerDe" + CsvSerdeOption_None CsvSerdeOption = "None" +) + type DQStopJobOnFailureTiming string const ( @@ -250,6 +273,13 @@ const ( DeleteBehavior_DEPRECATE_IN_DATABASE DeleteBehavior = "DEPRECATE_IN_DATABASE" ) +type DeltaTargetCompressionType string + +const ( + DeltaTargetCompressionType_uncompressed DeltaTargetCompressionType = "uncompressed" + DeltaTargetCompressionType_snappy DeltaTargetCompressionType = "snappy" +) + type EnableHybridValues string const ( @@ -272,6 +302,16 @@ const ( ExistCondition_NONE ExistCondition = "NONE" ) +type FederationSourceErrorCode string + +const ( + FederationSourceErrorCode_InvalidResponseException FederationSourceErrorCode = "InvalidResponseException" + FederationSourceErrorCode_OperationTimeoutException FederationSourceErrorCode = "OperationTimeoutException" + FederationSourceErrorCode_OperationNotSupportedException FederationSourceErrorCode = "OperationNotSupportedException" + FederationSourceErrorCode_InternalServiceException FederationSourceErrorCode = "InternalServiceException" + FederationSourceErrorCode_ThrottlingException FederationSourceErrorCode = "ThrottlingException" +) + type FieldName string const ( @@ -334,6 +374,25 @@ const ( GlueRecordType_DOUBLE GlueRecordType = "DOUBLE" ) +type HudiTargetCompressionType string + +const ( + HudiTargetCompressionType_gzip HudiTargetCompressionType = "gzip" + HudiTargetCompressionType_lzo HudiTargetCompressionType = "lzo" + HudiTargetCompressionType_uncompressed HudiTargetCompressionType = "uncompressed" + HudiTargetCompressionType_snappy HudiTargetCompressionType = "snappy" +) + +type JDBCConnectionType string + +const ( + JDBCConnectionType_sqlserver JDBCConnectionType = "sqlserver" + JDBCConnectionType_mysql JDBCConnectionType = "mysql" + JDBCConnectionType_oracle JDBCConnectionType = "oracle" + JDBCConnectionType_postgresql JDBCConnectionType = 
"postgresql" + JDBCConnectionType_redshift JDBCConnectionType = "redshift" +) + type JDBCDataType string const ( @@ -452,6 +511,12 @@ const ( MLUserDataEncryptionModeString_SSE_KMS MLUserDataEncryptionModeString = "SSE-KMS" ) +type MetadataOperation string + +const ( + MetadataOperation_CREATE MetadataOperation = "CREATE" +) + type NodeType string const ( @@ -510,6 +575,8 @@ type PermissionType string const ( PermissionType_COLUMN_PERMISSION PermissionType = "COLUMN_PERMISSION" PermissionType_CELL_FILTER_PERMISSION PermissionType = "CELL_FILTER_PERMISSION" + PermissionType_NESTED_PERMISSION PermissionType = "NESTED_PERMISSION" + PermissionType_NESTED_CELL_PERMISSION PermissionType = "NESTED_CELL_PERMISSION" ) type PiiType string @@ -556,8 +623,9 @@ const ( type ResourceShareType string const ( - ResourceShareType_FOREIGN ResourceShareType = "FOREIGN" - ResourceShareType_ALL ResourceShareType = "ALL" + ResourceShareType_FOREIGN ResourceShareType = "FOREIGN" + ResourceShareType_ALL ResourceShareType = "ALL" + ResourceShareType_FEDERATED ResourceShareType = "FEDERATED" ) type ResourceType string @@ -662,6 +730,7 @@ const ( StartingPosition_latest StartingPosition = "latest" StartingPosition_trim_horizon StartingPosition = "trim_horizon" StartingPosition_earliest StartingPosition = "earliest" + StartingPosition_timestamp StartingPosition = "timestamp" ) type StatementState string @@ -683,6 +752,8 @@ const ( TargetFormat_avro TargetFormat = "avro" TargetFormat_orc TargetFormat = "orc" TargetFormat_parquet TargetFormat = "parquet" + TargetFormat_hudi TargetFormat = "hudi" + TargetFormat_delta TargetFormat = "delta" ) type TaskRunSortColumnType string @@ -789,6 +860,9 @@ const ( WorkerType_G_1X WorkerType = "G.1X" WorkerType_G_2X WorkerType = "G.2X" WorkerType_G_025X WorkerType = "G.025X" + WorkerType_G_4X WorkerType = "G.4X" + WorkerType_G_8X WorkerType = "G.8X" + WorkerType_Z_2X WorkerType = "Z.2X" ) type WorkflowRunStatus string diff --git a/apis/glue/v1alpha1/zz_generated.deepcopy.go b/apis/glue/v1alpha1/zz_generated.deepcopy.go index ef4c6cebc9..15c65664e6 100644 --- a/apis/glue/v1alpha1/zz_generated.deepcopy.go +++ b/apis/glue/v1alpha1/zz_generated.deepcopy.go @@ -171,6 +171,255 @@ func (in *AggregateOperation) DeepCopy() *AggregateOperation { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AmazonRedshiftAdvancedOption) DeepCopyInto(out *AmazonRedshiftAdvancedOption) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AmazonRedshiftAdvancedOption. +func (in *AmazonRedshiftAdvancedOption) DeepCopy() *AmazonRedshiftAdvancedOption { + if in == nil { + return nil + } + out := new(AmazonRedshiftAdvancedOption) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AmazonRedshiftNodeData) DeepCopyInto(out *AmazonRedshiftNodeData) { + *out = *in + if in.AccessType != nil { + in, out := &in.AccessType, &out.AccessType + *out = new(string) + **out = **in + } + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.AdvancedOptions != nil { + in, out := &in.AdvancedOptions, &out.AdvancedOptions + *out = make([]*AmazonRedshiftAdvancedOption, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(AmazonRedshiftAdvancedOption) + (*in).DeepCopyInto(*out) + } + } + } + if in.CatalogDatabase != nil { + in, out := &in.CatalogDatabase, &out.CatalogDatabase + *out = new(Option) + (*in).DeepCopyInto(*out) + } + if in.CatalogRedshiftSchema != nil { + in, out := &in.CatalogRedshiftSchema, &out.CatalogRedshiftSchema + *out = new(string) + **out = **in + } + if in.CatalogRedshiftTable != nil { + in, out := &in.CatalogRedshiftTable, &out.CatalogRedshiftTable + *out = new(string) + **out = **in + } + if in.CatalogTable != nil { + in, out := &in.CatalogTable, &out.CatalogTable + *out = new(Option) + (*in).DeepCopyInto(*out) + } + if in.Connection != nil { + in, out := &in.Connection, &out.Connection + *out = new(Option) + (*in).DeepCopyInto(*out) + } + if in.CrawlerConnection != nil { + in, out := &in.CrawlerConnection, &out.CrawlerConnection + *out = new(string) + **out = **in + } + if in.IAMRole != nil { + in, out := &in.IAMRole, &out.IAMRole + *out = new(Option) + (*in).DeepCopyInto(*out) + } + if in.MergeAction != nil { + in, out := &in.MergeAction, &out.MergeAction + *out = new(string) + **out = **in + } + if in.MergeClause != nil { + in, out := &in.MergeClause, &out.MergeClause + *out = new(string) + **out = **in + } + if in.MergeWhenMatched != nil { + in, out := &in.MergeWhenMatched, &out.MergeWhenMatched + *out = new(string) + **out = **in + } + if in.MergeWhenNotMatched != nil { + in, out := &in.MergeWhenNotMatched, &out.MergeWhenNotMatched + *out = new(string) + **out = **in + } + if in.PostAction != nil { + in, out := &in.PostAction, &out.PostAction + *out = new(string) + **out = **in + } + if in.PreAction != nil { + in, out := &in.PreAction, &out.PreAction + *out = new(string) + **out = **in + } + if in.SampleQuery != nil { + in, out := &in.SampleQuery, &out.SampleQuery + *out = new(string) + **out = **in + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(Option) + (*in).DeepCopyInto(*out) + } + if in.SelectedColumns != nil { + in, out := &in.SelectedColumns, &out.SelectedColumns + *out = make([]*Option, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Option) + (*in).DeepCopyInto(*out) + } + } + } + if in.SourceType != nil { + in, out := &in.SourceType, &out.SourceType + *out = new(string) + **out = **in + } + if in.StagingTable != nil { + in, out := &in.StagingTable, &out.StagingTable + *out = new(string) + **out = **in + } + if in.Table != nil { + in, out := &in.Table, &out.Table + *out = new(Option) + (*in).DeepCopyInto(*out) + } + if in.TablePrefix != nil { + in, out := &in.TablePrefix, &out.TablePrefix + *out = new(string) + **out = **in + } + if in.TableSchema != nil { + in, out := &in.TableSchema, &out.TableSchema + *out = make([]*Option, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Option) + (*in).DeepCopyInto(*out) + } + } + } + if in.TempDir != nil { + in, out := &in.TempDir, &out.TempDir + *out = 
new(string) + **out = **in + } + if in.Upsert != nil { + in, out := &in.Upsert, &out.Upsert + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AmazonRedshiftNodeData. +func (in *AmazonRedshiftNodeData) DeepCopy() *AmazonRedshiftNodeData { + if in == nil { + return nil + } + out := new(AmazonRedshiftNodeData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AmazonRedshiftSource) DeepCopyInto(out *AmazonRedshiftSource) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(AmazonRedshiftNodeData) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AmazonRedshiftSource. +func (in *AmazonRedshiftSource) DeepCopy() *AmazonRedshiftSource { + if in == nil { + return nil + } + out := new(AmazonRedshiftSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AmazonRedshiftTarget) DeepCopyInto(out *AmazonRedshiftTarget) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(AmazonRedshiftNodeData) + (*in).DeepCopyInto(*out) + } + if in.Inputs != nil { + in, out := &in.Inputs, &out.Inputs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AmazonRedshiftTarget. +func (in *AmazonRedshiftTarget) DeepCopy() *AmazonRedshiftTarget { + if in == nil { + return nil + } + out := new(AmazonRedshiftTarget) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ApplyMapping) DeepCopyInto(out *ApplyMapping) { *out = *in @@ -437,86 +686,198 @@ func (in *BlueprintRun) DeepCopy() *BlueprintRun { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CatalogEntry) DeepCopyInto(out *CatalogEntry) { +func (in *CatalogDeltaSource) DeepCopyInto(out *CatalogDeltaSource) { *out = *in - if in.DatabaseName != nil { - in, out := &in.DatabaseName, &out.DatabaseName + if in.AdditionalDeltaOptions != nil { + in, out := &in.AdditionalDeltaOptions, &out.AdditionalDeltaOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Database != nil { + in, out := &in.Database, &out.Database *out = new(string) **out = **in } - if in.TableName != nil { - in, out := &in.TableName, &out.TableName + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OutputSchemas != nil { + in, out := &in.OutputSchemas, &out.OutputSchemas + *out = make([]*GlueSchema, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(GlueSchema) + (*in).DeepCopyInto(*out) + } + } + } + if in.Table != nil { + in, out := &in.Table, &out.Table *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogEntry. -func (in *CatalogEntry) DeepCopy() *CatalogEntry { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogDeltaSource. +func (in *CatalogDeltaSource) DeepCopy() *CatalogDeltaSource { if in == nil { return nil } - out := new(CatalogEntry) + out := new(CatalogDeltaSource) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CatalogImportStatus) DeepCopyInto(out *CatalogImportStatus) { +func (in *CatalogEntry) DeepCopyInto(out *CatalogEntry) { *out = *in - if in.ImportCompleted != nil { - in, out := &in.ImportCompleted, &out.ImportCompleted - *out = new(bool) + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) **out = **in } - if in.ImportTime != nil { - in, out := &in.ImportTime, &out.ImportTime - *out = (*in).DeepCopy() - } - if in.ImportedBy != nil { - in, out := &in.ImportedBy, &out.ImportedBy + if in.TableName != nil { + in, out := &in.TableName, &out.TableName *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogImportStatus. -func (in *CatalogImportStatus) DeepCopy() *CatalogImportStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogEntry. +func (in *CatalogEntry) DeepCopy() *CatalogEntry { if in == nil { return nil } - out := new(CatalogImportStatus) + out := new(CatalogEntry) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CatalogKafkaSource) DeepCopyInto(out *CatalogKafkaSource) { +func (in *CatalogHudiSource) DeepCopyInto(out *CatalogHudiSource) { *out = *in - if in.DataPreviewOptions != nil { - in, out := &in.DataPreviewOptions, &out.DataPreviewOptions - *out = new(StreamingDataPreviewOptions) - (*in).DeepCopyInto(*out) + if in.AdditionalHudiOptions != nil { + in, out := &in.AdditionalHudiOptions, &out.AdditionalHudiOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } } if in.Database != nil { in, out := &in.Database, &out.Database *out = new(string) **out = **in } - if in.DetectSchema != nil { - in, out := &in.DetectSchema, &out.DetectSchema - *out = new(bool) - **out = **in - } if in.Name != nil { in, out := &in.Name, &out.Name *out = new(string) **out = **in } - if in.StreamingOptions != nil { - in, out := &in.StreamingOptions, &out.StreamingOptions - *out = new(KafkaStreamingSourceOptions) - (*in).DeepCopyInto(*out) + if in.OutputSchemas != nil { + in, out := &in.OutputSchemas, &out.OutputSchemas + *out = make([]*GlueSchema, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(GlueSchema) + (*in).DeepCopyInto(*out) + } + } + } + if in.Table != nil { + in, out := &in.Table, &out.Table + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogHudiSource. +func (in *CatalogHudiSource) DeepCopy() *CatalogHudiSource { + if in == nil { + return nil + } + out := new(CatalogHudiSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CatalogImportStatus) DeepCopyInto(out *CatalogImportStatus) { + *out = *in + if in.ImportCompleted != nil { + in, out := &in.ImportCompleted, &out.ImportCompleted + *out = new(bool) + **out = **in + } + if in.ImportTime != nil { + in, out := &in.ImportTime, &out.ImportTime + *out = (*in).DeepCopy() + } + if in.ImportedBy != nil { + in, out := &in.ImportedBy, &out.ImportedBy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogImportStatus. +func (in *CatalogImportStatus) DeepCopy() *CatalogImportStatus { + if in == nil { + return nil + } + out := new(CatalogImportStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CatalogKafkaSource) DeepCopyInto(out *CatalogKafkaSource) { + *out = *in + if in.DataPreviewOptions != nil { + in, out := &in.DataPreviewOptions, &out.DataPreviewOptions + *out = new(StreamingDataPreviewOptions) + (*in).DeepCopyInto(*out) + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.DetectSchema != nil { + in, out := &in.DetectSchema, &out.DetectSchema + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StreamingOptions != nil { + in, out := &in.StreamingOptions, &out.StreamingOptions + *out = new(KafkaStreamingSourceOptions) + (*in).DeepCopyInto(*out) } if in.Table != nil { in, out := &in.Table, &out.Table @@ -896,6 +1257,16 @@ func (in *CodeGenConfigurationNode) DeepCopyInto(out *CodeGenConfigurationNode) *out = new(Aggregate) (*in).DeepCopyInto(*out) } + if in.AmazonRedshiftSource != nil { + in, out := &in.AmazonRedshiftSource, &out.AmazonRedshiftSource + *out = new(AmazonRedshiftSource) + (*in).DeepCopyInto(*out) + } + if in.AmazonRedshiftTarget != nil { + in, out := &in.AmazonRedshiftTarget, &out.AmazonRedshiftTarget + *out = new(AmazonRedshiftTarget) + (*in).DeepCopyInto(*out) + } if in.ApplyMapping != nil { in, out := &in.ApplyMapping, &out.ApplyMapping *out = new(ApplyMapping) @@ -906,6 +1277,16 @@ func (in *CodeGenConfigurationNode) DeepCopyInto(out *CodeGenConfigurationNode) *out = new(AthenaConnectorSource) (*in).DeepCopyInto(*out) } + if in.CatalogDeltaSource != nil { + in, out := &in.CatalogDeltaSource, &out.CatalogDeltaSource + *out = new(CatalogDeltaSource) + (*in).DeepCopyInto(*out) + } + if in.CatalogHudiSource != nil { + in, out := &in.CatalogHudiSource, &out.CatalogHudiSource + *out = new(CatalogHudiSource) + (*in).DeepCopyInto(*out) + } if in.CatalogKafkaSource != nil { in, out := &in.CatalogKafkaSource, &out.CatalogKafkaSource *out = new(CatalogKafkaSource) @@ -931,6 +1312,11 @@ func (in *CodeGenConfigurationNode) DeepCopyInto(out *CodeGenConfigurationNode) *out = new(CustomCode) (*in).DeepCopyInto(*out) } + if in.DirectJDBCSource != nil { + in, out := &in.DirectJDBCSource, &out.DirectJDBCSource + *out = new(DirectJDBCSource) + (*in).DeepCopyInto(*out) + } if in.DirectKafkaSource != nil { in, out := &in.DirectKafkaSource, &out.DirectKafkaSource *out = new(DirectKafkaSource) @@ -971,6 +1357,11 @@ func (in *CodeGenConfigurationNode) DeepCopyInto(out *CodeGenConfigurationNode) *out = new(EvaluateDataQuality) (*in).DeepCopyInto(*out) } + if in.EvaluateDataQualityMultiFrame != nil { + in, out := &in.EvaluateDataQualityMultiFrame, &out.EvaluateDataQualityMultiFrame + *out = new(EvaluateDataQualityMultiFrame) + (*in).DeepCopyInto(*out) + } if in.FillMissingValues != nil { in, out := &in.FillMissingValues, &out.FillMissingValues *out = new(FillMissingValues) @@ -1056,6 +1447,11 @@ func (in *CodeGenConfigurationNode) DeepCopyInto(out *CodeGenConfigurationNode) *out = new(PostgreSQLCatalogTarget) (*in).DeepCopyInto(*out) } + if in.Recipe != nil { + in, out := &in.Recipe, &out.Recipe + *out = new(Recipe) + (*in).DeepCopyInto(*out) + } if in.RedshiftSource != nil { in, out := &in.RedshiftSource, &out.RedshiftSource *out = new(RedshiftSource) @@ -1076,6 +1472,16 @@ func (in *CodeGenConfigurationNode) DeepCopyInto(out *CodeGenConfigurationNode) *out = new(RenameField) (*in).DeepCopyInto(*out) } + if in.S3CatalogDeltaSource != nil { + in, out := &in.S3CatalogDeltaSource, &out.S3CatalogDeltaSource + *out = 
new(S3CatalogDeltaSource) + (*in).DeepCopyInto(*out) + } + if in.S3CatalogHudiSource != nil { + in, out := &in.S3CatalogHudiSource, &out.S3CatalogHudiSource + *out = new(S3CatalogHudiSource) + (*in).DeepCopyInto(*out) + } if in.S3CatalogSource != nil { in, out := &in.S3CatalogSource, &out.S3CatalogSource *out = new(S3CatalogSource) @@ -1091,6 +1497,21 @@ func (in *CodeGenConfigurationNode) DeepCopyInto(out *CodeGenConfigurationNode) *out = new(S3CsvSource) (*in).DeepCopyInto(*out) } + if in.S3DeltaCatalogTarget != nil { + in, out := &in.S3DeltaCatalogTarget, &out.S3DeltaCatalogTarget + *out = new(S3DeltaCatalogTarget) + (*in).DeepCopyInto(*out) + } + if in.S3DeltaDirectTarget != nil { + in, out := &in.S3DeltaDirectTarget, &out.S3DeltaDirectTarget + *out = new(S3DeltaDirectTarget) + (*in).DeepCopyInto(*out) + } + if in.S3DeltaSource != nil { + in, out := &in.S3DeltaSource, &out.S3DeltaSource + *out = new(S3DeltaSource) + (*in).DeepCopyInto(*out) + } if in.S3DirectTarget != nil { in, out := &in.S3DirectTarget, &out.S3DirectTarget *out = new(S3DirectTarget) @@ -1101,6 +1522,21 @@ func (in *CodeGenConfigurationNode) DeepCopyInto(out *CodeGenConfigurationNode) *out = new(S3GlueParquetTarget) (*in).DeepCopyInto(*out) } + if in.S3HudiCatalogTarget != nil { + in, out := &in.S3HudiCatalogTarget, &out.S3HudiCatalogTarget + *out = new(S3HudiCatalogTarget) + (*in).DeepCopyInto(*out) + } + if in.S3HudiDirectTarget != nil { + in, out := &in.S3HudiDirectTarget, &out.S3HudiDirectTarget + *out = new(S3HudiDirectTarget) + (*in).DeepCopyInto(*out) + } + if in.S3HudiSource != nil { + in, out := &in.S3HudiSource, &out.S3HudiSource + *out = new(S3HudiSource) + (*in).DeepCopyInto(*out) + } if in.S3JSONSource != nil { in, out := &in.S3JSONSource, &out.S3JSONSource *out = new(S3JSONSource) @@ -1121,6 +1557,16 @@ func (in *CodeGenConfigurationNode) DeepCopyInto(out *CodeGenConfigurationNode) *out = new(SelectFromCollection) (*in).DeepCopyInto(*out) } + if in.SnowflakeSource != nil { + in, out := &in.SnowflakeSource, &out.SnowflakeSource + *out = new(SnowflakeSource) + (*in).DeepCopyInto(*out) + } + if in.SnowflakeTarget != nil { + in, out := &in.SnowflakeTarget, &out.SnowflakeTarget + *out = new(SnowflakeTarget) + (*in).DeepCopyInto(*out) + } if in.SparkConnectorSource != nil { in, out := &in.SparkConnectorSource, &out.SparkConnectorSource *out = new(SparkConnectorSource) @@ -2034,6 +2480,28 @@ func (in *CrawlerTargets) DeepCopyInto(out *CrawlerTargets) { } } } + if in.HudiTargets != nil { + in, out := &in.HudiTargets, &out.HudiTargets + *out = make([]*HudiTarget, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(HudiTarget) + (*in).DeepCopyInto(*out) + } + } + } + if in.IcebergTargets != nil { + in, out := &in.IcebergTargets, &out.IcebergTargets + *out = make([]*IcebergTarget, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(IcebergTarget) + (*in).DeepCopyInto(*out) + } + } + } if in.JdbcTargets != nil { in, out := &in.JdbcTargets, &out.JdbcTargets *out = make([]*JdbcTarget, len(*in)) @@ -2278,6 +2746,11 @@ func (in *CreateCsvClassifierRequest) DeepCopyInto(out *CreateCsvClassifierReque *out = new(string) **out = **in } + if in.Serde != nil { + in, out := &in.Serde, &out.Serde + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CreateCsvClassifierRequest. 
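Every DeepCopyInto body in this patch follows the same mechanical rule: re-allocate each pointer, slice element, and map value so that the copy shares no memory with the receiver. A self-contained sketch of that rule for a single []*string field, mirroring the generated shape above (the example type and helper method names are illustrative, not from the patch):

    package main

    import "fmt"

    type example struct {
        Tags []*string
    }

    // deepCopyInto re-allocates the slice and each non-nil element, exactly as
    // the generated functions above do, so the copy never aliases the original.
    func (in *example) deepCopyInto(out *example) {
        *out = *in
        if in.Tags != nil {
            in, out := &in.Tags, &out.Tags
            *out = make([]*string, len(*in))
            for i := range *in {
                if (*in)[i] != nil {
                    in, out := &(*in)[i], &(*out)[i]
                    *out = new(string)
                    **out = **in
                }
            }
        }
    }

    func main() {
        v := "original"
        src := example{Tags: []*string{&v}}
        var dst example
        src.deepCopyInto(&dst)
        *dst.Tags[0] = "mutated"
        fmt.Println(*src.Tags[0]) // still "original"; no aliasing
    }

This is why even single-field types like RetryPolicyConfiguration get their own generated pair earlier in this patch: Kubernetes controllers hand out copies of cached objects, and any shared pointer between a copy and the cache would let one caller mutate another's view.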
@@ -2448,6 +2921,11 @@ func (in *CsvClassifier) DeepCopyInto(out *CsvClassifier) { *out = new(string) **out = **in } + if in.Serde != nil { + in, out := &in.Serde, &out.Serde + *out = new(string) + **out = **in + } if in.Version != nil { in, out := &in.Version, &out.Version *out = new(int64) @@ -3843,6 +4321,11 @@ func (in *DataQualityRulesetListDetails) DeepCopy() *DataQualityRulesetListDetai // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DataQualityTargetTable) DeepCopyInto(out *DataQualityTargetTable) { *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } if in.DatabaseName != nil { in, out := &in.DatabaseName, &out.DatabaseName *out = new(string) @@ -3905,6 +4388,11 @@ func (in *DatabaseIdentifier) DeepCopyInto(out *DatabaseIdentifier) { *out = new(string) **out = **in } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseIdentifier. @@ -3936,6 +4424,11 @@ func (in *DatabaseInput) DeepCopyInto(out *DatabaseInput) { *out = new(string) **out = **in } + if in.FederatedDatabase != nil { + in, out := &in.FederatedDatabase, &out.FederatedDatabase + *out = new(FederatedDatabase) + (*in).DeepCopyInto(*out) + } if in.LocationURI != nil { in, out := &in.LocationURI, &out.LocationURI *out = new(string) @@ -4127,6 +4620,11 @@ func (in *Database_SDK) DeepCopyInto(out *Database_SDK) { *out = new(string) **out = **in } + if in.FederatedDatabase != nil { + in, out := &in.FederatedDatabase, &out.FederatedDatabase + *out = new(FederatedDatabase) + (*in).DeepCopyInto(*out) + } if in.LocationURI != nil { in, out := &in.LocationURI, &out.LocationURI *out = new(string) @@ -4401,6 +4899,51 @@ func (in *DevEndpointCustomLibraries) DeepCopy() *DevEndpointCustomLibraries { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DirectJDBCSource) DeepCopyInto(out *DirectJDBCSource) { + *out = *in + if in.ConnectionName != nil { + in, out := &in.ConnectionName, &out.ConnectionName + *out = new(string) + **out = **in + } + if in.ConnectionType != nil { + in, out := &in.ConnectionType, &out.ConnectionType + *out = new(string) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RedshiftTmpDir != nil { + in, out := &in.RedshiftTmpDir, &out.RedshiftTmpDir + *out = new(string) + **out = **in + } + if in.Table != nil { + in, out := &in.Table, &out.Table + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectJDBCSource. +func (in *DirectJDBCSource) DeepCopy() *DirectJDBCSource { + if in == nil { + return nil + } + out := new(DirectJDBCSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DirectKafkaSource) DeepCopyInto(out *DirectKafkaSource) { *out = *in @@ -4683,6 +5226,17 @@ func (in *DynamicTransform) DeepCopyInto(out *DynamicTransform) { *out = new(string) **out = **in } + if in.OutputSchemas != nil { + in, out := &in.OutputSchemas, &out.OutputSchemas + *out = make([]*GlueSchema, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(GlueSchema) + (*in).DeepCopyInto(*out) + } + } + } if in.Parameters != nil { in, out := &in.Parameters, &out.Parameters *out = make([]*TransformConfigParameter, len(*in)) @@ -4939,21 +5493,97 @@ func (in *EvaluateDataQuality) DeepCopy() *EvaluateDataQuality { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExecutionProperty) DeepCopyInto(out *ExecutionProperty) { +func (in *EvaluateDataQualityMultiFrame) DeepCopyInto(out *EvaluateDataQualityMultiFrame) { *out = *in - if in.MaxConcurrentRuns != nil { - in, out := &in.MaxConcurrentRuns, &out.MaxConcurrentRuns - *out = new(int64) + if in.AdditionalDataSources != nil { + in, out := &in.AdditionalDataSources, &out.AdditionalDataSources + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AdditionalOptions != nil { + in, out := &in.AdditionalOptions, &out.AdditionalOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Inputs != nil { + in, out := &in.Inputs, &out.Inputs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) **out = **in } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecutionProperty. -func (in *ExecutionProperty) DeepCopy() *ExecutionProperty { - if in == nil { - return nil + if in.PublishingOptions != nil { + in, out := &in.PublishingOptions, &out.PublishingOptions + *out = new(DQResultsPublishingOptions) + (*in).DeepCopyInto(*out) } - out := new(ExecutionProperty) + if in.Ruleset != nil { + in, out := &in.Ruleset, &out.Ruleset + *out = new(string) + **out = **in + } + if in.StopJobOnFailureOptions != nil { + in, out := &in.StopJobOnFailureOptions, &out.StopJobOnFailureOptions + *out = new(DQStopJobOnFailureOptions) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EvaluateDataQualityMultiFrame. +func (in *EvaluateDataQualityMultiFrame) DeepCopy() *EvaluateDataQualityMultiFrame { + if in == nil { + return nil + } + out := new(EvaluateDataQualityMultiFrame) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecutionProperty) DeepCopyInto(out *ExecutionProperty) { + *out = *in + if in.MaxConcurrentRuns != nil { + in, out := &in.MaxConcurrentRuns, &out.MaxConcurrentRuns + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecutionProperty. 
+func (in *ExecutionProperty) DeepCopy() *ExecutionProperty { + if in == nil { + return nil + } + out := new(ExecutionProperty) in.DeepCopyInto(out) return out } @@ -4978,6 +5608,61 @@ func (in *ExportLabelsTaskRunProperties) DeepCopy() *ExportLabelsTaskRunProperti return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FederatedDatabase) DeepCopyInto(out *FederatedDatabase) { + *out = *in + if in.ConnectionName != nil { + in, out := &in.ConnectionName, &out.ConnectionName + *out = new(string) + **out = **in + } + if in.Identifier != nil { + in, out := &in.Identifier, &out.Identifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FederatedDatabase. +func (in *FederatedDatabase) DeepCopy() *FederatedDatabase { + if in == nil { + return nil + } + out := new(FederatedDatabase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FederatedTable) DeepCopyInto(out *FederatedTable) { + *out = *in + if in.ConnectionName != nil { + in, out := &in.ConnectionName, &out.ConnectionName + *out = new(string) + **out = **in + } + if in.DatabaseIdentifier != nil { + in, out := &in.DatabaseIdentifier, &out.DatabaseIdentifier + *out = new(string) + **out = **in + } + if in.Identifier != nil { + in, out := &in.Identifier, &out.Identifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FederatedTable. +func (in *FederatedTable) DeepCopy() *FederatedTable { + if in == nil { + return nil + } + out := new(FederatedTable) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FillMissingValues) DeepCopyInto(out *FillMissingValues) { *out = *in @@ -5429,6 +6114,100 @@ func (in *GrokClassifier) DeepCopy() *GrokClassifier { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HudiTarget) DeepCopyInto(out *HudiTarget) { + *out = *in + if in.ConnectionName != nil { + in, out := &in.ConnectionName, &out.ConnectionName + *out = new(string) + **out = **in + } + if in.Exclusions != nil { + in, out := &in.Exclusions, &out.Exclusions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaximumTraversalDepth != nil { + in, out := &in.MaximumTraversalDepth, &out.MaximumTraversalDepth + *out = new(int64) + **out = **in + } + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HudiTarget. +func (in *HudiTarget) DeepCopy() *HudiTarget { + if in == nil { + return nil + } + out := new(HudiTarget) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IcebergTarget) DeepCopyInto(out *IcebergTarget) { + *out = *in + if in.ConnectionName != nil { + in, out := &in.ConnectionName, &out.ConnectionName + *out = new(string) + **out = **in + } + if in.Exclusions != nil { + in, out := &in.Exclusions, &out.Exclusions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaximumTraversalDepth != nil { + in, out := &in.MaximumTraversalDepth, &out.MaximumTraversalDepth + *out = new(int64) + **out = **in + } + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IcebergTarget. +func (in *IcebergTarget) DeepCopy() *IcebergTarget { + if in == nil { + return nil + } + out := new(IcebergTarget) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ImportLabelsTaskRunProperties) DeepCopyInto(out *ImportLabelsTaskRunProperties) { *out = *in @@ -5838,6 +6617,11 @@ func (in *JobCommand) DeepCopyInto(out *JobCommand) { *out = new(string) **out = **in } + if in.Runtime != nil { + in, out := &in.Runtime, &out.Runtime + *out = new(string) + **out = **in + } if in.ScriptLocation != nil { in, out := &in.ScriptLocation, &out.ScriptLocation *out = new(string) @@ -6592,6 +7376,11 @@ func (in *JoinColumn) DeepCopy() *JoinColumn { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *KafkaStreamingSourceOptions) DeepCopyInto(out *KafkaStreamingSourceOptions) { *out = *in + if in.AddRecordTimestamp != nil { + in, out := &in.AddRecordTimestamp, &out.AddRecordTimestamp + *out = new(string) + **out = **in + } if in.Assign != nil { in, out := &in.Assign, &out.Assign *out = new(string) @@ -6617,11 +7406,21 @@ func (in *KafkaStreamingSourceOptions) DeepCopyInto(out *KafkaStreamingSourceOpt *out = new(string) **out = **in } + if in.EmitConsumerLagMetrics != nil { + in, out := &in.EmitConsumerLagMetrics, &out.EmitConsumerLagMetrics + *out = new(string) + **out = **in + } if in.EndingOffsets != nil { in, out := &in.EndingOffsets, &out.EndingOffsets *out = new(string) **out = **in } + if in.IncludeHeaders != nil { + in, out := &in.IncludeHeaders, &out.IncludeHeaders + *out = new(bool) + **out = **in + } if in.MaxOffsetsPerTrigger != nil { in, out := &in.MaxOffsetsPerTrigger, &out.MaxOffsetsPerTrigger *out = new(int64) @@ -6657,6 +7456,10 @@ func (in *KafkaStreamingSourceOptions) DeepCopyInto(out *KafkaStreamingSourceOpt *out = new(string) **out = **in } + if in.StartingTimestamp != nil { + in, out := &in.StartingTimestamp, &out.StartingTimestamp + *out = (*in).DeepCopy() + } if in.SubscribePattern != nil { in, out := &in.SubscribePattern, &out.SubscribePattern *out = new(string) @@ -6712,6 +7515,11 @@ func (in *KinesisStreamingSourceOptions) DeepCopyInto(out *KinesisStreamingSourc *out = new(bool) **out = **in } + if in.AddRecordTimestamp != nil { + in, out := &in.AddRecordTimestamp, &out.AddRecordTimestamp + *out = new(string) + **out = **in + } if in.AvoidEmptyBatches != nil { in, out := &in.AvoidEmptyBatches, &out.AvoidEmptyBatches *out = new(bool) @@ -6732,6 +7540,11 @@ func (in *KinesisStreamingSourceOptions) DeepCopyInto(out *KinesisStreamingSourc *out = new(int64) **out = **in } + if in.EmitConsumerLagMetrics != nil { + in, out := &in.EmitConsumerLagMetrics, &out.EmitConsumerLagMetrics + *out = new(string) + **out = **in + } if in.EndpointURL != nil { in, out := &in.EndpointURL, &out.EndpointURL *out = new(string) @@ -6787,6 +7600,10 @@ func (in *KinesisStreamingSourceOptions) DeepCopyInto(out *KinesisStreamingSourc *out = new(string) **out = **in } + if in.StartingTimestamp != nil { + in, out := &in.StartingTimestamp, &out.StartingTimestamp + *out = (*in).DeepCopy() + } if in.StreamARN != nil { in, out := &in.StreamARN, &out.StreamARN *out = new(string) @@ -7411,6 +8228,36 @@ func (in *NullValueField) DeepCopy() *NullValueField { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Option) DeepCopyInto(out *Option) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Option. +func (in *Option) DeepCopy() *Option { + if in == nil { + return nil + } + out := new(Option) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *OracleSQLCatalogSource) DeepCopyInto(out *OracleSQLCatalogSource) { *out = *in @@ -7887,6 +8734,67 @@ func (in *PropertyPredicate) DeepCopy() *PropertyPredicate { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Recipe) DeepCopyInto(out *Recipe) { + *out = *in + if in.Inputs != nil { + in, out := &in.Inputs, &out.Inputs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RecipeReference != nil { + in, out := &in.RecipeReference, &out.RecipeReference + *out = new(RecipeReference) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Recipe. +func (in *Recipe) DeepCopy() *Recipe { + if in == nil { + return nil + } + out := new(Recipe) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecipeReference) DeepCopyInto(out *RecipeReference) { + *out = *in + if in.RecipeARN != nil { + in, out := &in.RecipeARN, &out.RecipeARN + *out = new(string) + **out = **in + } + if in.RecipeVersion != nil { + in, out := &in.RecipeVersion, &out.RecipeVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecipeReference. +func (in *RecipeReference) DeepCopy() *RecipeReference { + if in == nil { + return nil + } + out := new(RecipeReference) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RecrawlPolicy) DeepCopyInto(out *RecrawlPolicy) { *out = *in @@ -8127,11 +9035,123 @@ func (in *ResourceURI) DeepCopy() *ResourceURI { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *S3CatalogSource) DeepCopyInto(out *S3CatalogSource) { +func (in *S3CatalogDeltaSource) DeepCopyInto(out *S3CatalogDeltaSource) { *out = *in - if in.AdditionalOptions != nil { - in, out := &in.AdditionalOptions, &out.AdditionalOptions - *out = new(S3SourceAdditionalOptions) + if in.AdditionalDeltaOptions != nil { + in, out := &in.AdditionalDeltaOptions, &out.AdditionalDeltaOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OutputSchemas != nil { + in, out := &in.OutputSchemas, &out.OutputSchemas + *out = make([]*GlueSchema, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(GlueSchema) + (*in).DeepCopyInto(*out) + } + } + } + if in.Table != nil { + in, out := &in.Table, &out.Table + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3CatalogDeltaSource. 
+func (in *S3CatalogDeltaSource) DeepCopy() *S3CatalogDeltaSource { + if in == nil { + return nil + } + out := new(S3CatalogDeltaSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3CatalogHudiSource) DeepCopyInto(out *S3CatalogHudiSource) { + *out = *in + if in.AdditionalHudiOptions != nil { + in, out := &in.AdditionalHudiOptions, &out.AdditionalHudiOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OutputSchemas != nil { + in, out := &in.OutputSchemas, &out.OutputSchemas + *out = make([]*GlueSchema, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(GlueSchema) + (*in).DeepCopyInto(*out) + } + } + } + if in.Table != nil { + in, out := &in.Table, &out.Table + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3CatalogHudiSource. +func (in *S3CatalogHudiSource) DeepCopy() *S3CatalogHudiSource { + if in == nil { + return nil + } + out := new(S3CatalogHudiSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3CatalogSource) DeepCopyInto(out *S3CatalogSource) { + *out = *in + if in.AdditionalOptions != nil { + in, out := &in.AdditionalOptions, &out.AdditionalOptions + *out = new(S3SourceAdditionalOptions) (*in).DeepCopyInto(*out) } if in.Database != nil { @@ -8357,6 +9377,229 @@ func (in *S3CsvSource) DeepCopy() *S3CsvSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3DeltaCatalogTarget) DeepCopyInto(out *S3DeltaCatalogTarget) { + *out = *in + if in.AdditionalOptions != nil { + in, out := &in.AdditionalOptions, &out.AdditionalOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.Inputs != nil { + in, out := &in.Inputs, &out.Inputs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PartitionKeys != nil { + in, out := &in.PartitionKeys, &out.PartitionKeys + *out = make([][]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + } + } + if in.SchemaChangePolicy != nil { + in, out := &in.SchemaChangePolicy, &out.SchemaChangePolicy + *out = new(CatalogSchemaChangePolicy) + (*in).DeepCopyInto(*out) + } + if in.Table != nil { + in, out := &in.Table, &out.Table + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3DeltaCatalogTarget. +func (in *S3DeltaCatalogTarget) DeepCopy() *S3DeltaCatalogTarget { + if in == nil { + return nil + } + out := new(S3DeltaCatalogTarget) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3DeltaDirectTarget) DeepCopyInto(out *S3DeltaDirectTarget) { + *out = *in + if in.AdditionalOptions != nil { + in, out := &in.AdditionalOptions, &out.AdditionalOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Compression != nil { + in, out := &in.Compression, &out.Compression + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Inputs != nil { + in, out := &in.Inputs, &out.Inputs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PartitionKeys != nil { + in, out := &in.PartitionKeys, &out.PartitionKeys + *out = make([][]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + } + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.SchemaChangePolicy != nil { + in, out := &in.SchemaChangePolicy, &out.SchemaChangePolicy + *out = new(DirectSchemaChangePolicy) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3DeltaDirectTarget. +func (in *S3DeltaDirectTarget) DeepCopy() *S3DeltaDirectTarget { + if in == nil { + return nil + } + out := new(S3DeltaDirectTarget) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3DeltaSource) DeepCopyInto(out *S3DeltaSource) { + *out = *in + if in.AdditionalDeltaOptions != nil { + in, out := &in.AdditionalDeltaOptions, &out.AdditionalDeltaOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AdditionalOptions != nil { + in, out := &in.AdditionalOptions, &out.AdditionalOptions + *out = new(S3DirectSourceAdditionalOptions) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OutputSchemas != nil { + in, out := &in.OutputSchemas, &out.OutputSchemas + *out = make([]*GlueSchema, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(GlueSchema) + (*in).DeepCopyInto(*out) + } + } + } + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3DeltaSource. +func (in *S3DeltaSource) DeepCopy() *S3DeltaSource { + if in == nil { + return nil + } + out := new(S3DeltaSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. func (in *S3DirectSourceAdditionalOptions) DeepCopyInto(out *S3DirectSourceAdditionalOptions) { *out = *in @@ -8480,19 +9723,180 @@ func (in *S3Encryption) DeepCopy() *S3Encryption { if in == nil { return nil } - out := new(S3Encryption) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *S3GlueParquetTarget) DeepCopyInto(out *S3GlueParquetTarget) { - *out = *in + out := new(S3Encryption) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3GlueParquetTarget) DeepCopyInto(out *S3GlueParquetTarget) { + *out = *in + if in.Compression != nil { + in, out := &in.Compression, &out.Compression + *out = new(string) + **out = **in + } + if in.Inputs != nil { + in, out := &in.Inputs, &out.Inputs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PartitionKeys != nil { + in, out := &in.PartitionKeys, &out.PartitionKeys + *out = make([][]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + } + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.SchemaChangePolicy != nil { + in, out := &in.SchemaChangePolicy, &out.SchemaChangePolicy + *out = new(DirectSchemaChangePolicy) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3GlueParquetTarget. +func (in *S3GlueParquetTarget) DeepCopy() *S3GlueParquetTarget { + if in == nil { + return nil + } + out := new(S3GlueParquetTarget) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3HudiCatalogTarget) DeepCopyInto(out *S3HudiCatalogTarget) { + *out = *in + if in.AdditionalOptions != nil { + in, out := &in.AdditionalOptions, &out.AdditionalOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.Inputs != nil { + in, out := &in.Inputs, &out.Inputs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PartitionKeys != nil { + in, out := &in.PartitionKeys, &out.PartitionKeys + *out = make([][]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + } + } + if in.SchemaChangePolicy != nil { + in, out := &in.SchemaChangePolicy, &out.SchemaChangePolicy + *out = new(CatalogSchemaChangePolicy) + (*in).DeepCopyInto(*out) + } + if in.Table != nil { + in, out := &in.Table, &out.Table + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3HudiCatalogTarget. +func (in *S3HudiCatalogTarget) DeepCopy() *S3HudiCatalogTarget { + if in == nil { + return nil + } + out := new(S3HudiCatalogTarget) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3HudiDirectTarget) DeepCopyInto(out *S3HudiDirectTarget) { + *out = *in + if in.AdditionalOptions != nil { + in, out := &in.AdditionalOptions, &out.AdditionalOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } if in.Compression != nil { in, out := &in.Compression, &out.Compression *out = new(string) **out = **in } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } if in.Inputs != nil { in, out := &in.Inputs, &out.Inputs *out = make([]*string, len(*in)) @@ -8538,12 +9942,74 @@ func (in *S3GlueParquetTarget) DeepCopyInto(out *S3GlueParquetTarget) { } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3GlueParquetTarget. -func (in *S3GlueParquetTarget) DeepCopy() *S3GlueParquetTarget { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3HudiDirectTarget. +func (in *S3HudiDirectTarget) DeepCopy() *S3HudiDirectTarget { if in == nil { return nil } - out := new(S3GlueParquetTarget) + out := new(S3HudiDirectTarget) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3HudiSource) DeepCopyInto(out *S3HudiSource) { + *out = *in + if in.AdditionalHudiOptions != nil { + in, out := &in.AdditionalHudiOptions, &out.AdditionalHudiOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AdditionalOptions != nil { + in, out := &in.AdditionalOptions, &out.AdditionalOptions + *out = new(S3DirectSourceAdditionalOptions) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OutputSchemas != nil { + in, out := &in.OutputSchemas, &out.OutputSchemas + *out = make([]*GlueSchema, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(GlueSchema) + (*in).DeepCopyInto(*out) + } + } + } + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3HudiSource. +func (in *S3HudiSource) DeepCopy() *S3HudiSource { + if in == nil { + return nil + } + out := new(S3HudiSource) in.DeepCopyInto(out) return out } @@ -9214,6 +10680,10 @@ func (in *SerDeInfo) DeepCopy() *SerDeInfo { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Session) DeepCopyInto(out *Session) { *out = *in + if in.CompletedOn != nil { + in, out := &in.CompletedOn, &out.CompletedOn + *out = (*in).DeepCopy() + } if in.Connections != nil { in, out := &in.Connections, &out.Connections *out = new(ConnectionsList) @@ -9223,6 +10693,11 @@ func (in *Session) DeepCopyInto(out *Session) { in, out := &in.CreatedOn, &out.CreatedOn *out = (*in).DeepCopy() } + if in.DPUSeconds != nil { + in, out := &in.DPUSeconds, &out.DPUSeconds + *out = new(float64) + **out = **in + } if in.Description != nil { in, out := &in.Description, &out.Description *out = new(string) @@ -9233,6 +10708,11 @@ func (in *Session) DeepCopyInto(out *Session) { *out = new(string) **out = **in } + if in.ExecutionTime != nil { + in, out := &in.ExecutionTime, &out.ExecutionTime + *out = new(float64) + **out = **in + } if in.GlueVersion != nil { in, out := &in.GlueVersion, &out.GlueVersion *out = new(string) @@ -9248,11 +10728,21 @@ func (in *Session) DeepCopyInto(out *Session) { *out = new(float64) **out = **in } + if in.NumberOfWorkers != nil { + in, out := &in.NumberOfWorkers, &out.NumberOfWorkers + *out = new(int64) + **out = **in + } if in.SecurityConfiguration != nil { in, out := &in.SecurityConfiguration, &out.SecurityConfiguration *out = new(string) **out = **in } + if in.WorkerType != nil { + in, out := &in.WorkerType, &out.WorkerType + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Session. @@ -9290,6 +10780,220 @@ func (in *SessionCommand) DeepCopy() *SessionCommand { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnowflakeNodeData) DeepCopyInto(out *SnowflakeNodeData) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.AdditionalOptions != nil { + in, out := &in.AdditionalOptions, &out.AdditionalOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AutoPushdown != nil { + in, out := &in.AutoPushdown, &out.AutoPushdown + *out = new(bool) + **out = **in + } + if in.Connection != nil { + in, out := &in.Connection, &out.Connection + *out = new(Option) + (*in).DeepCopyInto(*out) + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.IAMRole != nil { + in, out := &in.IAMRole, &out.IAMRole + *out = new(Option) + (*in).DeepCopyInto(*out) + } + if in.MergeAction != nil { + in, out := &in.MergeAction, &out.MergeAction + *out = new(string) + **out = **in + } + if in.MergeClause != nil { + in, out := &in.MergeClause, &out.MergeClause + *out = new(string) + **out = **in + } + if in.MergeWhenMatched != nil { + in, out := &in.MergeWhenMatched, &out.MergeWhenMatched + *out = new(string) + **out = **in + } + if in.MergeWhenNotMatched != nil { + in, out := &in.MergeWhenNotMatched, &out.MergeWhenNotMatched + *out = new(string) + **out = **in + } + if in.PostAction != nil { + in, out := &in.PostAction, &out.PostAction + *out = new(string) + **out = **in + } + if in.PreAction != nil { + in, out := &in.PreAction, &out.PreAction + *out = new(string) + **out = **in + } + if in.SampleQuery != nil { + in, out := &in.SampleQuery, &out.SampleQuery + *out = new(string) + **out = **in + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(string) + **out = **in + } + if in.SelectedColumns != nil { + in, out := &in.SelectedColumns, &out.SelectedColumns + *out = make([]*Option, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Option) + (*in).DeepCopyInto(*out) + } + } + } + if in.SourceType != nil { + in, out := &in.SourceType, &out.SourceType + *out = new(string) + **out = **in + } + if in.StagingTable != nil { + in, out := &in.StagingTable, &out.StagingTable + *out = new(string) + **out = **in + } + if in.Table != nil { + in, out := &in.Table, &out.Table + *out = new(string) + **out = **in + } + if in.TableSchema != nil { + in, out := &in.TableSchema, &out.TableSchema + *out = make([]*Option, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Option) + (*in).DeepCopyInto(*out) + } + } + } + if in.TempDir != nil { + in, out := &in.TempDir, &out.TempDir + *out = new(string) + **out = **in + } + if in.Upsert != nil { + in, out := &in.Upsert, &out.Upsert + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeNodeData. +func (in *SnowflakeNodeData) DeepCopy() *SnowflakeNodeData { + if in == nil { + return nil + } + out := new(SnowflakeNodeData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnowflakeSource) DeepCopyInto(out *SnowflakeSource) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(SnowflakeNodeData) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OutputSchemas != nil { + in, out := &in.OutputSchemas, &out.OutputSchemas + *out = make([]*GlueSchema, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(GlueSchema) + (*in).DeepCopyInto(*out) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeSource. +func (in *SnowflakeSource) DeepCopy() *SnowflakeSource { + if in == nil { + return nil + } + out := new(SnowflakeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnowflakeTarget) DeepCopyInto(out *SnowflakeTarget) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(SnowflakeNodeData) + (*in).DeepCopyInto(*out) + } + if in.Inputs != nil { + in, out := &in.Inputs, &out.Inputs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeTarget. +func (in *SnowflakeTarget) DeepCopy() *SnowflakeTarget { + if in == nil { + return nil + } + out := new(SnowflakeTarget) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SortCriterion) DeepCopyInto(out *SortCriterion) { *out = *in @@ -9980,6 +11684,11 @@ func (in *TableIdentifier) DeepCopyInto(out *TableIdentifier) { *out = new(string) **out = **in } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableIdentifier. @@ -10423,6 +12132,11 @@ func (in *UpdateCsvClassifierRequest) DeepCopyInto(out *UpdateCsvClassifierReque *out = new(string) **out = **in } + if in.Serde != nil { + in, out := &in.Serde, &out.Serde + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateCsvClassifierRequest. diff --git a/apis/glue/v1alpha1/zz_job.go b/apis/glue/v1alpha1/zz_job.go index 98b6fe6cc6..e13897523a 100644 --- a/apis/glue/v1alpha1/zz_job.go +++ b/apis/glue/v1alpha1/zz_job.go @@ -42,7 +42,8 @@ type JobParameters struct { // The JobCommand that runs this job. // +kubebuilder:validation:Required Command *JobCommand `json:"command"` - // The default arguments for this job. + // The default arguments for every run of this job, specified as name-value + // pairs. // // You can specify arguments here that your own job-execution script consumes, // as well as arguments that Glue itself consumes. @@ -55,9 +56,13 @@ type JobParameters struct { // see the Calling Glue APIs in Python (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html) // topic in the developer guide. 
// - // For information about the key-value pairs that Glue consumes to set up your - // job, see the Special Parameters Used by Glue (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html) + // For information about the arguments you can provide to this field when configuring + // Spark jobs, see the Special Parameters Used by Glue (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html) // topic in the developer guide. + // + // For information about the arguments you can provide to this field when configuring + // Ray jobs, see Using job parameters in Ray jobs (https://docs.aws.amazon.com/glue/latest/dg/author-job-ray-job-parameters.html) + // in the developer guide. DefaultArguments map[string]*string `json:"defaultArguments,omitempty"` // Description of the job being defined. Description *string `json:"description,omitempty"` @@ -75,9 +80,13 @@ type JobParameters struct { // An ExecutionProperty specifying the maximum number of concurrent runs allowed // for this job. ExecutionProperty *ExecutionProperty `json:"executionProperty,omitempty"` - // Glue version determines the versions of Apache Spark and Python that Glue - // supports. The Python version indicates the version supported for jobs of - // type Spark. + // In Spark jobs, GlueVersion determines the versions of Apache Spark and Python + // that Glue makes available in a job. The Python version indicates the version supported + // for jobs of type Spark. + // + // Ray jobs should set GlueVersion to 4.0 or greater. However, the versions + // of Ray, Python, and additional libraries available in your Ray job are determined + // by the Runtime parameter of the Job command. // // For more information about the available Glue versions and corresponding // Spark and Python versions, see Glue version (https://docs.aws.amazon.com/glue/latest/dg/add-job.html) @@ -93,25 +102,27 @@ type JobParameters struct { // 4 vCPUs of compute capacity and 16 GB of memory. For more information, see // the Glue pricing page (https://aws.amazon.com/glue/pricing/). // - // Do not set Max Capacity if using WorkerType and NumberOfWorkers. + // For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead, + // you should specify a Worker type and the Number of workers. + // + // Do not set MaxCapacity if using WorkerType and NumberOfWorkers. // // The value that can be allocated for MaxCapacity depends on whether you are - // running a Python shell job or an Apache Spark ETL job: + // running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming + // ETL job: // // * When you specify a Python shell job (JobCommand.Name="pythonshell"), // you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. // // * When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") // or Apache Spark streaming ETL job (JobCommand.Name="gluestreaming"), you - // can allocate a minimum of 2 DPUs. The default is 10 DPUs. This job type + // can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type // cannot have a fractional DPU allocation. - // - // For Glue version 2.0 jobs, you cannot instead specify a Maximum capacity. - // Instead, you should specify a Worker type and the Number of workers. MaxCapacity *float64 `json:"maxCapacity,omitempty"` // The maximum number of times to retry this job if it fails. MaxRetries *int64 `json:"maxRetries,omitempty"` - // Non-overridable arguments for this job, specified as name-value pairs. 
+ // Arguments for this job that are not overridden when providing job arguments + // in a job run, specified as name-value pairs. NonOverridableArguments map[string]*string `json:"nonOverridableArguments,omitempty"` // Specifies configuration properties of a job notification. NotificationProperty *NotificationProperty `json:"notificationProperty,omitempty"` @@ -131,23 +142,48 @@ type JobParameters struct { // is 2,880 minutes (48 hours). Timeout *int64 `json:"timeout,omitempty"` // The type of predefined worker that is allocated when a job runs. Accepts - // a value of Standard, G.1X, G.2X, or G.025X. - // - // * For the Standard worker type, each worker provides 4 vCPU, 16 GB of - // memory and a 50GB disk, and 2 executors per worker. - // - // * For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of - // memory, 64 GB disk), and provides 1 executor per worker. We recommend - // this worker type for memory-intensive jobs. - // - // * For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of - // memory, 128 GB disk), and provides 1 executor per worker. We recommend - // this worker type for memory-intensive jobs. - // - // * For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 - // GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend - // this worker type for low volume streaming jobs. This worker type is only - // available for Glue version 3.0 streaming jobs. + // a value of G.1X, G.2X, G.4X, G.8X, or G.025X for Spark jobs. Accepts the value + // Z.2X for Ray jobs. + // + // * For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB + // of memory) with 84GB disk (approximately 34GB free), and provides 1 executor + // per worker. We recommend this worker type for workloads such as data transforms, + // joins, and queries, to offer a scalable and cost-effective way to run + // most jobs. + // + // * For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB + // of memory) with 128GB disk (approximately 77GB free), and provides 1 executor + // per worker. We recommend this worker type for workloads such as data transforms, + // joins, and queries, to offer a scalable and cost-effective way to run + // most jobs. + // + // * For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB + // of memory) with 256GB disk (approximately 235GB free), and provides 1 + // executor per worker. We recommend this worker type for jobs whose workloads + // contain your most demanding transforms, aggregations, joins, and queries. + // This worker type is available only for Glue version 3.0 or later Spark + // ETL jobs in the following Amazon Web Services Regions: US East (Ohio), + // US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia + // Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), + // Europe (Ireland), and Europe (Stockholm). + // + // * For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB + // of memory) with 512GB disk (approximately 487GB free), and provides 1 + // executor per worker. We recommend this worker type for jobs whose workloads + // contain your most demanding transforms, aggregations, joins, and queries. + // This worker type is available only for Glue version 3.0 or later Spark + // ETL jobs, in the same Amazon Web Services Regions as supported for the + // G.4X worker type. 
+ // + // * For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 + // GB of memory) with 84GB disk (approximately 34GB free), and provides 1 + // executor per worker. We recommend this worker type for low volume streaming + // jobs. This worker type is only available for Glue version 3.0 streaming + // jobs. + // + // * For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB + // of memory) with 128 GB disk (approximately 120GB free), and provides up + // to 8 Ray workers based on the autoscaler. WorkerType *string `json:"workerType,omitempty"` CustomJobParameters `json:",inline"` } diff --git a/apis/glue/v1alpha1/zz_types.go b/apis/glue/v1alpha1/zz_types.go index 95e8c34d9a..7c659fc045 100644 --- a/apis/glue/v1alpha1/zz_types.go +++ b/apis/glue/v1alpha1/zz_types.go @@ -60,6 +60,86 @@ type AggregateOperation struct { Column []*string `json:"column,omitempty"` } +// +kubebuilder:skipversion +type AmazonRedshiftAdvancedOption struct { + Key *string `json:"key,omitempty"` + + Value *string `json:"value,omitempty"` +} + +// +kubebuilder:skipversion +type AmazonRedshiftNodeData struct { + AccessType *string `json:"accessType,omitempty"` + + Action *string `json:"action,omitempty"` + + AdvancedOptions []*AmazonRedshiftAdvancedOption `json:"advancedOptions,omitempty"` + // Specifies an option value. + CatalogDatabase *Option `json:"catalogDatabase,omitempty"` + + CatalogRedshiftSchema *string `json:"catalogRedshiftSchema,omitempty"` + + CatalogRedshiftTable *string `json:"catalogRedshiftTable,omitempty"` + // Specifies an option value. + CatalogTable *Option `json:"catalogTable,omitempty"` + // Specifies an option value. + Connection *Option `json:"connection,omitempty"` + + CrawlerConnection *string `json:"crawlerConnection,omitempty"` + // Specifies an option value. + IAMRole *Option `json:"iamRole,omitempty"` + + MergeAction *string `json:"mergeAction,omitempty"` + + MergeClause *string `json:"mergeClause,omitempty"` + + MergeWhenMatched *string `json:"mergeWhenMatched,omitempty"` + + MergeWhenNotMatched *string `json:"mergeWhenNotMatched,omitempty"` + + PostAction *string `json:"postAction,omitempty"` + + PreAction *string `json:"preAction,omitempty"` + + SampleQuery *string `json:"sampleQuery,omitempty"` + // Specifies an option value. + Schema *Option `json:"schema,omitempty"` + + SelectedColumns []*Option `json:"selectedColumns,omitempty"` + + SourceType *string `json:"sourceType,omitempty"` + + StagingTable *string `json:"stagingTable,omitempty"` + // Specifies an option value. + Table *Option `json:"table,omitempty"` + + TablePrefix *string `json:"tablePrefix,omitempty"` + + TableSchema []*Option `json:"tableSchema,omitempty"` + + TempDir *string `json:"tempDir,omitempty"` + + Upsert *bool `json:"upsert,omitempty"` +} + +// +kubebuilder:skipversion +type AmazonRedshiftSource struct { + // Specifies an Amazon Redshift node. + Data *AmazonRedshiftNodeData `json:"data,omitempty"` + + Name *string `json:"name,omitempty"` +} + +// +kubebuilder:skipversion +type AmazonRedshiftTarget struct { + // Specifies an Amazon Redshift node. 
+ Data *AmazonRedshiftNodeData `json:"data,omitempty"` + + Inputs []*string `json:"inputs,omitempty"` + + Name *string `json:"name,omitempty"` +} + // +kubebuilder:skipversion type ApplyMapping struct { Inputs []*string `json:"inputs,omitempty"` @@ -134,6 +214,19 @@ type BlueprintRun struct { WorkflowName *string `json:"workflowName,omitempty"` } +// +kubebuilder:skipversion +type CatalogDeltaSource struct { + AdditionalDeltaOptions map[string]*string `json:"additionalDeltaOptions,omitempty"` + + Database *string `json:"database,omitempty"` + + Name *string `json:"name,omitempty"` + + OutputSchemas []*GlueSchema `json:"outputSchemas,omitempty"` + + Table *string `json:"table,omitempty"` +} + // +kubebuilder:skipversion type CatalogEntry struct { DatabaseName *string `json:"databaseName,omitempty"` @@ -141,6 +234,19 @@ type CatalogEntry struct { TableName *string `json:"tableName,omitempty"` } +// +kubebuilder:skipversion +type CatalogHudiSource struct { + AdditionalHudiOptions map[string]*string `json:"additionalHudiOptions,omitempty"` + + Database *string `json:"database,omitempty"` + + Name *string `json:"name,omitempty"` + + OutputSchemas []*GlueSchema `json:"outputSchemas,omitempty"` + + Table *string `json:"table,omitempty"` +} + // +kubebuilder:skipversion type CatalogImportStatus struct { ImportCompleted *bool `json:"importCompleted,omitempty"` @@ -239,12 +345,20 @@ type CodeGenConfigurationNode struct { // Specifies a transform that groups rows by chosen fields and computes the // aggregated value by specified function. Aggregate *Aggregate `json:"aggregate,omitempty"` + // Specifies an Amazon Redshift source. + AmazonRedshiftSource *AmazonRedshiftSource `json:"amazonRedshiftSource,omitempty"` + // Specifies an Amazon Redshift target. + AmazonRedshiftTarget *AmazonRedshiftTarget `json:"amazonRedshiftTarget,omitempty"` // Specifies a transform that maps data property keys in the data source to // data property keys in the data target. You can rename keys, modify the data // types for keys, and choose which keys to drop from the dataset. ApplyMapping *ApplyMapping `json:"applyMapping,omitempty"` // Specifies a connector to an Amazon Athena data source. AthenaConnectorSource *AthenaConnectorSource `json:"athenaConnectorSource,omitempty"` + // Specifies a Delta Lake data source that is registered in the Glue Data Catalog. + CatalogDeltaSource *CatalogDeltaSource `json:"catalogDeltaSource,omitempty"` + // Specifies a Hudi data source that is registered in the Glue Data Catalog. + CatalogHudiSource *CatalogHudiSource `json:"catalogHudiSource,omitempty"` // Specifies an Apache Kafka data store in the Data Catalog. CatalogKafkaSource *CatalogKafkaSource `json:"catalogKafkaSource,omitempty"` // Specifies a Kinesis data source in the Glue Data Catalog. @@ -256,6 +370,8 @@ type CodeGenConfigurationNode struct { // Specifies a transform that uses custom code you provide to perform the data // transformation. The output is a collection of DynamicFrames. CustomCode *CustomCode `json:"customCode,omitempty"` + // Specifies the direct JDBC source connection. + DirectJDBCSource *DirectJDBCSource `json:"directJDBCSource,omitempty"` // Specifies an Apache Kafka data store. DirectKafkaSource *DirectKafkaSource `json:"directKafkaSource,omitempty"` // Specifies a direct Amazon Kinesis data source. @@ -277,6 +393,8 @@ type CodeGenConfigurationNode struct { DynamoDBCatalogSource *DynamoDBCatalogSource `json:"dynamoDBCatalogSource,omitempty"` // Specifies your data quality evaluation criteria. 
EvaluateDataQuality *EvaluateDataQuality `json:"evaluateDataQuality,omitempty"` + // Specifies your data quality evaluation criteria. + EvaluateDataQualityMultiFrame *EvaluateDataQualityMultiFrame `json:"evaluateDataQualityMultiFrame,omitempty"` // Specifies a transform that locates records in the dataset that have missing // values and adds a new field with a value determined by imputation. The input // data set is used to train the machine learning model that determines what @@ -319,6 +437,8 @@ type CodeGenConfigurationNode struct { PostgreSQLCatalogSource *PostgreSQLCatalogSource `json:"postgreSQLCatalogSource,omitempty"` // Specifies a target that uses Postgres SQL. PostgreSQLCatalogTarget *PostgreSQLCatalogTarget `json:"postgreSQLCatalogTarget,omitempty"` + // A Glue Studio node that uses a Glue DataBrew recipe in Glue jobs. + Recipe *Recipe `json:"recipe,omitempty"` // Specifies an Amazon Redshift data store. RedshiftSource *RedshiftSource `json:"redshiftSource,omitempty"` // Specifies a target that uses Amazon Redshift. @@ -327,17 +447,36 @@ type CodeGenConfigurationNode struct { RelationalCatalogSource *RelationalCatalogSource `json:"relationalCatalogSource,omitempty"` // Specifies a transform that renames a single data property key. RenameField *RenameField `json:"renameField,omitempty"` + // Specifies a Delta Lake data source that is registered in the Glue Data Catalog. + // The data source must be stored in Amazon S3. + S3CatalogDeltaSource *S3CatalogDeltaSource `json:"s3CatalogDeltaSource,omitempty"` + // Specifies a Hudi data source that is registered in the Glue Data Catalog. + // The Hudi data source must be stored in Amazon S3. + S3CatalogHudiSource *S3CatalogHudiSource `json:"s3CatalogHudiSource,omitempty"` // Specifies an Amazon S3 data store in the Glue Data Catalog. S3CatalogSource *S3CatalogSource `json:"s3CatalogSource,omitempty"` // Specifies a data target that writes to Amazon S3 using the Glue Data Catalog. S3CatalogTarget *S3CatalogTarget `json:"s3CatalogTarget,omitempty"` // Specifies a command-separated value (CSV) data store stored in Amazon S3. S3CsvSource *S3CsvSource `json:"s3CsvSource,omitempty"` + // Specifies a target that writes to a Delta Lake data source in the Glue Data + // Catalog. + S3DeltaCatalogTarget *S3DeltaCatalogTarget `json:"s3DeltaCatalogTarget,omitempty"` + // Specifies a target that writes to a Delta Lake data source in Amazon S3. + S3DeltaDirectTarget *S3DeltaDirectTarget `json:"s3DeltaDirectTarget,omitempty"` + // Specifies a Delta Lake data source stored in Amazon S3. + S3DeltaSource *S3DeltaSource `json:"s3DeltaSource,omitempty"` // Specifies a data target that writes to Amazon S3. S3DirectTarget *S3DirectTarget `json:"s3DirectTarget,omitempty"` // Specifies a data target that writes to Amazon S3 in Apache Parquet columnar // storage. S3GlueParquetTarget *S3GlueParquetTarget `json:"s3GlueParquetTarget,omitempty"` + // Specifies a target that writes to a Hudi data source in the Glue Data Catalog. + S3HudiCatalogTarget *S3HudiCatalogTarget `json:"s3HudiCatalogTarget,omitempty"` + // Specifies a target that writes to a Hudi data source in Amazon S3. + S3HudiDirectTarget *S3HudiDirectTarget `json:"s3HudiDirectTarget,omitempty"` + // Specifies a Hudi data source stored in Amazon S3. + S3HudiSource *S3HudiSource `json:"s3HudiSource,omitempty"` // Specifies a JSON data store stored in Amazon S3. S3JSONSource *S3JSONSource `json:"s3JSONSource,omitempty"` // Specifies an Apache Parquet data store stored in Amazon S3. 
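(Illustrative aside, not part of the diff.) CodeGenConfigurationNode is effectively a tagged union: every member is an optional pointer, and each node in a job's DAG sets exactly one of them. A sketch using the new S3DeltaSource member, with a hypothetical bucket and an assumed Delta option key, under the same assumed import path as above:

```go
package main

import glue "github.com/crossplane-contrib/provider-aws/apis/glue/v1alpha1" // assumed module path

func strPtr(s string) *string { return &s }

func main() {
	// One node of a Glue job graph: read a Delta Lake table directly from S3.
	// Exactly one CodeGenConfigurationNode member is populated per node.
	node := glue.CodeGenConfigurationNode{
		S3DeltaSource: &glue.S3DeltaSource{
			Name:  strPtr("read-delta-events"),
			Paths: []*string{strPtr("s3://example-bucket/delta/events/")}, // hypothetical location
			AdditionalDeltaOptions: map[string]*string{
				// Key is an assumption: Delta Lake's time-travel option.
				"versionAsOf": strPtr("12"),
			},
		},
	}
	_ = node // would be wired into the job's codegen configuration
}
```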
@@ -348,6 +487,10 @@ type CodeGenConfigurationNode struct { // Specifies a transform that chooses one DynamicFrame from a collection of // DynamicFrames. The output is the selected DynamicFrame SelectFromCollection *SelectFromCollection `json:"selectFromCollection,omitempty"` + // Specifies a Snowflake data source. + SnowflakeSource *SnowflakeSource `json:"snowflakeSource,omitempty"` + // Specifies a Snowflake target. + SnowflakeTarget *SnowflakeTarget `json:"snowflakeTarget,omitempty"` // Specifies a connector to an Apache Spark data source. SparkConnectorSource *SparkConnectorSource `json:"sparkConnectorSource,omitempty"` // Specifies a target that uses an Apache Spark connector. @@ -502,6 +645,10 @@ type CrawlerTargets struct { DynamoDBTargets []*DynamoDBTarget `json:"dynamoDBTargets,omitempty"` + HudiTargets []*HudiTarget `json:"hudiTargets,omitempty"` + + IcebergTargets []*IcebergTarget `json:"icebergTargets,omitempty"` + JdbcTargets []*JdbcTarget `json:"jdbcTargets,omitempty"` MongoDBTargets []*MongoDBTarget `json:"mongoDBTargets,omitempty"` @@ -580,6 +727,8 @@ type CreateCsvClassifierRequest struct { Name *string `json:"name,omitempty"` QuoteSymbol *string `json:"quoteSymbol,omitempty"` + + Serde *string `json:"serde,omitempty"` } // +kubebuilder:skipversion @@ -633,6 +782,8 @@ type CsvClassifier struct { QuoteSymbol *string `json:"quoteSymbol,omitempty"` + Serde *string `json:"serde,omitempty"` + Version *int64 `json:"version,omitempty"` } @@ -776,6 +927,8 @@ type DataQualityRulesetListDetails struct { // +kubebuilder:skipversion type DataQualityTargetTable struct { + CatalogID *string `json:"catalogID,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` TableName *string `json:"tableName,omitempty"` @@ -786,6 +939,8 @@ type DatabaseIdentifier struct { CatalogID *string `json:"catalogID,omitempty"` DatabaseName *string `json:"databaseName,omitempty"` + + Region *string `json:"region,omitempty"` } // +kubebuilder:skipversion @@ -793,6 +948,8 @@ type DatabaseInput struct { CreateTableDefaultPermissions []*PrincipalPermissions `json:"createTableDefaultPermissions,omitempty"` Description *string `json:"description,omitempty"` + // A database that points to an entity outside the Glue Data Catalog. + FederatedDatabase *FederatedDatabase `json:"federatedDatabase,omitempty"` LocationURI *string `json:"locationURI,omitempty"` @@ -812,6 +969,8 @@ type Database_SDK struct { CreateTime *metav1.Time `json:"createTime,omitempty"` Description *string `json:"description,omitempty"` + // A database that points to an entity outside the Glue Data Catalog. + FederatedDatabase *FederatedDatabase `json:"federatedDatabase,omitempty"` LocationURI *string `json:"locationURI,omitempty"` @@ -899,6 +1058,21 @@ type DevEndpointCustomLibraries struct { ExtraPythonLibsS3Path *string `json:"extraPythonLibsS3Path,omitempty"` } +// +kubebuilder:skipversion +type DirectJDBCSource struct { + ConnectionName *string `json:"connectionName,omitempty"` + + ConnectionType *string `json:"connectionType,omitempty"` + + Database *string `json:"database,omitempty"` + + Name *string `json:"name,omitempty"` + + RedshiftTmpDir *string `json:"redshiftTmpDir,omitempty"` + + Table *string `json:"table,omitempty"` +} + // +kubebuilder:skipversion type DirectKafkaSource struct { // Specifies options related to data preview for viewing a sample of your data. 
@@ -975,6 +1149,8 @@ type DynamicTransform struct { Name *string `json:"name,omitempty"` + OutputSchemas []*GlueSchema `json:"outputSchemas,omitempty"` + Parameters []*TransformConfigParameter `json:"parameters,omitempty"` Path *string `json:"path,omitempty"` @@ -1047,6 +1223,24 @@ type EvaluateDataQuality struct { StopJobOnFailureOptions *DQStopJobOnFailureOptions `json:"stopJobOnFailureOptions,omitempty"` } +// +kubebuilder:skipversion +type EvaluateDataQualityMultiFrame struct { + AdditionalDataSources map[string]*string `json:"additionalDataSources,omitempty"` + + AdditionalOptions map[string]*string `json:"additionalOptions,omitempty"` + + Inputs []*string `json:"inputs,omitempty"` + + Name *string `json:"name,omitempty"` + // Options to configure how your data quality evaluation results are published. + PublishingOptions *DQResultsPublishingOptions `json:"publishingOptions,omitempty"` + + Ruleset *string `json:"ruleset,omitempty"` + // Options to configure how your job will stop if your data quality evaluation + // fails. + StopJobOnFailureOptions *DQStopJobOnFailureOptions `json:"stopJobOnFailureOptions,omitempty"` +} + // +kubebuilder:skipversion type ExecutionProperty struct { MaxConcurrentRuns *int64 `json:"maxConcurrentRuns,omitempty"` @@ -1057,6 +1251,22 @@ type ExportLabelsTaskRunProperties struct { OutputS3Path *string `json:"outputS3Path,omitempty"` } +// +kubebuilder:skipversion +type FederatedDatabase struct { + ConnectionName *string `json:"connectionName,omitempty"` + + Identifier *string `json:"identifier,omitempty"` +} + +// +kubebuilder:skipversion +type FederatedTable struct { + ConnectionName *string `json:"connectionName,omitempty"` + + DatabaseIdentifier *string `json:"databaseIdentifier,omitempty"` + + Identifier *string `json:"identifier,omitempty"` +} + // +kubebuilder:skipversion type FillMissingValues struct { FilledPath *string `json:"filledPath,omitempty"` @@ -1177,6 +1387,28 @@ type GrokClassifier struct { Version *int64 `json:"version,omitempty"` } +// +kubebuilder:skipversion +type HudiTarget struct { + ConnectionName *string `json:"connectionName,omitempty"` + + Exclusions []*string `json:"exclusions,omitempty"` + + MaximumTraversalDepth *int64 `json:"maximumTraversalDepth,omitempty"` + + Paths []*string `json:"paths,omitempty"` +} + +// +kubebuilder:skipversion +type IcebergTarget struct { + ConnectionName *string `json:"connectionName,omitempty"` + + Exclusions []*string `json:"exclusions,omitempty"` + + MaximumTraversalDepth *int64 `json:"maximumTraversalDepth,omitempty"` + + Paths []*string `json:"paths,omitempty"` +} + // +kubebuilder:skipversion type ImportLabelsTaskRunProperties struct { InputS3Path *string `json:"inputS3Path,omitempty"` @@ -1286,6 +1518,8 @@ type JobCommand struct { PythonVersion *string `json:"pythonVersion,omitempty"` + Runtime *string `json:"runtime,omitempty"` + ScriptLocation *string `json:"scriptLocation,omitempty"` } @@ -1440,6 +1674,8 @@ type JoinColumn struct { // +kubebuilder:skipversion type KafkaStreamingSourceOptions struct { + AddRecordTimestamp *string `json:"addRecordTimestamp,omitempty"` + Assign *string `json:"assign,omitempty"` BootstrapServers *string `json:"bootstrapServers,omitempty"` @@ -1450,8 +1686,12 @@ type KafkaStreamingSourceOptions struct { Delimiter *string `json:"delimiter,omitempty"` + EmitConsumerLagMetrics *string `json:"emitConsumerLagMetrics,omitempty"` + EndingOffsets *string `json:"endingOffsets,omitempty"` + IncludeHeaders *bool `json:"includeHeaders,omitempty"` + MaxOffsetsPerTrigger 
*int64 `json:"maxOffsetsPerTrigger,omitempty"` MinPartitions *int64 `json:"minPartitions,omitempty"` @@ -1466,6 +1706,8 @@ type KafkaStreamingSourceOptions struct { StartingOffsets *string `json:"startingOffsets,omitempty"` + StartingTimestamp *metav1.Time `json:"startingTimestamp,omitempty"` + SubscribePattern *string `json:"subscribePattern,omitempty"` TopicName *string `json:"topicName,omitempty"` @@ -1482,6 +1724,8 @@ type KeySchemaElement struct { type KinesisStreamingSourceOptions struct { AddIdleTimeBetweenReads *bool `json:"addIdleTimeBetweenReads,omitempty"` + AddRecordTimestamp *string `json:"addRecordTimestamp,omitempty"` + AvoidEmptyBatches *bool `json:"avoidEmptyBatches,omitempty"` Classification *string `json:"classification,omitempty"` @@ -1490,6 +1734,8 @@ type KinesisStreamingSourceOptions struct { DescribeShardInterval *int64 `json:"describeShardInterval,omitempty"` + EmitConsumerLagMetrics *string `json:"emitConsumerLagMetrics,omitempty"` + EndpointURL *string `json:"endpointURL,omitempty"` IdleTimeBetweenReadsInMs *int64 `json:"idleTimeBetweenReadsInMs,omitempty"` @@ -1512,6 +1758,8 @@ type KinesisStreamingSourceOptions struct { StartingPosition *string `json:"startingPosition,omitempty"` + StartingTimestamp *metav1.Time `json:"startingTimestamp,omitempty"` + StreamARN *string `json:"streamARN,omitempty"` StreamName *string `json:"streamName,omitempty"` @@ -1692,6 +1940,15 @@ type NullValueField struct { Value *string `json:"value,omitempty"` } +// +kubebuilder:skipversion +type Option struct { + Description *string `json:"description,omitempty"` + + Label *string `json:"label,omitempty"` + + Value *string `json:"value,omitempty"` +} + // +kubebuilder:skipversion type OracleSQLCatalogSource struct { Database *string `json:"database,omitempty"` @@ -1820,6 +2077,22 @@ type PropertyPredicate struct { Value *string `json:"value,omitempty"` } +// +kubebuilder:skipversion +type Recipe struct { + Inputs []*string `json:"inputs,omitempty"` + + Name *string `json:"name,omitempty"` + // A reference to a Glue DataBrew recipe. + RecipeReference *RecipeReference `json:"recipeReference,omitempty"` +} + +// +kubebuilder:skipversion +type RecipeReference struct { + RecipeARN *string `json:"recipeARN,omitempty"` + + RecipeVersion *string `json:"recipeVersion,omitempty"` +} + // +kubebuilder:skipversion type RecrawlPolicy struct { RecrawlBehavior *string `json:"recrawlBehavior,omitempty"` @@ -1885,6 +2158,32 @@ type ResourceURI struct { URI *string `json:"uri,omitempty"` } +// +kubebuilder:skipversion +type S3CatalogDeltaSource struct { + AdditionalDeltaOptions map[string]*string `json:"additionalDeltaOptions,omitempty"` + + Database *string `json:"database,omitempty"` + + Name *string `json:"name,omitempty"` + + OutputSchemas []*GlueSchema `json:"outputSchemas,omitempty"` + + Table *string `json:"table,omitempty"` +} + +// +kubebuilder:skipversion +type S3CatalogHudiSource struct { + AdditionalHudiOptions map[string]*string `json:"additionalHudiOptions,omitempty"` + + Database *string `json:"database,omitempty"` + + Name *string `json:"name,omitempty"` + + OutputSchemas []*GlueSchema `json:"outputSchemas,omitempty"` + + Table *string `json:"table,omitempty"` +} + // +kubebuilder:skipversion type S3CatalogSource struct { // Specifies additional connection options for the Amazon S3 data store. 
@@ -1956,6 +2255,55 @@ type S3CsvSource struct { WriteHeader *bool `json:"writeHeader,omitempty"` } +// +kubebuilder:skipversion +type S3DeltaCatalogTarget struct { + AdditionalOptions map[string]*string `json:"additionalOptions,omitempty"` + + Database *string `json:"database,omitempty"` + + Inputs []*string `json:"inputs,omitempty"` + + Name *string `json:"name,omitempty"` + + PartitionKeys [][]*string `json:"partitionKeys,omitempty"` + // A policy that specifies update behavior for the crawler. + SchemaChangePolicy *CatalogSchemaChangePolicy `json:"schemaChangePolicy,omitempty"` + + Table *string `json:"table,omitempty"` +} + +// +kubebuilder:skipversion +type S3DeltaDirectTarget struct { + AdditionalOptions map[string]*string `json:"additionalOptions,omitempty"` + + Compression *string `json:"compression,omitempty"` + + Format *string `json:"format,omitempty"` + + Inputs []*string `json:"inputs,omitempty"` + + Name *string `json:"name,omitempty"` + + PartitionKeys [][]*string `json:"partitionKeys,omitempty"` + + Path *string `json:"path,omitempty"` + // A policy that specifies update behavior for the crawler. + SchemaChangePolicy *DirectSchemaChangePolicy `json:"schemaChangePolicy,omitempty"` +} + +// +kubebuilder:skipversion +type S3DeltaSource struct { + AdditionalDeltaOptions map[string]*string `json:"additionalDeltaOptions,omitempty"` + // Specifies additional connection options for the Amazon S3 data store. + AdditionalOptions *S3DirectSourceAdditionalOptions `json:"additionalOptions,omitempty"` + + Name *string `json:"name,omitempty"` + + OutputSchemas []*GlueSchema `json:"outputSchemas,omitempty"` + + Paths []*string `json:"paths,omitempty"` +} + // +kubebuilder:skipversion type S3DirectSourceAdditionalOptions struct { BoundedFiles *int64 `json:"boundedFiles,omitempty"` @@ -2006,6 +2354,55 @@ type S3GlueParquetTarget struct { SchemaChangePolicy *DirectSchemaChangePolicy `json:"schemaChangePolicy,omitempty"` } +// +kubebuilder:skipversion +type S3HudiCatalogTarget struct { + AdditionalOptions map[string]*string `json:"additionalOptions,omitempty"` + + Database *string `json:"database,omitempty"` + + Inputs []*string `json:"inputs,omitempty"` + + Name *string `json:"name,omitempty"` + + PartitionKeys [][]*string `json:"partitionKeys,omitempty"` + // A policy that specifies update behavior for the crawler. + SchemaChangePolicy *CatalogSchemaChangePolicy `json:"schemaChangePolicy,omitempty"` + + Table *string `json:"table,omitempty"` +} + +// +kubebuilder:skipversion +type S3HudiDirectTarget struct { + AdditionalOptions map[string]*string `json:"additionalOptions,omitempty"` + + Compression *string `json:"compression,omitempty"` + + Format *string `json:"format,omitempty"` + + Inputs []*string `json:"inputs,omitempty"` + + Name *string `json:"name,omitempty"` + + PartitionKeys [][]*string `json:"partitionKeys,omitempty"` + + Path *string `json:"path,omitempty"` + // A policy that specifies update behavior for the crawler. + SchemaChangePolicy *DirectSchemaChangePolicy `json:"schemaChangePolicy,omitempty"` +} + +// +kubebuilder:skipversion +type S3HudiSource struct { + AdditionalHudiOptions map[string]*string `json:"additionalHudiOptions,omitempty"` + // Specifies additional connection options for the Amazon S3 data store. 
+ AdditionalOptions *S3DirectSourceAdditionalOptions `json:"additionalOptions,omitempty"` + + Name *string `json:"name,omitempty"` + + OutputSchemas []*GlueSchema `json:"outputSchemas,omitempty"` + + Paths []*string `json:"paths,omitempty"` +} + // +kubebuilder:skipversion type S3JSONSource struct { // Specifies additional connection options for the Amazon S3 data store. @@ -2153,22 +2550,31 @@ type SerDeInfo struct { // +kubebuilder:skipversion type Session struct { + CompletedOn *metav1.Time `json:"completedOn,omitempty"` // Specifies the connections used by a job. Connections *ConnectionsList `json:"connections,omitempty"` CreatedOn *metav1.Time `json:"createdOn,omitempty"` + DPUSeconds *float64 `json:"dPUSeconds,omitempty"` + Description *string `json:"description,omitempty"` ErrorMessage *string `json:"errorMessage,omitempty"` + ExecutionTime *float64 `json:"executionTime,omitempty"` + GlueVersion *string `json:"glueVersion,omitempty"` ID *string `json:"id,omitempty"` MaxCapacity *float64 `json:"maxCapacity,omitempty"` + NumberOfWorkers *int64 `json:"numberOfWorkers,omitempty"` + SecurityConfiguration *string `json:"securityConfiguration,omitempty"` + + WorkerType *string `json:"workerType,omitempty"` } // +kubebuilder:skipversion @@ -2178,6 +2584,71 @@ type SessionCommand struct { PythonVersion *string `json:"pythonVersion,omitempty"` } +// +kubebuilder:skipversion +type SnowflakeNodeData struct { + Action *string `json:"action,omitempty"` + + AdditionalOptions map[string]*string `json:"additionalOptions,omitempty"` + + AutoPushdown *bool `json:"autoPushdown,omitempty"` + // Specifies an option value. + Connection *Option `json:"connection,omitempty"` + + Database *string `json:"database,omitempty"` + // Specifies an option value. + IAMRole *Option `json:"iamRole,omitempty"` + + MergeAction *string `json:"mergeAction,omitempty"` + + MergeClause *string `json:"mergeClause,omitempty"` + + MergeWhenMatched *string `json:"mergeWhenMatched,omitempty"` + + MergeWhenNotMatched *string `json:"mergeWhenNotMatched,omitempty"` + + PostAction *string `json:"postAction,omitempty"` + + PreAction *string `json:"preAction,omitempty"` + + SampleQuery *string `json:"sampleQuery,omitempty"` + + Schema *string `json:"schema,omitempty"` + + SelectedColumns []*Option `json:"selectedColumns,omitempty"` + + SourceType *string `json:"sourceType,omitempty"` + + StagingTable *string `json:"stagingTable,omitempty"` + + Table *string `json:"table,omitempty"` + + TableSchema []*Option `json:"tableSchema,omitempty"` + + TempDir *string `json:"tempDir,omitempty"` + + Upsert *bool `json:"upsert,omitempty"` +} + +// +kubebuilder:skipversion +type SnowflakeSource struct { + // Specifies configuration for Snowflake nodes in Glue Studio. + Data *SnowflakeNodeData `json:"data,omitempty"` + + Name *string `json:"name,omitempty"` + + OutputSchemas []*GlueSchema `json:"outputSchemas,omitempty"` +} + +// +kubebuilder:skipversion +type SnowflakeTarget struct { + // Specifies configuration for Snowflake nodes in Glue Studio. 
+ Data *SnowflakeNodeData `json:"data,omitempty"` + + Inputs []*string `json:"inputs,omitempty"` + + Name *string `json:"name,omitempty"` +} + // +kubebuilder:skipversion type SortCriterion struct { FieldName *string `json:"fieldName,omitempty"` @@ -2365,6 +2836,8 @@ type TableIdentifier struct { DatabaseName *string `json:"databaseName,omitempty"` Name *string `json:"name,omitempty"` + + Region *string `json:"region,omitempty"` } // +kubebuilder:skipversion @@ -2497,6 +2970,8 @@ type UpdateCsvClassifierRequest struct { Name *string `json:"name,omitempty"` QuoteSymbol *string `json:"quoteSymbol,omitempty"` + + Serde *string `json:"serde,omitempty"` } // +kubebuilder:skipversion diff --git a/apis/iam/v1alpha1/zz_service_linked_role.go b/apis/iam/v1alpha1/zz_service_linked_role.go index d2d347b9fb..f70711a9ab 100644 --- a/apis/iam/v1alpha1/zz_service_linked_role.go +++ b/apis/iam/v1alpha1/zz_service_linked_role.go @@ -94,7 +94,7 @@ type ServiceLinkedRoleObservation struct { // if your Region began supporting these features within the last year. The // role might have been used more than 400 days ago. For more information, see // Regions where data is tracked (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#access-advisor_tracking-period) - // in the IAM User Guide. + // in the IAM user Guide. RoleLastUsed *RoleLastUsed `json:"roleLastUsed,omitempty"` // The friendly name that identifies the role. RoleName *string `json:"roleName,omitempty"` diff --git a/apis/iam/v1alpha1/zz_types.go b/apis/iam/v1alpha1/zz_types.go index 0a4205c836..779a8540ac 100644 --- a/apis/iam/v1alpha1/zz_types.go +++ b/apis/iam/v1alpha1/zz_types.go @@ -324,7 +324,7 @@ type Role struct { // if your Region began supporting these features within the last year. The // role might have been used more than 400 days ago. For more information, see // Regions where data is tracked (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#access-advisor_tracking-period) - // in the IAM User Guide. + // in the IAM user Guide. // // This data type is returned as a response element in the GetRole and GetAccountAuthorizationDetails // operations. @@ -368,7 +368,7 @@ type RoleDetail struct { // if your Region began supporting these features within the last year. The // role might have been used more than 400 days ago. For more information, see // Regions where data is tracked (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#access-advisor_tracking-period) - // in the IAM User Guide. + // in the IAM user Guide. // // This data type is returned as a response element in the GetRole and GetAccountAuthorizationDetails // operations. 
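Aside, not part of the generated patch: a minimal sketch of how the new Glue Snowflake node types added above compose into a job graph. The import path assumes the provider's usual module layout, and every name and value here is illustrative only, not taken from this diff.

package main

import (
	"fmt"

	glue "github.com/crossplane-contrib/provider-aws/apis/glue/v1alpha1"
)

// strPtr is a tiny helper for the SDK-style pointer fields.
func strPtr(s string) *string { return &s }

func main() {
	// Wire a SnowflakeSource node into a CodeGenConfigurationNode, reading a
	// table through a named Glue connection (all names are hypothetical).
	node := glue.CodeGenConfigurationNode{
		SnowflakeSource: &glue.SnowflakeSource{
			Name: strPtr("snowflake-orders"),
			Data: &glue.SnowflakeNodeData{
				Connection: &glue.Option{Value: strPtr("my-snowflake-connection")},
				Database:   strPtr("SALES"),
				Schema:     strPtr("PUBLIC"),
				Table:      strPtr("ORDERS"),
				SourceType: strPtr("table"),
			},
		},
	}
	fmt.Println("source node:", *node.SnowflakeSource.Name)
}

A SnowflakeTarget node is shaped the same way, except that it takes Inputs (the names of upstream nodes) instead of OutputSchemas.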
diff --git a/apis/iot/generator-config.yaml b/apis/iot/generator-config.yaml index 5e72dfe6a4..314eb6b72a 100644 --- a/apis/iot/generator-config.yaml +++ b/apis/iot/generator-config.yaml @@ -26,6 +26,8 @@ ignore: - ThingType - TopicRule - TopicRuleDestination + - Package + - PackageVersion field_paths: - CreatePolicyInput.PolicyName - CreatePolicyOutput.PolicyName diff --git a/apis/iot/v1alpha1/zz_enums.go b/apis/iot/v1alpha1/zz_enums.go index 85c821a714..b485bdbcf8 100644 --- a/apis/iot/v1alpha1/zz_enums.go +++ b/apis/iot/v1alpha1/zz_enums.go @@ -489,6 +489,23 @@ const ( OTAUpdateStatus_CREATE_IN_PROGRESS OTAUpdateStatus = "CREATE_IN_PROGRESS" OTAUpdateStatus_CREATE_COMPLETE OTAUpdateStatus = "CREATE_COMPLETE" OTAUpdateStatus_CREATE_FAILED OTAUpdateStatus = "CREATE_FAILED" + OTAUpdateStatus_DELETE_IN_PROGRESS OTAUpdateStatus = "DELETE_IN_PROGRESS" + OTAUpdateStatus_DELETE_FAILED OTAUpdateStatus = "DELETE_FAILED" +) + +type PackageVersionAction string + +const ( + PackageVersionAction_PUBLISH PackageVersionAction = "PUBLISH" + PackageVersionAction_DEPRECATE PackageVersionAction = "DEPRECATE" +) + +type PackageVersionStatus string + +const ( + PackageVersionStatus_DRAFT PackageVersionStatus = "DRAFT" + PackageVersionStatus_PUBLISHED PackageVersionStatus = "PUBLISHED" + PackageVersionStatus_DEPRECATED PackageVersionStatus = "DEPRECATED" ) type PolicyTemplateName string diff --git a/apis/kafka/generator-config.yaml b/apis/kafka/generator-config.yaml index 635c5658aa..872b02a445 100644 --- a/apis/kafka/generator-config.yaml +++ b/apis/kafka/generator-config.yaml @@ -1,6 +1,7 @@ ignore: resource_names: - ClusterV2 + - VpcConnection field_paths: - CreateClusterInput.BrokerNodeGroupInfo - CreateClusterInput.ConfigurationInfo diff --git a/apis/kafka/v1alpha1/zz_enums.go b/apis/kafka/v1alpha1/zz_enums.go index 13f3d9a823..a842e688fe 100644 --- a/apis/kafka/v1alpha1/zz_enums.go +++ b/apis/kafka/v1alpha1/zz_enums.go @@ -88,3 +88,23 @@ const ( StorageMode_LOCAL StorageMode = "LOCAL" StorageMode_TIERED StorageMode = "TIERED" ) + +type UserIdentityType string + +const ( + UserIdentityType_AWSACCOUNT UserIdentityType = "AWSACCOUNT" + UserIdentityType_AWSSERVICE UserIdentityType = "AWSSERVICE" +) + +type VPCConnectionState string + +const ( + VPCConnectionState_CREATING VPCConnectionState = "CREATING" + VPCConnectionState_AVAILABLE VPCConnectionState = "AVAILABLE" + VPCConnectionState_INACTIVE VPCConnectionState = "INACTIVE" + VPCConnectionState_DEACTIVATING VPCConnectionState = "DEACTIVATING" + VPCConnectionState_DELETING VPCConnectionState = "DELETING" + VPCConnectionState_FAILED VPCConnectionState = "FAILED" + VPCConnectionState_REJECTED VPCConnectionState = "REJECTED" + VPCConnectionState_REJECTING VPCConnectionState = "REJECTING" +) diff --git a/apis/kafka/v1alpha1/zz_generated.deepcopy.go b/apis/kafka/v1alpha1/zz_generated.deepcopy.go index 04d498c7b8..aebe302c5e 100644 --- a/apis/kafka/v1alpha1/zz_generated.deepcopy.go +++ b/apis/kafka/v1alpha1/zz_generated.deepcopy.go @@ -131,6 +131,17 @@ func (in *BrokerNodeGroupInfo) DeepCopyInto(out *BrokerNodeGroupInfo) { *out = new(StorageInfo) (*in).DeepCopyInto(*out) } + if in.ZoneIDs != nil { + in, out := &in.ZoneIDs, &out.ZoneIDs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerNodeGroupInfo. 
@@ -249,6 +260,40 @@ func (in *ClientAuthentication) DeepCopy() *ClientAuthentication { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientVPCConnection) DeepCopyInto(out *ClientVPCConnection) { + *out = *in + if in.Authentication != nil { + in, out := &in.Authentication, &out.Authentication + *out = new(string) + **out = **in + } + if in.CreationTime != nil { + in, out := &in.CreationTime, &out.CreationTime + *out = (*in).DeepCopy() + } + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(string) + **out = **in + } + if in.VPCConnectionARN != nil { + in, out := &in.VPCConnectionARN, &out.VPCConnectionARN + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientVPCConnection. +func (in *ClientVPCConnection) DeepCopy() *ClientVPCConnection { + if in == nil { + return nil + } + out := new(ClientVPCConnection) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CloudWatchLogs) DeepCopyInto(out *CloudWatchLogs) { *out = *in @@ -565,6 +610,92 @@ func (in *ClusterOperationStepInfo) DeepCopy() *ClusterOperationStepInfo { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterOperationV2) DeepCopyInto(out *ClusterOperationV2) { + *out = *in + if in.ClusterARN != nil { + in, out := &in.ClusterARN, &out.ClusterARN + *out = new(string) + **out = **in + } + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = (*in).DeepCopy() + } + if in.OperationARN != nil { + in, out := &in.OperationARN, &out.OperationARN + *out = new(string) + **out = **in + } + if in.OperationState != nil { + in, out := &in.OperationState, &out.OperationState + *out = new(string) + **out = **in + } + if in.OperationType != nil { + in, out := &in.OperationType, &out.OperationType + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperationV2. +func (in *ClusterOperationV2) DeepCopy() *ClusterOperationV2 { + if in == nil { + return nil + } + out := new(ClusterOperationV2) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterOperationV2Summary) DeepCopyInto(out *ClusterOperationV2Summary) { + *out = *in + if in.ClusterARN != nil { + in, out := &in.ClusterARN, &out.ClusterARN + *out = new(string) + **out = **in + } + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = (*in).DeepCopy() + } + if in.OperationARN != nil { + in, out := &in.OperationARN, &out.OperationARN + *out = new(string) + **out = **in + } + if in.OperationState != nil { + in, out := &in.OperationState, &out.OperationState + *out = new(string) + **out = **in + } + if in.OperationType != nil { + in, out := &in.OperationType, &out.OperationType + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperationV2Summary. 
+func (in *ClusterOperationV2Summary) DeepCopy() *ClusterOperationV2Summary { + if in == nil { + return nil + } + out := new(ClusterOperationV2Summary) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterParameters) DeepCopyInto(out *ClusterParameters) { *out = *in @@ -1051,6 +1182,11 @@ func (in *ConnectivityInfo) DeepCopyInto(out *ConnectivityInfo) { *out = new(PublicAccess) (*in).DeepCopyInto(*out) } + if in.VPCConnectivity != nil { + in, out := &in.VPCConnectivity, &out.VPCConnectivity + *out = new(VPCConnectivity) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectivityInfo. @@ -2080,6 +2216,26 @@ func (in *UnprocessedSCRAMSecret) DeepCopy() *UnprocessedSCRAMSecret { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserIdentity) DeepCopyInto(out *UserIdentity) { + *out = *in + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserIdentity. +func (in *UserIdentity) DeepCopy() *UserIdentity { + if in == nil { + return nil + } + out := new(UserIdentity) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VPCConfig) DeepCopyInto(out *VPCConfig) { *out = *in @@ -2117,6 +2273,233 @@ func (in *VPCConfig) DeepCopy() *VPCConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCConnection) DeepCopyInto(out *VPCConnection) { + *out = *in + if in.Authentication != nil { + in, out := &in.Authentication, &out.Authentication + *out = new(string) + **out = **in + } + if in.CreationTime != nil { + in, out := &in.CreationTime, &out.CreationTime + *out = (*in).DeepCopy() + } + if in.TargetClusterARN != nil { + in, out := &in.TargetClusterARN, &out.TargetClusterARN + *out = new(string) + **out = **in + } + if in.VPCConnectionARN != nil { + in, out := &in.VPCConnectionARN, &out.VPCConnectionARN + *out = new(string) + **out = **in + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConnection. +func (in *VPCConnection) DeepCopy() *VPCConnection { + if in == nil { + return nil + } + out := new(VPCConnection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCConnectionInfo) DeepCopyInto(out *VPCConnectionInfo) { + *out = *in + if in.CreationTime != nil { + in, out := &in.CreationTime, &out.CreationTime + *out = (*in).DeepCopy() + } + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(string) + **out = **in + } + if in.VPCConnectionARN != nil { + in, out := &in.VPCConnectionARN, &out.VPCConnectionARN + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConnectionInfo. 
+func (in *VPCConnectionInfo) DeepCopy() *VPCConnectionInfo { + if in == nil { + return nil + } + out := new(VPCConnectionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCConnectionInfoServerless) DeepCopyInto(out *VPCConnectionInfoServerless) { + *out = *in + if in.CreationTime != nil { + in, out := &in.CreationTime, &out.CreationTime + *out = (*in).DeepCopy() + } + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(string) + **out = **in + } + if in.VPCConnectionARN != nil { + in, out := &in.VPCConnectionARN, &out.VPCConnectionARN + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConnectionInfoServerless. +func (in *VPCConnectionInfoServerless) DeepCopy() *VPCConnectionInfoServerless { + if in == nil { + return nil + } + out := new(VPCConnectionInfoServerless) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCConnectivity) DeepCopyInto(out *VPCConnectivity) { + *out = *in + if in.ClientAuthentication != nil { + in, out := &in.ClientAuthentication, &out.ClientAuthentication + *out = new(VPCConnectivityClientAuthentication) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConnectivity. +func (in *VPCConnectivity) DeepCopy() *VPCConnectivity { + if in == nil { + return nil + } + out := new(VPCConnectivity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCConnectivityClientAuthentication) DeepCopyInto(out *VPCConnectivityClientAuthentication) { + *out = *in + if in.SASL != nil { + in, out := &in.SASL, &out.SASL + *out = new(VPCConnectivitySASL) + (*in).DeepCopyInto(*out) + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(VPCConnectivityTLS) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConnectivityClientAuthentication. +func (in *VPCConnectivityClientAuthentication) DeepCopy() *VPCConnectivityClientAuthentication { + if in == nil { + return nil + } + out := new(VPCConnectivityClientAuthentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCConnectivityIAM) DeepCopyInto(out *VPCConnectivityIAM) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConnectivityIAM. +func (in *VPCConnectivityIAM) DeepCopy() *VPCConnectivityIAM { + if in == nil { + return nil + } + out := new(VPCConnectivityIAM) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCConnectivitySASL) DeepCopyInto(out *VPCConnectivitySASL) { + *out = *in + if in.IAM != nil { + in, out := &in.IAM, &out.IAM + *out = new(VPCConnectivityIAM) + (*in).DeepCopyInto(*out) + } + if in.SCRAM != nil { + in, out := &in.SCRAM, &out.SCRAM + *out = new(VPCConnectivitySCRAM) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConnectivitySASL. +func (in *VPCConnectivitySASL) DeepCopy() *VPCConnectivitySASL { + if in == nil { + return nil + } + out := new(VPCConnectivitySASL) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCConnectivitySCRAM) DeepCopyInto(out *VPCConnectivitySCRAM) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConnectivitySCRAM. +func (in *VPCConnectivitySCRAM) DeepCopy() *VPCConnectivitySCRAM { + if in == nil { + return nil + } + out := new(VPCConnectivitySCRAM) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCConnectivityTLS) DeepCopyInto(out *VPCConnectivityTLS) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConnectivityTLS. +func (in *VPCConnectivityTLS) DeepCopy() *VPCConnectivityTLS { + if in == nil { + return nil + } + out := new(VPCConnectivityTLS) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ZookeeperNodeInfo) DeepCopyInto(out *ZookeeperNodeInfo) { *out = *in diff --git a/apis/kafka/v1alpha1/zz_types.go b/apis/kafka/v1alpha1/zz_types.go index 42678dd03d..6b14690a3a 100644 --- a/apis/kafka/v1alpha1/zz_types.go +++ b/apis/kafka/v1alpha1/zz_types.go @@ -64,6 +64,8 @@ type BrokerNodeGroupInfo struct { SecurityGroups []*string `json:"securityGroups,omitempty"` // Contains information about storage volumes attached to MSK broker nodes. 
StorageInfo *StorageInfo `json:"storageInfo,omitempty"` + + ZoneIDs []*string `json:"zoneIDs,omitempty"` } // +kubebuilder:skipversion @@ -97,6 +99,17 @@ type ClientAuthentication struct { Unauthenticated *Unauthenticated `json:"unauthenticated,omitempty"` } +// +kubebuilder:skipversion +type ClientVPCConnection struct { + Authentication *string `json:"authentication,omitempty"` + + CreationTime *metav1.Time `json:"creationTime,omitempty"` + + Owner *string `json:"owner,omitempty"` + + VPCConnectionARN *string `json:"vpcConnectionARN,omitempty"` +} + // +kubebuilder:skipversion type CloudWatchLogs struct { Enabled *bool `json:"enabled,omitempty"` @@ -179,6 +192,36 @@ type ClusterOperationStepInfo struct { StepStatus *string `json:"stepStatus,omitempty"` } +// +kubebuilder:skipversion +type ClusterOperationV2 struct { + ClusterARN *string `json:"clusterARN,omitempty"` + + EndTime *metav1.Time `json:"endTime,omitempty"` + + OperationARN *string `json:"operationARN,omitempty"` + + OperationState *string `json:"operationState,omitempty"` + + OperationType *string `json:"operationType,omitempty"` + + StartTime *metav1.Time `json:"startTime,omitempty"` +} + +// +kubebuilder:skipversion +type ClusterOperationV2Summary struct { + ClusterARN *string `json:"clusterARN,omitempty"` + + EndTime *metav1.Time `json:"endTime,omitempty"` + + OperationARN *string `json:"operationARN,omitempty"` + + OperationState *string `json:"operationState,omitempty"` + + OperationType *string `json:"operationType,omitempty"` + + StartTime *metav1.Time `json:"startTime,omitempty"` +} + // +kubebuilder:skipversion type Cluster_SDK struct { ActiveOperationARN *string `json:"activeOperationARN,omitempty"` @@ -242,6 +285,8 @@ type Configuration_SDK struct { type ConnectivityInfo struct { // Broker public access control. PublicAccess *PublicAccess `json:"publicAccess,omitempty"` + // Broker VPC connectivity access control. 
+ VPCConnectivity *VPCConnectivity `json:"vpcConnectivity,omitempty"` } // +kubebuilder:skipversion @@ -527,6 +572,11 @@ type UnprocessedSCRAMSecret struct { SecretARN *string `json:"secretARN,omitempty"` } +// +kubebuilder:skipversion +type UserIdentity struct { + PrincipalID *string `json:"principalID,omitempty"` +} + // +kubebuilder:skipversion type VPCConfig struct { SecurityGroupIDs []*string `json:"securityGroupIDs,omitempty"` @@ -534,6 +584,71 @@ type VPCConfig struct { SubnetIDs []*string `json:"subnetIDs,omitempty"` } +// +kubebuilder:skipversion +type VPCConnection struct { + Authentication *string `json:"authentication,omitempty"` + + CreationTime *metav1.Time `json:"creationTime,omitempty"` + + TargetClusterARN *string `json:"targetClusterARN,omitempty"` + + VPCConnectionARN *string `json:"vpcConnectionARN,omitempty"` + + VPCID *string `json:"vpcID,omitempty"` +} + +// +kubebuilder:skipversion +type VPCConnectionInfo struct { + CreationTime *metav1.Time `json:"creationTime,omitempty"` + + Owner *string `json:"owner,omitempty"` + + VPCConnectionARN *string `json:"vpcConnectionARN,omitempty"` +} + +// +kubebuilder:skipversion +type VPCConnectionInfoServerless struct { + CreationTime *metav1.Time `json:"creationTime,omitempty"` + + Owner *string `json:"owner,omitempty"` + + VPCConnectionARN *string `json:"vpcConnectionARN,omitempty"` +} + +// +kubebuilder:skipversion +type VPCConnectivity struct { + ClientAuthentication *VPCConnectivityClientAuthentication `json:"clientAuthentication,omitempty"` +} + +// +kubebuilder:skipversion +type VPCConnectivityClientAuthentication struct { + SASL *VPCConnectivitySASL `json:"sasl,omitempty"` + + TLS *VPCConnectivityTLS `json:"tls,omitempty"` +} + +// +kubebuilder:skipversion +type VPCConnectivityIAM struct { + Enabled *bool `json:"enabled,omitempty"` +} + +// +kubebuilder:skipversion +type VPCConnectivitySASL struct { + IAM *VPCConnectivityIAM `json:"iam,omitempty"` + + SCRAM *VPCConnectivitySCRAM `json:"scram,omitempty"` +} + +// +kubebuilder:skipversion +type VPCConnectivitySCRAM struct { + Enabled *bool `json:"enabled,omitempty"` +} + +// +kubebuilder:skipversion +type VPCConnectivityTLS struct { + Enabled *bool `json:"enabled,omitempty"` +} + // +kubebuilder:skipversion type ZookeeperNodeInfo struct { AttachedENIID *string `json:"attachedENIID,omitempty"` diff --git a/apis/kms/v1alpha1/zz_enums.go b/apis/kms/v1alpha1/zz_enums.go index d9e6391382..cf3de83018 100644 --- a/apis/kms/v1alpha1/zz_enums.go +++ b/apis/kms/v1alpha1/zz_enums.go @@ -21,9 +21,11 @@ package v1alpha1 type AlgorithmSpec string const ( - AlgorithmSpec_RSAES_PKCS1_V1_5 AlgorithmSpec = "RSAES_PKCS1_V1_5" - AlgorithmSpec_RSAES_OAEP_SHA_1 AlgorithmSpec = "RSAES_OAEP_SHA_1" - AlgorithmSpec_RSAES_OAEP_SHA_256 AlgorithmSpec = "RSAES_OAEP_SHA_256" + AlgorithmSpec_RSAES_PKCS1_V1_5 AlgorithmSpec = "RSAES_PKCS1_V1_5" + AlgorithmSpec_RSAES_OAEP_SHA_1 AlgorithmSpec = "RSAES_OAEP_SHA_1" + AlgorithmSpec_RSAES_OAEP_SHA_256 AlgorithmSpec = "RSAES_OAEP_SHA_256" + AlgorithmSpec_RSA_AES_KEY_WRAP_SHA_1 AlgorithmSpec = "RSA_AES_KEY_WRAP_SHA_1" + AlgorithmSpec_RSA_AES_KEY_WRAP_SHA_256 AlgorithmSpec = "RSA_AES_KEY_WRAP_SHA_256" ) type ConnectionErrorCodeType string @@ -141,6 +143,12 @@ const ( GrantOperation_VerifyMac GrantOperation = "VerifyMac" ) +type KeyEncryptionMechanism string + +const ( + KeyEncryptionMechanism_RSAES_OAEP_SHA_256 KeyEncryptionMechanism = "RSAES_OAEP_SHA_256" +) + type KeyManagerType string const ( @@ -238,6 +246,8 @@ type WrappingKeySpec string const ( 
WrappingKeySpec_RSA_2048 WrappingKeySpec = "RSA_2048" + WrappingKeySpec_RSA_3072 WrappingKeySpec = "RSA_3072" + WrappingKeySpec_RSA_4096 WrappingKeySpec = "RSA_4096" ) type XksProxyConnectivityType string diff --git a/apis/kms/v1alpha1/zz_key.go b/apis/kms/v1alpha1/zz_key.go index 50f71ffa94..4b0752d346 100644 --- a/apis/kms/v1alpha1/zz_key.go +++ b/apis/kms/v1alpha1/zz_key.go @@ -29,19 +29,18 @@ type KeyParameters struct { // Region is which region the Key will be created. // +kubebuilder:validation:Required Region string `json:"region"` - // A flag to indicate whether to bypass the key policy lockout safety check. + // Skips ("bypasses") the key policy lockout safety check. The default value + // is false. // // Setting this value to true increases the risk that the KMS key becomes unmanageable. // Do not set this value to true indiscriminately. // - // For more information, refer to the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) - // section in the Key Management Service Developer Guide . - // - // Use this parameter only when you include a policy in the request and you - // intend to prevent the principal that is making the request from making a - // subsequent PutKeyPolicy request on the KMS key. + // For more information, see Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key) + // in the Key Management Service Developer Guide. // - // The default value is false. + // Use this parameter only when you intend to prevent the principal that is + // making the request from making a subsequent PutKeyPolicy request on the KMS + // key. BypassPolicyLockoutSafetyCheck *bool `json:"bypassPolicyLockoutSafetyCheck,omitempty"` // Creates the KMS key in the specified custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). // The ConnectionState of the custom key store must be CONNECTED. To find the @@ -62,10 +61,12 @@ type KeyParameters struct { // the names differ. We recommend that you use KeySpec parameter in your code. // However, to avoid breaking changes, KMS supports both parameters. CustomerMasterKeySpec *string `json:"customerMasterKeySpec,omitempty"` - // A description of the KMS key. + // A description of the KMS key. Use a description that helps you decide whether + // the KMS key is appropriate for a task. The default value is an empty string + // (no description). // - // Use a description that helps you decide whether the KMS key is appropriate - // for a task. The default value is an empty string (no description). + // Do not include confidential or sensitive information in this field. This + // field may be displayed in plaintext in CloudTrail logs and other output. // // To set or change the description after the key is created, use UpdateKeyDescription. Description *string `json:"description,omitempty"` @@ -173,24 +174,23 @@ type KeyParameters struct { // // If you provide a key policy, it must meet the following criteria: // - // * If you don't set BypassPolicyLockoutSafetyCheck to true, the key policy - // must allow the principal that is making the CreateKey request to make - // a subsequent PutKeyPolicy request on the KMS key. This reduces the risk - // that the KMS key becomes unmanageable. 
For more information, refer to - // the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) - // section of the Key Management Service Developer Guide . + // * The key policy must allow the calling principal to make a subsequent + // PutKeyPolicy request on the KMS key. This reduces the risk that the KMS + // key becomes unmanageable. For more information, see Default key policy + // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key) + // in the Key Management Service Developer Guide. (To omit this condition, + // set BypassPolicyLockoutSafetyCheck to true.) // // * Each statement in the key policy must contain one or more principals. // The principals in the key policy must exist and be visible to KMS. When - // you create a new Amazon Web Services principal (for example, an IAM user - // or role), you might need to enforce a delay before including the new principal - // in a key policy because the new principal might not be immediately visible - // to KMS. For more information, see Changes that I make are not always immediately - // visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) + // you create a new Amazon Web Services principal, you might need to enforce + // a delay before including the new principal in a key policy because the + // new principal might not be immediately visible to KMS. For more information, + // see Changes that I make are not always immediately visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) // in the Amazon Web Services Identity and Access Management User Guide. // // If you do not provide a key policy, KMS attaches a default key policy to - // the KMS key. For more information, see Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default) + // the KMS key. For more information, see Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default) // in the Key Management Service Developer Guide. // // The key policy size quota is 32 kilobytes (32768 bytes). @@ -202,6 +202,9 @@ type KeyParameters struct { // Assigns one or more tags to the KMS key. Use this parameter to tag the KMS // key when it is created. To tag an existing KMS key, use the TagResource operation. // + // Do not include confidential or sensitive information in this field. This + // field may be displayed in plaintext in CloudTrail logs and other output. + // // Tagging or untagging a KMS key can allow or deny permission to the KMS key. // For details, see ABAC for KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) // in the Key Management Service Developer Guide. 
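As a quick illustration, again not part of the patch: the reworked BypassPolicyLockoutSafetyCheck documentation above boils down to the following hedged sketch. The module path is an assumption, and the region and description values are invented for the example.

package main

import (
	"fmt"

	kms "github.com/crossplane-contrib/provider-aws/apis/kms/v1alpha1"
)

func boolPtr(b bool) *bool    { return &b }
func strPtr(s string) *string { return &s }

func main() {
	// Keep the lockout safety check in place (false is the default): any key
	// policy supplied alongside must then allow the calling principal a
	// subsequent PutKeyPolicy, so the key cannot become unmanageable.
	params := kms.KeyParameters{
		Region:                         "eu-central-1",
		Description:                    strPtr("example key; no sensitive data in this field"),
		BypassPolicyLockoutSafetyCheck: boolPtr(false),
	}
	fmt.Printf("creating key with params: %+v\n", params)
}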
diff --git a/apis/lambda/v1alpha1/zz_enums.go b/apis/lambda/v1alpha1/zz_enums.go index e969d0f55a..22e392cba4 100644 --- a/apis/lambda/v1alpha1/zz_enums.go +++ b/apis/lambda/v1alpha1/zz_enums.go @@ -46,6 +46,13 @@ const ( EventSourcePosition_AT_TIMESTAMP EventSourcePosition = "AT_TIMESTAMP" ) +type FullDocument string + +const ( + FullDocument_UpdateLookup FullDocument = "UpdateLookup" + FullDocument_Default FullDocument = "Default" +) + type FunctionResponseType string const ( @@ -73,6 +80,13 @@ const ( InvocationType_DryRun InvocationType = "DryRun" ) +type InvokeMode string + +const ( + InvokeMode_BUFFERED InvokeMode = "BUFFERED" + InvokeMode_RESPONSE_STREAM InvokeMode = "RESPONSE_STREAM" +) + type LastUpdateStatus string const ( @@ -129,6 +143,13 @@ const ( ProvisionedConcurrencyStatusEnum_FAILED ProvisionedConcurrencyStatusEnum = "FAILED" ) +type ResponseStreamingInvocationType string + +const ( + ResponseStreamingInvocationType_RequestResponse ResponseStreamingInvocationType = "RequestResponse" + ResponseStreamingInvocationType_DryRun ResponseStreamingInvocationType = "DryRun" +) + type Runtime string const ( @@ -160,6 +181,10 @@ const ( Runtime_provided Runtime = "provided" Runtime_provided_al2 Runtime = "provided.al2" Runtime_nodejs18_x Runtime = "nodejs18.x" + Runtime_python3_10 Runtime = "python3.10" + Runtime_java17 Runtime = "java17" + Runtime_ruby3_2 Runtime = "ruby3.2" + Runtime_python3_11 Runtime = "python3.11" ) type SnapStartApplyOn string @@ -244,3 +269,11 @@ const ( TracingMode_Active TracingMode = "Active" TracingMode_PassThrough TracingMode = "PassThrough" ) + +type UpdateRuntimeOn string + +const ( + UpdateRuntimeOn_Auto UpdateRuntimeOn = "Auto" + UpdateRuntimeOn_Manual UpdateRuntimeOn = "Manual" + UpdateRuntimeOn_FunctionUpdate UpdateRuntimeOn = "FunctionUpdate" +) diff --git a/apis/lambda/v1alpha1/zz_function_url_config.go b/apis/lambda/v1alpha1/zz_function_url_config.go index d107699ff4..4badf31d10 100644 --- a/apis/lambda/v1alpha1/zz_function_url_config.go +++ b/apis/lambda/v1alpha1/zz_function_url_config.go @@ -30,14 +30,25 @@ type FunctionURLConfigParameters struct { // +kubebuilder:validation:Required Region string `json:"region"` // The type of authentication that your function URL uses. Set to AWS_IAM if - // you want to restrict access to authenticated IAM users only. Set to NONE - // if you want to bypass IAM authentication to create a public endpoint. For - // more information, see Security and auth model for Lambda function URLs (https://docs.aws.amazon.com/lambda/latest/dg/urls-auth.html). + // you want to restrict access to authenticated users only. Set to NONE if you + // want to bypass IAM authentication to create a public endpoint. For more information, + // see Security and auth model for Lambda function URLs (https://docs.aws.amazon.com/lambda/latest/dg/urls-auth.html). // +kubebuilder:validation:Required AuthType *string `json:"authType"` // The cross-origin resource sharing (CORS) (https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) // settings for your function URL. CORS *CORS `json:"cors,omitempty"` + // Use one of the following options: + // + // * BUFFERED – This is the default option. Lambda invokes your function + // using the Invoke API operation. Invocation results are available when + // the payload is complete. The maximum payload size is 6 MB. + // + // * RESPONSE_STREAM – Your function streams payload results as they become + // available. Lambda invokes your function using the InvokeWithResponseStream + // API operation. 
The maximum response payload size is 20 MB; however, you + can request a quota increase (https://docs.aws.amazon.com/servicequotas/latest/userguide/request-quota-increase.html). + InvokeMode *string `json:"invokeMode,omitempty"` // The alias name. Qualifier *string `json:"qualifier,omitempty"` CustomFunctionURLConfigParameters `json:",inline"` diff --git a/apis/lambda/v1alpha1/zz_generated.deepcopy.go b/apis/lambda/v1alpha1/zz_generated.deepcopy.go index e84a42c924..70b64d06c8 100644 --- a/apis/lambda/v1alpha1/zz_generated.deepcopy.go +++ b/apis/lambda/v1alpha1/zz_generated.deepcopy.go @@ -744,6 +744,11 @@ func (in *FunctionConfiguration) DeepCopyInto(out *FunctionConfiguration) { *out = new(string) **out = **in } + if in.RuntimeVersionConfig != nil { + in, out := &in.RuntimeVersionConfig, &out.RuntimeVersionConfig + *out = new(RuntimeVersionConfig) + (*in).DeepCopyInto(*out) + } if in.SigningJobARN != nil { in, out := &in.SigningJobARN, &out.SigningJobARN *out = new(string) @@ -1222,6 +1227,11 @@ func (in *FunctionURLConfigParameters) DeepCopyInto(out *FunctionURLConfigParame *out = new(CORS) (*in).DeepCopyInto(*out) } + if in.InvokeMode != nil { + in, out := &in.InvokeMode, &out.InvokeMode + *out = new(string) + **out = **in + } if in.Qualifier != nil { in, out := &in.Qualifier, &out.Qualifier *out = new(string) @@ -1302,6 +1312,11 @@ func (in *FunctionURLConfig_SDK) DeepCopyInto(out *FunctionURLConfig_SDK) { *out = new(string) **out = **in } + if in.InvokeMode != nil { + in, out := &in.InvokeMode, &out.InvokeMode + *out = new(string) + **out = **in + } if in.LastModifiedTime != nil { in, out := &in.LastModifiedTime, &out.LastModifiedTime *out = new(string) @@ -1411,6 +1426,36 @@ func (in *ImageConfigResponse) DeepCopy() *ImageConfigResponse { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InvokeWithResponseStreamCompleteEvent) DeepCopyInto(out *InvokeWithResponseStreamCompleteEvent) { + *out = *in + if in.ErrorCode != nil { + in, out := &in.ErrorCode, &out.ErrorCode + *out = new(string) + **out = **in + } + if in.ErrorDetails != nil { + in, out := &in.ErrorDetails, &out.ErrorDetails + *out = new(string) + **out = **in + } + if in.LogResult != nil { + in, out := &in.LogResult, &out.LogResult + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InvokeWithResponseStreamCompleteEvent. +func (in *InvokeWithResponseStreamCompleteEvent) DeepCopy() *InvokeWithResponseStreamCompleteEvent { + if in == nil { + return nil + } + out := new(InvokeWithResponseStreamCompleteEvent) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Layer) DeepCopyInto(out *Layer) { *out = *in @@ -1566,6 +1611,56 @@ func (in *PutFunctionConcurrencyOutput) DeepCopy() *PutFunctionConcurrencyOutput return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RuntimeVersionConfig) DeepCopyInto(out *RuntimeVersionConfig) { + *out = *in + if in.Error != nil { + in, out := &in.Error, &out.Error + *out = new(RuntimeVersionError) + (*in).DeepCopyInto(*out) + } + if in.RuntimeVersionARN != nil { + in, out := &in.RuntimeVersionARN, &out.RuntimeVersionARN + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeVersionConfig. +func (in *RuntimeVersionConfig) DeepCopy() *RuntimeVersionConfig { + if in == nil { + return nil + } + out := new(RuntimeVersionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeVersionError) DeepCopyInto(out *RuntimeVersionError) { + *out = *in + if in.ErrorCode != nil { + in, out := &in.ErrorCode, &out.ErrorCode + *out = new(string) + **out = **in + } + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeVersionError. +func (in *RuntimeVersionError) DeepCopy() *RuntimeVersionError { + if in == nil { + return nil + } + out := new(RuntimeVersionError) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SnapStart) DeepCopyInto(out *SnapStart) { *out = *in diff --git a/apis/lambda/v1alpha1/zz_types.go b/apis/lambda/v1alpha1/zz_types.go index 75a75e210b..d86a4b721a 100644 --- a/apis/lambda/v1alpha1/zz_types.go +++ b/apis/lambda/v1alpha1/zz_types.go @@ -171,6 +171,8 @@ type FunctionConfiguration struct { Role *string `json:"role,omitempty"` Runtime *string `json:"runtime,omitempty"` + // The ARN of the runtime and any errors that occurred. + RuntimeVersionConfig *RuntimeVersionConfig `json:"runtimeVersionConfig,omitempty"` SigningJobARN *string `json:"signingJobARN,omitempty"` @@ -214,6 +216,8 @@ type FunctionURLConfig_SDK struct { FunctionURL *string `json:"functionURL,omitempty"` + InvokeMode *string `json:"invokeMode,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` } @@ -242,6 +246,15 @@ type ImageConfigResponse struct { ImageConfig *ImageConfig `json:"imageConfig,omitempty"` } +// +kubebuilder:skipversion +type InvokeWithResponseStreamCompleteEvent struct { + ErrorCode *string `json:"errorCode,omitempty"` + + ErrorDetails *string `json:"errorDetails,omitempty"` + + LogResult *string `json:"logResult,omitempty"` +} + // +kubebuilder:skipversion type Layer struct { ARN *string `json:"arn,omitempty"` @@ -289,6 +302,22 @@ type PutFunctionConcurrencyOutput struct { ReservedConcurrentExecutions *int64 `json:"reservedConcurrentExecutions,omitempty"` } +// +kubebuilder:skipversion +type RuntimeVersionConfig struct { + // Any error returned when the runtime version information for the function + // could not be retrieved.
+ Error *RuntimeVersionError `json:"error,omitempty"` + + RuntimeVersionARN *string `json:"runtimeVersionARN,omitempty"` +} + +// +kubebuilder:skipversion +type RuntimeVersionError struct { + ErrorCode *string `json:"errorCode,omitempty"` + + Message *string `json:"message,omitempty"` +} + // +kubebuilder:skipversion type SnapStart struct { ApplyOn *string `json:"applyOn,omitempty"` diff --git a/apis/lambda/v1beta1/zz_enums.go b/apis/lambda/v1beta1/zz_enums.go index e5447eb30f..8864f25c29 100644 --- a/apis/lambda/v1beta1/zz_enums.go +++ b/apis/lambda/v1beta1/zz_enums.go @@ -46,6 +46,13 @@ const ( EventSourcePosition_AT_TIMESTAMP EventSourcePosition = "AT_TIMESTAMP" ) +type FullDocument string + +const ( + FullDocument_UpdateLookup FullDocument = "UpdateLookup" + FullDocument_Default FullDocument = "Default" +) + type FunctionResponseType string const ( @@ -73,6 +80,13 @@ const ( InvocationType_DryRun InvocationType = "DryRun" ) +type InvokeMode string + +const ( + InvokeMode_BUFFERED InvokeMode = "BUFFERED" + InvokeMode_RESPONSE_STREAM InvokeMode = "RESPONSE_STREAM" +) + type LastUpdateStatus string const ( @@ -129,6 +143,13 @@ const ( ProvisionedConcurrencyStatusEnum_FAILED ProvisionedConcurrencyStatusEnum = "FAILED" ) +type ResponseStreamingInvocationType string + +const ( + ResponseStreamingInvocationType_RequestResponse ResponseStreamingInvocationType = "RequestResponse" + ResponseStreamingInvocationType_DryRun ResponseStreamingInvocationType = "DryRun" +) + type Runtime string const ( @@ -160,6 +181,10 @@ const ( Runtime_provided Runtime = "provided" Runtime_provided_al2 Runtime = "provided.al2" Runtime_nodejs18_x Runtime = "nodejs18.x" + Runtime_python3_10 Runtime = "python3.10" + Runtime_java17 Runtime = "java17" + Runtime_ruby3_2 Runtime = "ruby3.2" + Runtime_python3_11 Runtime = "python3.11" ) type SnapStartApplyOn string @@ -244,3 +269,11 @@ const ( TracingMode_Active TracingMode = "Active" TracingMode_PassThrough TracingMode = "PassThrough" ) + +type UpdateRuntimeOn string + +const ( + UpdateRuntimeOn_Auto UpdateRuntimeOn = "Auto" + UpdateRuntimeOn_Manual UpdateRuntimeOn = "Manual" + UpdateRuntimeOn_FunctionUpdate UpdateRuntimeOn = "FunctionUpdate" +) diff --git a/apis/lambda/v1beta1/zz_function.go b/apis/lambda/v1beta1/zz_function.go index 84410f83ab..7c55f15be5 100644 --- a/apis/lambda/v1beta1/zz_function.go +++ b/apis/lambda/v1beta1/zz_function.go @@ -59,9 +59,15 @@ type FunctionParameters struct { // Container image configuration values (https://docs.aws.amazon.com/lambda/latest/dg/configuration-images.html#configuration-images-settings) // that override the values in the container image Dockerfile. ImageConfig *ImageConfig `json:"imageConfig,omitempty"` - // The ARN of the Key Management Service (KMS) key that's used to encrypt your - // function's environment variables. If it's not provided, Lambda uses a default - // service key. + // The ARN of the Key Management Service (KMS) customer managed key that's used + // to encrypt your function's environment variables (https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html#configuration-envvars-encryption). + // When Lambda SnapStart (https://docs.aws.amazon.com/lambda/latest/dg/snapstart-security.html) + // is activated, Lambda also uses this key to encrypt your function's snapshot. + // If you deploy your function using a container image, Lambda also uses this + // key to encrypt your function when it's deployed.
Note that this is not the + // same key that's used to protect your container image in the Amazon Elastic + // Container Registry (Amazon ECR). If you don't provide a customer managed + // key, Lambda uses a default service key. KMSKeyARN *string `json:"kmsKeyARN,omitempty"` // A list of function layers (https://docs.aws.amazon.com/lambda/latest/dg/configuration-layers.html) // to add to the function's execution environment. Specify each layer by its @@ -78,6 +84,9 @@ type FunctionParameters struct { Publish *bool `json:"publish,omitempty"` // The identifier of the function's runtime (https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html). // Runtime is required if the deployment package is a .zip file archive. + // + // The following list includes deprecated runtimes. For more information, see + // Runtime deprecation policy (https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html#runtime-support-policy). Runtime *string `json:"runtime,omitempty"` // The function's SnapStart (https://docs.aws.amazon.com/lambda/latest/dg/snapstart.html) // setting. @@ -129,6 +138,8 @@ type FunctionObservation struct { RevisionID *string `json:"revisionID,omitempty"` // The function's execution role. Role *string `json:"role,omitempty"` + // The ARN of the runtime and any errors that occurred. + RuntimeVersionConfig *RuntimeVersionConfig `json:"runtimeVersionConfig,omitempty"` // The ARN of the signing job. SigningJobARN *string `json:"signingJobARN,omitempty"` // The ARN of the signing profile version. diff --git a/apis/lambda/v1beta1/zz_generated.deepcopy.go b/apis/lambda/v1beta1/zz_generated.deepcopy.go index 85d019a164..b22ec9d289 100644 --- a/apis/lambda/v1beta1/zz_generated.deepcopy.go +++ b/apis/lambda/v1beta1/zz_generated.deepcopy.go @@ -714,6 +714,11 @@ func (in *FunctionConfiguration) DeepCopyInto(out *FunctionConfiguration) { *out = new(string) **out = **in } + if in.RuntimeVersionConfig != nil { + in, out := &in.RuntimeVersionConfig, &out.RuntimeVersionConfig + *out = new(RuntimeVersionConfig) + (*in).DeepCopyInto(*out) + } if in.SigningJobARN != nil { in, out := &in.SigningJobARN, &out.SigningJobARN *out = new(string) @@ -891,6 +896,11 @@ func (in *FunctionObservation) DeepCopyInto(out *FunctionObservation) { *out = new(string) **out = **in } + if in.RuntimeVersionConfig != nil { + in, out := &in.RuntimeVersionConfig, &out.RuntimeVersionConfig + *out = new(RuntimeVersionConfig) + (*in).DeepCopyInto(*out) + } if in.SigningJobARN != nil { in, out := &in.SigningJobARN, &out.SigningJobARN *out = new(string) @@ -1139,6 +1149,11 @@ func (in *FunctionURLConfig_SDK) DeepCopyInto(out *FunctionURLConfig_SDK) { *out = new(string) **out = **in } + if in.InvokeMode != nil { + in, out := &in.InvokeMode, &out.InvokeMode + *out = new(string) + **out = **in + } if in.LastModifiedTime != nil { in, out := &in.LastModifiedTime, &out.LastModifiedTime *out = new(string) @@ -1248,6 +1263,36 @@ func (in *ImageConfigResponse) DeepCopy() *ImageConfigResponse { return out }
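FunctionObservation now surfaces runtimeVersionConfig, so a consumer can report either the resolved runtime version ARN or the error returned when it could not be retrieved. A sketch under the same module-path assumption:

package main

import (
	"fmt"

	lambdav1beta1 "github.com/crossplane-contrib/provider-aws/apis/lambda/v1beta1"
)

// describeRuntime prints the resolved runtime version ARN, or the error the
// service reported when the runtime version could not be retrieved.
func describeRuntime(obs lambdav1beta1.FunctionObservation) {
	rvc := obs.RuntimeVersionConfig
	if rvc == nil {
		fmt.Println("no runtime version reported")
		return
	}
	if rvc.Error != nil {
		fmt.Printf("runtime version error %s: %s\n",
			deref(rvc.Error.ErrorCode), deref(rvc.Error.Message))
		return
	}
	fmt.Printf("runtime version: %s\n", deref(rvc.RuntimeVersionARN))
}

// deref safely dereferences the optional string pointers used throughout the
// generated types.
func deref(s *string) string {
	if s == nil {
		return ""
	}
	return *s
}

func main() {
	describeRuntime(lambdav1beta1.FunctionObservation{})
}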
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InvokeWithResponseStreamCompleteEvent) DeepCopyInto(out *InvokeWithResponseStreamCompleteEvent) { + *out = *in + if in.ErrorCode != nil { + in, out := &in.ErrorCode, &out.ErrorCode + *out = new(string) + **out = **in + } + if in.ErrorDetails != nil { + in, out := &in.ErrorDetails, &out.ErrorDetails + *out = new(string) + **out = **in + } + if in.LogResult != nil { + in, out := &in.LogResult, &out.LogResult + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InvokeWithResponseStreamCompleteEvent. +func (in *InvokeWithResponseStreamCompleteEvent) DeepCopy() *InvokeWithResponseStreamCompleteEvent { + if in == nil { + return nil + } + out := new(InvokeWithResponseStreamCompleteEvent) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Layer) DeepCopyInto(out *Layer) { *out = *in @@ -1403,6 +1448,56 @@ func (in *PutFunctionConcurrencyOutput) DeepCopy() *PutFunctionConcurrencyOutput return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeVersionConfig) DeepCopyInto(out *RuntimeVersionConfig) { + *out = *in + if in.Error != nil { + in, out := &in.Error, &out.Error + *out = new(RuntimeVersionError) + (*in).DeepCopyInto(*out) + } + if in.RuntimeVersionARN != nil { + in, out := &in.RuntimeVersionARN, &out.RuntimeVersionARN + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeVersionConfig. +func (in *RuntimeVersionConfig) DeepCopy() *RuntimeVersionConfig { + if in == nil { + return nil + } + out := new(RuntimeVersionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeVersionError) DeepCopyInto(out *RuntimeVersionError) { + *out = *in + if in.ErrorCode != nil { + in, out := &in.ErrorCode, &out.ErrorCode + *out = new(string) + **out = **in + } + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeVersionError. +func (in *RuntimeVersionError) DeepCopy() *RuntimeVersionError { + if in == nil { + return nil + } + out := new(RuntimeVersionError) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SnapStart) DeepCopyInto(out *SnapStart) { *out = *in diff --git a/apis/lambda/v1beta1/zz_types.go b/apis/lambda/v1beta1/zz_types.go index 5d9e574369..8383937c11 100644 --- a/apis/lambda/v1beta1/zz_types.go +++ b/apis/lambda/v1beta1/zz_types.go @@ -171,6 +171,8 @@ type FunctionConfiguration struct { Role *string `json:"role,omitempty"` Runtime *string `json:"runtime,omitempty"` + // The ARN of the runtime and any errors that occurred.
+ RuntimeVersionConfig *RuntimeVersionConfig `json:"runtimeVersionConfig,omitempty"` SigningJobARN *string `json:"signingJobARN,omitempty"` @@ -214,6 +216,8 @@ type FunctionURLConfig_SDK struct { FunctionURL *string `json:"functionURL,omitempty"` + InvokeMode *string `json:"invokeMode,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` } @@ -242,6 +246,15 @@ type ImageConfigResponse struct { ImageConfig *ImageConfig `json:"imageConfig,omitempty"` } +// +kubebuilder:skipversion +type InvokeWithResponseStreamCompleteEvent struct { + ErrorCode *string `json:"errorCode,omitempty"` + + ErrorDetails *string `json:"errorDetails,omitempty"` + + LogResult *string `json:"logResult,omitempty"` +} + // +kubebuilder:skipversion type Layer struct { ARN *string `json:"arn,omitempty"` @@ -289,6 +302,22 @@ type PutFunctionConcurrencyOutput struct { ReservedConcurrentExecutions *int64 `json:"reservedConcurrentExecutions,omitempty"` } +// +kubebuilder:skipversion +type RuntimeVersionConfig struct { + // Any error returned when the runtime version information for the function + // could not be retrieved. + Error *RuntimeVersionError `json:"error,omitempty"` + + RuntimeVersionARN *string `json:"runtimeVersionARN,omitempty"` +} + +// +kubebuilder:skipversion +type RuntimeVersionError struct { + ErrorCode *string `json:"errorCode,omitempty"` + + Message *string `json:"message,omitempty"` +} + // +kubebuilder:skipversion type SnapStart struct { ApplyOn *string `json:"applyOn,omitempty"` diff --git a/apis/mq/v1alpha1/zz_broker.go b/apis/mq/v1alpha1/zz_broker.go index 613e254bbb..fb0dfe04c5 100644 --- a/apis/mq/v1alpha1/zz_broker.go +++ b/apis/mq/v1alpha1/zz_broker.go @@ -39,6 +39,10 @@ type BrokerParameters struct { CreatorRequestID *string `json:"creatorRequestID,omitempty"` + DataReplicationMode *string `json:"dataReplicationMode,omitempty"` + + DataReplicationPrimaryBrokerARN *string `json:"dataReplicationPrimaryBrokerARN,omitempty"` + // +kubebuilder:validation:Required DeploymentMode *string `json:"deploymentMode"` diff --git a/apis/mq/v1alpha1/zz_enums.go b/apis/mq/v1alpha1/zz_enums.go index 8eb2791d7b..68653de38e 100644 --- a/apis/mq/v1alpha1/zz_enums.go +++ b/apis/mq/v1alpha1/zz_enums.go @@ -34,6 +34,7 @@ const ( BrokerState_RUNNING BrokerState = "RUNNING" BrokerState_REBOOT_IN_PROGRESS BrokerState = "REBOOT_IN_PROGRESS" BrokerState_CRITICAL_ACTION_REQUIRED BrokerState = "CRITICAL_ACTION_REQUIRED" + BrokerState_REPLICA BrokerState = "REPLICA" ) type BrokerStorageType string @@ -51,6 +52,13 @@ const ( ChangeType_DELETE ChangeType = "DELETE" ) +type DataReplicationMode string + +const ( + DataReplicationMode_NONE DataReplicationMode = "NONE" + DataReplicationMode_CRDR DataReplicationMode = "CRDR" +) + type DayOfWeek string const ( @@ -78,6 +86,13 @@ const ( EngineType_RABBITMQ EngineType = "RABBITMQ" ) +type PromoteMode string + +const ( + PromoteMode_SWITCHOVER PromoteMode = "SWITCHOVER" + PromoteMode_FAILOVER PromoteMode = "FAILOVER" +) + type SanitizationWarningReason string const ( diff --git a/apis/mq/v1alpha1/zz_generated.deepcopy.go b/apis/mq/v1alpha1/zz_generated.deepcopy.go index ebb8e1c777..744a9d63cd 100644 --- a/apis/mq/v1alpha1/zz_generated.deepcopy.go +++ b/apis/mq/v1alpha1/zz_generated.deepcopy.go @@ -347,6 +347,16 @@ func (in *BrokerParameters) DeepCopyInto(out *BrokerParameters) { *out = new(string) **out = **in } + if in.DataReplicationMode != nil { + in, out := &in.DataReplicationMode, &out.DataReplicationMode + *out = new(string) + **out = **in + } + if 
in.DataReplicationPrimaryBrokerARN != nil { + in, out := &in.DataReplicationPrimaryBrokerARN, &out.DataReplicationPrimaryBrokerARN + *out = new(string) + **out = **in + } if in.DeploymentMode != nil { in, out := &in.DeploymentMode, &out.DeploymentMode *out = new(string) @@ -812,6 +822,56 @@ func (in *CustomUserParameters) DeepCopy() *CustomUserParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataReplicationCounterpart) DeepCopyInto(out *DataReplicationCounterpart) { + *out = *in + if in.BrokerID != nil { + in, out := &in.BrokerID, &out.BrokerID + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataReplicationCounterpart. +func (in *DataReplicationCounterpart) DeepCopy() *DataReplicationCounterpart { + if in == nil { + return nil + } + out := new(DataReplicationCounterpart) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataReplicationMetadataOutput) DeepCopyInto(out *DataReplicationMetadataOutput) { + *out = *in + if in.DataReplicationCounterpart != nil { + in, out := &in.DataReplicationCounterpart, &out.DataReplicationCounterpart + *out = new(DataReplicationCounterpart) + (*in).DeepCopyInto(*out) + } + if in.DataReplicationRole != nil { + in, out := &in.DataReplicationRole, &out.DataReplicationRole + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataReplicationMetadataOutput. +func (in *DataReplicationMetadataOutput) DeepCopy() *DataReplicationMetadataOutput { + if in == nil { + return nil + } + out := new(DataReplicationMetadataOutput) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EncryptionOptions) DeepCopyInto(out *EncryptionOptions) { *out = *in @@ -1212,6 +1272,11 @@ func (in *UserParameters) DeepCopyInto(out *UserParameters) { } } } + if in.ReplicationUser != nil { + in, out := &in.ReplicationUser, &out.ReplicationUser + *out = new(bool) + **out = **in + } in.CustomUserParameters.DeepCopyInto(&out.CustomUserParameters) } @@ -1344,6 +1409,11 @@ func (in *User_SDK) DeepCopyInto(out *User_SDK) { *out = new(string) **out = **in } + if in.ReplicationUser != nil { + in, out := &in.ReplicationUser, &out.ReplicationUser + *out = new(bool) + **out = **in + } if in.Username != nil { in, out := &in.Username, &out.Username *out = new(string) diff --git a/apis/mq/v1alpha1/zz_types.go b/apis/mq/v1alpha1/zz_types.go index 158470e906..03ae39e91a 100644 --- a/apis/mq/v1alpha1/zz_types.go +++ b/apis/mq/v1alpha1/zz_types.go @@ -128,17 +128,28 @@ type ConfigurationRevision struct { // +kubebuilder:skipversion type Configurations struct { // A list of information about the configuration. - // - // Does not apply to RabbitMQ brokers. Current *ConfigurationID `json:"current,omitempty"` History []*ConfigurationID `json:"history,omitempty"` // A list of information about the configuration. - // - // Does not apply to RabbitMQ brokers. 
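Together, dataReplicationMode, dataReplicationPrimaryBrokerARN, and the new replicationUser flag describe a cross-Region data replication (CRDR) pair: the replica names its primary broker, and a user is flagged for replication traffic. An illustrative sketch, assuming the provider's module path; the ARN and deployment mode are placeholders:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	mqv1alpha1 "github.com/crossplane-contrib/provider-aws/apis/mq/v1alpha1"
)

// replicaBroker sketches a CRDR replica that follows a primary broker in
// another Region.
func replicaBroker() mqv1alpha1.BrokerParameters {
	return mqv1alpha1.BrokerParameters{
		DeploymentMode: aws.String("ACTIVE_STANDBY_MULTI_AZ"),
		// CRDR pairs this broker with a primary; NONE disables replication.
		DataReplicationMode: aws.String(string(mqv1alpha1.DataReplicationMode_CRDR)),
		DataReplicationPrimaryBrokerARN: aws.String(
			"arn:aws:mq:us-east-1:111122223333:broker:primary:b-1234"),
	}
}

// replicationUser sketches a user flagged for replication.
func replicationUser() mqv1alpha1.UserParameters {
	return mqv1alpha1.UserParameters{ReplicationUser: aws.Bool(true)}
}

func main() {
	_ = replicaBroker()
	_ = replicationUser()
}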
Pending *ConfigurationID `json:"pending,omitempty"` } +// +kubebuilder:skipversion +type DataReplicationCounterpart struct { + BrokerID *string `json:"brokerID,omitempty"` + + Region *string `json:"region,omitempty"` +} + +// +kubebuilder:skipversion +type DataReplicationMetadataOutput struct { + // Specifies a broker in a data replication pair. + DataReplicationCounterpart *DataReplicationCounterpart `json:"dataReplicationCounterpart,omitempty"` + + DataReplicationRole *string `json:"dataReplicationRole,omitempty"` +} + // +kubebuilder:skipversion type EncryptionOptions struct { KMSKeyID *string `json:"kmsKeyID,omitempty"` @@ -258,6 +269,8 @@ type User_SDK struct { Password *string `json:"password,omitempty"` + ReplicationUser *bool `json:"replicationUser,omitempty"` + Username *string `json:"username,omitempty"` } diff --git a/apis/mq/v1alpha1/zz_user.go b/apis/mq/v1alpha1/zz_user.go index 76170460b6..45e936c710 100644 --- a/apis/mq/v1alpha1/zz_user.go +++ b/apis/mq/v1alpha1/zz_user.go @@ -32,7 +32,9 @@ type UserParameters struct { ConsoleAccess *bool `json:"consoleAccess,omitempty"` - Groups []*string `json:"groups,omitempty"` + Groups []*string `json:"groups,omitempty"` + + ReplicationUser *bool `json:"replicationUser,omitempty"` CustomUserParameters `json:",inline"` } diff --git a/apis/mwaa/v1alpha1/zz_enums.go b/apis/mwaa/v1alpha1/zz_enums.go index 876090416e..1064ab3e58 100644 --- a/apis/mwaa/v1alpha1/zz_enums.go +++ b/apis/mwaa/v1alpha1/zz_enums.go @@ -21,14 +21,16 @@ package v1alpha1 type EnvironmentStatus_SDK string const ( - EnvironmentStatus_SDK_CREATING EnvironmentStatus_SDK = "CREATING" - EnvironmentStatus_SDK_CREATE_FAILED EnvironmentStatus_SDK = "CREATE_FAILED" - EnvironmentStatus_SDK_AVAILABLE EnvironmentStatus_SDK = "AVAILABLE" - EnvironmentStatus_SDK_UPDATING EnvironmentStatus_SDK = "UPDATING" - EnvironmentStatus_SDK_DELETING EnvironmentStatus_SDK = "DELETING" - EnvironmentStatus_SDK_DELETED EnvironmentStatus_SDK = "DELETED" - EnvironmentStatus_SDK_UNAVAILABLE EnvironmentStatus_SDK = "UNAVAILABLE" - EnvironmentStatus_SDK_UPDATE_FAILED EnvironmentStatus_SDK = "UPDATE_FAILED" + EnvironmentStatus_SDK_CREATING EnvironmentStatus_SDK = "CREATING" + EnvironmentStatus_SDK_CREATE_FAILED EnvironmentStatus_SDK = "CREATE_FAILED" + EnvironmentStatus_SDK_AVAILABLE EnvironmentStatus_SDK = "AVAILABLE" + EnvironmentStatus_SDK_UPDATING EnvironmentStatus_SDK = "UPDATING" + EnvironmentStatus_SDK_DELETING EnvironmentStatus_SDK = "DELETING" + EnvironmentStatus_SDK_DELETED EnvironmentStatus_SDK = "DELETED" + EnvironmentStatus_SDK_UNAVAILABLE EnvironmentStatus_SDK = "UNAVAILABLE" + EnvironmentStatus_SDK_UPDATE_FAILED EnvironmentStatus_SDK = "UPDATE_FAILED" + EnvironmentStatus_SDK_ROLLING_BACK EnvironmentStatus_SDK = "ROLLING_BACK" + EnvironmentStatus_SDK_CREATING_SNAPSHOT EnvironmentStatus_SDK = "CREATING_SNAPSHOT" ) type LoggingLevel string diff --git a/apis/mwaa/v1alpha1/zz_environment.go b/apis/mwaa/v1alpha1/zz_environment.go index bba38175fb..20a1d31bdf 100644 --- a/apis/mwaa/v1alpha1/zz_environment.go +++ b/apis/mwaa/v1alpha1/zz_environment.go @@ -30,20 +30,20 @@ type EnvironmentParameters struct { // +kubebuilder:validation:Required Region string `json:"region"` // A list of key-value pairs containing the Apache Airflow configuration options - // you want to attach to your environment. To learn more, see Apache Airflow - // configuration options (https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-env-variables.html). + // you want to attach to your environment. 
For more information, see Apache + // Airflow configuration options (https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-env-variables.html). AirflowConfigurationOptions map[string]*string `json:"airflowConfigurationOptions,omitempty"` // The Apache Airflow version for your environment. If no value is specified, - // it defaults to the latest version. Valid values: 1.10.12, 2.0.2, 2.2.2, and - // 2.4.3. For more information, see Apache Airflow versions on Amazon Managed + // it defaults to the latest version. Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, + // and 2.5.1. For more information, see Apache Airflow versions on Amazon Managed // Workflows for Apache Airflow (MWAA) (https://docs.aws.amazon.com/mwaa/latest/userguide/airflow-versions.html). AirflowVersion *string `json:"airflowVersion,omitempty"` // The relative path to the DAGs folder on your Amazon S3 bucket. For example, - // dags. To learn more, see Adding or updating DAGs (https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-folder.html). + // dags. For more information, see Adding or updating DAGs (https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-folder.html). // +kubebuilder:validation:Required DagS3Path *string `json:"dagS3Path"` // The environment class type. Valid values: mw1.small, mw1.medium, mw1.large. - // To learn more, see Amazon MWAA environment class (https://docs.aws.amazon.com/mwaa/latest/userguide/environment-class.html). + // For more information, see Amazon MWAA environment class (https://docs.aws.amazon.com/mwaa/latest/userguide/environment-class.html). EnvironmentClass *string `json:"environmentClass,omitempty"` // Defines the Apache Airflow logs to send to CloudWatch Logs. LoggingConfiguration *LoggingConfigurationInput `json:"loggingConfiguration,omitempty"` @@ -60,21 +60,21 @@ type EnvironmentParameters struct { // in the queue, MWAA disposes of the extra workers leaving the worker count // you specify in the MinWorkers field. For example, 2. MinWorkers *int64 `json:"minWorkers,omitempty"` - // The version of the plugins.zip file on your Amazon S3 bucket. A version must - // be specified each time a plugins.zip file is updated. To learn more, see - // How S3 Versioning works (https://docs.aws.amazon.com/AmazonS3/latest/userguide/versioning-workflows.html). + // The version of the plugins.zip file on your Amazon S3 bucket. You must specify + // a version each time a plugins.zip file is updated. For more information, + // see How S3 Versioning works (https://docs.aws.amazon.com/AmazonS3/latest/userguide/versioning-workflows.html). PluginsS3ObjectVersion *string `json:"pluginsS3ObjectVersion,omitempty"` // The relative path to the plugins.zip file on your Amazon S3 bucket. For example, - // plugins.zip. If specified, then the plugins.zip version is required. To learn - // more, see Installing custom plugins (https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import-plugins.html). + // plugins.zip. If specified, then the plugins.zip version is required. For + // more information, see Installing custom plugins (https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import-plugins.html). PluginsS3Path *string `json:"pluginsS3Path,omitempty"` - // The version of the requirements.txt file on your Amazon S3 bucket. A version - // must be specified each time a requirements.txt file is updated. To learn - // more, see How S3 Versioning works (https://docs.aws.amazon.com/AmazonS3/latest/userguide/versioning-workflows.html). 
+ // The version of the requirements.txt file on your Amazon S3 bucket. You must + // specify a version each time a requirements.txt file is updated. For more + // information, see How S3 Versioning works (https://docs.aws.amazon.com/AmazonS3/latest/userguide/versioning-workflows.html). RequirementsS3ObjectVersion *string `json:"requirementsS3ObjectVersion,omitempty"` // The relative path to the requirements.txt file on your Amazon S3 bucket. - // For example, requirements.txt. If specified, then a file version is required. - // To learn more, see Installing Python dependencies (https://docs.aws.amazon.com/mwaa/latest/userguide/working-dags-dependencies.html). + // For example, requirements.txt. If specified, then a version is required. + // For more information, see Installing Python dependencies (https://docs.aws.amazon.com/mwaa/latest/userguide/working-dags-dependencies.html). RequirementsS3Path *string `json:"requirementsS3Path,omitempty"` // The number of Apache Airflow schedulers to run in your environment. Valid // values: @@ -83,12 +83,31 @@ type EnvironmentParameters struct { // // * v1 - Accepts 1. Schedulers *int64 `json:"schedulers,omitempty"` + // The version of the startup shell script in your Amazon S3 bucket. You must + // specify the version ID (https://docs.aws.amazon.com/AmazonS3/latest/userguide/versioning-workflows.html) + // that Amazon S3 assigns to the file every time you update the script. + // + // Version IDs are Unicode, UTF-8 encoded, URL-ready, opaque strings that are + // no more than 1,024 bytes long. The following is an example: + // + // 3sL4kqtJlcpXroDTDmJ+rmSpXd3dIbrHY+MTRCxf3vjVBH40Nr8X8gdRQBpUMLUo + // + // For more information, see Using a startup script (https://docs.aws.amazon.com/mwaa/latest/userguide/using-startup-script.html). + StartupScriptS3ObjectVersion *string `json:"startupScriptS3ObjectVersion,omitempty"` + // The relative path to the startup shell script in your Amazon S3 bucket. For + // example, s3://mwaa-environment/startup.sh. + // + // Amazon MWAA runs the script as your environment starts, and before running + // the Apache Airflow process. You can use this script to install dependencies, + // modify Apache Airflow configuration options, and set environment variables. + // For more information, see Using a startup script (https://docs.aws.amazon.com/mwaa/latest/userguide/using-startup-script.html). + StartupScriptS3Path *string `json:"startupScriptS3Path,omitempty"` // The key-value tag pairs you want to associate to your environment. For example, - // "Environment": "Staging". To learn more, see Tagging Amazon Web Services + // "Environment": "Staging". For more information, see Tagging Amazon Web Services // resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). Tags map[string]*string `json:"tags,omitempty"` - // The Apache Airflow Web server access mode. To learn more, see Apache Airflow - // access modes (https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-networking.html). + // The Apache Airflow Web server access mode. For more information, see Apache + // Airflow access modes (https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-networking.html). 
WebserverAccessMode *string `json:"webserverAccessMode,omitempty"` // The day and time of the week in Coordinated Universal Time (UTC) 24-hour // standard time to start weekly maintenance updates of your environment in @@ -114,6 +133,13 @@ type EnvironmentObservation struct { // // * CREATING - Indicates the request to create the environment is in progress. // + // * CREATING_SNAPSHOT - Indicates the request to update environment details, + // or upgrade the environment version, is in progress and Amazon MWAA is + // creating a storage volume snapshot of the Amazon RDS database cluster + // associated with the environment. A database snapshot is a backup created + // at a specific point in time. Amazon MWAA uses snapshots to recover environment + // metadata if the process to update or upgrade an environment fails. + // // * CREATE_FAILED - Indicates the request to create the environment failed, // and the environment could not be created. // @@ -122,6 +148,10 @@ type EnvironmentObservation struct { // // * UPDATING - Indicates the request to update the environment is in progress. // + // * ROLLING_BACK - Indicates the request to update environment details, + // or upgrade the environment version, failed and Amazon MWAA is restoring + // the environment using the latest storage volume snapshot. + // // * DELETING - Indicates the request to delete the environment is in progress. // // * DELETED - Indicates the request to delete the environment is complete, @@ -134,7 +164,8 @@ type EnvironmentObservation struct { // and the environment has rolled back successfully and is ready to use. // // We recommend reviewing our troubleshooting guide for a list of common errors - // and their solutions. To learn more, see Amazon MWAA troubleshooting (https://docs.aws.amazon.com/mwaa/latest/userguide/troubleshooting.html). + // and their solutions. For more information, see Amazon MWAA troubleshooting + // (https://docs.aws.amazon.com/mwaa/latest/userguide/troubleshooting.html). Status *string `json:"status,omitempty"` } diff --git a/apis/mwaa/v1alpha1/zz_generated.deepcopy.go b/apis/mwaa/v1alpha1/zz_generated.deepcopy.go index 1f81a3f07e..34105425a4 100644 --- a/apis/mwaa/v1alpha1/zz_generated.deepcopy.go +++ b/apis/mwaa/v1alpha1/zz_generated.deepcopy.go @@ -136,6 +136,31 @@ func (in *CustomNetworkConfiguration) DeepCopy() *CustomNetworkConfiguration { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Dimension) DeepCopyInto(out *Dimension) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Dimension. +func (in *Dimension) DeepCopy() *Dimension { + if in == nil { + return nil + } + out := new(Dimension) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
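Startup scripts must be pinned to an S3 object version, and environment updates can now pass through the CREATING_SNAPSHOT and ROLLING_BACK phases described above. A sketch of both, assuming the provider's module path; the version ID is the placeholder from the field documentation:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	mwaav1alpha1 "github.com/crossplane-contrib/provider-aws/apis/mwaa/v1alpha1"
)

// exampleEnvironment pins the new startup script fields.
func exampleEnvironment() mwaav1alpha1.EnvironmentParameters {
	return mwaav1alpha1.EnvironmentParameters{
		Region:    "eu-central-1",
		DagS3Path: aws.String("dags"),
		// Relative path to the script MWAA runs before starting Airflow.
		StartupScriptS3Path: aws.String("startup.sh"),
		// Exact S3 object version of the script (placeholder ID).
		StartupScriptS3ObjectVersion: aws.String("3sL4kqtJlcpXroDTDmJ+rmSpXd3dIbrHY+MTRCxf3vjVBH40Nr8X8gdRQBpUMLUo"),
	}
}

// isTransient reports whether a status value describes an in-flight change,
// including the new snapshot and rollback phases.
func isTransient(status string) bool {
	switch mwaav1alpha1.EnvironmentStatus_SDK(status) {
	case mwaav1alpha1.EnvironmentStatus_SDK_CREATING,
		mwaav1alpha1.EnvironmentStatus_SDK_UPDATING,
		mwaav1alpha1.EnvironmentStatus_SDK_CREATING_SNAPSHOT,
		mwaav1alpha1.EnvironmentStatus_SDK_ROLLING_BACK,
		mwaav1alpha1.EnvironmentStatus_SDK_DELETING:
		return true
	}
	return false
}

func main() {
	_ = exampleEnvironment()
	fmt.Println(isTransient("CREATING_SNAPSHOT"))
}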
func (in *Environment) DeepCopyInto(out *Environment) { *out = *in @@ -298,6 +323,16 @@ func (in *EnvironmentParameters) DeepCopyInto(out *EnvironmentParameters) { *out = new(int64) **out = **in } + if in.StartupScriptS3ObjectVersion != nil { + in, out := &in.StartupScriptS3ObjectVersion, &out.StartupScriptS3ObjectVersion + *out = new(string) + **out = **in + } + if in.StartupScriptS3Path != nil { + in, out := &in.StartupScriptS3Path, &out.StartupScriptS3Path + *out = new(string) + **out = **in + } if in.Tags != nil { in, out := &in.Tags, &out.Tags *out = make(map[string]*string, len(*in)) @@ -482,6 +517,16 @@ func (in *Environment_SDK) DeepCopyInto(out *Environment_SDK) { *out = new(string) **out = **in } + if in.StartupScriptS3ObjectVersion != nil { + in, out := &in.StartupScriptS3ObjectVersion, &out.StartupScriptS3ObjectVersion + *out = new(string) + **out = **in + } + if in.StartupScriptS3Path != nil { + in, out := &in.StartupScriptS3Path, &out.StartupScriptS3Path + *out = new(string) + **out = **in + } if in.Status != nil { in, out := &in.Status, &out.Status *out = new(string) @@ -639,6 +684,26 @@ func (in *LoggingConfigurationInput) DeepCopy() *LoggingConfigurationInput { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricDatum) DeepCopyInto(out *MetricDatum) { + *out = *in + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricDatum. +func (in *MetricDatum) DeepCopy() *MetricDatum { + if in == nil { + return nil + } + out := new(MetricDatum) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ModuleLoggingConfiguration) DeepCopyInto(out *ModuleLoggingConfiguration) { *out = *in diff --git a/apis/mwaa/v1alpha1/zz_types.go b/apis/mwaa/v1alpha1/zz_types.go index 85edfd963b..ef616ffb22 100644 --- a/apis/mwaa/v1alpha1/zz_types.go +++ b/apis/mwaa/v1alpha1/zz_types.go @@ -27,6 +27,13 @@ var ( _ = &metav1.Time{} ) +// +kubebuilder:skipversion +type Dimension struct { + Name *string `json:"name,omitempty"` + + Value *string `json:"value,omitempty"` +} + // +kubebuilder:skipversion type Environment_SDK struct { AirflowConfigurationOptions map[string]*string `json:"airflowConfigurationOptions,omitempty"` @@ -70,6 +77,10 @@ type Environment_SDK struct { SourceBucketARN *string `json:"sourceBucketARN,omitempty"` + StartupScriptS3ObjectVersion *string `json:"startupScriptS3ObjectVersion,omitempty"` + + StartupScriptS3Path *string `json:"startupScriptS3Path,omitempty"` + Status *string `json:"status,omitempty"` Tags map[string]*string `json:"tags,omitempty"` @@ -124,6 +135,11 @@ type LoggingConfigurationInput struct { WorkerLogs *ModuleLoggingConfigurationInput `json:"workerLogs,omitempty"` } +// +kubebuilder:skipversion +type MetricDatum struct { + MetricName *string `json:"metricName,omitempty"` +} + // +kubebuilder:skipversion type ModuleLoggingConfiguration struct { CloudWatchLogGroupARN *string `json:"cloudWatchLogGroupARN,omitempty"` diff --git a/apis/neptune/v1alpha1/zz_db_cluster.go b/apis/neptune/v1alpha1/zz_db_cluster.go index 539a401201..f069880654 100644 --- a/apis/neptune/v1alpha1/zz_db_cluster.go +++ b/apis/neptune/v1alpha1/zz_db_cluster.go @@ -239,6 +239,9 @@ type DBClusterObservation struct { LatestRestorableTime *metav1.Time `json:"latestRestorableTime,omitempty"` // Specifies whether the DB cluster has instances in multiple Availability Zones. MultiAZ *bool `json:"multiAZ,omitempty"` + // This data type is used as a response element in the ModifyDBCluster operation + // and contains changes that will be applied during the next maintenance window. + PendingModifiedValues *ClusterPendingModifiedValues `json:"pendingModifiedValues,omitempty"` // Specifies the progress of the operation as a percentage. PercentProgress *string `json:"percentProgress,omitempty"` // Contains one or more identifiers of the Read Replicas associated with this diff --git a/apis/neptune/v1alpha1/zz_generated.deepcopy.go b/apis/neptune/v1alpha1/zz_generated.deepcopy.go index 674db01ab2..3f79f338cd 100644 --- a/apis/neptune/v1alpha1/zz_generated.deepcopy.go +++ b/apis/neptune/v1alpha1/zz_generated.deepcopy.go @@ -107,6 +107,56 @@ func (in *CloudwatchLogsExportConfiguration) DeepCopy() *CloudwatchLogsExportCon return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterPendingModifiedValues) DeepCopyInto(out *ClusterPendingModifiedValues) { + *out = *in + if in.AllocatedStorage != nil { + in, out := &in.AllocatedStorage, &out.AllocatedStorage + *out = new(int64) + **out = **in + } + if in.BackupRetentionPeriod != nil { + in, out := &in.BackupRetentionPeriod, &out.BackupRetentionPeriod + *out = new(int64) + **out = **in + } + if in.DBClusterIdentifier != nil { + in, out := &in.DBClusterIdentifier, &out.DBClusterIdentifier + *out = new(string) + **out = **in + } + if in.EngineVersion != nil { + in, out := &in.EngineVersion, &out.EngineVersion + *out = new(string) + **out = **in + } + if in.IAMDatabaseAuthenticationEnabled != nil { + in, out := &in.IAMDatabaseAuthenticationEnabled, &out.IAMDatabaseAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.IOPS != nil { + in, out := &in.IOPS, &out.IOPS + *out = new(int64) + **out = **in + } + if in.PendingCloudwatchLogsExports != nil { + in, out := &in.PendingCloudwatchLogsExports, &out.PendingCloudwatchLogsExports + *out = new(PendingCloudwatchLogsExports) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPendingModifiedValues. +func (in *ClusterPendingModifiedValues) DeepCopy() *ClusterPendingModifiedValues { + if in == nil { + return nil + } + out := new(ClusterPendingModifiedValues) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CustomDBClusterParameters) DeepCopyInto(out *CustomDBClusterParameters) { *out = *in @@ -404,6 +454,11 @@ func (in *DBClusterObservation) DeepCopyInto(out *DBClusterObservation) { *out = new(bool) **out = **in } + if in.PendingModifiedValues != nil { + in, out := &in.PendingModifiedValues, &out.PendingModifiedValues + *out = new(ClusterPendingModifiedValues) + (*in).DeepCopyInto(*out) + } if in.PercentProgress != nil { in, out := &in.PercentProgress, &out.PercentProgress *out = new(string) @@ -1051,6 +1106,11 @@ func (in *DBCluster_SDK) DeepCopyInto(out *DBCluster_SDK) { *out = new(string) **out = **in } + if in.GlobalClusterIdentifier != nil { + in, out := &in.GlobalClusterIdentifier, &out.GlobalClusterIdentifier + *out = new(string) + **out = **in + } if in.HostedZoneID != nil { in, out := &in.HostedZoneID, &out.HostedZoneID *out = new(string) @@ -1080,6 +1140,11 @@ func (in *DBCluster_SDK) DeepCopyInto(out *DBCluster_SDK) { *out = new(bool) **out = **in } + if in.PendingModifiedValues != nil { + in, out := &in.PendingModifiedValues, &out.PendingModifiedValues + *out = new(ClusterPendingModifiedValues) + (*in).DeepCopyInto(*out) + } if in.PercentProgress != nil { in, out := &in.PercentProgress, &out.PercentProgress *out = new(string) @@ -2252,6 +2317,11 @@ func (in *PendingModifiedValues) DeepCopyInto(out *PendingModifiedValues) { *out = new(bool) **out = **in } + if in.PendingCloudwatchLogsExports != nil { + in, out := &in.PendingCloudwatchLogsExports, &out.PendingCloudwatchLogsExports + *out = new(PendingCloudwatchLogsExports) + (*in).DeepCopyInto(*out) + } if in.Port != nil { in, out := &in.Port, &out.Port *out = new(int64) diff --git a/apis/neptune/v1alpha1/zz_types.go b/apis/neptune/v1alpha1/zz_types.go index 9816e82f88..9d6b147cdf 100644 --- a/apis/neptune/v1alpha1/zz_types.go +++ b/apis/neptune/v1alpha1/zz_types.go @@ -46,6 +46,24 @@ type CloudwatchLogsExportConfiguration struct { EnableLogTypes []*string `json:"enableLogTypes,omitempty"` } +// 
+kubebuilder:skipversion +type ClusterPendingModifiedValues struct { + AllocatedStorage *int64 `json:"allocatedStorage,omitempty"` + + BackupRetentionPeriod *int64 `json:"backupRetentionPeriod,omitempty"` + + DBClusterIdentifier *string `json:"dbClusterIdentifier,omitempty"` + + EngineVersion *string `json:"engineVersion,omitempty"` + + IAMDatabaseAuthenticationEnabled *bool `json:"iamDatabaseAuthenticationEnabled,omitempty"` + + IOPS *int64 `json:"iops,omitempty"` + // A list of the log types whose configuration is still pending. In other words, + // these log types are in the process of being activated or deactivated. + PendingCloudwatchLogsExports *PendingCloudwatchLogsExports `json:"pendingCloudwatchLogsExports,omitempty"` +} + // +kubebuilder:skipversion type DBClusterEndpoint struct { CustomEndpointType *string `json:"customEndpointType,omitempty"` @@ -200,6 +218,8 @@ type DBCluster_SDK struct { EngineVersion *string `json:"engineVersion,omitempty"` + GlobalClusterIdentifier *string `json:"globalClusterIdentifier,omitempty"` + HostedZoneID *string `json:"hostedZoneID,omitempty"` IAMDatabaseAuthenticationEnabled *bool `json:"iamDatabaseAuthenticationEnabled,omitempty"` @@ -211,6 +231,9 @@ type DBCluster_SDK struct { MasterUsername *string `json:"masterUsername,omitempty"` MultiAZ *bool `json:"multiAZ,omitempty"` + // This data type is used as a response element in the ModifyDBCluster operation + // and contains changes that will be applied during the next maintenance window. + PendingModifiedValues *ClusterPendingModifiedValues `json:"pendingModifiedValues,omitempty"` PercentProgress *string `json:"percentProgress,omitempty"` @@ -600,6 +623,9 @@ type PendingModifiedValues struct { MasterUserPassword *string `json:"masterUserPassword,omitempty"` MultiAZ *bool `json:"multiAZ,omitempty"` + // A list of the log types whose configuration is still pending. In other words, + // these log types are in the process of being activated or deactivated. + PendingCloudwatchLogsExports *PendingCloudwatchLogsExports `json:"pendingCloudwatchLogsExports,omitempty"` Port *int64 `json:"port,omitempty"` diff --git a/apis/opensearchservice/v1alpha1/zz_domain.go b/apis/opensearchservice/v1alpha1/zz_domain.go index ee9a8da77b..0bb5976ff1 100644 --- a/apis/opensearchservice/v1alpha1/zz_domain.go +++ b/apis/opensearchservice/v1alpha1/zz_domain.go @@ -78,10 +78,18 @@ type DomainParameters struct { // Elasticsearch_7.9. For more information, see Creating and managing Amazon // OpenSearch Service domains (https://docs.aws.amazon.com/opensearch-service/latest/developerguide/createupdatedomains.html#createdomains). EngineVersion *string `json:"engineVersion,omitempty"` - // Key-value pairs to configure slow log publishing. + // Key-value pairs to configure log publishing. LogPublishingOptions map[string]*LogPublishingOption `json:"logPublishingOptions,omitempty"` // Enables node-to-node encryption. NodeToNodeEncryptionOptions *NodeToNodeEncryptionOptions `json:"nodeToNodeEncryptionOptions,omitempty"` + // Specifies a daily 10-hour time block during which OpenSearch Service can + // perform configuration changes on the domain, including service software updates + // and Auto-Tune enhancements that require a blue/green deployment. If no options + // are specified, the default start time of 10:00 P.M. local time (for the Region + // that the domain is created in) is used. + OffPeakWindowOptions *OffPeakWindowOptions `json:"offPeakWindowOptions,omitempty"` + // Software update options for the domain. 
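An off-peak window is a daily 10-hour block that starts at the configured local time (10:00 P.M. if unset), and softwareUpdateOptions toggles automatic service software updates. A sketch of wiring both into DomainParameters, module path assumed:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	osv1alpha1 "github.com/crossplane-contrib/provider-aws/apis/opensearchservice/v1alpha1"
)

func exampleDomain() osv1alpha1.DomainParameters {
	return osv1alpha1.DomainParameters{
		OffPeakWindowOptions: &osv1alpha1.OffPeakWindowOptions{
			Enabled: aws.Bool(true),
			OffPeakWindow: &osv1alpha1.OffPeakWindow{
				// Start the daily 10-hour window at 23:30 local time.
				WindowStartTime: &osv1alpha1.WindowStartTime{
					Hours:   aws.Int64(23),
					Minutes: aws.Int64(30),
				},
			},
		},
		SoftwareUpdateOptions: &osv1alpha1.SoftwareUpdateOptions{
			// Let the service apply software updates automatically.
			AutoSoftwareUpdateEnabled: aws.Bool(true),
		},
	}
}

func main() { _ = exampleDomain() }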
+ SoftwareUpdateOptions *SoftwareUpdateOptions `json:"softwareUpdateOptions,omitempty"` // List of tags to add to the domain upon creation. Tags []*Tag `json:"tags,omitempty"` CustomDomainParameters `json:",inline"` diff --git a/apis/opensearchservice/v1alpha1/zz_enums.go b/apis/opensearchservice/v1alpha1/zz_enums.go index b1165e2cfe..3b78a9aca9 100644 --- a/apis/opensearchservice/v1alpha1/zz_enums.go +++ b/apis/opensearchservice/v1alpha1/zz_enums.go @@ -18,6 +18,33 @@ limitations under the License. package v1alpha1 +type ActionSeverity string + +const ( + ActionSeverity_HIGH ActionSeverity = "HIGH" + ActionSeverity_MEDIUM ActionSeverity = "MEDIUM" + ActionSeverity_LOW ActionSeverity = "LOW" +) + +type ActionStatus string + +const ( + ActionStatus_PENDING_UPDATE ActionStatus = "PENDING_UPDATE" + ActionStatus_IN_PROGRESS ActionStatus = "IN_PROGRESS" + ActionStatus_FAILED ActionStatus = "FAILED" + ActionStatus_COMPLETED ActionStatus = "COMPLETED" + ActionStatus_NOT_ELIGIBLE ActionStatus = "NOT_ELIGIBLE" + ActionStatus_ELIGIBLE ActionStatus = "ELIGIBLE" +) + +type ActionType string + +const ( + ActionType_SERVICE_SOFTWARE_UPDATE ActionType = "SERVICE_SOFTWARE_UPDATE" + ActionType_JVM_HEAP_SIZE_TUNING ActionType = "JVM_HEAP_SIZE_TUNING" + ActionType_JVM_YOUNG_GEN_TUNING ActionType = "JVM_YOUNG_GEN_TUNING" +) + type AutoTuneDesiredState string const ( @@ -45,6 +72,13 @@ const ( AutoTuneType_SCHEDULED_ACTION AutoTuneType = "SCHEDULED_ACTION" ) +type ConnectionMode string + +const ( + ConnectionMode_DIRECT ConnectionMode = "DIRECT" + ConnectionMode_VPC_ENDPOINT ConnectionMode = "VPC_ENDPOINT" +) + type DeploymentStatus string const ( @@ -63,6 +97,15 @@ const ( DescribePackagesFilterName_PackageStatus DescribePackagesFilterName = "PackageStatus" ) +type DomainHealth string + +const ( + DomainHealth_Red DomainHealth = "Red" + DomainHealth_Yellow DomainHealth = "Yellow" + DomainHealth_Green DomainHealth = "Green" + DomainHealth_NotAvailable DomainHealth = "NotAvailable" +) + type DomainPackageStatus string const ( @@ -73,6 +116,21 @@ const ( DomainPackageStatus_DISSOCIATION_FAILED DomainPackageStatus = "DISSOCIATION_FAILED" ) +type DomainState string + +const ( + DomainState_Active DomainState = "Active" + DomainState_Processing DomainState = "Processing" + DomainState_NotAvailable DomainState = "NotAvailable" +) + +type DryRunMode string + +const ( + DryRunMode_Basic DryRunMode = "Basic" + DryRunMode_Verbose DryRunMode = "Verbose" +) + type EngineType string const ( @@ -102,6 +160,29 @@ const ( LogType_AUDIT_LOGS LogType = "AUDIT_LOGS" ) +type MasterNodeStatus string + +const ( + MasterNodeStatus_Available MasterNodeStatus = "Available" + MasterNodeStatus_UnAvailable MasterNodeStatus = "UnAvailable" +) + +type NodeStatus string + +const ( + NodeStatus_Active NodeStatus = "Active" + NodeStatus_StandBy NodeStatus = "StandBy" + NodeStatus_NotAvailable NodeStatus = "NotAvailable" +) + +type NodeType string + +const ( + NodeType_Data NodeType = "Data" + NodeType_Ultrawarm NodeType = "Ultrawarm" + NodeType_Master NodeType = "Master" +) + type OpenSearchPartitionInstanceType string const ( @@ -283,6 +364,14 @@ const ( RollbackOnDisable_DEFAULT_ROLLBACK RollbackOnDisable = "DEFAULT_ROLLBACK" ) +type ScheduleAt string + +const ( + ScheduleAt_NOW ScheduleAt = "NOW" + ScheduleAt_TIMESTAMP ScheduleAt = "TIMESTAMP" + ScheduleAt_OFF_PEAK_WINDOW ScheduleAt = "OFF_PEAK_WINDOW" +) + type ScheduledAutoTuneActionType string const ( @@ -298,6 +387,20 @@ const ( ScheduledAutoTuneSeverityType_HIGH 
ScheduledAutoTuneSeverityType = "HIGH" ) +type ScheduledBy string + +const ( + ScheduledBy_CUSTOMER ScheduledBy = "CUSTOMER" + ScheduledBy_SYSTEM ScheduledBy = "SYSTEM" +) + +type SkipUnavailableStatus string + +const ( + SkipUnavailableStatus_ENABLED SkipUnavailableStatus = "ENABLED" + SkipUnavailableStatus_DISABLED SkipUnavailableStatus = "DISABLED" +) + type TLSSecurityPolicy string const ( @@ -355,3 +458,11 @@ const ( VolumeType_io1 VolumeType = "io1" VolumeType_gp3 VolumeType = "gp3" ) + +type ZoneStatus string + +const ( + ZoneStatus_Active ZoneStatus = "Active" + ZoneStatus_StandBy ZoneStatus = "StandBy" + ZoneStatus_NotAvailable ZoneStatus = "NotAvailable" +) diff --git a/apis/opensearchservice/v1alpha1/zz_generated.deepcopy.go b/apis/opensearchservice/v1alpha1/zz_generated.deepcopy.go index 4055175182..08ef9df6e6 100644 --- a/apis/opensearchservice/v1alpha1/zz_generated.deepcopy.go +++ b/apis/opensearchservice/v1alpha1/zz_generated.deepcopy.go @@ -263,6 +263,11 @@ func (in *AutoTuneOptions) DeepCopyInto(out *AutoTuneOptions) { } } } + if in.UseOffPeakWindow != nil { + in, out := &in.UseOffPeakWindow, &out.UseOffPeakWindow + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoTuneOptions. @@ -294,6 +299,11 @@ func (in *AutoTuneOptionsInput) DeepCopyInto(out *AutoTuneOptionsInput) { } } } + if in.UseOffPeakWindow != nil { + in, out := &in.UseOffPeakWindow, &out.UseOffPeakWindow + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoTuneOptionsInput. @@ -319,6 +329,11 @@ func (in *AutoTuneOptionsOutput) DeepCopyInto(out *AutoTuneOptionsOutput) { *out = new(string) **out = **in } + if in.UseOffPeakWindow != nil { + in, out := &in.UseOffPeakWindow, &out.UseOffPeakWindow + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoTuneOptionsOutput. @@ -461,6 +476,11 @@ func (in *ClusterConfig) DeepCopyInto(out *ClusterConfig) { *out = new(string) **out = **in } + if in.MultiAZWithStandbyEnabled != nil { + in, out := &in.MultiAZWithStandbyEnabled, &out.MultiAZWithStandbyEnabled + *out = new(bool) + **out = **in + } if in.WarmCount != nil { in, out := &in.WarmCount, &out.WarmCount *out = new(int64) @@ -898,6 +918,31 @@ func (in *DomainList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainNodesStatus) DeepCopyInto(out *DomainNodesStatus) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.StorageVolumeType != nil { + in, out := &in.StorageVolumeType, &out.StorageVolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainNodesStatus. +func (in *DomainNodesStatus) DeepCopy() *DomainNodesStatus { + if in == nil { + return nil + } + out := new(DomainNodesStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DomainObservation) DeepCopyInto(out *DomainObservation) { *out = *in @@ -1146,6 +1191,16 @@ func (in *DomainParameters) DeepCopyInto(out *DomainParameters) { *out = new(NodeToNodeEncryptionOptions) (*in).DeepCopyInto(*out) } + if in.OffPeakWindowOptions != nil { + in, out := &in.OffPeakWindowOptions, &out.OffPeakWindowOptions + *out = new(OffPeakWindowOptions) + (*in).DeepCopyInto(*out) + } + if in.SoftwareUpdateOptions != nil { + in, out := &in.SoftwareUpdateOptions, &out.SoftwareUpdateOptions + *out = new(SoftwareUpdateOptions) + (*in).DeepCopyInto(*out) + } if in.Tags != nil { in, out := &in.Tags, &out.Tags *out = make([]*Tag, len(*in)) @@ -1337,6 +1392,11 @@ func (in *DomainStatus_SDK) DeepCopyInto(out *DomainStatus_SDK) { *out = new(NodeToNodeEncryptionOptions) (*in).DeepCopyInto(*out) } + if in.OffPeakWindowOptions != nil { + in, out := &in.OffPeakWindowOptions, &out.OffPeakWindowOptions + *out = new(OffPeakWindowOptions) + (*in).DeepCopyInto(*out) + } if in.Processing != nil { in, out := &in.Processing, &out.Processing *out = new(bool) @@ -1352,6 +1412,11 @@ func (in *DomainStatus_SDK) DeepCopyInto(out *DomainStatus_SDK) { *out = new(SnapshotOptions) (*in).DeepCopyInto(*out) } + if in.SoftwareUpdateOptions != nil { + in, out := &in.SoftwareUpdateOptions, &out.SoftwareUpdateOptions + *out = new(SoftwareUpdateOptions) + (*in).DeepCopyInto(*out) + } if in.UpgradeProcessing != nil { in, out := &in.UpgradeProcessing, &out.UpgradeProcessing *out = new(bool) @@ -1374,6 +1439,41 @@ func (in *DomainStatus_SDK) DeepCopy() *DomainStatus_SDK { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DryRunProgressStatus) DeepCopyInto(out *DryRunProgressStatus) { + *out = *in + if in.CreationDate != nil { + in, out := &in.CreationDate, &out.CreationDate + *out = new(string) + **out = **in + } + if in.DryRunID != nil { + in, out := &in.DryRunID, &out.DryRunID + *out = new(string) + **out = **in + } + if in.DryRunStatus != nil { + in, out := &in.DryRunStatus, &out.DryRunStatus + *out = new(string) + **out = **in + } + if in.UpdateDate != nil { + in, out := &in.UpdateDate, &out.UpdateDate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DryRunProgressStatus. +func (in *DryRunProgressStatus) DeepCopy() *DryRunProgressStatus { + if in == nil { + return nil + } + out := new(DryRunProgressStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DryRunResults) DeepCopyInto(out *DryRunResults) { *out = *in @@ -1694,6 +1794,71 @@ func (in *NodeToNodeEncryptionOptionsStatus) DeepCopy() *NodeToNodeEncryptionOpt return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OffPeakWindow) DeepCopyInto(out *OffPeakWindow) { + *out = *in + if in.WindowStartTime != nil { + in, out := &in.WindowStartTime, &out.WindowStartTime + *out = new(WindowStartTime) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OffPeakWindow. +func (in *OffPeakWindow) DeepCopy() *OffPeakWindow { + if in == nil { + return nil + } + out := new(OffPeakWindow) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *OffPeakWindowOptions) DeepCopyInto(out *OffPeakWindowOptions) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.OffPeakWindow != nil { + in, out := &in.OffPeakWindow, &out.OffPeakWindow + *out = new(OffPeakWindow) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OffPeakWindowOptions. +func (in *OffPeakWindowOptions) DeepCopy() *OffPeakWindowOptions { + if in == nil { + return nil + } + out := new(OffPeakWindowOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OffPeakWindowOptionsStatus) DeepCopyInto(out *OffPeakWindowOptionsStatus) { + *out = *in + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = new(OffPeakWindowOptions) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OffPeakWindowOptionsStatus. +func (in *OffPeakWindowOptionsStatus) DeepCopy() *OffPeakWindowOptionsStatus { + if in == nil { + return nil + } + out := new(OffPeakWindowOptionsStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OptionStatus) DeepCopyInto(out *OptionStatus) { *out = *in @@ -1919,6 +2084,41 @@ func (in *SAMLOptionsOutput) DeepCopy() *SAMLOptionsOutput { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduledAction) DeepCopyInto(out *ScheduledAction) { + *out = *in + if in.Cancellable != nil { + in, out := &in.Cancellable, &out.Cancellable + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Mandatory != nil { + in, out := &in.Mandatory, &out.Mandatory + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledAction. +func (in *ScheduledAction) DeepCopy() *ScheduledAction { + if in == nil { + return nil + } + out := new(ScheduledAction) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ServiceSoftwareOptions) DeepCopyInto(out *ServiceSoftwareOptions) { *out = *in @@ -2013,6 +2213,46 @@ func (in *SnapshotOptionsStatus) DeepCopy() *SnapshotOptionsStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SoftwareUpdateOptions) DeepCopyInto(out *SoftwareUpdateOptions) { + *out = *in + if in.AutoSoftwareUpdateEnabled != nil { + in, out := &in.AutoSoftwareUpdateEnabled, &out.AutoSoftwareUpdateEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SoftwareUpdateOptions. +func (in *SoftwareUpdateOptions) DeepCopy() *SoftwareUpdateOptions { + if in == nil { + return nil + } + out := new(SoftwareUpdateOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *SoftwareUpdateOptionsStatus) DeepCopyInto(out *SoftwareUpdateOptionsStatus) { + *out = *in + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = new(SoftwareUpdateOptions) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SoftwareUpdateOptionsStatus. +func (in *SoftwareUpdateOptionsStatus) DeepCopy() *SoftwareUpdateOptionsStatus { + if in == nil { + return nil + } + out := new(SoftwareUpdateOptionsStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Tag) DeepCopyInto(out *Tag) { *out = *in @@ -2208,6 +2448,31 @@ func (in *VPCOptions) DeepCopy() *VPCOptions { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidationFailure) DeepCopyInto(out *ValidationFailure) { + *out = *in + if in.Code != nil { + in, out := &in.Code, &out.Code + *out = new(string) + **out = **in + } + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationFailure. +func (in *ValidationFailure) DeepCopy() *ValidationFailure { + if in == nil { + return nil + } + out := new(ValidationFailure) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VersionStatus) DeepCopyInto(out *VersionStatus) { *out = *in @@ -2228,6 +2493,31 @@ func (in *VersionStatus) DeepCopy() *VersionStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowStartTime) DeepCopyInto(out *WindowStartTime) { + *out = *in + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = new(int64) + **out = **in + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowStartTime. +func (in *WindowStartTime) DeepCopy() *WindowStartTime { + if in == nil { + return nil + } + out := new(WindowStartTime) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ZoneAwarenessConfig) DeepCopyInto(out *ZoneAwarenessConfig) { *out = *in diff --git a/apis/opensearchservice/v1alpha1/zz_types.go b/apis/opensearchservice/v1alpha1/zz_types.go index 1fd27ea393..327ae2eab8 100644 --- a/apis/opensearchservice/v1alpha1/zz_types.go +++ b/apis/opensearchservice/v1alpha1/zz_types.go @@ -110,6 +110,8 @@ type AutoTuneOptions struct { DesiredState *string `json:"desiredState,omitempty"` MaintenanceSchedules []*AutoTuneMaintenanceSchedule `json:"maintenanceSchedules,omitempty"` + + UseOffPeakWindow *bool `json:"useOffPeakWindow,omitempty"` } // +kubebuilder:skipversion @@ -118,6 +120,8 @@ type AutoTuneOptionsInput struct { DesiredState *string `json:"desiredState,omitempty"` MaintenanceSchedules []*AutoTuneMaintenanceSchedule `json:"maintenanceSchedules,omitempty"` + + UseOffPeakWindow *bool `json:"useOffPeakWindow,omitempty"` } // +kubebuilder:skipversion @@ -126,6 +130,8 @@ type AutoTuneOptionsOutput struct { // The Auto-Tune state for the domain. For valid states see Auto-Tune for Amazon // OpenSearch Service (https://docs.aws.amazon.com/opensearch-service/latest/developerguide/auto-tune.html). State *string `json:"state,omitempty"` + + UseOffPeakWindow *bool `json:"useOffPeakWindow,omitempty"` } // +kubebuilder:skipversion @@ -171,6 +177,8 @@ type ClusterConfig struct { InstanceType *string `json:"instanceType,omitempty"` + MultiAZWithStandbyEnabled *bool `json:"multiAZWithStandbyEnabled,omitempty"` + WarmCount *int64 `json:"warmCount,omitempty"` WarmEnabled *bool `json:"warmEnabled,omitempty"` @@ -253,6 +261,14 @@ type DomainInfo struct { DomainName *string `json:"domainName,omitempty"` } +// +kubebuilder:skipversion +type DomainNodesStatus struct { + InstanceType *string `json:"instanceType,omitempty"` + // The type of EBS volume that a domain uses. For more information, see Configuring + // EBS-based storage (https://docs.aws.amazon.com/opensearch-service/latest/developerguide/opensearch-createupdatedomains.html#opensearch-createdomain-configure-ebs). + StorageVolumeType *string `json:"storageVolumeType,omitempty"` +} + // +kubebuilder:skipversion type DomainPackageDetails struct { // The name of an OpenSearch Service domain. Domain names are unique across @@ -326,6 +342,10 @@ type DomainStatus_SDK struct { // Enables or disables node-to-node encryption. For more information, see Node-to-node // encryption for Amazon OpenSearch Service (https://docs.aws.amazon.com/opensearch-service/latest/developerguide/ntn.html). NodeToNodeEncryptionOptions *NodeToNodeEncryptionOptions `json:"nodeToNodeEncryptionOptions,omitempty"` + // Options for a domain's off-peak window (https://docs.aws.amazon.com/opensearch-service/latest/APIReference/API_OffPeakWindow.html), + // during which OpenSearch Service can perform mandatory configuration changes + // on the domain. + OffPeakWindowOptions *OffPeakWindowOptions `json:"offPeakWindowOptions,omitempty"` Processing *bool `json:"processing,omitempty"` // The current status of the service software for an Amazon OpenSearch Service @@ -335,6 +355,8 @@ type DomainStatus_SDK struct { // The time, in UTC format, when OpenSearch Service takes a daily automated // snapshot of the specified domain. Default is 0 hours. SnapshotOptions *SnapshotOptions `json:"snapshotOptions,omitempty"` + // Options for configuring service software updates for a domain. 
+ SoftwareUpdateOptions *SoftwareUpdateOptions `json:"softwareUpdateOptions,omitempty"` UpgradeProcessing *bool `json:"upgradeProcessing,omitempty"` // Information about the subnets and security groups for an Amazon OpenSearch @@ -345,6 +367,17 @@ type DomainStatus_SDK struct { VPCOptions *VPCDerivedInfo `json:"vpcOptions,omitempty"` } +// +kubebuilder:skipversion +type DryRunProgressStatus struct { + CreationDate *string `json:"creationDate,omitempty"` + + DryRunID *string `json:"dryRunID,omitempty"` + + DryRunStatus *string `json:"dryRunStatus,omitempty"` + + UpdateDate *string `json:"updateDate,omitempty"` +} + // +kubebuilder:skipversion type DryRunResults struct { Message *string `json:"message,omitempty"` @@ -446,6 +479,37 @@ type NodeToNodeEncryptionOptionsStatus struct { Options *NodeToNodeEncryptionOptions `json:"options,omitempty"` } +// +kubebuilder:skipversion +type OffPeakWindow struct { + // The desired start time for an off-peak maintenance window (https://docs.aws.amazon.com/opensearch-service/latest/APIReference/API_OffPeakWindow.html). + WindowStartTime *WindowStartTime `json:"windowStartTime,omitempty"` +} + +// +kubebuilder:skipversion +type OffPeakWindowOptions struct { + Enabled *bool `json:"enabled,omitempty"` + // A custom 10-hour, low-traffic window during which OpenSearch Service can + // perform mandatory configuration changes on the domain. These actions can + // include scheduled service software updates and blue/green Auto-Tune enhancements. + // OpenSearch Service will schedule these actions during the window that you + // specify. + // + // If you don't specify a window start time, it defaults to 10:00 P.M. local + // time. + // + // For more information, see Defining off-peak maintenance windows for Amazon + // OpenSearch Service (https://docs.aws.amazon.com/opensearch-service/latest/developerguide/off-peak.html). + OffPeakWindow *OffPeakWindow `json:"offPeakWindow,omitempty"` +} + +// +kubebuilder:skipversion +type OffPeakWindowOptionsStatus struct { + // Options for a domain's off-peak window (https://docs.aws.amazon.com/opensearch-service/latest/APIReference/API_OffPeakWindow.html), + // during which OpenSearch Service can perform mandatory configuration changes + // on the domain. + Options *OffPeakWindowOptions `json:"options,omitempty"` +} + // +kubebuilder:skipversion type OptionStatus struct { PendingDeletion *bool `json:"pendingDeletion,omitempty"` @@ -515,6 +579,17 @@ type SAMLOptionsOutput struct { SubjectKey *string `json:"subjectKey,omitempty"` } +// +kubebuilder:skipversion +type ScheduledAction struct { + Cancellable *bool `json:"cancellable,omitempty"` + + Description *string `json:"description,omitempty"` + + ID *string `json:"id,omitempty"` + + Mandatory *bool `json:"mandatory,omitempty"` +} + // +kubebuilder:skipversion type ServiceSoftwareOptions struct { AutomatedUpdateDate *metav1.Time `json:"automatedUpdateDate,omitempty"` @@ -546,6 +621,17 @@ type SnapshotOptionsStatus struct { Options *SnapshotOptions `json:"options,omitempty"` } +// +kubebuilder:skipversion +type SoftwareUpdateOptions struct { + AutoSoftwareUpdateEnabled *bool `json:"autoSoftwareUpdateEnabled,omitempty"` +} + +// +kubebuilder:skipversion +type SoftwareUpdateOptionsStatus struct { + // Options for configuring service software updates for a domain. + Options *SoftwareUpdateOptions `json:"options,omitempty"` +} + // +kubebuilder:skipversion type Tag struct { // A string between 1 to 128 characters that specifies the key for a tag. 
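The new off-peak and software-update options are plain optional fields, so specs populate them with the usual aws-sdk-go pointer helpers. A hedged sketch (the helper name and the desired-state string are illustrative assumptions) that wires Auto-Tune to a window starting at the documented 10:00 P.M. default:

package main

import (
	"github.com/aws/aws-sdk-go/aws"

	// Assumed import path for the generated types.
	svcapitypes "github.com/crossplane-contrib/provider-aws/apis/opensearchservice/v1alpha1"
)

// newOffPeakConfig is a hypothetical helper, not part of this patch.
func newOffPeakConfig() (*svcapitypes.AutoTuneOptions, *svcapitypes.OffPeakWindowOptions) {
	autoTune := &svcapitypes.AutoTuneOptions{
		DesiredState:     aws.String("ENABLED"), // assumed Auto-Tune state value
		UseOffPeakWindow: aws.Bool(true),
	}
	offPeak := &svcapitypes.OffPeakWindowOptions{
		Enabled: aws.Bool(true),
		OffPeakWindow: &svcapitypes.OffPeakWindow{
			WindowStartTime: &svcapitypes.WindowStartTime{
				Hours:   aws.Int64(22), // 10:00 P.M., the documented default start time
				Minutes: aws.Int64(0),
			},
		},
	}
	return autoTune, offPeak
}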
Tag @@ -604,11 +690,25 @@ type VPCOptions struct { SubnetIDs []*string `json:"subnetIDs,omitempty"` } +// +kubebuilder:skipversion +type ValidationFailure struct { + Code *string `json:"code,omitempty"` + + Message *string `json:"message,omitempty"` +} + // +kubebuilder:skipversion type VersionStatus struct { Options *string `json:"options,omitempty"` } +// +kubebuilder:skipversion +type WindowStartTime struct { + Hours *int64 `json:"hours,omitempty"` + + Minutes *int64 `json:"minutes,omitempty"` +} + // +kubebuilder:skipversion type ZoneAwarenessConfig struct { AvailabilityZoneCount *int64 `json:"availabilityZoneCount,omitempty"` diff --git a/apis/ram/generator-config.yaml b/apis/ram/generator-config.yaml index 3993f68197..80d406f85f 100644 --- a/apis/ram/generator-config.yaml +++ b/apis/ram/generator-config.yaml @@ -1 +1,4 @@ ignore: + resource_names: + - Permission + - PermissionVersion diff --git a/apis/ram/v1alpha1/zz_enums.go b/apis/ram/v1alpha1/zz_enums.go index c21cd0135e..396305e8c8 100644 --- a/apis/ram/v1alpha1/zz_enums.go +++ b/apis/ram/v1alpha1/zz_enums.go @@ -18,6 +18,46 @@ limitations under the License. package v1alpha1 +type PermissionFeatureSet string + +const ( + PermissionFeatureSet_CREATED_FROM_POLICY PermissionFeatureSet = "CREATED_FROM_POLICY" + PermissionFeatureSet_PROMOTING_TO_STANDARD PermissionFeatureSet = "PROMOTING_TO_STANDARD" + PermissionFeatureSet_STANDARD PermissionFeatureSet = "STANDARD" +) + +type PermissionStatus string + +const ( + PermissionStatus_ATTACHABLE PermissionStatus = "ATTACHABLE" + PermissionStatus_UNATTACHABLE PermissionStatus = "UNATTACHABLE" + PermissionStatus_DELETING PermissionStatus = "DELETING" + PermissionStatus_DELETED PermissionStatus = "DELETED" +) + +type PermissionType string + +const ( + PermissionType_CUSTOMER_MANAGED PermissionType = "CUSTOMER_MANAGED" + PermissionType_AWS_MANAGED PermissionType = "AWS_MANAGED" +) + +type PermissionTypeFilter string + +const ( + PermissionTypeFilter_ALL PermissionTypeFilter = "ALL" + PermissionTypeFilter_AWS_MANAGED PermissionTypeFilter = "AWS_MANAGED" + PermissionTypeFilter_CUSTOMER_MANAGED PermissionTypeFilter = "CUSTOMER_MANAGED" +) + +type ReplacePermissionAssociationsWorkStatus string + +const ( + ReplacePermissionAssociationsWorkStatus_IN_PROGRESS ReplacePermissionAssociationsWorkStatus = "IN_PROGRESS" + ReplacePermissionAssociationsWorkStatus_COMPLETED ReplacePermissionAssociationsWorkStatus = "COMPLETED" + ReplacePermissionAssociationsWorkStatus_FAILED ReplacePermissionAssociationsWorkStatus = "FAILED" +) + type ResourceOwner string const ( diff --git a/apis/ram/v1alpha1/zz_generated.deepcopy.go b/apis/ram/v1alpha1/zz_generated.deepcopy.go index 8a65a29453..7d40b43cbf 100644 --- a/apis/ram/v1alpha1/zz_generated.deepcopy.go +++ b/apis/ram/v1alpha1/zz_generated.deepcopy.go @@ -25,6 +25,55 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
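The RAM enums above are plain string types, while the generated structs carry status as *string, so comparisons go through a cast. A small sketch under the same import-path assumption:

package main

import (
	"fmt"

	// Assumed import path for the generated RAM types.
	ramv1alpha1 "github.com/crossplane-contrib/provider-aws/apis/ram/v1alpha1"
)

// isAttachable is a hypothetical helper, not part of this patch.
func isAttachable(status *string) bool {
	return status != nil && *status == string(ramv1alpha1.PermissionStatus_ATTACHABLE)
}

func main() {
	s := "ATTACHABLE"
	fmt.Println(isAttachable(&s)) // true
}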
+func (in *AssociatedPermission) DeepCopyInto(out *AssociatedPermission) { + *out = *in + if in.ARN != nil { + in, out := &in.ARN, &out.ARN + *out = new(string) + **out = **in + } + if in.DefaultVersion != nil { + in, out := &in.DefaultVersion, &out.DefaultVersion + *out = new(bool) + **out = **in + } + if in.LastUpdatedTime != nil { + in, out := &in.LastUpdatedTime, &out.LastUpdatedTime + *out = (*in).DeepCopy() + } + if in.PermissionVersion != nil { + in, out := &in.PermissionVersion, &out.PermissionVersion + *out = new(string) + **out = **in + } + if in.ResourceShareARN != nil { + in, out := &in.ResourceShareARN, &out.ResourceShareARN + *out = new(string) + **out = **in + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AssociatedPermission. +func (in *AssociatedPermission) DeepCopy() *AssociatedPermission { + if in == nil { + return nil + } + out := new(AssociatedPermission) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CustomResourceShareParameters) DeepCopyInto(out *CustomResourceShareParameters) { *out = *in @@ -78,6 +127,59 @@ func (in *Principal) DeepCopy() *Principal { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplacePermissionAssociationsWork) DeepCopyInto(out *ReplacePermissionAssociationsWork) { + *out = *in + if in.CreationTime != nil { + in, out := &in.CreationTime, &out.CreationTime + *out = (*in).DeepCopy() + } + if in.FromPermissionARN != nil { + in, out := &in.FromPermissionARN, &out.FromPermissionARN + *out = new(string) + **out = **in + } + if in.FromPermissionVersion != nil { + in, out := &in.FromPermissionVersion, &out.FromPermissionVersion + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LastUpdatedTime != nil { + in, out := &in.LastUpdatedTime, &out.LastUpdatedTime + *out = (*in).DeepCopy() + } + if in.StatusMessage != nil { + in, out := &in.StatusMessage, &out.StatusMessage + *out = new(string) + **out = **in + } + if in.ToPermissionARN != nil { + in, out := &in.ToPermissionARN, &out.ToPermissionARN + *out = new(string) + **out = **in + } + if in.ToPermissionVersion != nil { + in, out := &in.ToPermissionVersion, &out.ToPermissionVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplacePermissionAssociationsWork. +func (in *ReplacePermissionAssociationsWork) DeepCopy() *ReplacePermissionAssociationsWork { + if in == nil { + return nil + } + out := new(ReplacePermissionAssociationsWork) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Resource) DeepCopyInto(out *Resource) { *out = *in @@ -353,6 +455,17 @@ func (in *ResourceShareParameters) DeepCopyInto(out *ResourceShareParameters) { } } } + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.Tags != nil { in, out := &in.Tags, &out.Tags *out = make([]*Tag, len(*in)) @@ -418,6 +531,17 @@ func (in *ResourceSharePermissionDetail) DeepCopyInto(out *ResourceSharePermissi *out = new(string) **out = **in } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]*Tag, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Tag) + (*in).DeepCopyInto(*out) + } + } + } if in.Version != nil { in, out := &in.Version, &out.Version *out = new(string) @@ -476,6 +600,17 @@ func (in *ResourceSharePermissionSummary) DeepCopyInto(out *ResourceSharePermiss *out = new(string) **out = **in } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]*Tag, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Tag) + (*in).DeepCopyInto(*out) + } + } + } if in.Version != nil { in, out := &in.Version, &out.Version *out = new(string) diff --git a/apis/ram/v1alpha1/zz_resource_share.go b/apis/ram/v1alpha1/zz_resource_share.go index 96dbe34939..e0decd96ea 100644 --- a/apis/ram/v1alpha1/zz_resource_share.go +++ b/apis/ram/v1alpha1/zz_resource_share.go @@ -44,6 +44,9 @@ type ResourceShareParameters struct { // // If you don't provide this value, then Amazon Web Services generates a random // one for you. + // + // If you retry the operation with the same ClientToken, but with different + // parameters, the retry fails with an IdempotentParameterMismatch error. ClientToken *string `json:"clientToken,omitempty"` // Specifies the name of the resource share. // +kubebuilder:validation:Required @@ -61,7 +64,7 @@ type ResourceShareParameters struct { // // * An Amazon Web Services account ID, for example: 123456789012 // - // * An Amazon Resoure Name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // * An Amazon Resource Name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // of an organization in Organizations, for example: organizations::123456789012:organization/o-exampleorgid // // * An ARN of an organizational unit (OU) in Organizations, for example: @@ -78,6 +81,9 @@ type ResourceShareParameters struct { // Specifies a list of one or more ARNs of the resources to associate with the // resource share. ResourceARNs []*string `json:"resourceARNs,omitempty"` + // Specifies from which source accounts the service principal has access to + // the resources in this resource share. + Sources []*string `json:"sources,omitempty"` // Specifies one or more tags to attach to the resource share itself. It doesn't // attach the tags to the resources associated with the resource share. 
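The new Sources field follows the existing pointer-slice conventions of ResourceShareParameters. A hedged sketch of a spec value (the account ID, ARN, and Name initializer are illustrative; only fields documented in this hunk are set):

package main

import (
	"github.com/aws/aws-sdk-go/aws"

	// Assumed import path for the generated RAM types.
	ramv1alpha1 "github.com/crossplane-contrib/provider-aws/apis/ram/v1alpha1"
)

// exampleShare is a hypothetical helper, not part of this patch.
func exampleShare() ramv1alpha1.ResourceShareParameters {
	return ramv1alpha1.ResourceShareParameters{
		Name:        aws.String("example-share"),       // required per the kubebuilder marker
		ClientToken: aws.String("idempotency-token-1"), // same token + same parameters => idempotent retry
		ResourceARNs: []*string{
			aws.String("arn:aws:ec2:eu-central-1:123456789012:subnet/subnet-0example"),
		},
		Sources: []*string{aws.String("123456789012")}, // new in this SDK bump
	}
}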
Tags []*Tag `json:"tags,omitempty"` diff --git a/apis/ram/v1alpha1/zz_types.go b/apis/ram/v1alpha1/zz_types.go index c0ed55ca72..418d0a26df 100644 --- a/apis/ram/v1alpha1/zz_types.go +++ b/apis/ram/v1alpha1/zz_types.go @@ -27,6 +27,23 @@ var ( _ = &metav1.Time{} ) +// +kubebuilder:skipversion +type AssociatedPermission struct { + ARN *string `json:"arn,omitempty"` + + DefaultVersion *bool `json:"defaultVersion,omitempty"` + + LastUpdatedTime *metav1.Time `json:"lastUpdatedTime,omitempty"` + + PermissionVersion *string `json:"permissionVersion,omitempty"` + + ResourceShareARN *string `json:"resourceShareARN,omitempty"` + + ResourceType *string `json:"resourceType,omitempty"` + + Status *string `json:"status,omitempty"` +} + // +kubebuilder:skipversion type Principal struct { CreationTime *metav1.Time `json:"creationTime,omitempty"` @@ -40,6 +57,25 @@ type Principal struct { ResourceShareARN *string `json:"resourceShareARN,omitempty"` } +// +kubebuilder:skipversion +type ReplacePermissionAssociationsWork struct { + CreationTime *metav1.Time `json:"creationTime,omitempty"` + + FromPermissionARN *string `json:"fromPermissionARN,omitempty"` + + FromPermissionVersion *string `json:"fromPermissionVersion,omitempty"` + + ID *string `json:"id,omitempty"` + + LastUpdatedTime *metav1.Time `json:"lastUpdatedTime,omitempty"` + + StatusMessage *string `json:"statusMessage,omitempty"` + + ToPermissionARN *string `json:"toPermissionARN,omitempty"` + + ToPermissionVersion *string `json:"toPermissionVersion,omitempty"` +} + // +kubebuilder:skipversion type Resource struct { ARN *string `json:"arn,omitempty"` @@ -109,6 +145,8 @@ type ResourceSharePermissionDetail struct { ResourceType *string `json:"resourceType,omitempty"` + Tags []*Tag `json:"tags,omitempty"` + Version *string `json:"version,omitempty"` } @@ -130,6 +168,8 @@ type ResourceSharePermissionSummary struct { Status *string `json:"status,omitempty"` + Tags []*Tag `json:"tags,omitempty"` + Version *string `json:"version,omitempty"` } diff --git a/apis/rds/v1alpha1/zz_db_cluster.go b/apis/rds/v1alpha1/zz_db_cluster.go index 182d03a763..e90bf80450 100644 --- a/apis/rds/v1alpha1/zz_db_cluster.go +++ b/apis/rds/v1alpha1/zz_db_cluster.go @@ -32,23 +32,25 @@ type DBClusterParameters struct { // The amount of storage in gibibytes (GiB) to allocate to each DB instance // in the Multi-AZ DB cluster. // - // This setting is required to create a Multi-AZ DB cluster. + // Valid for Cluster Type: Multi-AZ DB clusters only // - // Valid for: Multi-AZ DB clusters only + // This setting is required to create a Multi-AZ DB cluster. AllocatedStorage *int64 `json:"allocatedStorage,omitempty"` - // A value that indicates whether major version upgrades are allowed. + // Specifies whether major version upgrades are allowed. // - // Constraints: You must allow major version upgrades when specifying a value - // for the EngineVersion parameter that is a different major version than the - // DB cluster's current version. + // Valid for Cluster Type: Aurora DB clusters only // - // Valid for: Aurora DB clusters only + // Constraints: + // + // * You must allow major version upgrades when specifying a value for the + // EngineVersion parameter that is a different major version than the DB + // cluster's current version. AllowMajorVersionUpgrade *bool `json:"allowMajorVersionUpgrade,omitempty"` - // A value that indicates whether minor engine upgrades are applied automatically - // to the DB cluster during the maintenance window. 
By default, minor engine - // upgrades are applied automatically. + // Specifies whether minor engine upgrades are applied automatically to the + // DB cluster during the maintenance window. By default, minor engine upgrades + // are applied automatically. // - // Valid for: Multi-AZ DB clusters only + // Valid for Cluster Type: Multi-AZ DB clusters only AutoMinorVersionUpgrade *bool `json:"autoMinorVersionUpgrade,omitempty"` // A list of Availability Zones (AZs) where DB instances in the DB cluster can // be created. @@ -57,39 +59,39 @@ type DBClusterParameters struct { // Choosing the Regions and Availability Zones (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.RegionsAndAvailabilityZones.html) // in the Amazon Aurora User Guide. // - // Valid for: Aurora DB clusters only + // Valid for Cluster Type: Aurora DB clusters only AvailabilityZones []*string `json:"availabilityZones,omitempty"` // The target backtrack window, in seconds. To disable backtracking, set this // value to 0. // + // Valid for Cluster Type: Aurora MySQL DB clusters only + // // Default: 0 // // Constraints: // // * If specified, this value must be set to a number from 0 to 259,200 (72 // hours). - // - // Valid for: Aurora MySQL DB clusters only BacktrackWindow *int64 `json:"backtrackWindow,omitempty"` // The number of days for which automated backups are retained. // + // Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters + // // Default: 1 // // Constraints: // - // * Must be a value from 1 to 35 - // - // Valid for: Aurora DB clusters and Multi-AZ DB clusters + // * Must be a value from 1 to 35. BackupRetentionPeriod *int64 `json:"backupRetentionPeriod,omitempty"` - // A value that indicates that the DB cluster should be associated with the - // specified CharacterSet. + // The name of the character set (CharacterSet) to associate the DB cluster + // with. // - // Valid for: Aurora DB clusters only + // Valid for Cluster Type: Aurora DB clusters only CharacterSetName *string `json:"characterSetName,omitempty"` - // A value that indicates whether to copy all tags from the DB cluster to snapshots - // of the DB cluster. The default is not to copy them. + // Specifies whether to copy all tags from the DB cluster to snapshots of the + // DB cluster. The default is not to copy them. // - // Valid for: Aurora DB clusters and Multi-AZ DB clusters + // Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters CopyTagsToSnapshot *bool `json:"copyTagsToSnapshot,omitempty"` // The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, // for example db.m6gd.xlarge. Not all DB instance classes are available in @@ -101,43 +103,46 @@ type DBClusterParameters struct { // // This setting is required to create a Multi-AZ DB cluster. // - // Valid for: Multi-AZ DB clusters only + // Valid for Cluster Type: Multi-AZ DB clusters only DBClusterInstanceClass *string `json:"dbClusterInstanceClass,omitempty"` // The name of the DB cluster parameter group to associate with this DB cluster. - // If you do not specify a value, then the default DB cluster parameter group + // If you don't specify a value, then the default DB cluster parameter group // for the specified DB engine and version is used. // + // Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters + // // Constraints: // // * If supplied, must match the name of an existing DB cluster parameter // group. 
- // - // Valid for: Aurora DB clusters and Multi-AZ DB clusters DBClusterParameterGroupName *string `json:"dbClusterParameterGroupName,omitempty"` // A DB subnet group to associate with this DB cluster. // // This setting is required to create a Multi-AZ DB cluster. // - // Constraints: Must match the name of an existing DBSubnetGroup. Must not be - // default. + // Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters // - // Example: mydbsubnetgroup + // Constraints: // - // Valid for: Aurora DB clusters and Multi-AZ DB clusters + // * Must match the name of an existing DB subnet group. + // + // * Must not be default. + // + // Example: mydbsubnetgroup DBSubnetGroupName *string `json:"dbSubnetGroupName,omitempty"` // Reserved for future use. DBSystemID *string `json:"dbSystemID,omitempty"` - // The name for your database of up to 64 alphanumeric characters. If you do - // not provide a name, Amazon RDS doesn't create a database in the DB cluster - // you are creating. + // The name for your database of up to 64 alphanumeric characters. If you don't + // provide a name, Amazon RDS doesn't create a database in the DB cluster you + // are creating. // - // Valid for: Aurora DB clusters and Multi-AZ DB clusters + // Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters DatabaseName *string `json:"databaseName,omitempty"` - // A value that indicates whether the DB cluster has deletion protection enabled. - // The database can't be deleted when deletion protection is enabled. By default, - // deletion protection isn't enabled. + // Specifies whether the DB cluster has deletion protection enabled. The database + // can't be deleted when deletion protection is enabled. By default, deletion + // protection isn't enabled. // - // Valid for: Aurora DB clusters and Multi-AZ DB clusters + // Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters DeletionProtection *bool `json:"deletionProtection,omitempty"` // DestinationRegion is used for presigning the request to a given region. DestinationRegion *string `json:"destinationRegion,omitempty"` @@ -149,31 +154,26 @@ type DBClusterParameters struct { // For more information, see Kerberos authentication (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/kerberos-authentication.html) // in the Amazon Aurora User Guide. // - // Valid for: Aurora DB clusters only + // Valid for Cluster Type: Aurora DB clusters only Domain *string `json:"domain,omitempty"` - // Specify the name of the IAM role to be used when making API calls to the - // Directory Service. + // The name of the IAM role to use when making API calls to the Directory Service. // - // Valid for: Aurora DB clusters only + // Valid for Cluster Type: Aurora DB clusters only DomainIAMRoleName *string `json:"domainIAMRoleName,omitempty"` // The list of log types that need to be enabled for exporting to CloudWatch - // Logs. The values in the list depend on the DB engine being used. - // - // RDS for MySQL - // - // Possible values are error, general, and slowquery. + // Logs. // - // RDS for PostgreSQL + // Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters // - // Possible values are postgresql and upgrade. + // The following values are valid for each DB engine: // - // Aurora MySQL + // * Aurora MySQL - audit | error | general | slowquery // - // Possible values are audit, error, general, and slowquery. 
+ // * Aurora PostgreSQL - postgresql // - // Aurora PostgreSQL + // * RDS for MySQL - error | general | slowquery // - // Possible value is postgresql. + // * RDS for PostgreSQL - postgresql | upgrade // // For more information about exporting CloudWatch Logs for Amazon RDS, see // Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) @@ -182,25 +182,23 @@ type DBClusterParameters struct { // For more information about exporting CloudWatch Logs for Amazon Aurora, see // Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) // in the Amazon Aurora User Guide. - // - // Valid for: Aurora DB clusters and Multi-AZ DB clusters EnableCloudwatchLogsExports []*string `json:"enableCloudwatchLogsExports,omitempty"` - // A value that indicates whether to enable this DB cluster to forward write - // operations to the primary cluster of an Aurora global database (GlobalCluster). - // By default, write operations are not allowed on Aurora DB clusters that are - // secondary clusters in an Aurora global database. + // Specifies whether to enable this DB cluster to forward write operations to + // the primary cluster of a global cluster (Aurora global database). By default, + // write operations are not allowed on Aurora DB clusters that are secondary + // clusters in an Aurora global database. // // You can set this value only on Aurora DB clusters that are members of an // Aurora global database. With this parameter enabled, a secondary cluster - // can forward writes to the current primary cluster and the resulting changes + // can forward writes to the current primary cluster, and the resulting changes // are replicated back to this cluster. For the primary DB cluster of an Aurora // global database, this value is used immediately if the primary is demoted - // by the FailoverGlobalCluster API operation, but it does nothing until then. + // by a global cluster API operation, but it does nothing until then. // - // Valid for: Aurora DB clusters only + // Valid for Cluster Type: Aurora DB clusters only EnableGlobalWriteForwarding *bool `json:"enableGlobalWriteForwarding,omitempty"` - // A value that indicates whether to enable the HTTP endpoint for an Aurora - // Serverless v1 DB cluster. By default, the HTTP endpoint is disabled. + // Specifies whether to enable the HTTP endpoint for an Aurora Serverless v1 + // DB cluster. By default, the HTTP endpoint is disabled. // // When enabled, the HTTP endpoint provides a connectionless web service API // for running SQL queries on the Aurora Serverless v1 DB cluster. You can also @@ -209,92 +207,69 @@ type DBClusterParameters struct { // For more information, see Using the Data API for Aurora Serverless v1 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) // in the Amazon Aurora User Guide. // - // Valid for: Aurora DB clusters only + // Valid for Cluster Type: Aurora DB clusters only EnableHTTPEndpoint *bool `json:"enableHTTPEndpoint,omitempty"` - // A value that indicates whether to enable mapping of Amazon Web Services Identity - // and Access Management (IAM) accounts to database accounts. By default, mapping - // isn't enabled. + // Specifies whether to enable mapping of Amazon Web Services Identity and Access + // Management (IAM) accounts to database accounts. 
By default, mapping isn't + // enabled. // // For more information, see IAM Database Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html) // in the Amazon Aurora User Guide. // - // Valid for: Aurora DB clusters only + // Valid for Cluster Type: Aurora DB clusters only EnableIAMDatabaseAuthentication *bool `json:"enableIAMDatabaseAuthentication,omitempty"` - // A value that indicates whether to turn on Performance Insights for the DB - // cluster. + // Specifies whether read replicas can forward write operations to the writer + // DB instance in the DB cluster. By default, write operations aren't allowed + // on reader DB instances. + // + // Valid for: Aurora DB clusters only + EnableLocalWriteForwarding *bool `json:"enableLocalWriteForwarding,omitempty"` + // Specifies whether to turn on Performance Insights for the DB cluster. // // For more information, see Using Amazon Performance Insights (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) // in the Amazon RDS User Guide. // - // Valid for: Multi-AZ DB clusters only + // Valid for Cluster Type: Multi-AZ DB clusters only EnablePerformanceInsights *bool `json:"enablePerformanceInsights,omitempty"` - // The name of the database engine to be used for this DB cluster. - // - // Valid Values: - // - // * aurora (for MySQL 5.6-compatible Aurora) - // - // * aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora) + // The database engine to use for this DB cluster. // - // * aurora-postgresql + // Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters // - // * mysql - // - // * postgres - // - // Valid for: Aurora DB clusters and Multi-AZ DB clusters + // Valid Values: aurora-mysql | aurora-postgresql | mysql | postgres // +kubebuilder:validation:Required Engine *string `json:"engine"` - // The DB engine mode of the DB cluster, either provisioned, serverless, parallelquery, - // global, or multimaster. - // - // The parallelquery engine mode isn't required for Aurora MySQL version 1.23 - // and higher 1.x versions, and version 2.09 and higher 2.x versions. - // - // The global engine mode isn't required for Aurora MySQL version 1.22 and higher - // 1.x versions, and global engine mode isn't required for any 2.x versions. - // - // The multimaster engine mode only applies for DB clusters created with Aurora - // MySQL version 5.6.10a. + // The DB engine mode of the DB cluster, either provisioned or serverless. // // The serverless engine mode only applies for Aurora Serverless v1 DB clusters. // - // For Aurora PostgreSQL, the global engine mode isn't required, and both the - // parallelquery and the multimaster engine modes currently aren't supported. - // - // Limitations and requirements apply to some DB engine modes. 
For more information, + // For information about limitations and requirements for Serverless DB clusters, // see the following sections in the Amazon Aurora User Guide: // // * Limitations of Aurora Serverless v1 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations) // // * Requirements for Aurora Serverless v2 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.requirements.html) // - // * Limitations of Parallel Query (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-mysql-parallel-query.html#aurora-mysql-parallel-query-limitations) - // - // * Limitations of Aurora Global Databases (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html#aurora-global-database.limitations) - // - // * Limitations of Multi-Master Clusters (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-multi-master.html#aurora-multi-master-limitations) - // - // Valid for: Aurora DB clusters only + // Valid for Cluster Type: Aurora DB clusters only EngineMode *string `json:"engineMode,omitempty"` // The global cluster ID of an Aurora cluster that becomes the primary cluster // in the new global database cluster. // - // Valid for: Aurora DB clusters only + // Valid for Cluster Type: Aurora DB clusters only GlobalClusterIdentifier *string `json:"globalClusterIdentifier,omitempty"` // The amount of Provisioned IOPS (input/output operations per second) to be // initially allocated for each DB instance in the Multi-AZ DB cluster. // - // For information about valid IOPS values, see Amazon RDS Provisioned IOPS - // storage (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#USER_PIOPS) + // For information about valid IOPS values, see Provisioned IOPS storage (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#USER_PIOPS) // in the Amazon RDS User Guide. // // This setting is required to create a Multi-AZ DB cluster. // - // Constraints: Must be a multiple between .5 and 50 of the storage amount for - // the DB cluster. + // Valid for Cluster Type: Multi-AZ DB clusters only // - // Valid for: Multi-AZ DB clusters only + // Constraints: + // + // * Must be a multiple between .5 and 50 of the storage amount for the DB + // cluster. IOPS *int64 `json:"iops,omitempty"` // The Amazon Web Services KMS key identifier for an encrypted DB cluster. // @@ -305,25 +280,25 @@ type DBClusterParameters struct { // When a KMS key isn't specified in KmsKeyId: // // * If ReplicationSourceIdentifier identifies an encrypted source, then - // Amazon RDS will use the KMS key used to encrypt the source. Otherwise, - // Amazon RDS will use your default KMS key. + // Amazon RDS uses the KMS key used to encrypt the source. Otherwise, Amazon + // RDS uses your default KMS key. // // * If the StorageEncrypted parameter is enabled and ReplicationSourceIdentifier - // isn't specified, then Amazon RDS will use your default KMS key. + // isn't specified, then Amazon RDS uses your default KMS key. // // There is a default KMS key for your Amazon Web Services account. Your Amazon // Web Services account has a different default KMS key for each Amazon Web // Services Region. // // If you create a read replica of an encrypted DB cluster in another Amazon - // Web Services Region, you must set KmsKeyId to a KMS key identifier that is - // valid in the destination Amazon Web Services Region. 
This KMS key is used + // Web Services Region, make sure to set KmsKeyId to a KMS key identifier that + // is valid in the destination Amazon Web Services Region. This KMS key is used // to encrypt the read replica in that Amazon Web Services Region. // - // Valid for: Aurora DB clusters and Multi-AZ DB clusters + // Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters KMSKeyID *string `json:"kmsKeyID,omitempty"` - // A value that indicates whether to manage the master user password with Amazon - // Web Services Secrets Manager. + // Specifies whether to manage the master user password with Amazon Web Services + // Secrets Manager. // // For more information, see Password management with Amazon Web Services Secrets // Manager (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) @@ -331,12 +306,12 @@ type DBClusterParameters struct { // Secrets Manager (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-secrets-manager.html) // in the Amazon Aurora User Guide. // + // Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters + // // Constraints: // // * Can't manage the master user password with Amazon Web Services Secrets // Manager if MasterUserPassword is specified. - // - // Valid for: Aurora DB clusters and Multi-AZ DB clusters ManageMasterUserPassword *bool `json:"manageMasterUserPassword,omitempty"` // The Amazon Web Services KMS key identifier to encrypt a secret that is automatically // generated and managed in Amazon Web Services Secrets Manager. @@ -357,10 +332,12 @@ type DBClusterParameters struct { // Web Services account has a different default KMS key for each Amazon Web // Services Region. // - // Valid for: Aurora DB clusters and Multi-AZ DB clusters + // Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters MasterUserSecretKMSKeyID *string `json:"masterUserSecretKMSKeyID,omitempty"` // The name of the master user for the DB cluster. // + // Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters + // // Constraints: // // * Must be 1 to 16 letters or numbers. @@ -368,19 +345,19 @@ type DBClusterParameters struct { // * First character must be a letter. // // * Can't be a reserved word for the chosen database engine. - // - // Valid for: Aurora DB clusters and Multi-AZ DB clusters MasterUsername *string `json:"masterUsername,omitempty"` // The interval, in seconds, between points when Enhanced Monitoring metrics // are collected for the DB cluster. To turn off collecting Enhanced Monitoring - // metrics, specify 0. The default is 0. + // metrics, specify 0. // // If MonitoringRoleArn is specified, also set MonitoringInterval to a value // other than 0. // - // Valid Values: 0, 1, 5, 10, 15, 30, 60 + // Valid for Cluster Type: Multi-AZ DB clusters only // - // Valid for: Multi-AZ DB clusters only + // Valid Values: 0 | 1 | 5 | 10 | 15 | 30 | 60 + // + // Default: 0 MonitoringInterval *int64 `json:"monitoringInterval,omitempty"` // The Amazon Resource Name (ARN) for the IAM role that permits RDS to send // Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is arn:aws:iam:123456789012:role/emaccess. @@ -391,16 +368,10 @@ type DBClusterParameters struct { // If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn // value. // - // Valid for: Multi-AZ DB clusters only + // Valid for Cluster Type: Multi-AZ DB clusters only MonitoringRoleARN *string `json:"monitoringRoleARN,omitempty"` // The network type of the DB cluster. 
// - // Valid values: - // - // * IPV4 - // - // * DUAL - // // The network type is determined by the DBSubnetGroup specified for the DB // cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and // the IPv6 protocols (DUAL). @@ -408,10 +379,11 @@ type DBClusterParameters struct { // For more information, see Working with a DB instance in a VPC (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html) // in the Amazon Aurora User Guide. // - // Valid for: Aurora DB clusters only + // Valid for Cluster Type: Aurora DB clusters only + // + // Valid Values: IPV4 | DUAL NetworkType *string `json:"networkType,omitempty"` - // A value that indicates that the DB cluster should be associated with the - // specified option group. + // The option group to associate the DB cluster with. // // DB clusters are associated with a default option group that can't be modified. OptionGroupName *string `json:"optionGroupName,omitempty"` @@ -426,47 +398,37 @@ type DBClusterParameters struct { // Web Services account. Your Amazon Web Services account has a different default // KMS key for each Amazon Web Services Region. // - // Valid for: Multi-AZ DB clusters only + // Valid for Cluster Type: Multi-AZ DB clusters only PerformanceInsightsKMSKeyID *string `json:"performanceInsightsKMSKeyID,omitempty"` - // The number of days to retain Performance Insights data. The default is 7 - // days. The following values are valid: - // - // * 7 - // - // * month * 31, where month is a number of months from 1-23 + // The number of days to retain Performance Insights data. // - // * 731 + // Valid for Cluster Type: Multi-AZ DB clusters only // - // For example, the following values are valid: - // - // * 93 (3 months * 31) + // Valid Values: // - // * 341 (11 months * 31) + // * 7 // - // * 589 (19 months * 31) + // * month * 31, where month is a number of months from 1-23. Examples: 93 + // (3 months * 31), 341 (11 months * 31), 589 (19 months * 31) // // * 731 // - // If you specify a retention period such as 94, which isn't a valid value, - // RDS issues an error. + // Default: 7 days // - // Valid for: Multi-AZ DB clusters only + // If you specify a retention period that isn't valid, such as 94, Amazon RDS + // issues an error. PerformanceInsightsRetentionPeriod *int64 `json:"performanceInsightsRetentionPeriod,omitempty"` // The port number on which the instances in the DB cluster accept connections. // - // RDS for MySQL and Aurora MySQL - // - // Default: 3306 - // - // Valid values: 1150-65535 + // Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters // - // RDS for PostgreSQL and Aurora PostgreSQL + // Valid Values: 1150-65535 // - // Default: 5432 + // Default: // - // Valid values: 1150-65535 + // * RDS for MySQL and Aurora MySQL - 3306 // - // Valid for: Aurora DB clusters and Multi-AZ DB clusters + // * RDS for PostgreSQL and Aurora PostgreSQL - 5432 Port *int64 `json:"port,omitempty"` // When you are replicating a DB cluster from one Amazon Web Services GovCloud // (US) Region to another, an URL that contains a Signature Version 4 signed @@ -508,11 +470,13 @@ type DBClusterParameters struct { // valid request for the operation that can run in the source Amazon Web Services // Region. 
// - // Valid for: Aurora DB clusters only + // Valid for Cluster Type: Aurora DB clusters only PreSignedURL *string `json:"preSignedURL,omitempty"` // The daily time range during which automated backups are created if automated // backups are enabled using the BackupRetentionPeriod parameter. // + // Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters + // // The default is a 30-minute window selected at random from an 8-hour block // of time for each Amazon Web Services Region. To view the time blocks available, // see Backup window (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Backups.html#Aurora.Managing.Backups.BackupWindow) @@ -527,13 +491,10 @@ type DBClusterParameters struct { // * Must not conflict with the preferred maintenance window. // // * Must be at least 30 minutes. - // - // Valid for: Aurora DB clusters and Multi-AZ DB clusters PreferredBackupWindow *string `json:"preferredBackupWindow,omitempty"` - // The weekly time range during which system maintenance can occur, in Universal - // Coordinated Time (UTC). + // The weekly time range during which system maintenance can occur. // - // Format: ddd:hh24:mi-ddd:hh24:mi + // Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters // // The default is a 30-minute window selected at random from an 8-hour block // of time for each Amazon Web Services Region, occurring on a random day of @@ -541,13 +502,17 @@ type DBClusterParameters struct { // Cluster Maintenance Window (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow.Aurora) // in the Amazon Aurora User Guide. // - // Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun. + // Constraints: + // + // * Must be in the format ddd:hh24:mi-ddd:hh24:mi. // - // Constraints: Minimum 30-minute window. + // * Days must be one of Mon | Tue | Wed | Thu | Fri | Sat | Sun. // - // Valid for: Aurora DB clusters and Multi-AZ DB clusters + // * Must be in Universal Coordinated Time (UTC). + // + // * Must be at least 30 minutes. PreferredMaintenanceWindow *string `json:"preferredMaintenanceWindow,omitempty"` - // A value that indicates whether the DB cluster is publicly accessible. + // Specifies whether the DB cluster is publicly accessible. // // When the DB cluster is publicly accessible, its Domain Name System (DNS) // endpoint resolves to the private IP address from within the DB cluster's @@ -559,6 +524,8 @@ type DBClusterParameters struct { // When the DB cluster isn't publicly accessible, it is an internal DB cluster // with a DNS name that resolves to a private IP address. // + // Valid for Cluster Type: Multi-AZ DB clusters only + // // Default: The default behavior varies depending on whether DBSubnetGroupName // is specified. // @@ -579,18 +546,16 @@ type DBClusterParameters struct { // // * If the subnets are part of a VPC that has an internet gateway attached // to it, the DB cluster is public. - // - // Valid for: Multi-AZ DB clusters only PubliclyAccessible *bool `json:"publiclyAccessible,omitempty"` // The Amazon Resource Name (ARN) of the source DB instance or DB cluster if // this DB cluster is created as a read replica. // - // Valid for: Aurora DB clusters only + // Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters ReplicationSourceIdentifier *string `json:"replicationSourceIdentifier,omitempty"` // For DB clusters in serverless DB engine mode, the scaling properties of the // DB cluster. 
// - // Valid for: Aurora DB clusters only + // Valid for Cluster Type: Aurora DB clusters only ScalingConfiguration *ScalingConfiguration `json:"scalingConfiguration,omitempty"` ServerlessV2ScalingConfiguration *ServerlessV2ScalingConfiguration `json:"serverlessV2ScalingConfiguration,omitempty"` @@ -598,25 +563,43 @@ type DBClusterParameters struct { // sent over the wire and is only used for presigning. This value should always // have the same region as the source ARN. SourceRegion *string `json:"sourceRegion,omitempty"` - // A value that indicates whether the DB cluster is encrypted. + // Specifies whether the DB cluster is encrypted. // - // Valid for: Aurora DB clusters and Multi-AZ DB clusters + // Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters StorageEncrypted *bool `json:"storageEncrypted,omitempty"` - // Specifies the storage type to be associated with the DB cluster. + // The storage type to associate with the DB cluster. + // + // For information on storage types for Aurora DB clusters, see Storage configurations + // for Amazon Aurora DB clusters (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Overview.StorageReliability.html#aurora-storage-type). + // For information on storage types for Multi-AZ DB clusters, see Settings for + // creating Multi-AZ DB clusters (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/create-multi-az-db-cluster.html#create-multi-az-db-cluster-settings). // // This setting is required to create a Multi-AZ DB cluster. // - // Valid values: io1 + // When specified for a Multi-AZ DB cluster, a value for the Iops parameter + // is required. + // + // Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters + // + // Valid Values: + // + // * Aurora DB clusters - aurora | aurora-iopt1 // - // When specified, a value for the Iops parameter is required. + // * Multi-AZ DB clusters - io1 // - // Default: io1 + // Default: // - // Valid for: Multi-AZ DB clusters only + // * Aurora DB clusters - aurora + // + // * Multi-AZ DB clusters - io1 + // + // When you create an Aurora DB cluster with the storage type set to aurora-iopt1, + // the storage type is returned in the response. The storage type isn't returned + // when you set it to aurora. StorageType *string `json:"storageType,omitempty"` // Tags to assign to the DB cluster. // - // Valid for: Aurora DB clusters and Multi-AZ DB clusters + // Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Tags []*Tag `json:"tags,omitempty"` CustomDBClusterParameters `json:",inline"` } @@ -644,10 +627,10 @@ type DBClusterObservation struct { ActivityStreamMode *string `json:"activityStreamMode,omitempty"` // The status of the database activity stream. ActivityStreamStatus *string `json:"activityStreamStatus,omitempty"` - // Provides a list of the Amazon Web Services Identity and Access Management - // (IAM) roles that are associated with the DB cluster. IAM roles that are associated - // with a DB cluster grant permission for the DB cluster to access other Amazon - // Web Services on your behalf. + // A list of the Amazon Web Services Identity and Access Management (IAM) roles + // that are associated with the DB cluster. IAM roles that are associated with + // a DB cluster grant permission for the DB cluster to access other Amazon Web + // Services on your behalf. AssociatedRoles []*DBClusterRole `json:"associatedRoles,omitempty"` // The time when a stopped DB cluster is restarted automatically. 
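Pulling the clarified constraints together, here is a hedged sketch of a minimal Aurora MySQL DBClusterParameters value (import path assumed; every value mirrors a constraint quoted above rather than a tested configuration):

package main

import (
	"github.com/aws/aws-sdk-go/aws"

	// Assumed import path for the generated RDS types.
	rdsv1alpha1 "github.com/crossplane-contrib/provider-aws/apis/rds/v1alpha1"
)

// exampleCluster is a hypothetical helper, not part of this patch.
func exampleCluster() rdsv1alpha1.DBClusterParameters {
	return rdsv1alpha1.DBClusterParameters{
		Engine:                     aws.String("aurora-mysql"),        // required; aurora-mysql | aurora-postgresql | mysql | postgres
		EngineMode:                 aws.String("provisioned"),         // provisioned or serverless
		BackupRetentionPeriod:      aws.Int64(7),                      // must be 1-35
		PreferredMaintenanceWindow: aws.String("Sun:03:00-Sun:03:30"), // ddd:hh24:mi-ddd:hh24:mi, UTC, >= 30 minutes
		EnableCloudwatchLogsExports: []*string{
			aws.String("audit"), aws.String("error"), // Aurora MySQL log types from the list above
		},
		EnableLocalWriteForwarding: aws.Bool(true),             // new field in this SDK bump; Aurora only
		StorageType:                aws.String("aurora-iopt1"), // echoed back in responses, unlike the default aurora
	}
}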
AutomaticRestartTime *metav1.Time `json:"automaticRestartTime,omitempty"` @@ -660,29 +643,28 @@ type DBClusterObservation struct { // Serverless v1 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html) // in the Amazon Aurora User Guide. Capacity *int64 `json:"capacity,omitempty"` - // Identifies the clone group to which the DB cluster is associated. + // The ID of the clone group with which the DB cluster is associated. CloneGroupID *string `json:"cloneGroupID,omitempty"` - // Specifies the time when the DB cluster was created, in Universal Coordinated - // Time (UTC). + // The time when the DB cluster was created, in Universal Coordinated Time (UTC). ClusterCreateTime *metav1.Time `json:"clusterCreateTime,omitempty"` - // Specifies whether the DB cluster is a clone of a DB cluster owned by a different + // Indicates whether the DB cluster is a clone of a DB cluster owned by a different // Amazon Web Services account. CrossAccountClone *bool `json:"crossAccountClone,omitempty"` - // Identifies all custom endpoints associated with the cluster. + // The custom endpoints associated with the DB cluster. CustomEndpoints []*string `json:"customEndpoints,omitempty"` // The Amazon Resource Name (ARN) for the DB cluster. DBClusterARN *string `json:"dbClusterARN,omitempty"` - // Contains a user-supplied DB cluster identifier. This identifier is the unique + // The user-supplied identifier for the DB cluster. This identifier is the unique // key that identifies a DB cluster. DBClusterIdentifier *string `json:"dbClusterIdentifier,omitempty"` - // Provides the list of instances that make up the DB cluster. + // The list of DB instances that make up the DB cluster. DBClusterMembers []*DBClusterMember `json:"dbClusterMembers,omitempty"` - // Provides the list of option group memberships for this DB cluster. + // The list of option group memberships for this DB cluster. DBClusterOptionGroupMemberships []*DBClusterOptionGroupStatus `json:"dbClusterOptionGroupMemberships,omitempty"` - // Specifies the name of the DB cluster parameter group for the DB cluster. + // The name of the DB cluster parameter group for the DB cluster. DBClusterParameterGroup *string `json:"dbClusterParameterGroup,omitempty"` - // Specifies information on the subnet group associated with the DB cluster, - // including the name, description, and subnets in the subnet group. + // Information about the subnet group associated with the DB cluster, including + // the name, description, and subnets in the subnet group. DBSubnetGroup *string `json:"dbSubnetGroup,omitempty"` // The Amazon Web Services Region-unique, immutable identifier for the DB cluster. // This identifier is found in Amazon Web Services CloudTrail log entries whenever @@ -702,23 +684,22 @@ type DBClusterObservation struct { // DB engine, see Amazon RDS Database Log Files (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_LogAccess.html) // in the Amazon Aurora User Guide. EnabledCloudwatchLogsExports []*string `json:"enabledCloudwatchLogsExports,omitempty"` - // Specifies the connection endpoint for the primary instance of the DB cluster. + // The connection endpoint for the primary instance of the DB cluster. Endpoint *string `json:"endpoint,omitempty"` - // Indicates the database engine version. + // The version of the database engine. EngineVersion *string `json:"engineVersion,omitempty"` - // Specifies whether you have requested to enable write forwarding for a secondary - // cluster in an Aurora global database. 
Because write forwarding takes time - // to enable, check the value of GlobalWriteForwardingStatus to confirm that - // the request has completed before using the write forwarding feature for this - // cluster. + // Specifies whether write forwarding is enabled for a secondary cluster in + // an Aurora global database. Because write forwarding takes time to enable, + // check the value of GlobalWriteForwardingStatus to confirm that the request + // has completed before using the write forwarding feature for this cluster. GlobalWriteForwardingRequested *bool `json:"globalWriteForwardingRequested,omitempty"` - // Specifies whether a secondary cluster in an Aurora global database has write - // forwarding enabled, not enabled, or is in the process of enabling it. + // The status of write forwarding for a secondary cluster in an Aurora global + // database. GlobalWriteForwardingStatus *string `json:"globalWriteForwardingStatus,omitempty"` - // Specifies the ID that Amazon Route 53 assigns when you create a hosted zone. + // The ID that Amazon Route 53 assigns when you create a hosted zone. HostedZoneID *string `json:"hostedZoneID,omitempty"` - // A value that indicates whether the HTTP endpoint for an Aurora Serverless - // v1 DB cluster is enabled. + // Indicates whether the HTTP endpoint for an Aurora Serverless v1 DB cluster + // is enabled. // // When enabled, the HTTP endpoint provides a connectionless web service API // for running SQL queries on the Aurora Serverless v1 DB cluster. You can also @@ -727,14 +708,21 @@ type DBClusterObservation struct { // For more information, see Using the Data API for Aurora Serverless v1 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) // in the Amazon Aurora User Guide. HTTPEndpointEnabled *bool `json:"httpEndpointEnabled,omitempty"` - // A value that indicates whether the mapping of Amazon Web Services Identity - // and Access Management (IAM) accounts to database accounts is enabled. + // Indicates whether the mapping of Amazon Web Services Identity and Access + // Management (IAM) accounts to database accounts is enabled. IAMDatabaseAuthenticationEnabled *bool `json:"iamDatabaseAuthenticationEnabled,omitempty"` - // Specifies the latest time to which a database can be restored with point-in-time - // restore. + // The next time you can modify the DB cluster to use the aurora-iopt1 storage + // type. + // + // This setting is only for Aurora DB clusters. + IOOptimizedNextAllowedModificationTime *metav1.Time `json:"iOOptimizedNextAllowedModificationTime,omitempty"` + // The latest time to which a database can be restored with point-in-time restore. LatestRestorableTime *metav1.Time `json:"latestRestorableTime,omitempty"` - // Contains the secret managed by RDS in Amazon Web Services Secrets Manager - // for the master user password. + // Specifies whether an Aurora DB cluster has in-cluster write forwarding enabled, + // not enabled, requested, or is in the process of enabling it. + LocalWriteForwardingStatus *string `json:"localWriteForwardingStatus,omitempty"` + // The secret managed by RDS in Amazon Web Services Secrets Manager for the + // master user password. 
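The observation docs above explicitly advise checking GlobalWriteForwardingStatus before relying on write forwarding, since enabling it takes time. A hedged sketch of that readiness check (the lowercase "enabled" status string is an assumption about the AWS API, not stated in this patch):

package main

import (
	// Assumed import path for the generated RDS types.
	rdsv1alpha1 "github.com/crossplane-contrib/provider-aws/apis/rds/v1alpha1"
)

// writeForwardingReady is a hypothetical helper, not part of this patch.
func writeForwardingReady(obs rdsv1alpha1.DBClusterObservation) bool {
	return obs.GlobalWriteForwardingStatus != nil &&
		*obs.GlobalWriteForwardingStatus == "enabled" // assumed status value
}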
// // For more information, see Password management with Amazon Web Services Secrets // Manager (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) @@ -742,12 +730,11 @@ type DBClusterObservation struct { // Secrets Manager (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-secrets-manager.html) // in the Amazon Aurora User Guide. MasterUserSecret *MasterUserSecret `json:"masterUserSecret,omitempty"` - // Specifies whether the DB cluster has instances in multiple Availability Zones. + // Indicates whether the DB cluster has instances in multiple Availability Zones. MultiAZ *bool `json:"multiAZ,omitempty"` - // Specifies the progress of the operation as a percentage. + // The progress of the operation as a percentage. PercentProgress *string `json:"percentProgress,omitempty"` - // True if Performance Insights is enabled for the DB cluster, and otherwise - // false. + // Indicates whether Performance Insights is enabled for the DB cluster. // // This setting is only for non-Aurora Multi-AZ DB clusters. PerformanceInsightsEnabled *bool `json:"performanceInsightsEnabled,omitempty"` @@ -768,11 +755,11 @@ type DBClusterObservation struct { ReaderEndpoint *string `json:"readerEndpoint,omitempty"` ScalingConfigurationInfo *ScalingConfigurationInfo `json:"scalingConfigurationInfo,omitempty"` - // Specifies the current state of this DB cluster. + // The current state of this DB cluster. Status *string `json:"status,omitempty"` TagList []*Tag `json:"tagList,omitempty"` - // Provides a list of VPC security groups that the DB cluster belongs to. + // The list of VPC security groups that the DB cluster belongs to. VPCSecurityGroups []*VPCSecurityGroupMembership `json:"vpcSecurityGroups,omitempty"` } diff --git a/apis/rds/v1alpha1/zz_db_instance.go b/apis/rds/v1alpha1/zz_db_instance.go index 98e53f2a0b..381d482eea 100644 --- a/apis/rds/v1alpha1/zz_db_instance.go +++ b/apis/rds/v1alpha1/zz_db_instance.go @@ -31,13 +31,10 @@ type DBInstanceParameters struct { Region string `json:"region"` // The amount of storage in gibibytes (GiB) to allocate for the DB instance. // - // Type: Integer - // - // Amazon Aurora - // - // Not applicable. Aurora cluster volumes automatically grow as the amount of - // data in your database increases, though you are only charged for the space - // that you use in an Aurora cluster volume. + // This setting doesn't apply to Amazon Aurora DB instances. Aurora cluster + // volumes automatically grow as the amount of data in your database increases, + // though you are only charged for the space that you use in an Aurora cluster + // volume. // // Amazon RDS Custom // @@ -49,7 +46,7 @@ type DBInstanceParameters struct { // * Provisioned IOPS storage (io1): Must be an integer from 40 to 65536 // for RDS Custom for Oracle, 16384 for RDS Custom for SQL Server. // - // MySQL + // RDS for MariaDB // // Constraints to the amount of storage for each storage type are the following: // @@ -60,7 +57,7 @@ type DBInstanceParameters struct { // // * Magnetic storage (standard): Must be an integer from 5 to 3072. // - // MariaDB + // RDS for MySQL // // Constraints to the amount of storage for each storage type are the following: // @@ -71,7 +68,7 @@ type DBInstanceParameters struct { // // * Magnetic storage (standard): Must be an integer from 5 to 3072. 
// - // PostgreSQL + // RDS for Oracle // // Constraints to the amount of storage for each storage type are the following: // @@ -80,9 +77,9 @@ type DBInstanceParameters struct { // // * Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. // - // * Magnetic storage (standard): Must be an integer from 5 to 3072. + // * Magnetic storage (standard): Must be an integer from 10 to 3072. // - // Oracle + // RDS for PostgreSQL // // Constraints to the amount of storage for each storage type are the following: // @@ -91,9 +88,9 @@ type DBInstanceParameters struct { // // * Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. // - // * Magnetic storage (standard): Must be an integer from 10 to 3072. + // * Magnetic storage (standard): Must be an integer from 5 to 3072. // - // SQL Server + // RDS for SQL Server // // Constraints to the amount of storage for each storage type are the following: // @@ -109,19 +106,21 @@ type DBInstanceParameters struct { // be an integer from 20 to 1024. Web and Express editions: Must be an integer // from 20 to 1024. AllocatedStorage *int64 `json:"allocatedStorage,omitempty"` - // A value that indicates whether major version upgrades are allowed. Changing - // this parameter doesn't result in an outage and the change is asynchronously - // applied as soon as possible. + // Specifies whether major version upgrades are allowed. Changing this parameter + // doesn't result in an outage and the change is asynchronously applied as soon + // as possible. + // + // This setting doesn't apply to RDS Custom DB instances. // - // This setting doesn't apply to RDS Custom. + // Constraints: // - // Constraints: Major version upgrades must be allowed when specifying a value - // for the EngineVersion parameter that is a different major version than the - // DB instance's current version. + // * Major version upgrades must be allowed when specifying a value for the + // EngineVersion parameter that's a different major version than the DB instance's + // current version. AllowMajorVersionUpgrade *bool `json:"allowMajorVersionUpgrade,omitempty"` - // A value that indicates whether minor engine upgrades are applied automatically - // to the DB instance during the maintenance window. By default, minor engine - // upgrades are applied automatically. + // Specifies whether minor engine upgrades are applied automatically to the + // DB instance during the maintenance window. By default, minor engine upgrades + // are applied automatically. // // If you create an RDS Custom DB instance, you must set AutoMinorVersionUpgrade // to false. @@ -130,53 +129,58 @@ type DBInstanceParameters struct { // on Amazon Web Services Regions and Availability Zones, see Regions and Availability // Zones (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html). // - // Amazon Aurora - // - // Each Aurora DB cluster hosts copies of its storage in three separate Availability - // Zones. Specify one of these Availability Zones. Aurora automatically chooses - // an appropriate Availability Zone if you don't specify one. + // For Amazon Aurora, each Aurora DB cluster hosts copies of its storage in + // three separate Availability Zones. Specify one of these Availability Zones. + // Aurora automatically chooses an appropriate Availability Zone if you don't + // specify one. // // Default: A random, system-chosen Availability Zone in the endpoint's Amazon // Web Services Region. 
// - // Example: us-east-1d + // Constraints: // - // Constraint: The AvailabilityZone parameter can't be specified if the DB instance - // is a Multi-AZ deployment. The specified Availability Zone must be in the - // same Amazon Web Services Region as the current endpoint. + // * The AvailabilityZone parameter can't be specified if the DB instance + // is a Multi-AZ deployment. + // + // * The specified Availability Zone must be in the same Amazon Web Services + // Region as the current endpoint. + // + // Example: us-east-1d AvailabilityZone *string `json:"availabilityZone,omitempty"` // The number of days for which automated backups are retained. Setting this // parameter to a positive number enables backups. Setting this parameter to // 0 disables automated backups. // - // Amazon Aurora - // - // Not applicable. The retention period for automated backups is managed by - // the DB cluster. + // This setting doesn't apply to Amazon Aurora DB instances. The retention period + // for automated backups is managed by the DB cluster. // // Default: 1 // // Constraints: // - // * Must be a value from 0 to 35 + // * Must be a value from 0 to 35. // - // * Can't be set to 0 if the DB instance is a source to read replicas + // * Can't be set to 0 if the DB instance is a source to read replicas. // - // * Can't be set to 0 for an RDS Custom for Oracle DB instance + // * Can't be set to 0 for an RDS Custom for Oracle DB instance. BackupRetentionPeriod *int64 `json:"backupRetentionPeriod,omitempty"` - // Specifies where automated backups and manual snapshots are stored. + // The location for storing automated backups and manual snapshots. // - // Possible values are outposts (Amazon Web Services Outposts) and region (Amazon - // Web Services Region). The default is region. + // Valid Values: + // + // * outposts (Amazon Web Services Outposts) + // + // * region (Amazon Web Services Region) + // + // Default: region // // For more information, see Working with Amazon RDS on Amazon Web Services // Outposts (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-on-outposts.html) // in the Amazon RDS User Guide. BackupTarget *string `json:"backupTarget,omitempty"` - // Specifies the CA certificate identifier to use for the DB instance’s server - // certificate. + // The CA certificate identifier to use for the DB instance's server certificate. // - // This setting doesn't apply to RDS Custom. + // This setting doesn't apply to RDS Custom DB instances. // // For more information, see Using SSL/TLS to encrypt a connection to a DB instance // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL.html) @@ -184,27 +188,30 @@ type DBInstanceParameters struct { // a DB cluster (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.SSL.html) // in the Amazon Aurora User Guide. CACertificateIdentifier *string `json:"caCertificateIdentifier,omitempty"` - // For supported engines, this value indicates that the DB instance should be - // associated with the specified CharacterSet. + // For supported engines, the character set (CharacterSet) to associate the + // DB instance with. // - // This setting doesn't apply to RDS Custom. However, if you need to change - // the character set, you can change it on the database itself. + // This setting doesn't apply to the following DB instances: // - // Amazon Aurora + // * Amazon Aurora - The character set is managed by the DB cluster. For + // more information, see CreateDBCluster. // - // Not applicable.
The character set is managed by the DB cluster. For more - // information, see CreateDBCluster. + // * RDS Custom - However, if you need to change the character set, you can + // change it on the database itself. CharacterSetName *string `json:"characterSetName,omitempty"` - // A value that indicates whether to copy tags from the DB instance to snapshots - // of the DB instance. By default, tags are not copied. - // - // Amazon Aurora + // Specifies whether to copy tags from the DB instance to snapshots of the DB + // instance. By default, tags are not copied. // - // Not applicable. Copying tags to snapshots is managed by the DB cluster. Setting - // this value for an Aurora DB instance has no effect on the DB cluster setting. + // This setting doesn't apply to Amazon Aurora DB instances. Copying tags to + // snapshots is managed by the DB cluster. Setting this value for an Aurora + // DB instance has no effect on the DB cluster setting. CopyTagsToSnapshot *bool `json:"copyTagsToSnapshot,omitempty"` // The instance profile associated with the underlying Amazon EC2 instance of - // an RDS Custom DB instance. The instance profile must meet the following requirements: + // an RDS Custom DB instance. + // + // This setting is required for RDS Custom. + // + // Constraints: // // * The profile must exist in your account. // @@ -217,12 +224,10 @@ type DBInstanceParameters struct { // For the list of permissions required for the IAM role, see Configure IAM // and your VPC (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/custom-setup-orcl.html#custom-setup-orcl.iam-vpc) // in the Amazon RDS User Guide. - // - // This setting is required for RDS Custom. CustomIAMInstanceProfile *string `json:"customIAMInstanceProfile,omitempty"` - // The identifier of the DB cluster that the instance will belong to. + // The identifier of the DB cluster that this DB instance will belong to. // - // This setting doesn't apply to RDS Custom. + // This setting doesn't apply to RDS Custom DB instances. DBClusterIdentifier *string `json:"dbClusterIdentifier,omitempty"` // The compute and memory capacity of the DB instance, for example db.m5.large. // Not all DB instance classes are available in all Amazon Web Services Regions, @@ -280,9 +285,9 @@ type DBInstanceParameters struct { // // Oracle // - // The Oracle System ID (SID) of the created DB instance. If you specify null, - // the default value ORCL is used. You can't specify the string NULL, or any - // other reserved word, for DBName. + // The Oracle System ID (SID) of the created DB instance. If you don't specify + // a value, the default value is ORCL. You can't specify the string null, or + // any other reserved word, for DBName. // // Default: ORCL // @@ -293,7 +298,8 @@ type DBInstanceParameters struct { // Amazon RDS Custom for Oracle // // The Oracle System ID (SID) of the created RDS Custom DB instance. If you - // don't specify a value, the default value is ORCL. + // don't specify a value, the default value is ORCL for non-CDBs and RDSCDB + // for CDBs. // // Default: ORCL // @@ -342,95 +348,125 @@ type DBInstanceParameters struct { // * It can't be a word reserved by the database engine. DBName *string `json:"dbName,omitempty"` // The name of the DB parameter group to associate with this DB instance. If - // you do not specify a value, then the default DB parameter group for the specified - // DB engine and version is used. 
+ // you don't specify a value, then Amazon RDS uses the default DB parameter + // group for the specified DB engine and version. // - // This setting doesn't apply to RDS Custom. + // This setting doesn't apply to RDS Custom DB instances. // // Constraints: // - // * It must be 1 to 255 letters, numbers, or hyphens. + // * Must be 1 to 255 letters, numbers, or hyphens. // // * The first character must be a letter. // - // * It can't end with a hyphen or contain two consecutive hyphens. + // * Can't end with a hyphen or contain two consecutive hyphens. DBParameterGroupName *string `json:"dbParameterGroupName,omitempty"` // A DB subnet group to associate with this DB instance. // - // Constraints: Must match the name of an existing DBSubnetGroup. Must not be - // default. + // Constraints: + // + // * Must match the name of an existing DB subnet group. + // + // * Must not be default. // // Example: mydbsubnetgroup DBSubnetGroupName *string `json:"dbSubnetGroupName,omitempty"` - // A value that indicates whether the DB instance has deletion protection enabled. - // The database can't be deleted when deletion protection is enabled. By default, - // deletion protection isn't enabled. For more information, see Deleting a DB - // Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). - // - // Amazon Aurora - // - // Not applicable. You can enable or disable deletion protection for the DB - // cluster. For more information, see CreateDBCluster. DB instances in a DB - // cluster can be deleted even when deletion protection is enabled for the DB - // cluster. + // The Oracle system identifier (SID), which is the name of the Oracle database + // instance that manages your database files. In this context, the term "Oracle + // database instance" refers exclusively to the system global area (SGA) and + // Oracle background processes. If you don't specify a SID, the value defaults + // to RDSCDB. The Oracle SID is also the name of your CDB. + DBSystemID *string `json:"dbSystemID,omitempty"` + // Specifies whether the DB instance has deletion protection enabled. The database + // can't be deleted when deletion protection is enabled. By default, deletion + // protection isn't enabled. For more information, see Deleting a DB Instance + // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). + // + // This setting doesn't apply to Amazon Aurora DB instances. You can enable + // or disable deletion protection for the DB cluster. For more information, + // see CreateDBCluster. DB instances in a DB cluster can be deleted even when + // deletion protection is enabled for the DB cluster. DeletionProtection *bool `json:"deletionProtection,omitempty"` // The Active Directory directory ID to create the DB instance in. Currently, - // only MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances can + // only Microsoft SQL Server, MySQL, Oracle, and PostgreSQL DB instances can // be created in an Active Directory Domain. // // For more information, see Kerberos Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/kerberos-authentication.html) // in the Amazon RDS User Guide. // - // This setting doesn't apply to RDS Custom. + // This setting doesn't apply to the following DB instances: // - // Amazon Aurora + // * Amazon Aurora (The domain is managed by the DB cluster.) // - // Not applicable. The domain is managed by the DB cluster. 
+ // * RDS Custom Domain *string `json:"domain,omitempty"` - // Specify the name of the IAM role to be used when making API calls to the - // Directory Service. + // The ARN for the Secrets Manager secret with the credentials for the user + // joining the domain. // - // This setting doesn't apply to RDS Custom. + // Example: arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456 + DomainAuthSecretARN *string `json:"domainAuthSecretARN,omitempty"` + // The IPv4 DNS IP addresses of your primary and secondary Active Directory + // domain controllers. // - // Amazon Aurora + // Constraints: // - // Not applicable. The domain is managed by the DB cluster. - DomainIAMRoleName *string `json:"domainIAMRoleName,omitempty"` - // The list of log types that need to be enabled for exporting to CloudWatch - // Logs. The values in the list depend on the DB engine. For more information, - // see Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) - // in the Amazon RDS User Guide. + // * Two IP addresses must be provided. If there isn't a secondary domain + // controller, use the IP address of the primary domain controller for both + // entries in the list. // - // Amazon Aurora + // Example: 123.124.125.126,234.235.236.237 + DomainDNSIPs []*string `json:"domainDNSIPs,omitempty"` + // The fully qualified domain name (FQDN) of an Active Directory domain. // - // Not applicable. CloudWatch Logs exports are managed by the DB cluster. + // Constraints: // - // RDS Custom + // * Can't be longer than 64 characters. // - // Not applicable. + // Example: mymanagedADtest.mymanagedAD.mydomain + DomainFqdn *string `json:"domainFqdn,omitempty"` + // The name of the IAM role to use when making API calls to the Directory Service. // - // MariaDB + // This setting doesn't apply to the following DB instances: // - // Possible values are audit, error, general, and slowquery. + // * Amazon Aurora (The domain is managed by the DB cluster.) // - // Microsoft SQL Server + // * RDS Custom + DomainIAMRoleName *string `json:"domainIAMRoleName,omitempty"` + // The Active Directory organizational unit for your DB instance to join. // - // Possible values are agent and error. + // Constraints: // - // MySQL + // * Must be in the distinguished name format. // - // Possible values are audit, error, general, and slowquery. + // * Can't be longer than 64 characters. // - // Oracle + // Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain + DomainOu *string `json:"domainOu,omitempty"` + // The list of log types that need to be enabled for exporting to CloudWatch + // Logs. For more information, see Publishing Database Logs to Amazon CloudWatch + // Logs (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) + // in the Amazon RDS User Guide. // - // Possible values are alert, audit, listener, trace, and oemagent. + // This setting doesn't apply to the following DB instances: // - // PostgreSQL + // * Amazon Aurora (CloudWatch Logs exports are managed by the DB cluster.) 
+ // + // * RDS Custom + // + // The following values are valid for each DB engine: + // + // * RDS for MariaDB - audit | error | general | slowquery + // + // * RDS for Microsoft SQL Server - agent | error + // + // * RDS for MySQL - audit | error | general | slowquery + // + // * RDS for Oracle - alert | audit | listener | trace | oemagent // - // Possible values are postgresql and upgrade. + // * RDS for PostgreSQL - postgresql | upgrade EnableCloudwatchLogsExports []*string `json:"enableCloudwatchLogsExports,omitempty"` - // A value that indicates whether to enable a customer-owned IP address (CoIP) - // for an RDS on Outposts DB instance. + // Specifies whether to enable a customer-owned IP address (CoIP) for an RDS + // on Outposts DB instance. // // A CoIP provides local or external connectivity to resources in your Outpost // subnets through your on-premises network. For some use cases, a CoIP can @@ -444,46 +480,46 @@ type DBInstanceParameters struct { // For more information about CoIPs, see Customer-owned IP addresses (https://docs.aws.amazon.com/outposts/latest/userguide/routing.html#ip-addressing) // in the Amazon Web Services Outposts User Guide. EnableCustomerOwnedIP *bool `json:"enableCustomerOwnedIP,omitempty"` - // A value that indicates whether to enable mapping of Amazon Web Services Identity - // and Access Management (IAM) accounts to database accounts. By default, mapping - // isn't enabled. + // Specifies whether to enable mapping of Amazon Web Services Identity and Access + // Management (IAM) accounts to database accounts. By default, mapping isn't + // enabled. // // For more information, see IAM Database Authentication for MySQL and PostgreSQL // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html) // in the Amazon RDS User Guide. // - // This setting doesn't apply to RDS Custom. + // This setting doesn't apply to the following DB instances: // - // Amazon Aurora + // * Amazon Aurora (Mapping Amazon Web Services IAM accounts to database + // accounts is managed by the DB cluster.) // - // Not applicable. Mapping Amazon Web Services IAM accounts to database accounts - // is managed by the DB cluster. + // * RDS Custom EnableIAMDatabaseAuthentication *bool `json:"enableIAMDatabaseAuthentication,omitempty"` - // A value that indicates whether to enable Performance Insights for the DB - // instance. For more information, see Using Amazon Performance Insights (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) + // Specifies whether to enable Performance Insights for the DB instance. For + // more information, see Using Amazon Performance Insights (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) // in the Amazon RDS User Guide. // - // This setting doesn't apply to RDS Custom. + // This setting doesn't apply to RDS Custom DB instances. EnablePerformanceInsights *bool `json:"enablePerformanceInsights,omitempty"` - // The name of the database engine to be used for this instance. + // The database engine to use for this DB instance. // - // Not every database engine is available for every Amazon Web Services Region. + // Not every database engine is available in every Amazon Web Services Region. 
// // Valid Values: // - // * aurora (for MySQL 5.6-compatible Aurora) + // * aurora-mysql (for Aurora MySQL DB instances) // - // * aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora) + // * aurora-postgresql (for Aurora PostgreSQL DB instances) // - // * aurora-postgresql + // * custom-oracle-ee (for RDS Custom for Oracle DB instances) // - // * custom-oracle-ee (for RDS Custom for Oracle instances) + // * custom-oracle-ee-cdb (for RDS Custom for Oracle DB instances) // - // * custom-sqlserver-ee (for RDS Custom for SQL Server instances) + // * custom-sqlserver-ee (for RDS Custom for SQL Server DB instances) // - // * custom-sqlserver-se (for RDS Custom for SQL Server instances) + // * custom-sqlserver-se (for RDS Custom for SQL Server DB instances) // - // * custom-sqlserver-web (for RDS Custom for SQL Server instances) + // * custom-sqlserver-web (for RDS Custom for SQL Server DB instances) // // * mariadb // @@ -508,19 +544,21 @@ type DBInstanceParameters struct { // * sqlserver-web // +kubebuilder:validation:Required Engine *string `json:"engine"` - // The amount of Provisioned IOPS (input/output operations per second) to be - // initially allocated for the DB instance. For information about valid IOPS - // values, see Amazon RDS DB instance storage (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html) + // The amount of Provisioned IOPS (input/output operations per second) to initially + // allocate for the DB instance. For information about valid IOPS values, see + // Amazon RDS DB instance storage (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html) // in the Amazon RDS User Guide. // - // Constraints: For MariaDB, MySQL, Oracle, and PostgreSQL DB instances, must - // be a multiple between .5 and 50 of the storage amount for the DB instance. - // For SQL Server DB instances, must be a multiple between 1 and 50 of the storage - // amount for the DB instance. + // This setting doesn't apply to Amazon Aurora DB instances. Storage is managed + // by the DB cluster. + // + // Constraints: // - // Amazon Aurora + // * For RDS for MariaDB, MySQL, Oracle, and PostgreSQL - Must be a multiple + // between .5 and 50 of the storage amount for the DB instance. // - // Not applicable. Storage is managed by the DB cluster. + // * For RDS for SQL Server - Must be a multiple between 1 and 50 of the + // storage amount for the DB instance. IOPS *int64 `json:"iops,omitempty"` // The Amazon Web Services KMS key identifier for an encrypted DB instance. // @@ -528,35 +566,38 @@ type DBInstanceParameters struct { // ARN, or alias name for the KMS key. To use a KMS key in a different Amazon // Web Services account, specify the key ARN or alias ARN. // - // Amazon Aurora - // - // Not applicable. The Amazon Web Services KMS key identifier is managed by - // the DB cluster. For more information, see CreateDBCluster. + // This setting doesn't apply to Amazon Aurora DB instances. The Amazon Web + // Services KMS key identifier is managed by the DB cluster. For more information, + // see CreateDBCluster. // // If StorageEncrypted is enabled, and you do not specify a value for the KmsKeyId // parameter, then Amazon RDS uses your default KMS key. There is a default // KMS key for your Amazon Web Services account. Your Amazon Web Services account // has a different default KMS key for each Amazon Web Services Region. // - // Amazon RDS Custom - // - // A KMS key is required for RDS Custom instances. 
For most RDS engines, if - // you leave this parameter empty while enabling StorageEncrypted, the engine - // uses the default KMS key. However, RDS Custom doesn't use the default key - // when this parameter is empty. You must explicitly specify a key. + // For Amazon RDS Custom, a KMS key is required for DB instances. For most RDS + // engines, if you leave this parameter empty while enabling StorageEncrypted, + // the engine uses the default KMS key. However, RDS Custom doesn't use the + // default key when this parameter is empty. You must explicitly specify a key. KMSKeyID *string `json:"kmsKeyID,omitempty"` - // License model information for this DB instance. + // The license model information for this DB instance. // - // Valid values: license-included | bring-your-own-license | general-public-license + // This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. // - // This setting doesn't apply to RDS Custom. + // Valid Values: + // + // * RDS for MariaDB - general-public-license + // + // * RDS for Microsoft SQL Server - license-included + // + // * RDS for MySQL - general-public-license // - // Amazon Aurora + // * RDS for Oracle - bring-your-own-license | license-included // - // Not applicable. + // * RDS for PostgreSQL - postgresql-license LicenseModel *string `json:"licenseModel,omitempty"` - // A value that indicates whether to manage the master user password with Amazon - // Web Services Secrets Manager. + // Specifies whether to manage the master user password with Amazon Web Services + // Secrets Manager. // // For more information, see Password management with Amazon Web Services Secrets // Manager (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) @@ -588,16 +629,13 @@ type DBInstanceParameters struct { MasterUserSecretKMSKeyID *string `json:"masterUserSecretKMSKeyID,omitempty"` // The name for the master user. // - // Amazon Aurora + // This setting doesn't apply to Amazon Aurora DB instances. The name for the + // master user is managed by the DB cluster. // - // Not applicable. The name for the master user is managed by the DB cluster. - // - // Amazon RDS + // This setting is required for RDS DB instances. // // Constraints: // - // * Required. - // // * Must be 1 to 16 letters, numbers, or underscores. // // * First character must be a letter. @@ -612,22 +650,24 @@ type DBInstanceParameters struct { // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PIOPS.StorageTypes.html#USER_PIOPS.Autoscaling) // in the Amazon RDS User Guide. // - // This setting doesn't apply to RDS Custom. + // This setting doesn't apply to the following DB instances: // - // Amazon Aurora + // * Amazon Aurora (Storage is managed by the DB cluster.) // - // Not applicable. Storage is managed by the DB cluster. + // * RDS Custom MaxAllocatedStorage *int64 `json:"maxAllocatedStorage,omitempty"` // The interval, in seconds, between points when Enhanced Monitoring metrics // are collected for the DB instance. To disable collection of Enhanced Monitoring - // metrics, specify 0. The default is 0. + // metrics, specify 0. // // If MonitoringRoleArn is specified, then you must set MonitoringInterval to // a value other than 0. // - // This setting doesn't apply to RDS Custom. + // This setting doesn't apply to RDS Custom DB instances. 
// - // Valid Values: 0, 1, 5, 10, 15, 30, 60 + // Valid Values: 0 | 1 | 5 | 10 | 15 | 30 | 60 + // + // Default: 0 MonitoringInterval *int64 `json:"monitoringInterval,omitempty"` // The ARN for the IAM role that permits RDS to send enhanced monitoring metrics // to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. @@ -638,50 +678,40 @@ type DBInstanceParameters struct { // If MonitoringInterval is set to a value other than 0, then you must supply // a MonitoringRoleArn value. // - // This setting doesn't apply to RDS Custom. + // This setting doesn't apply to RDS Custom DB instances. MonitoringRoleARN *string `json:"monitoringRoleARN,omitempty"` - // A value that indicates whether the DB instance is a Multi-AZ deployment. - // You can't set the AvailabilityZone parameter if the DB instance is a Multi-AZ - // deployment. + // Specifies whether the DB instance is a Multi-AZ deployment. You can't set + // the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment. // - // This setting doesn't apply to RDS Custom. + // This setting doesn't apply to the following DB instances: // - // Amazon Aurora + // * Amazon Aurora (DB instance Availability Zones (AZs) are managed by the + // DB cluster.) // - // Not applicable. DB instance Availability Zones (AZs) are managed by the DB - // cluster. + // * RDS Custom MultiAZ *bool `json:"multiAZ,omitempty"` // The name of the NCHAR character set for the Oracle DB instance. // - // This parameter doesn't apply to RDS Custom. + // This setting doesn't apply to RDS Custom DB instances. NcharCharacterSetName *string `json:"ncharCharacterSetName,omitempty"` // The network type of the DB instance. // - // Valid values: - // - // * IPV4 - // - // * DUAL - // // The network type is determined by the DBSubnetGroup specified for the DB // instance. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 // and the IPv6 protocols (DUAL). // // For more information, see Working with a DB instance in a VPC (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html) // in the Amazon RDS User Guide. + // + // Valid Values: IPV4 | DUAL NetworkType *string `json:"networkType,omitempty"` - // A value that indicates that the DB instance should be associated with the - // specified option group. + // The option group to associate the DB instance with. // // Permanent options, such as the TDE option for Oracle Advanced Security TDE, // can't be removed from an option group. Also, that option group can't be removed // from a DB instance after it is associated with a DB instance. // - // This setting doesn't apply to RDS Custom. - // - // Amazon Aurora - // - // Not applicable. + // This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. OptionGroupName *string `json:"optionGroupName,omitempty"` // The Amazon Web Services KMS key identifier for encryption of Performance // Insights data. @@ -689,83 +719,54 @@ type DBInstanceParameters struct { // The Amazon Web Services KMS key identifier is the key ARN, key ID, alias // ARN, or alias name for the KMS key. // - // If you do not specify a value for PerformanceInsightsKMSKeyId, then Amazon + // If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon // RDS uses your default KMS key. There is a default KMS key for your Amazon // Web Services account. Your Amazon Web Services account has a different default // KMS key for each Amazon Web Services Region. // - // This setting doesn't apply to RDS Custom. 
+ // This setting doesn't apply to RDS Custom DB instances. PerformanceInsightsKMSKeyID *string `json:"performanceInsightsKMSKeyID,omitempty"` - // The number of days to retain Performance Insights data. The default is 7 - // days. The following values are valid: - // - // * 7 + // The number of days to retain Performance Insights data. // - // * month * 31, where month is a number of months from 1-23 + // This setting doesn't apply to RDS Custom DB instances. // - // * 731 - // - // For example, the following values are valid: - // - // * 93 (3 months * 31) + // Valid Values: // - // * 341 (11 months * 31) + // * 7 // - // * 589 (19 months * 31) + // * month * 31, where month is a number of months from 1-23. Examples: 93 + // (3 months * 31), 341 (11 months * 31), 589 (19 months * 31) // // * 731 // - // If you specify a retention period such as 94, which isn't a valid value, - // RDS issues an error. + // Default: 7 days // - // This setting doesn't apply to RDS Custom. + // If you specify a retention period that isn't valid, such as 94, Amazon RDS + // returns an error. PerformanceInsightsRetentionPeriod *int64 `json:"performanceInsightsRetentionPeriod,omitempty"` // The port number on which the database accepts connections. // - // MySQL - // - // Default: 3306 + // This setting doesn't apply to Aurora DB instances. The port number is managed + // by the cluster. // - // Valid values: 1150-65535 + // Valid Values: 1150-65535 // - // Type: Integer - // - // MariaDB + // Default: // - // Default: 3306 + // * RDS for MariaDB - 3306 // - // Valid values: 1150-65535 + // * RDS for Microsoft SQL Server - 1433 // - // Type: Integer - // - // PostgreSQL - // - // Default: 5432 - // - // Valid values: 1150-65535 - // - // Type: Integer - // - // Oracle + // * RDS for MySQL - 3306 // - // Default: 1521 + // * RDS for Oracle - 1521 // - // Valid values: 1150-65535 + // * RDS for PostgreSQL - 5432 // - // SQL Server - // - // Default: 1433 - // - // Valid values: 1150-65535 except 1234, 1434, 3260, 3343, 3389, 47001, and - // 49152-49156. - // - // Amazon Aurora - // - // Default: 3306 - // - // Valid values: 1150-65535 + // Constraints: // - // Type: Integer + // * For RDS for Microsoft SQL Server, the value can't be 1234, 1434, 3260, + // 3343, 3389, 47001, or 49152-49156. Port *int64 `json:"port,omitempty"` // The daily time range during which automated backups are created if automated // backups are enabled, using the BackupRetentionPeriod parameter. The default @@ -774,10 +775,8 @@ type DBInstanceParameters struct { // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithAutomatedBackups.html#USER_WorkingWithAutomatedBackups.BackupWindow) // in the Amazon RDS User Guide. // - // Amazon Aurora - // - // Not applicable. The daily time range for creating automated backups is managed - // by the DB cluster. + // This setting doesn't apply to Amazon Aurora DB instances. The daily time + // range for creating automated backups is managed by the DB cluster. // // Constraints: // @@ -789,41 +788,43 @@ type DBInstanceParameters struct { // // * Must be at least 30 minutes. PreferredBackupWindow *string `json:"preferredBackupWindow,omitempty"` - // The time range each week during which system maintenance can occur, in Universal - // Coordinated Time (UTC). For more information, see Amazon RDS Maintenance - // Window (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html#Concepts.DBMaintenance). 
- // - // Format: ddd:hh24:mi-ddd:hh24:mi + // The time range each week during which system maintenance can occur. For more + // information, see Amazon RDS Maintenance Window (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html#Concepts.DBMaintenance) + // in the Amazon RDS User Guide. // // The default is a 30-minute window selected at random from an 8-hour block // of time for each Amazon Web Services Region, occurring on a random day of // the week. // - // Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun. + // Constraints: + // + // * Must be in the format ddd:hh24:mi-ddd:hh24:mi. + // + // * The day values must be mon | tue | wed | thu | fri | sat | sun. + // + // * Must be in Universal Coordinated Time (UTC). + // + // * Must not conflict with the preferred backup window. // - // Constraints: Minimum 30-minute window. + // * Must be at least 30 minutes. PreferredMaintenanceWindow *string `json:"preferredMaintenanceWindow,omitempty"` // The number of CPU cores and the number of threads per core for the DB instance // class of the DB instance. // - // This setting doesn't apply to RDS Custom. - // - // Amazon Aurora - // - // Not applicable. + // This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. ProcessorFeatures []*ProcessorFeature `json:"processorFeatures,omitempty"` - // A value that specifies the order in which an Aurora Replica is promoted to - // the primary instance after a failure of the existing primary instance. For - // more information, see Fault Tolerance for an Aurora DB Cluster (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Backups.html#Aurora.Managing.FaultTolerance) + // The order of priority in which an Aurora Replica is promoted to the primary + // instance after a failure of the existing primary instance. For more information, + // see Fault Tolerance for an Aurora DB Cluster (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.AuroraHighAvailability.html#Aurora.Managing.FaultTolerance) // in the Amazon Aurora User Guide. // - // This setting doesn't apply to RDS Custom. + // This setting doesn't apply to RDS Custom DB instances. // // Default: 1 // // Valid Values: 0 - 15 PromotionTier *int64 `json:"promotionTier,omitempty"` - // A value that indicates whether the DB instance is publicly accessible. + // Specifies whether the DB instance is publicly accessible. // // When the DB instance is publicly accessible, its Domain Name System (DNS) // endpoint resolves to the private IP address from within the DB instance's @@ -856,48 +857,41 @@ type DBInstanceParameters struct { // * If the subnets are part of a VPC that has an internet gateway attached // to it, the DB instance is public. PubliclyAccessible *bool `json:"publiclyAccessible,omitempty"` - // A value that indicates whether the DB instance is encrypted. By default, - // it isn't encrypted. - // - // For RDS Custom instances, either set this parameter to true or leave it unset. - // If you set this parameter to false, RDS reports an error. + // Specifies whether the DB instance is encrypted. By default, it isn't encrypted. // - // Amazon Aurora + // For RDS Custom DB instances, either enable this setting or leave it unset. + // Otherwise, Amazon RDS reports an error. // - // Not applicable. The encryption for DB instances is managed by the DB cluster. + // This setting doesn't apply to Amazon Aurora DB instances. The encryption + // for DB instances is managed by the DB cluster.
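// An illustrative sketch, not part of the generated file: because RDS Custom
// never falls back to the default KMS key, an encrypted RDS Custom instance
// has to pair this setting with an explicit key (the key ARN below is a
// placeholder):
//
//	params := DBInstanceParameters{
//		Engine:           aws.String("custom-oracle-ee"),
//		StorageEncrypted: aws.Bool(true),
//		KMSKeyID:         aws.String("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE"),
//	}
//
// aws.String and aws.Bool are the pointer helpers from
// github.com/aws/aws-sdk-go/aws.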
StorageEncrypted *bool `json:"storageEncrypted,omitempty"` - // Specifies the storage throughput value for the DB instance. + // The storage throughput value for the DB instance. // // This setting applies only to the gp3 storage type. // - // This setting doesn't apply to RDS Custom or Amazon Aurora. + // This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. StorageThroughput *int64 `json:"storageThroughput,omitempty"` - // Specifies the storage type to be associated with the DB instance. - // - // Valid values: gp2 | gp3 | io1 | standard + // The storage type to associate with the DB instance. // // If you specify io1 or gp3, you must also include a value for the Iops parameter. // - // Default: io1 if the Iops parameter is specified, otherwise gp2 + // This setting doesn't apply to Amazon Aurora DB instances. Storage is managed + // by the DB cluster. // - // Amazon Aurora + // Valid Values: gp2 | gp3 | io1 | standard // - // Not applicable. Storage is managed by the DB cluster. + // Default: io1, if the Iops parameter is specified. Otherwise, gp2. StorageType *string `json:"storageType,omitempty"` // Tags to assign to the DB instance. Tags []*Tag `json:"tags,omitempty"` // The ARN from the key store with which to associate the instance for TDE encryption. // - // This setting doesn't apply to RDS Custom. - // - // Amazon Aurora - // - // Not applicable. + // This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. TDECredentialARN *string `json:"tdeCredentialARN,omitempty"` // The password for the given ARN from the key store in order to access the // device. // - // This setting doesn't apply to RDS Custom. + // This setting doesn't apply to RDS Custom DB instances. TDECredentialPassword *string `json:"tdeCredentialPassword,omitempty"` // The time zone of the DB instance. The time zone parameter is currently supported // only by Microsoft SQL Server (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.TimeZone). @@ -945,7 +939,7 @@ type DBInstanceObservation struct { AWSBackupRecoveryPointARN *string `json:"awsBackupRecoveryPointARN,omitempty"` // The details of the DB instance's server certificate. CertificateDetails *CertificateDetails `json:"certificateDetails,omitempty"` - // Specifies whether a customer-owned IP address (CoIP) is enabled for an RDS + // Indicates whether a customer-owned IP address (CoIP) is enabled for an RDS // on Outposts DB instance. // // A CoIP provides local or external connectivity to resources in your Outpost @@ -964,28 +958,25 @@ type DBInstanceObservation struct { DBInstanceARN *string `json:"dbInstanceARN,omitempty"` // The list of replicated automated backups associated with the DB instance. DBInstanceAutomatedBackupsReplications []*DBInstanceAutomatedBackupsReplication `json:"dbInstanceAutomatedBackupsReplications,omitempty"` - // Contains a user-supplied database identifier. This identifier is the unique - // key that identifies a DB instance. + // The user-supplied database identifier. This identifier is the unique key + // that identifies a DB instance. DBInstanceIdentifier *string `json:"dbInstanceIdentifier,omitempty"` - // Specifies the current state of this database. + // The current state of this database. // // For information about DB instance statuses, see Viewing DB instance status // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/accessing-monitoring.html#Overview.DBInstance.Status) // in the Amazon RDS User Guide. 
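// A minimal usage sketch (an assumption, not part of the generated types): a
// controller could gate readiness on the observed status string, for example:
//
//	func isAvailable(obs DBInstanceObservation) bool {
//		return obs.DBInstanceStatus != nil && *obs.DBInstanceStatus == "available"
//	}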
DBInstanceStatus *string `json:"dbInstanceStatus,omitempty"` - // Provides the list of DB parameter groups applied to this DB instance. + // The list of DB parameter groups applied to this DB instance. DBParameterGroups []*DBParameterGroupStatus_SDK `json:"dbParameterGroups,omitempty"` // A list of DB security group elements containing DBSecurityGroup.Name and // DBSecurityGroup.Status subelements. DBSecurityGroups []*DBSecurityGroupMembership `json:"dbSecurityGroups,omitempty"` - // Specifies information on the subnet group associated with the DB instance, - // including the name, description, and subnets in the subnet group. + // Information about the subnet group associated with the DB instance, including + // the name, description, and subnets in the subnet group. DBSubnetGroup *DBSubnetGroup `json:"dbSubnetGroup,omitempty"` - // The Oracle system ID (Oracle SID) for a container database (CDB). The Oracle - // SID is also the name of the CDB. This setting is valid for RDS Custom only. - DBSystemID *string `json:"dbSystemID,omitempty"` - // Specifies the port that the DB instance listens on. If the DB instance is - // part of a DB cluster, this can be a different port than the DB cluster port. + // The port that the DB instance listens on. If the DB instance is part of a + // DB cluster, this can be a different port than the DB cluster port. DBInstancePort *int64 `json:"dbInstancePort,omitempty"` // The Amazon Web Services Region-unique, immutable identifier for the DB instance. // This identifier is found in Amazon Web Services CloudTrail log entries whenever @@ -997,66 +988,63 @@ type DBInstanceObservation struct { // Logs. // // Log types vary by DB engine. For information about the log types for each - // DB engine, see Amazon RDS Database Log Files (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html) + // DB engine, see Monitoring Amazon RDS log files (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html) // in the Amazon RDS User Guide. EnabledCloudwatchLogsExports []*string `json:"enabledCloudwatchLogsExports,omitempty"` - // Specifies the connection endpoint. + // The connection endpoint for the DB instance. // - // The endpoint might not be shown for instances whose status is creating. + // The endpoint might not be shown for instances with the status of creating. Endpoint *Endpoint `json:"endpoint,omitempty"` - // Indicates the database engine version. + // The version of the database engine. EngineVersion *string `json:"engineVersion,omitempty"` // The Amazon Resource Name (ARN) of the Amazon CloudWatch Logs log stream that // receives the Enhanced Monitoring metrics data for the DB instance. EnhancedMonitoringResourceARN *string `json:"enhancedMonitoringResourceARN,omitempty"` - // True if mapping of Amazon Web Services Identity and Access Management (IAM) - // accounts to database accounts is enabled, and otherwise false. - // - // IAM database authentication can be enabled for the following database engines + // Indicates whether mapping of Amazon Web Services Identity and Access Management + // (IAM) accounts to database accounts is enabled for the DB instance. // - // * For MySQL 5.6, minor version 5.6.34 or higher - // - // * For MySQL 5.7, minor version 5.7.16 or higher - // - // * Aurora 5.6 or higher. To enable IAM database authentication for Aurora, - // see DBCluster Type. 
+ // For a list of engine versions that support IAM database authentication, see + // IAM database authentication (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RDS_Fea_Regions_DB-eng.Feature.IamDatabaseAuthentication.html) + // in the Amazon RDS User Guide and IAM database authentication in Aurora (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.Aurora_Fea_Regions_DB-eng.Feature.IAMdbauth.html) + // in the Amazon Aurora User Guide. IAMDatabaseAuthenticationEnabled *bool `json:"iamDatabaseAuthenticationEnabled,omitempty"` - // Provides the date and time the DB instance was created. + // The date and time when the DB instance was created. InstanceCreateTime *metav1.Time `json:"instanceCreateTime,omitempty"` - // Specifies the latest time to which a database can be restored with point-in-time - // restore. + // The latest time to which a database in this DB instance can be restored with + // point-in-time restore. LatestRestorableTime *metav1.Time `json:"latestRestorableTime,omitempty"` - // Specifies the listener connection endpoint for SQL Server Always On. + // The listener connection endpoint for SQL Server Always On. ListenerEndpoint *Endpoint `json:"listenerEndpoint,omitempty"` - // Contains the secret managed by RDS in Amazon Web Services Secrets Manager - // for the master user password. + // The secret managed by RDS in Amazon Web Services Secrets Manager for the + // master user password. // // For more information, see Password management with Amazon Web Services Secrets // Manager (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) // in the Amazon RDS User Guide. MasterUserSecret *MasterUserSecret `json:"masterUserSecret,omitempty"` - // Provides the list of option group memberships for this DB instance. + // The list of option group memberships for this DB instance. OptionGroupMemberships []*OptionGroupMembership `json:"optionGroupMemberships,omitempty"` - // A value that specifies that changes to the DB instance are pending. This - // element is only included when changes are pending. Specific changes are identified + // Information about pending changes to the DB instance. This information is + // returned only when there are pending changes. Specific changes are identified // by subelements. PendingModifiedValues *PendingModifiedValues `json:"pendingModifiedValues,omitempty"` - // True if Performance Insights is enabled for the DB instance, and otherwise - // false. + // The progress of the storage optimization operation as a percentage. + PercentProgress *string `json:"percentProgress,omitempty"` + // Indicates whether Performance Insights is enabled for the DB instance. PerformanceInsightsEnabled *bool `json:"performanceInsightsEnabled,omitempty"` - // Contains one or more identifiers of Aurora DB clusters to which the RDS DB - // instance is replicated as a read replica. For example, when you create an - // Aurora read replica of an RDS for MySQL DB instance, the Aurora MySQL DB - // cluster for the Aurora read replica is shown. This output doesn't contain - // information about cross-Region Aurora read replicas. + // The identifiers of Aurora DB clusters to which the RDS DB instance is replicated + // as a read replica. For example, when you create an Aurora read replica of + // an RDS for MySQL DB instance, the Aurora MySQL DB cluster for the Aurora + // read replica is shown. This output doesn't contain information about cross-Region + // Aurora read replicas. 
// // Currently, each RDS DB instance can have only one Aurora read replica. ReadReplicaDBClusterIdentifiers []*string `json:"readReplicaDBClusterIdentifiers,omitempty"` - // Contains one or more identifiers of the read replicas associated with this - // DB instance. + // The identifiers of the read replicas associated with this DB instance. ReadReplicaDBInstanceIdentifiers []*string `json:"readReplicaDBInstanceIdentifiers,omitempty"` - // Contains the identifier of the source DB instance if this DB instance is - // a read replica. + // The identifier of the source DB cluster if this DB instance is a read replica. + ReadReplicaSourceDBClusterIdentifier *string `json:"readReplicaSourceDBClusterIdentifier,omitempty"` + // The identifier of the source DB instance if this DB instance is a read replica. ReadReplicaSourceDBInstanceIdentifier *string `json:"readReplicaSourceDBInstanceIdentifier,omitempty"` // The open mode of an Oracle read replica. The default is open-read-only. For // more information, see Working with Oracle Read Replicas for Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/oracle-read-replicas.html) @@ -1071,13 +1059,12 @@ type DBInstanceObservation struct { // If present, specifies the name of the secondary Availability Zone for a DB // instance with multi-AZ support. SecondaryAvailabilityZone *string `json:"secondaryAvailabilityZone,omitempty"` - // The status of a read replica. If the instance isn't a read replica, this - // is blank. + // The status of a read replica. If the DB instance isn't a read replica, the + // value is blank. StatusInfos []*DBInstanceStatusInfo `json:"statusInfos,omitempty"` TagList []*Tag `json:"tagList,omitempty"` - // Provides a list of VPC security group elements that the DB instance belongs - // to. + // The list of Amazon EC2 VPC security groups that the DB instance belongs to. 
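// A small sketch, assuming the generated VPCSecurityGroupMembership type
// mirrors the SDK shape with Status and VPCSecurityGroupID fields:
//
//	func activeSecurityGroupIDs(obs DBInstanceObservation) []string {
//		ids := []string{}
//		for _, m := range obs.VPCSecurityGroups {
//			if m != nil && m.Status != nil && *m.Status == "active" && m.VPCSecurityGroupID != nil {
//				ids = append(ids, *m.VPCSecurityGroupID)
//			}
//		}
//		return ids
//	}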
VPCSecurityGroups []*VPCSecurityGroupMembership `json:"vpcSecurityGroups,omitempty"` } diff --git a/apis/rds/v1alpha1/zz_enums.go b/apis/rds/v1alpha1/zz_enums.go index 6fb2bf5a67..77f96f945a 100644 --- a/apis/rds/v1alpha1/zz_enums.go +++ b/apis/rds/v1alpha1/zz_enums.go @@ -142,6 +142,13 @@ const ( FailoverStatus_cancelling FailoverStatus = "cancelling" ) +type GlobalClusterMemberSynchronizationStatus string + +const ( + GlobalClusterMemberSynchronizationStatus_connected GlobalClusterMemberSynchronizationStatus = "connected" + GlobalClusterMemberSynchronizationStatus_pending_resync GlobalClusterMemberSynchronizationStatus = "pending-resync" +) + type IAMAuthMode string const ( @@ -150,6 +157,16 @@ const ( IAMAuthMode_ENABLED IAMAuthMode = "ENABLED" ) +type LocalWriteForwardingStatus string + +const ( + LocalWriteForwardingStatus_enabled LocalWriteForwardingStatus = "enabled" + LocalWriteForwardingStatus_disabled LocalWriteForwardingStatus = "disabled" + LocalWriteForwardingStatus_enabling LocalWriteForwardingStatus = "enabling" + LocalWriteForwardingStatus_disabling LocalWriteForwardingStatus = "disabling" + LocalWriteForwardingStatus_requested LocalWriteForwardingStatus = "requested" +) + type ReplicaMode string const ( diff --git a/apis/rds/v1alpha1/zz_generated.deepcopy.go b/apis/rds/v1alpha1/zz_generated.deepcopy.go index a6f3e9edb4..3e2e9d750b 100644 --- a/apis/rds/v1alpha1/zz_generated.deepcopy.go +++ b/apis/rds/v1alpha1/zz_generated.deepcopy.go @@ -311,6 +311,11 @@ func (in *ClusterPendingModifiedValues) DeepCopyInto(out *ClusterPendingModified *out = new(PendingCloudwatchLogsExports) (*in).DeepCopyInto(*out) } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPendingModifiedValues. @@ -964,6 +969,136 @@ func (in *DBCluster) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DBClusterAutomatedBackup) DeepCopyInto(out *DBClusterAutomatedBackup) { + *out = *in + if in.AllocatedStorage != nil { + in, out := &in.AllocatedStorage, &out.AllocatedStorage + *out = new(int64) + **out = **in + } + if in.AvailabilityZones != nil { + in, out := &in.AvailabilityZones, &out.AvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BackupRetentionPeriod != nil { + in, out := &in.BackupRetentionPeriod, &out.BackupRetentionPeriod + *out = new(int64) + **out = **in + } + if in.ClusterCreateTime != nil { + in, out := &in.ClusterCreateTime, &out.ClusterCreateTime + *out = (*in).DeepCopy() + } + if in.DBClusterARN != nil { + in, out := &in.DBClusterARN, &out.DBClusterARN + *out = new(string) + **out = **in + } + if in.DBClusterAutomatedBackupsARN != nil { + in, out := &in.DBClusterAutomatedBackupsARN, &out.DBClusterAutomatedBackupsARN + *out = new(string) + **out = **in + } + if in.DBClusterIdentifier != nil { + in, out := &in.DBClusterIdentifier, &out.DBClusterIdentifier + *out = new(string) + **out = **in + } + if in.DBClusterResourceID != nil { + in, out := &in.DBClusterResourceID, &out.DBClusterResourceID + *out = new(string) + **out = **in + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.EngineMode != nil { + in, out := &in.EngineMode, &out.EngineMode + *out = new(string) + **out = **in + } + if in.EngineVersion != nil { + in, out := &in.EngineVersion, &out.EngineVersion + *out = new(string) + **out = **in + } + if in.IAMDatabaseAuthenticationEnabled != nil { + in, out := &in.IAMDatabaseAuthenticationEnabled, &out.IAMDatabaseAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.IOPS != nil { + in, out := &in.IOPS, &out.IOPS + *out = new(int64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.LicenseModel != nil { + in, out := &in.LicenseModel, &out.LicenseModel + *out = new(string) + **out = **in + } + if in.MasterUsername != nil { + in, out := &in.MasterUsername, &out.MasterUsername + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int64) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.StorageEncrypted != nil { + in, out := &in.StorageEncrypted, &out.StorageEncrypted + *out = new(bool) + **out = **in + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DBClusterAutomatedBackup. +func (in *DBClusterAutomatedBackup) DeepCopy() *DBClusterAutomatedBackup { + if in == nil { + return nil + } + out := new(DBClusterAutomatedBackup) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DBClusterEndpoint) DeepCopyInto(out *DBClusterEndpoint) { *out = *in @@ -1293,10 +1428,19 @@ func (in *DBClusterObservation) DeepCopyInto(out *DBClusterObservation) { *out = new(bool) **out = **in } + if in.IOOptimizedNextAllowedModificationTime != nil { + in, out := &in.IOOptimizedNextAllowedModificationTime, &out.IOOptimizedNextAllowedModificationTime + *out = (*in).DeepCopy() + } if in.LatestRestorableTime != nil { in, out := &in.LatestRestorableTime, &out.LatestRestorableTime *out = (*in).DeepCopy() } + if in.LocalWriteForwardingStatus != nil { + in, out := &in.LocalWriteForwardingStatus, &out.LocalWriteForwardingStatus + *out = new(string) + **out = **in + } if in.MasterUserSecret != nil { in, out := &in.MasterUserSecret, &out.MasterUserSecret *out = new(MasterUserSecret) @@ -1712,6 +1856,11 @@ func (in *DBClusterParameters) DeepCopyInto(out *DBClusterParameters) { *out = new(bool) **out = **in } + if in.EnableLocalWriteForwarding != nil { + in, out := &in.EnableLocalWriteForwarding, &out.EnableLocalWriteForwarding + *out = new(bool) + **out = **in + } if in.EnablePerformanceInsights != nil { in, out := &in.EnablePerformanceInsights, &out.EnablePerformanceInsights *out = new(bool) @@ -1939,6 +2088,11 @@ func (in *DBClusterSnapshot) DeepCopyInto(out *DBClusterSnapshot) { *out = new(string) **out = **in } + if in.DBClusterResourceID != nil { + in, out := &in.DBClusterResourceID, &out.DBClusterResourceID + *out = new(string) + **out = **in + } if in.Engine != nil { in, out := &in.Engine, &out.Engine *out = new(string) @@ -2008,6 +2162,11 @@ func (in *DBClusterSnapshot) DeepCopyInto(out *DBClusterSnapshot) { *out = new(bool) **out = **in } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } if in.TagList != nil { in, out := &in.TagList, &out.TagList *out = make([]*Tag, len(*in)) @@ -2366,6 +2525,10 @@ func (in *DBCluster_SDK) DeepCopyInto(out *DBCluster_SDK) { *out = new(bool) **out = **in } + if in.IOOptimizedNextAllowedModificationTime != nil { + in, out := &in.IOOptimizedNextAllowedModificationTime, &out.IOOptimizedNextAllowedModificationTime + *out = (*in).DeepCopy() + } if in.IOPS != nil { in, out := &in.IOPS, &out.IOPS *out = new(int64) @@ -2380,6 +2543,11 @@ func (in *DBCluster_SDK) DeepCopyInto(out *DBCluster_SDK) { in, out := &in.LatestRestorableTime, &out.LatestRestorableTime *out = (*in).DeepCopy() } + if in.LocalWriteForwardingStatus != nil { + in, out := &in.LocalWriteForwardingStatus, &out.LocalWriteForwardingStatus + *out = new(string) + **out = **in + } if in.MasterUserSecret != nil { in, out := &in.MasterUserSecret, &out.MasterUserSecret *out = new(MasterUserSecret) @@ -2634,6 +2802,11 @@ func (in *DBEngineVersion) DeepCopyInto(out *DBEngineVersion) { *out = new(bool) **out = **in } + if in.SupportsLocalWriteForwarding != nil { + in, out := &in.SupportsLocalWriteForwarding, &out.SupportsLocalWriteForwarding + *out = new(bool) + **out = **in + } if in.SupportsLogExportsToCloudwatchLogs != nil { in, out := &in.SupportsLogExportsToCloudwatchLogs, &out.SupportsLogExportsToCloudwatchLogs *out = new(bool) @@ -3027,11 +3200,6 @@ func (in *DBInstanceObservation) DeepCopyInto(out *DBInstanceObservation) { *out = new(DBSubnetGroup) (*in).DeepCopyInto(*out) } - if in.DBSystemID != nil { - in, out := &in.DBSystemID, &out.DBSystemID - *out = new(string) - **out = **in - } if in.DBInstancePort != nil { in, out := &in.DBInstancePort, &out.DBInstancePort *out = new(int64) @@ -3118,6 +3286,11 @@ func (in 
*DBInstanceObservation) DeepCopyInto(out *DBInstanceObservation) { *out = new(PendingModifiedValues) (*in).DeepCopyInto(*out) } + if in.PercentProgress != nil { + in, out := &in.PercentProgress, &out.PercentProgress + *out = new(string) + **out = **in + } if in.PerformanceInsightsEnabled != nil { in, out := &in.PerformanceInsightsEnabled, &out.PerformanceInsightsEnabled *out = new(bool) @@ -3145,6 +3318,11 @@ func (in *DBInstanceObservation) DeepCopyInto(out *DBInstanceObservation) { } } } + if in.ReadReplicaSourceDBClusterIdentifier != nil { + in, out := &in.ReadReplicaSourceDBClusterIdentifier, &out.ReadReplicaSourceDBClusterIdentifier + *out = new(string) + **out = **in + } if in.ReadReplicaSourceDBInstanceIdentifier != nil { in, out := &in.ReadReplicaSourceDBInstanceIdentifier, &out.ReadReplicaSourceDBInstanceIdentifier *out = new(string) @@ -3287,6 +3465,11 @@ func (in *DBInstanceParameters) DeepCopyInto(out *DBInstanceParameters) { *out = new(string) **out = **in } + if in.DBSystemID != nil { + in, out := &in.DBSystemID, &out.DBSystemID + *out = new(string) + **out = **in + } if in.DeletionProtection != nil { in, out := &in.DeletionProtection, &out.DeletionProtection *out = new(bool) @@ -3297,11 +3480,37 @@ func (in *DBInstanceParameters) DeepCopyInto(out *DBInstanceParameters) { *out = new(string) **out = **in } + if in.DomainAuthSecretARN != nil { + in, out := &in.DomainAuthSecretARN, &out.DomainAuthSecretARN + *out = new(string) + **out = **in + } + if in.DomainDNSIPs != nil { + in, out := &in.DomainDNSIPs, &out.DomainDNSIPs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DomainFqdn != nil { + in, out := &in.DomainFqdn, &out.DomainFqdn + *out = new(string) + **out = **in + } if in.DomainIAMRoleName != nil { in, out := &in.DomainIAMRoleName, &out.DomainIAMRoleName *out = new(string) **out = **in } + if in.DomainOu != nil { + in, out := &in.DomainOu, &out.DomainOu + *out = new(string) + **out = **in + } if in.EnableCloudwatchLogsExports != nil { in, out := &in.EnableCloudwatchLogsExports, &out.EnableCloudwatchLogsExports *out = make([]*string, len(*in)) @@ -4058,6 +4267,11 @@ func (in *DBInstance_SDK) DeepCopyInto(out *DBInstance_SDK) { *out = new(PendingModifiedValues) (*in).DeepCopyInto(*out) } + if in.PercentProgress != nil { + in, out := &in.PercentProgress, &out.PercentProgress + *out = new(string) + **out = **in + } if in.PerformanceInsightsEnabled != nil { in, out := &in.PerformanceInsightsEnabled, &out.PerformanceInsightsEnabled *out = new(bool) @@ -4126,6 +4340,11 @@ func (in *DBInstance_SDK) DeepCopyInto(out *DBInstance_SDK) { } } } + if in.ReadReplicaSourceDBClusterIdentifier != nil { + in, out := &in.ReadReplicaSourceDBClusterIdentifier, &out.ReadReplicaSourceDBClusterIdentifier + *out = new(string) + **out = **in + } if in.ReadReplicaSourceDBInstanceIdentifier != nil { in, out := &in.ReadReplicaSourceDBInstanceIdentifier, &out.ReadReplicaSourceDBInstanceIdentifier *out = new(string) @@ -4792,6 +5011,11 @@ func (in *DBSnapshot) DeepCopyInto(out *DBSnapshot) { *out = new(string) **out = **in } + if in.DBSystemID != nil { + in, out := &in.DBSystemID, &out.DBSystemID + *out = new(string) + **out = **in + } if in.DBIResourceID != nil { in, out := &in.DBIResourceID, &out.DBIResourceID *out = new(string) @@ -5077,6 +5301,22 @@ func (in *DescribeDBLogFilesDetails) DeepCopy() *DescribeDBLogFilesDetails { // DeepCopyInto is an autogenerated deepcopy 
function, copying the receiver, writing into out. in must be non-nil. func (in *DomainMembership) DeepCopyInto(out *DomainMembership) { *out = *in + if in.AuthSecretARN != nil { + in, out := &in.AuthSecretARN, &out.AuthSecretARN + *out = new(string) + **out = **in + } + if in.DNSIPs != nil { + in, out := &in.DNSIPs, &out.DNSIPs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.Domain != nil { in, out := &in.Domain, &out.Domain *out = new(string) @@ -5092,6 +5332,11 @@ func (in *DomainMembership) DeepCopyInto(out *DomainMembership) { *out = new(string) **out = **in } + if in.OU != nil { + in, out := &in.OU, &out.OU + *out = new(string) + **out = **in + } if in.Status != nil { in, out := &in.Status, &out.Status *out = new(string) @@ -5420,6 +5665,11 @@ func (in *FailoverState) DeepCopyInto(out *FailoverState) { *out = new(string) **out = **in } + if in.IsDataLossAllowed != nil { + in, out := &in.IsDataLossAllowed, &out.IsDataLossAllowed + *out = new(bool) + **out = **in + } if in.Status != nil { in, out := &in.Status, &out.Status *out = new(string) @@ -5561,6 +5811,11 @@ func (in *GlobalClusterMember) DeepCopyInto(out *GlobalClusterMember) { } } } + if in.SynchronizationStatus != nil { + in, out := &in.SynchronizationStatus, &out.SynchronizationStatus + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalClusterMember. @@ -6911,6 +7166,11 @@ func (in *PendingModifiedValues) DeepCopyInto(out *PendingModifiedValues) { *out = new(string) **out = **in } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } if in.EngineVersion != nil { in, out := &in.EngineVersion, &out.EngineVersion *out = new(string) @@ -7762,6 +8022,11 @@ func (in *UpgradeTarget) DeepCopyInto(out *UpgradeTarget) { *out = new(bool) **out = **in } + if in.SupportsLocalWriteForwarding != nil { + in, out := &in.SupportsLocalWriteForwarding, &out.SupportsLocalWriteForwarding + *out = new(bool) + **out = **in + } if in.SupportsParallelQuery != nil { in, out := &in.SupportsParallelQuery, &out.SupportsParallelQuery *out = new(bool) diff --git a/apis/rds/v1alpha1/zz_global_cluster.go b/apis/rds/v1alpha1/zz_global_cluster.go index 5d96654ffa..8c6f5a169e 100644 --- a/apis/rds/v1alpha1/zz_global_cluster.go +++ b/apis/rds/v1alpha1/zz_global_cluster.go @@ -29,21 +29,57 @@ type GlobalClusterParameters struct { // Region is which region the GlobalCluster will be created. // +kubebuilder:validation:Required Region string `json:"region"` - // The name for your database of up to 64 alphanumeric characters. If you do - // not provide a name, Amazon Aurora will not create a database in the global - // database cluster you are creating. + // The name for your database of up to 64 alphanumeric characters. If you don't + // specify a name, Amazon Aurora doesn't create a database in the global database + // cluster. + // + // Constraints: + // + // * Can't be specified if SourceDBClusterIdentifier is specified. In this + // case, Amazon Aurora uses the database name from the source DB cluster. DatabaseName *string `json:"databaseName,omitempty"` - // The deletion protection setting for the new global database. The global database - // can't be deleted when deletion protection is enabled. + // Specifies whether to enable deletion protection for the new global database + // cluster. 
The global database can't be deleted when deletion protection is + // enabled. DeletionProtection *bool `json:"deletionProtection,omitempty"` - // The name of the database engine to be used for this DB cluster. + // The database engine to use for this global database cluster. + // + // Valid Values: aurora-mysql | aurora-postgresql + // + // Constraints: + // + // * Can't be specified if SourceDBClusterIdentifier is specified. In this + // case, Amazon Aurora uses the engine of the source DB cluster. Engine *string `json:"engine,omitempty"` - // The engine version of the Aurora global database. + // The engine version to use for this global database cluster. + // + // Constraints: + // + // * Can't be specified if SourceDBClusterIdentifier is specified. In this + // case, Amazon Aurora uses the engine version of the source DB cluster. EngineVersion *string `json:"engineVersion,omitempty"` // The Amazon Resource Name (ARN) to use as the primary cluster of the global - // database. This parameter is optional. + // database. + // + // If you provide a value for this parameter, don't specify values for the following + // settings because Amazon Aurora uses the values from the specified source + // DB cluster: + // + // * DatabaseName + // + // * Engine + // + // * EngineVersion + // + // * StorageEncrypted SourceDBClusterIdentifier *string `json:"sourceDBClusterIdentifier,omitempty"` - // The storage encryption setting for the new global database cluster. + // Specifies whether to enable storage encryption for the new global database + // cluster. + // + // Constraints: + // + // * Can't be specified if SourceDBClusterIdentifier is specified. In this + // case, Amazon Aurora uses the setting from the source DB cluster. StorageEncrypted *bool `json:"storageEncrypted,omitempty"` CustomGlobalClusterParameters `json:",inline"` } @@ -57,17 +93,16 @@ type GlobalClusterSpec struct { // GlobalClusterObservation defines the observed state of GlobalCluster type GlobalClusterObservation struct { // A data object containing all properties for the current state of an in-process - // or pending failover process for this Aurora global database. This object - // is empty unless the FailoverGlobalCluster API operation has been called on - // this Aurora global database (GlobalCluster). + // or pending switchover or failover process for this global cluster (Aurora + // global database). This object is empty unless the SwitchoverGlobalCluster + // or FailoverGlobalCluster operation was called on this global cluster. FailoverState *FailoverState `json:"failoverState,omitempty"` // The Amazon Resource Name (ARN) for the global database cluster. GlobalClusterARN *string `json:"globalClusterARN,omitempty"` // Contains a user-supplied global database cluster identifier. This identifier // is the unique key that identifies a global database cluster. GlobalClusterIdentifier *string `json:"globalClusterIdentifier,omitempty"` - // The list of cluster IDs for secondary clusters within the global database - // cluster. Currently limited to 1 item. + // The list of primary and secondary clusters within the global database cluster. GlobalClusterMembers []*GlobalClusterMember `json:"globalClusterMembers,omitempty"` // The Amazon Web Services Region-unique, immutable identifier for the global // database cluster. 
This identifier is found in Amazon Web Services CloudTrail diff --git a/apis/rds/v1alpha1/zz_types.go b/apis/rds/v1alpha1/zz_types.go index ccb48db61d..89219eaa33 100644 --- a/apis/rds/v1alpha1/zz_types.go +++ b/apis/rds/v1alpha1/zz_types.go @@ -114,6 +114,8 @@ type ClusterPendingModifiedValues struct { // A list of the log types whose configuration is still pending. In other words, // these log types are in the process of being activated or deactivated. PendingCloudwatchLogsExports *PendingCloudwatchLogsExports `json:"pendingCloudwatchLogsExports,omitempty"` + + StorageType *string `json:"storageType,omitempty"` } // +kubebuilder:skipversion @@ -149,6 +151,53 @@ type CustomDBEngineVersionAMI struct { Status *string `json:"status,omitempty"` } +// +kubebuilder:skipversion +type DBClusterAutomatedBackup struct { + AllocatedStorage *int64 `json:"allocatedStorage,omitempty"` + + AvailabilityZones []*string `json:"availabilityZones,omitempty"` + + BackupRetentionPeriod *int64 `json:"backupRetentionPeriod,omitempty"` + + ClusterCreateTime *metav1.Time `json:"clusterCreateTime,omitempty"` + + DBClusterARN *string `json:"dbClusterARN,omitempty"` + + DBClusterAutomatedBackupsARN *string `json:"dbClusterAutomatedBackupsARN,omitempty"` + + DBClusterIdentifier *string `json:"dbClusterIdentifier,omitempty"` + + DBClusterResourceID *string `json:"dbClusterResourceID,omitempty"` + + Engine *string `json:"engine,omitempty"` + + EngineMode *string `json:"engineMode,omitempty"` + + EngineVersion *string `json:"engineVersion,omitempty"` + + IAMDatabaseAuthenticationEnabled *bool `json:"iamDatabaseAuthenticationEnabled,omitempty"` + + IOPS *int64 `json:"iops,omitempty"` + + KMSKeyID *string `json:"kmsKeyID,omitempty"` + + LicenseModel *string `json:"licenseModel,omitempty"` + + MasterUsername *string `json:"masterUsername,omitempty"` + + Port *int64 `json:"port,omitempty"` + + Region *string `json:"region,omitempty"` + + Status *string `json:"status,omitempty"` + + StorageEncrypted *bool `json:"storageEncrypted,omitempty"` + + StorageType *string `json:"storageType,omitempty"` + + VPCID *string `json:"vpcID,omitempty"` +} + // +kubebuilder:skipversion type DBClusterEndpoint struct { CustomEndpointType *string `json:"customEndpointType,omitempty"` @@ -226,6 +275,8 @@ type DBClusterSnapshot struct { DBSystemID *string `json:"dbSystemID,omitempty"` + DBClusterResourceID *string `json:"dbClusterResourceID,omitempty"` + Engine *string `json:"engine,omitempty"` EngineMode *string `json:"engineMode,omitempty"` @@ -253,6 +304,8 @@ type DBClusterSnapshot struct { Status *string `json:"status,omitempty"` StorageEncrypted *bool `json:"storageEncrypted,omitempty"` + + StorageType *string `json:"storageType,omitempty"` // A list of tags. For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) // in the Amazon RDS User Guide. 
TagList []*Tag `json:"tagList,omitempty"` @@ -358,11 +411,15 @@ type DBCluster_SDK struct { IAMDatabaseAuthenticationEnabled *bool `json:"iamDatabaseAuthenticationEnabled,omitempty"` + IOOptimizedNextAllowedModificationTime *metav1.Time `json:"iOOptimizedNextAllowedModificationTime,omitempty"` + IOPS *int64 `json:"iops,omitempty"` KMSKeyID *string `json:"kmsKeyID,omitempty"` LatestRestorableTime *metav1.Time `json:"latestRestorableTime,omitempty"` + + LocalWriteForwardingStatus *string `json:"localWriteForwardingStatus,omitempty"` // Contains the secret managed by RDS in Amazon Web Services Secrets Manager // for the master user password. // @@ -404,13 +461,13 @@ type DBCluster_SDK struct { ReaderEndpoint *string `json:"readerEndpoint,omitempty"` ReplicationSourceIdentifier *string `json:"replicationSourceIdentifier,omitempty"` - // Shows the scaling configuration for an Aurora DB cluster in serverless DB - // engine mode. + // The scaling configuration for an Aurora DB cluster in serverless DB engine + // mode. // // For more information, see Using Amazon Aurora Serverless v1 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html) // in the Amazon Aurora User Guide. ScalingConfigurationInfo *ScalingConfigurationInfo `json:"scalingConfigurationInfo,omitempty"` - // Shows the scaling configuration for an Aurora Serverless v2 DB cluster. + // The scaling configuration for an Aurora Serverless v2 DB cluster. // // For more information, see Using Amazon Aurora Serverless v2 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.html) // in the Amazon Aurora User Guide. @@ -466,6 +523,8 @@ type DBEngineVersion struct { SupportsGlobalDatabases *bool `json:"supportsGlobalDatabases,omitempty"` + SupportsLocalWriteForwarding *bool `json:"supportsLocalWriteForwarding,omitempty"` + SupportsLogExportsToCloudwatchLogs *bool `json:"supportsLogExportsToCloudwatchLogs,omitempty"` SupportsParallelQuery *bool `json:"supportsParallelQuery,omitempty"` @@ -715,6 +774,8 @@ type DBInstance_SDK struct { // and contains changes that will be applied during the next maintenance window. 
PendingModifiedValues *PendingModifiedValues `json:"pendingModifiedValues,omitempty"` + PercentProgress *string `json:"percentProgress,omitempty"` + PerformanceInsightsEnabled *bool `json:"performanceInsightsEnabled,omitempty"` PerformanceInsightsKMSKeyID *string `json:"performanceInsightsKMSKeyID,omitempty"` @@ -735,6 +796,8 @@ type DBInstance_SDK struct { ReadReplicaDBInstanceIdentifiers []*string `json:"readReplicaDBInstanceIdentifiers,omitempty"` + ReadReplicaSourceDBClusterIdentifier *string `json:"readReplicaSourceDBClusterIdentifier,omitempty"` + ReadReplicaSourceDBInstanceIdentifier *string `json:"readReplicaSourceDBInstanceIdentifier,omitempty"` ReplicaMode *string `json:"replicaMode,omitempty"` @@ -891,6 +954,8 @@ type DBSnapshot struct { DBSnapshotIdentifier *string `json:"dbSnapshotIdentifier,omitempty"` + DBSystemID *string `json:"dbSystemID,omitempty"` + DBIResourceID *string `json:"dbiResourceID,omitempty"` Encrypted *bool `json:"encrypted,omitempty"` @@ -983,12 +1048,18 @@ type DescribeDBLogFilesDetails struct { // +kubebuilder:skipversion type DomainMembership struct { + AuthSecretARN *string `json:"authSecretARN,omitempty"` + + DNSIPs []*string `json:"dnsIPs,omitempty"` + Domain *string `json:"domain,omitempty"` FQDN *string `json:"fQDN,omitempty"` IAMRoleName *string `json:"iamRoleName,omitempty"` + OU *string `json:"oU,omitempty"` + Status *string `json:"status,omitempty"` } @@ -1093,6 +1164,8 @@ type ExportTask struct { type FailoverState struct { FromDBClusterARN *string `json:"fromDBClusterARN,omitempty"` + IsDataLossAllowed *bool `json:"isDataLossAllowed,omitempty"` + Status *string `json:"status,omitempty"` ToDBClusterARN *string `json:"toDBClusterARN,omitempty"` @@ -1114,6 +1187,8 @@ type GlobalClusterMember struct { IsWriter *bool `json:"isWriter,omitempty"` Readers []*string `json:"readers,omitempty"` + + SynchronizationStatus *string `json:"synchronizationStatus,omitempty"` } // +kubebuilder:skipversion @@ -1125,9 +1200,9 @@ type GlobalCluster_SDK struct { Engine *string `json:"engine,omitempty"` EngineVersion *string `json:"engineVersion,omitempty"` - // Contains the state of scheduled or in-process failover operations on an Aurora - // global database (GlobalCluster). This Data type is empty unless a failover - // operation is scheduled or is currently underway on the Aurora global database. + // Contains the state of scheduled or in-process operations on a global cluster + // (Aurora global database). This data type is empty unless a switchover or + // failover operation is scheduled or is in progress on the Aurora global database. 
FailoverState *FailoverState `json:"failoverState,omitempty"` GlobalClusterARN *string `json:"globalClusterARN,omitempty"` @@ -1443,6 +1518,8 @@ type PendingModifiedValues struct { DBSubnetGroupName *string `json:"dbSubnetGroupName,omitempty"` + Engine *string `json:"engine,omitempty"` + EngineVersion *string `json:"engineVersion,omitempty"` IAMDatabaseAuthenticationEnabled *bool `json:"iamDatabaseAuthenticationEnabled,omitempty"` @@ -1657,6 +1734,8 @@ type UpgradeTarget struct { SupportsGlobalDatabases *bool `json:"supportsGlobalDatabases,omitempty"` + SupportsLocalWriteForwarding *bool `json:"supportsLocalWriteForwarding,omitempty"` + SupportsParallelQuery *bool `json:"supportsParallelQuery,omitempty"` } diff --git a/apis/route53resolver/generator-config.yaml b/apis/route53resolver/generator-config.yaml index 3fd74036f0..7281062e39 100644 --- a/apis/route53resolver/generator-config.yaml +++ b/apis/route53resolver/generator-config.yaml @@ -4,6 +4,7 @@ ignore: - FirewallDomainList - FirewallRule - FirewallRuleGroup + - OutpostResolver field_paths: - CreateResolverEndpointInput.SecurityGroupIds - CreateResolverEndpointInput.IpAddresses diff --git a/apis/route53resolver/v1alpha1/zz_enums.go b/apis/route53resolver/v1alpha1/zz_enums.go index d7c2ec7571..547f4e9e55 100644 --- a/apis/route53resolver/v1alpha1/zz_enums.go +++ b/apis/route53resolver/v1alpha1/zz_enums.go @@ -29,8 +29,9 @@ const ( type AutodefinedReverseFlag string const ( - AutodefinedReverseFlag_ENABLE AutodefinedReverseFlag = "ENABLE" - AutodefinedReverseFlag_DISABLE AutodefinedReverseFlag = "DISABLE" + AutodefinedReverseFlag_ENABLE AutodefinedReverseFlag = "ENABLE" + AutodefinedReverseFlag_DISABLE AutodefinedReverseFlag = "DISABLE" + AutodefinedReverseFlag_USE_LOCAL_RESOURCE_SETTING AutodefinedReverseFlag = "USE_LOCAL_RESOURCE_SETTING" ) type BlockOverrideDNSType string @@ -74,8 +75,9 @@ const ( type FirewallFailOpenStatus string const ( - FirewallFailOpenStatus_ENABLED FirewallFailOpenStatus = "ENABLED" - FirewallFailOpenStatus_DISABLED FirewallFailOpenStatus = "DISABLED" + FirewallFailOpenStatus_ENABLED FirewallFailOpenStatus = "ENABLED" + FirewallFailOpenStatus_DISABLED FirewallFailOpenStatus = "DISABLED" + FirewallFailOpenStatus_USE_LOCAL_RESOURCE_SETTING FirewallFailOpenStatus = "USE_LOCAL_RESOURCE_SETTING" ) type FirewallRuleGroupAssociationStatus string @@ -107,6 +109,8 @@ const ( IPAddressStatus_FAILED_RESOURCE_GONE IPAddressStatus = "FAILED_RESOURCE_GONE" IPAddressStatus_DELETING IPAddressStatus = "DELETING" IPAddressStatus_DELETE_FAILED_FAS_EXPIRED IPAddressStatus = "DELETE_FAILED_FAS_EXPIRED" + IPAddressStatus_UPDATING IPAddressStatus = "UPDATING" + IPAddressStatus_UPDATE_FAILED IPAddressStatus = "UPDATE_FAILED" ) type MutationProtectionStatus string @@ -116,22 +120,38 @@ const ( MutationProtectionStatus_DISABLED MutationProtectionStatus = "DISABLED" ) +type OutpostResolverStatus string + +const ( + OutpostResolverStatus_CREATING OutpostResolverStatus = "CREATING" + OutpostResolverStatus_OPERATIONAL OutpostResolverStatus = "OPERATIONAL" + OutpostResolverStatus_UPDATING OutpostResolverStatus = "UPDATING" + OutpostResolverStatus_DELETING OutpostResolverStatus = "DELETING" + OutpostResolverStatus_ACTION_NEEDED OutpostResolverStatus = "ACTION_NEEDED" + OutpostResolverStatus_FAILED_CREATION OutpostResolverStatus = "FAILED_CREATION" + OutpostResolverStatus_FAILED_DELETION OutpostResolverStatus = "FAILED_DELETION" +) + type ResolverAutodefinedReverseStatus string const ( - ResolverAutodefinedReverseStatus_ENABLING 
ResolverAutodefinedReverseStatus = "ENABLING" - ResolverAutodefinedReverseStatus_ENABLED ResolverAutodefinedReverseStatus = "ENABLED" - ResolverAutodefinedReverseStatus_DISABLING ResolverAutodefinedReverseStatus = "DISABLING" - ResolverAutodefinedReverseStatus_DISABLED ResolverAutodefinedReverseStatus = "DISABLED" + ResolverAutodefinedReverseStatus_ENABLING ResolverAutodefinedReverseStatus = "ENABLING" + ResolverAutodefinedReverseStatus_ENABLED ResolverAutodefinedReverseStatus = "ENABLED" + ResolverAutodefinedReverseStatus_DISABLING ResolverAutodefinedReverseStatus = "DISABLING" + ResolverAutodefinedReverseStatus_DISABLED ResolverAutodefinedReverseStatus = "DISABLED" + ResolverAutodefinedReverseStatus_UPDATING_TO_USE_LOCAL_RESOURCE_SETTING ResolverAutodefinedReverseStatus = "UPDATING_TO_USE_LOCAL_RESOURCE_SETTING" + ResolverAutodefinedReverseStatus_USE_LOCAL_RESOURCE_SETTING ResolverAutodefinedReverseStatus = "USE_LOCAL_RESOURCE_SETTING" ) type ResolverDNSSECValidationStatus string const ( - ResolverDNSSECValidationStatus_ENABLING ResolverDNSSECValidationStatus = "ENABLING" - ResolverDNSSECValidationStatus_ENABLED ResolverDNSSECValidationStatus = "ENABLED" - ResolverDNSSECValidationStatus_DISABLING ResolverDNSSECValidationStatus = "DISABLING" - ResolverDNSSECValidationStatus_DISABLED ResolverDNSSECValidationStatus = "DISABLED" + ResolverDNSSECValidationStatus_ENABLING ResolverDNSSECValidationStatus = "ENABLING" + ResolverDNSSECValidationStatus_ENABLED ResolverDNSSECValidationStatus = "ENABLED" + ResolverDNSSECValidationStatus_DISABLING ResolverDNSSECValidationStatus = "DISABLING" + ResolverDNSSECValidationStatus_DISABLED ResolverDNSSECValidationStatus = "DISABLED" + ResolverDNSSECValidationStatus_UPDATING_TO_USE_LOCAL_RESOURCE_SETTING ResolverDNSSECValidationStatus = "UPDATING_TO_USE_LOCAL_RESOURCE_SETTING" + ResolverDNSSECValidationStatus_USE_LOCAL_RESOURCE_SETTING ResolverDNSSECValidationStatus = "USE_LOCAL_RESOURCE_SETTING" ) type ResolverEndpointDirection string @@ -152,6 +172,14 @@ const ( ResolverEndpointStatus_SDK_DELETING ResolverEndpointStatus_SDK = "DELETING" ) +type ResolverEndpointType string + +const ( + ResolverEndpointType_IPV6 ResolverEndpointType = "IPV6" + ResolverEndpointType_IPV4 ResolverEndpointType = "IPV4" + ResolverEndpointType_DUALSTACK ResolverEndpointType = "DUALSTACK" +) + type ResolverQueryLogConfigAssociationError string const ( @@ -215,6 +243,7 @@ const ( type Validation string const ( - Validation_ENABLE Validation = "ENABLE" - Validation_DISABLE Validation = "DISABLE" + Validation_ENABLE Validation = "ENABLE" + Validation_DISABLE Validation = "DISABLE" + Validation_USE_LOCAL_RESOURCE_SETTING Validation = "USE_LOCAL_RESOURCE_SETTING" ) diff --git a/apis/route53resolver/v1alpha1/zz_generated.deepcopy.go b/apis/route53resolver/v1alpha1/zz_generated.deepcopy.go index 37371b8453..cadea2c917 100644 --- a/apis/route53resolver/v1alpha1/zz_generated.deepcopy.go +++ b/apis/route53resolver/v1alpha1/zz_generated.deepcopy.go @@ -518,6 +518,11 @@ func (in *IPAddressResponse) DeepCopyInto(out *IPAddressResponse) { *out = new(string) **out = **in } + if in.IPv6 != nil { + in, out := &in.IPv6, &out.IPv6 + *out = new(string) + **out = **in + } if in.ModificationTime != nil { in, out := &in.ModificationTime, &out.ModificationTime *out = new(string) @@ -553,6 +558,11 @@ func (in *IPAddressUpdate) DeepCopyInto(out *IPAddressUpdate) { *out = new(string) **out = **in } + if in.IPv6 != nil { + in, out := &in.IPv6, &out.IPv6 + *out = new(string) + **out = **in + } } // DeepCopy is 
an autogenerated deepcopy function, copying the receiver, creating a new IPAddressUpdate. @@ -565,6 +575,56 @@ func (in *IPAddressUpdate) DeepCopy() *IPAddressUpdate { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutpostResolver) DeepCopyInto(out *OutpostResolver) { + *out = *in + if in.ARN != nil { + in, out := &in.ARN, &out.ARN + *out = new(string) + **out = **in + } + if in.CreationTime != nil { + in, out := &in.CreationTime, &out.CreationTime + *out = new(string) + **out = **in + } + if in.CreatorRequestID != nil { + in, out := &in.CreatorRequestID, &out.CreatorRequestID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ModificationTime != nil { + in, out := &in.ModificationTime, &out.ModificationTime + *out = new(string) + **out = **in + } + if in.OutpostARN != nil { + in, out := &in.OutpostARN, &out.OutpostARN + *out = new(string) + **out = **in + } + if in.PreferredInstanceType != nil { + in, out := &in.PreferredInstanceType, &out.PreferredInstanceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutpostResolver. +func (in *OutpostResolver) DeepCopy() *OutpostResolver { + if in == nil { + return nil + } + out := new(OutpostResolver) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResolverConfig) DeepCopyInto(out *ResolverConfig) { *out = *in @@ -768,6 +828,21 @@ func (in *ResolverEndpointParameters) DeepCopyInto(out *ResolverEndpointParamete *out = new(string) **out = **in } + if in.OutpostARN != nil { + in, out := &in.OutpostARN, &out.OutpostARN + *out = new(string) + **out = **in + } + if in.PreferredInstanceType != nil { + in, out := &in.PreferredInstanceType, &out.PreferredInstanceType + *out = new(string) + **out = **in + } + if in.ResolverEndpointType != nil { + in, out := &in.ResolverEndpointType, &out.ResolverEndpointType + *out = new(string) + **out = **in + } if in.Tags != nil { in, out := &in.Tags, &out.Tags *out = make([]*Tag, len(*in)) @@ -874,6 +949,21 @@ func (in *ResolverEndpoint_SDK) DeepCopyInto(out *ResolverEndpoint_SDK) { *out = new(string) **out = **in } + if in.OutpostARN != nil { + in, out := &in.OutpostARN, &out.OutpostARN + *out = new(string) + **out = **in + } + if in.PreferredInstanceType != nil { + in, out := &in.PreferredInstanceType, &out.PreferredInstanceType + *out = new(string) + **out = **in + } + if in.ResolverEndpointType != nil { + in, out := &in.ResolverEndpointType, &out.ResolverEndpointType + *out = new(string) + **out = **in + } if in.SecurityGroupIDs != nil { in, out := &in.SecurityGroupIDs, &out.SecurityGroupIDs *out = make([]*string, len(*in)) @@ -1358,6 +1448,11 @@ func (in *TargetAddress) DeepCopyInto(out *TargetAddress) { *out = new(string) **out = **in } + if in.IPv6 != nil { + in, out := &in.IPv6, &out.IPv6 + *out = new(string) + **out = **in + } if in.Port != nil { in, out := &in.Port, &out.Port *out = new(int64) @@ -1374,3 +1469,28 @@ func (in *TargetAddress) DeepCopy() *TargetAddress { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UpdateIPAddress) DeepCopyInto(out *UpdateIPAddress) { + *out = *in + if in.IPID != nil { + in, out := &in.IPID, &out.IPID + *out = new(string) + **out = **in + } + if in.IPv6 != nil { + in, out := &in.IPv6, &out.IPv6 + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateIPAddress. +func (in *UpdateIPAddress) DeepCopy() *UpdateIPAddress { + if in == nil { + return nil + } + out := new(UpdateIPAddress) + in.DeepCopyInto(out) + return out +} diff --git a/apis/route53resolver/v1alpha1/zz_resolver_endpoint.go b/apis/route53resolver/v1alpha1/zz_resolver_endpoint.go index 1e417a8311..d42fc6cae6 100644 --- a/apis/route53resolver/v1alpha1/zz_resolver_endpoint.go +++ b/apis/route53resolver/v1alpha1/zz_resolver_endpoint.go @@ -41,6 +41,16 @@ type ResolverEndpointParameters struct { // A friendly name that lets you easily find a configuration in the Resolver // dashboard in the Route 53 console. Name *string `json:"name,omitempty"` + // The Amazon Resource Name (ARN) of the Outpost. If you specify this, you must + // also specify a value for the PreferredInstanceType. + OutpostARN *string `json:"outpostARN,omitempty"` + // The instance type. If you specify this, you must also specify a value for + // the OutpostArn. + PreferredInstanceType *string `json:"preferredInstanceType,omitempty"` + // For the endpoint type, you can choose either IPv4, IPv6, or dual-stack. A + // dual-stack endpoint resolves via both IPv4 and IPv6. This + // endpoint type is applied to all IP addresses. + ResolverEndpointType *string `json:"resolverEndpointType,omitempty"` // A list of the tag keys and values that you want to associate with the endpoint. Tags []*Tag `json:"tags,omitempty"` CustomResolverEndpointParameters `json:",inline"` diff --git a/apis/route53resolver/v1alpha1/zz_resolver_rule.go b/apis/route53resolver/v1alpha1/zz_resolver_rule.go index a48f192526..561f8f3146 100644 --- a/apis/route53resolver/v1alpha1/zz_resolver_rule.go +++ b/apis/route53resolver/v1alpha1/zz_resolver_rule.go @@ -60,7 +60,8 @@ type ResolverRuleParameters struct { // A list of the tag keys and values that you want to associate with the endpoint. Tags []*Tag `json:"tags,omitempty"` // The IPs that you want Resolver to forward DNS queries to. You can specify - // only IPv4 addresses. Separate IP addresses with a space. + // either IPv4 or IPv6 addresses, but not both in the same rule. Separate IP + // addresses with a space. // // TargetIps is available only when the value of Rule type is FORWARD.
TargetIPs []*TargetAddress `json:"targetIPs,omitempty"` diff --git a/apis/route53resolver/v1alpha1/zz_types.go b/apis/route53resolver/v1alpha1/zz_types.go index 5925b5df37..9b237d74a7 100644 --- a/apis/route53resolver/v1alpha1/zz_types.go +++ b/apis/route53resolver/v1alpha1/zz_types.go @@ -151,6 +151,8 @@ type IPAddressResponse struct { IPID *string `json:"ipID,omitempty"` + IPv6 *string `json:"ipv6,omitempty"` + ModificationTime *string `json:"modificationTime,omitempty"` StatusMessage *string `json:"statusMessage,omitempty"` @@ -161,6 +163,25 @@ type IPAddressUpdate struct { IP *string `json:"ip,omitempty"` IPID *string `json:"ipID,omitempty"` + + IPv6 *string `json:"ipv6,omitempty"` +} + +// +kubebuilder:skipversion +type OutpostResolver struct { + ARN *string `json:"arn,omitempty"` + + CreationTime *string `json:"creationTime,omitempty"` + + CreatorRequestID *string `json:"creatorRequestID,omitempty"` + + ID *string `json:"id,omitempty"` + + ModificationTime *string `json:"modificationTime,omitempty"` + + OutpostARN *string `json:"outpostARN,omitempty"` + + PreferredInstanceType *string `json:"preferredInstanceType,omitempty"` } // +kubebuilder:skipversion @@ -201,6 +222,12 @@ type ResolverEndpoint_SDK struct { Name *string `json:"name,omitempty"` + OutpostARN *string `json:"outpostARN,omitempty"` + + PreferredInstanceType *string `json:"preferredInstanceType,omitempty"` + + ResolverEndpointType *string `json:"resolverEndpointType,omitempty"` + SecurityGroupIDs []*string `json:"securityGroupIDs,omitempty"` Status *string `json:"status,omitempty"` @@ -285,5 +312,14 @@ type Tag struct { type TargetAddress struct { IP *string `json:"ip,omitempty"` + IPv6 *string `json:"ipv6,omitempty"` + Port *int64 `json:"port,omitempty"` } + +// +kubebuilder:skipversion +type UpdateIPAddress struct { + IPID *string `json:"ipID,omitempty"` + + IPv6 *string `json:"ipv6,omitempty"` +} diff --git a/apis/s3control/v1alpha1/zz_enums.go b/apis/s3control/v1alpha1/zz_enums.go index 35c4122169..74f2ed8bc6 100644 --- a/apis/s3control/v1alpha1/zz_enums.go +++ b/apis/s3control/v1alpha1/zz_enums.go @@ -58,6 +58,20 @@ const ( BucketVersioningStatus_Suspended BucketVersioningStatus = "Suspended" ) +type DeleteMarkerReplicationStatus string + +const ( + DeleteMarkerReplicationStatus_Enabled DeleteMarkerReplicationStatus = "Enabled" + DeleteMarkerReplicationStatus_Disabled DeleteMarkerReplicationStatus = "Disabled" +) + +type ExistingObjectReplicationStatus string + +const ( + ExistingObjectReplicationStatus_Enabled ExistingObjectReplicationStatus = "Enabled" + ExistingObjectReplicationStatus_Disabled ExistingObjectReplicationStatus = "Disabled" +) + type ExpirationStatus string const ( @@ -139,6 +153,13 @@ const ( MFADeleteStatus_Disabled MFADeleteStatus = "Disabled" ) +type MetricsStatus string + +const ( + MetricsStatus_Enabled MetricsStatus = "Enabled" + MetricsStatus_Disabled MetricsStatus = "Disabled" +) + type MultiRegionAccessPointStatus string const ( @@ -157,6 +178,13 @@ const ( NetworkOrigin_VPC NetworkOrigin = "VPC" ) +type ObjectLambdaAccessPointAliasStatus string + +const ( + ObjectLambdaAccessPointAliasStatus_PROVISIONING ObjectLambdaAccessPointAliasStatus = "PROVISIONING" + ObjectLambdaAccessPointAliasStatus_READY ObjectLambdaAccessPointAliasStatus = "READY" +) + type ObjectLambdaAllowedFeature string const ( @@ -195,6 +223,26 @@ const ( OutputSchemaVersion_V_1 OutputSchemaVersion = "V_1" ) +type OwnerOverride string + +const ( + OwnerOverride_Destination OwnerOverride = "Destination" +) + +type 
ReplicaModificationsStatus string + +const ( + ReplicaModificationsStatus_Enabled ReplicaModificationsStatus = "Enabled" + ReplicaModificationsStatus_Disabled ReplicaModificationsStatus = "Disabled" +) + +type ReplicationRuleStatus string + +const ( + ReplicationRuleStatus_Enabled ReplicationRuleStatus = "Enabled" + ReplicationRuleStatus_Disabled ReplicationRuleStatus = "Disabled" +) + type ReplicationStatus string const ( @@ -204,6 +252,27 @@ const ( ReplicationStatus_NONE ReplicationStatus = "NONE" ) +type ReplicationStorageClass string + +const ( + ReplicationStorageClass_STANDARD ReplicationStorageClass = "STANDARD" + ReplicationStorageClass_REDUCED_REDUNDANCY ReplicationStorageClass = "REDUCED_REDUNDANCY" + ReplicationStorageClass_STANDARD_IA ReplicationStorageClass = "STANDARD_IA" + ReplicationStorageClass_ONEZONE_IA ReplicationStorageClass = "ONEZONE_IA" + ReplicationStorageClass_INTELLIGENT_TIERING ReplicationStorageClass = "INTELLIGENT_TIERING" + ReplicationStorageClass_GLACIER ReplicationStorageClass = "GLACIER" + ReplicationStorageClass_DEEP_ARCHIVE ReplicationStorageClass = "DEEP_ARCHIVE" + ReplicationStorageClass_OUTPOSTS ReplicationStorageClass = "OUTPOSTS" + ReplicationStorageClass_GLACIER_IR ReplicationStorageClass = "GLACIER_IR" +) + +type ReplicationTimeStatus string + +const ( + ReplicationTimeStatus_Enabled ReplicationTimeStatus = "Enabled" + ReplicationTimeStatus_Disabled ReplicationTimeStatus = "Disabled" +) + type RequestedJobStatus string const ( @@ -304,6 +373,13 @@ const ( S3StorageClass_GLACIER_IR S3StorageClass = "GLACIER_IR" ) +type SSEKMSEncryptedObjectsStatus string + +const ( + SSEKMSEncryptedObjectsStatus_Enabled SSEKMSEncryptedObjectsStatus = "Enabled" + SSEKMSEncryptedObjectsStatus_Disabled SSEKMSEncryptedObjectsStatus = "Disabled" +) + type TransitionStorageClass string const ( diff --git a/apis/s3control/v1alpha1/zz_generated.deepcopy.go b/apis/s3control/v1alpha1/zz_generated.deepcopy.go index 13e535399e..428db8221d 100644 --- a/apis/s3control/v1alpha1/zz_generated.deepcopy.go +++ b/apis/s3control/v1alpha1/zz_generated.deepcopy.go @@ -286,6 +286,26 @@ func (in *CustomAccessPointParameters) DeepCopy() *CustomAccessPointParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Destination) DeepCopyInto(out *Destination) { + *out = *in + if in.Account != nil { + in, out := &in.Account, &out.Account + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Destination. +func (in *Destination) DeepCopy() *Destination { + if in == nil { + return nil + } + out := new(Destination) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *JobManifestLocation) DeepCopyInto(out *JobManifestLocation) { *out = *in @@ -389,6 +409,11 @@ func (in *Region) DeepCopyInto(out *Region) { *out = new(string) **out = **in } + if in.BucketAccountID != nil { + in, out := &in.BucketAccountID, &out.BucketAccountID + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Region. 
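The generated DeepCopyInto bodies throughout this patch are assembled from a handful of recurring idioms, the three most common being: allocate-and-copy for scalar pointers, element-wise copy for slices of pointers, and delegation to DeepCopy for metav1.Time. A minimal hand-written sketch of the pattern, assuming only k8s.io/apimachinery; the Example type is a hypothetical stand-in, not a type from this patch:

    package example

    import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    type Example struct {
        Name      *string
        IDs       []*string
        CreatedAt *metav1.Time
    }

    func (in *Example) DeepCopyInto(out *Example) {
        *out = *in // shallow copy first, then replace every pointer field below
        if in.Name != nil {
            // scalar pointer: allocate a fresh value and copy into it
            out.Name = new(string)
            *out.Name = *in.Name
        }
        if in.IDs != nil {
            // slice of pointers: new backing array, element-wise value copies
            out.IDs = make([]*string, len(in.IDs))
            for i := range in.IDs {
                if in.IDs[i] != nil {
                    out.IDs[i] = new(string)
                    *out.IDs[i] = *in.IDs[i]
                }
            }
        }
        if in.CreatedAt != nil {
            // metav1.Time carries its own DeepCopy, so delegate to it
            out.CreatedAt = in.CreatedAt.DeepCopy()
        }
    }

The shallow copy up front settles all value fields in one step, so each guarded block only has to deal with fields that would otherwise alias the source object; nested struct fields (not shown here) delegate to their own DeepCopyInto the same way metav1.Time does.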
@@ -409,6 +434,11 @@ func (in *RegionReport) DeepCopyInto(out *RegionReport) { *out = new(string) **out = **in } + if in.BucketAccountID != nil { + in, out := &in.BucketAccountID, &out.BucketAccountID + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegionReport. diff --git a/apis/s3control/v1alpha1/zz_types.go b/apis/s3control/v1alpha1/zz_types.go index b787c8568d..2ea488cd7d 100644 --- a/apis/s3control/v1alpha1/zz_types.go +++ b/apis/s3control/v1alpha1/zz_types.go @@ -56,6 +56,11 @@ type CreateMultiRegionAccessPointInput struct { PublicAccessBlock *PublicAccessBlockConfiguration `json:"publicAccessBlock,omitempty"` } +// +kubebuilder:skipversion +type Destination struct { + Account *string `json:"account,omitempty"` +} + // +kubebuilder:skipversion type JobManifestLocation struct { ETag *string `json:"eTag,omitempty"` @@ -92,11 +97,15 @@ type PublicAccessBlockConfiguration struct { // +kubebuilder:skipversion type Region struct { Bucket *string `json:"bucket,omitempty"` + + BucketAccountID *string `json:"bucketAccountID,omitempty"` } // +kubebuilder:skipversion type RegionReport struct { Bucket *string `json:"bucket,omitempty"` + + BucketAccountID *string `json:"bucketAccountID,omitempty"` } // +kubebuilder:skipversion diff --git a/apis/secretsmanager/v1beta1/zz_secret.go b/apis/secretsmanager/v1beta1/zz_secret.go index 9c6ea45650..7fef7b561e 100644 --- a/apis/secretsmanager/v1beta1/zz_secret.go +++ b/apis/secretsmanager/v1beta1/zz_secret.go @@ -34,7 +34,7 @@ type SecretParameters struct { // The description of the secret. Description *string `json:"description,omitempty"` // Specifies whether to overwrite a secret with the same name in the destination - // Region. + // Region. By default, secrets aren't overwritten. ForceOverwriteReplicaSecret *bool `json:"forceOverwriteReplicaSecret,omitempty"` // The ARN, key ID, or alias of the KMS key that Secrets Manager uses to encrypt // the secret value in the secret. 
An alias is always prefixed by alias/, for diff --git a/apis/sesv2/generator-config.yaml b/apis/sesv2/generator-config.yaml index 85f2963bac..449ae9faea 100644 --- a/apis/sesv2/generator-config.yaml +++ b/apis/sesv2/generator-config.yaml @@ -26,5 +26,6 @@ ignore: - DeliverabilityTestReport #Not offered in AWS Web Console, alternatively view from Reputation metrics dashboard - DedicatedIpPool #Trigger manual flow through AWS Support for service limit increase is needed - EmailIdentityPolicy #Issue during pkg conversion - executing "sdk_find_read_many" + - ExportJob - ImportJob - ConfigurationSetEventDestination #Remove until PR fix in code-generator diff --git a/apis/sesv2/v1alpha1/zz_enums.go b/apis/sesv2/v1alpha1/zz_enums.go index cf44738e4c..3f6db83c7c 100644 --- a/apis/sesv2/v1alpha1/zz_enums.go +++ b/apis/sesv2/v1alpha1/zz_enums.go @@ -25,6 +25,14 @@ const ( BehaviorOnMxFailure_REJECT_MESSAGE BehaviorOnMxFailure = "REJECT_MESSAGE" ) +type BounceType string + +const ( + BounceType_UNDETERMINED BounceType = "UNDETERMINED" + BounceType_TRANSIENT BounceType = "TRANSIENT" + BounceType_PERMANENT BounceType = "PERMANENT" +) + type BulkEmailStatus string const ( @@ -80,6 +88,17 @@ const ( DeliverabilityTestStatus_COMPLETED DeliverabilityTestStatus = "COMPLETED" ) +type DeliveryEventType string + +const ( + DeliveryEventType_SEND DeliveryEventType = "SEND" + DeliveryEventType_DELIVERY DeliveryEventType = "DELIVERY" + DeliveryEventType_TRANSIENT_BOUNCE DeliveryEventType = "TRANSIENT_BOUNCE" + DeliveryEventType_PERMANENT_BOUNCE DeliveryEventType = "PERMANENT_BOUNCE" + DeliveryEventType_UNDETERMINED_BOUNCE DeliveryEventType = "UNDETERMINED_BOUNCE" + DeliveryEventType_COMPLAINT DeliveryEventType = "COMPLAINT" +) + type DimensionValueSource string const ( @@ -112,6 +131,13 @@ const ( DkimStatus_NOT_STARTED DkimStatus = "NOT_STARTED" ) +type EngagementEventType string + +const ( + EngagementEventType_OPEN EngagementEventType = "OPEN" + EngagementEventType_CLICK EngagementEventType = "CLICK" +) + type EventType string const ( @@ -127,6 +153,13 @@ const ( EventType_SUBSCRIPTION EventType = "SUBSCRIPTION" ) +type ExportSourceType string + +const ( + ExportSourceType_METRICS_DATA ExportSourceType = "METRICS_DATA" + ExportSourceType_MESSAGE_INSIGHTS ExportSourceType = "MESSAGE_INSIGHTS" +) + type FeatureStatus string const ( @@ -156,6 +189,7 @@ const ( JobStatus_PROCESSING JobStatus = "PROCESSING" JobStatus_COMPLETED JobStatus = "COMPLETED" JobStatus_FAILED JobStatus = "FAILED" + JobStatus_CANCELLED JobStatus = "CANCELLED" ) type ListRecommendationsFilterKey string @@ -198,6 +232,13 @@ const ( Metric_DELIVERY_COMPLAINT Metric = "DELIVERY_COMPLAINT" ) +type MetricAggregation string + +const ( + MetricAggregation_RATE MetricAggregation = "RATE" + MetricAggregation_VOLUME MetricAggregation = "VOLUME" +) + type MetricDimensionName string const ( @@ -239,6 +280,7 @@ const ( RecommendationType_DKIM RecommendationType = "DKIM" RecommendationType_DMARC RecommendationType = "DMARC" RecommendationType_SPF RecommendationType = "SPF" + RecommendationType_BIMI RecommendationType = "BIMI" ) type ReviewStatus string diff --git a/apis/sesv2/v1alpha1/zz_generated.deepcopy.go b/apis/sesv2/v1alpha1/zz_generated.deepcopy.go index 695e4455ee..7c273d968c 100644 --- a/apis/sesv2/v1alpha1/zz_generated.deepcopy.go +++ b/apis/sesv2/v1alpha1/zz_generated.deepcopy.go @@ -867,6 +867,29 @@ func (in *EventDestinationDefinition) DeepCopy() *EventDestinationDefinition { return out } +// DeepCopyInto is an autogenerated deepcopy function, 
copying the receiver, writing into out. in must be non-nil. +func (in *ExportJobSummary) DeepCopyInto(out *ExportJobSummary) { + *out = *in + if in.CompletedTimestamp != nil { + in, out := &in.CompletedTimestamp, &out.CompletedTimestamp + *out = (*in).DeepCopy() + } + if in.CreatedTimestamp != nil { + in, out := &in.CreatedTimestamp, &out.CreatedTimestamp + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExportJobSummary. +func (in *ExportJobSummary) DeepCopy() *ExportJobSummary { + if in == nil { + return nil + } + out := new(ExportJobSummary) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GuardianAttributes) DeepCopyInto(out *GuardianAttributes) { *out = *in @@ -962,6 +985,25 @@ func (in *InboxPlacementTrackingOption) DeepCopy() *InboxPlacementTrackingOption return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsEvent) DeepCopyInto(out *InsightsEvent) { + *out = *in + if in.Timestamp != nil { + in, out := &in.Timestamp, &out.Timestamp + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsEvent. +func (in *InsightsEvent) DeepCopy() *InsightsEvent { + if in == nil { + return nil + } + out := new(InsightsEvent) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MailFromAttributes) DeepCopyInto(out *MailFromAttributes) { *out = *in @@ -992,6 +1034,52 @@ func (in *MailFromAttributes) DeepCopy() *MailFromAttributes { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MessageInsightsDataSource) DeepCopyInto(out *MessageInsightsDataSource) { + *out = *in + if in.EndDate != nil { + in, out := &in.EndDate, &out.EndDate + *out = (*in).DeepCopy() + } + if in.StartDate != nil { + in, out := &in.StartDate, &out.StartDate + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MessageInsightsDataSource. +func (in *MessageInsightsDataSource) DeepCopy() *MessageInsightsDataSource { + if in == nil { + return nil + } + out := new(MessageInsightsDataSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricsDataSource) DeepCopyInto(out *MetricsDataSource) { + *out = *in + if in.EndDate != nil { + in, out := &in.EndDate, &out.EndDate + *out = (*in).DeepCopy() + } + if in.StartDate != nil { + in, out := &in.StartDate, &out.StartDate + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsDataSource. +func (in *MetricsDataSource) DeepCopy() *MetricsDataSource { + if in == nil { + return nil + } + out := new(MetricsDataSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Recommendation) DeepCopyInto(out *Recommendation) { *out = *in diff --git a/apis/sesv2/v1alpha1/zz_types.go b/apis/sesv2/v1alpha1/zz_types.go index 7c0debf5ff..0e73295b5b 100644 --- a/apis/sesv2/v1alpha1/zz_types.go +++ b/apis/sesv2/v1alpha1/zz_types.go @@ -136,6 +136,13 @@ type EventDestinationDefinition struct { Enabled *bool `json:"enabled,omitempty"` } +// +kubebuilder:skipversion +type ExportJobSummary struct { + CompletedTimestamp *metav1.Time `json:"completedTimestamp,omitempty"` + + CreatedTimestamp *metav1.Time `json:"createdTimestamp,omitempty"` +} + // +kubebuilder:skipversion type GuardianAttributes struct { OptimizedSharedDelivery *string `json:"optimizedSharedDelivery,omitempty"` @@ -162,6 +169,11 @@ type InboxPlacementTrackingOption struct { Global *bool `json:"global,omitempty"` } +// +kubebuilder:skipversion +type InsightsEvent struct { + Timestamp *metav1.Time `json:"timestamp,omitempty"` +} + // +kubebuilder:skipversion type MailFromAttributes struct { // The action to take if the required MX record can't be found when you send @@ -190,6 +202,20 @@ type MailFromAttributes struct { MailFromDomainStatus *string `json:"mailFromDomainStatus,omitempty"` } +// +kubebuilder:skipversion +type MessageInsightsDataSource struct { + EndDate *metav1.Time `json:"endDate,omitempty"` + + StartDate *metav1.Time `json:"startDate,omitempty"` +} + +// +kubebuilder:skipversion +type MetricsDataSource struct { + EndDate *metav1.Time `json:"endDate,omitempty"` + + StartDate *metav1.Time `json:"startDate,omitempty"` +} + // +kubebuilder:skipversion type Recommendation struct { CreatedTimestamp *metav1.Time `json:"createdTimestamp,omitempty"` diff --git a/apis/sfn/generator-config.yaml b/apis/sfn/generator-config.yaml index 073876d814..973253d132 100644 --- a/apis/sfn/generator-config.yaml +++ b/apis/sfn/generator-config.yaml @@ -1,4 +1,6 @@ ignore: + resource_names: + - StateMachineAlias field_paths: - CreateStateMachineInput.RoleArn - CreateStateMachineInput.Type # its jsontag is type_ in SDK and we don't want that. 
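The zz_enums.go additions in this patch (JobStatus_CANCELLED above, the route53resolver statuses earlier, SftpAuthenticationMethods below) all follow the code-generator's convention: enum values are string-typed constants, while the generated structs expose the matching fields as plain *string. A minimal sketch of the conversions this forces at a call site; the two helper functions are illustrative, not part of this patch, and aws.String is the stock aws-sdk-go pointer helper:

    package example

    import "github.com/aws/aws-sdk-go/aws"

    // Mirrors the declaration style of the generated zz_enums.go files.
    type JobStatus string

    const JobStatus_CANCELLED JobStatus = "CANCELLED"

    // Writing: convert the enum to string, then take its address.
    func cancelledStatus() *string {
        return aws.String(string(JobStatus_CANCELLED))
    }

    // Reading: nil-check first, then compare the dereferenced value.
    func isCancelled(status *string) bool {
        return status != nil && *status == string(JobStatus_CANCELLED)
    }

Keeping the struct fields as *string rather than the enum type means the CRDs also accept values the vendored SDK has not learned yet, which keeps older custom resources forward-compatible across SDK bumps like this one.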
diff --git a/apis/sfn/v1alpha1/zz_enums.go b/apis/sfn/v1alpha1/zz_enums.go index 7f692e07cb..f277b3350a 100644 --- a/apis/sfn/v1alpha1/zz_enums.go +++ b/apis/sfn/v1alpha1/zz_enums.go @@ -138,4 +138,5 @@ const ( ValidationExceptionReason_API_DOES_NOT_SUPPORT_LABELED_ARNS ValidationExceptionReason = "API_DOES_NOT_SUPPORT_LABELED_ARNS" ValidationExceptionReason_MISSING_REQUIRED_PARAMETER ValidationExceptionReason = "MISSING_REQUIRED_PARAMETER" ValidationExceptionReason_CANNOT_UPDATE_COMPLETED_MAP_RUN ValidationExceptionReason = "CANNOT_UPDATE_COMPLETED_MAP_RUN" + ValidationExceptionReason_INVALID_ROUTING_CONFIGURATION ValidationExceptionReason = "INVALID_ROUTING_CONFIGURATION" ) diff --git a/apis/sfn/v1alpha1/zz_generated.deepcopy.go b/apis/sfn/v1alpha1/zz_generated.deepcopy.go index d58a464513..40f53774bd 100644 --- a/apis/sfn/v1alpha1/zz_generated.deepcopy.go +++ b/apis/sfn/v1alpha1/zz_generated.deepcopy.go @@ -306,11 +306,21 @@ func (in *ExecutionListItem) DeepCopyInto(out *ExecutionListItem) { in, out := &in.StartDate, &out.StartDate *out = (*in).DeepCopy() } + if in.StateMachineAliasARN != nil { + in, out := &in.StateMachineAliasARN, &out.StateMachineAliasARN + *out = new(string) + **out = **in + } if in.StateMachineARN != nil { in, out := &in.StateMachineARN, &out.StateMachineARN *out = new(string) **out = **in } + if in.StateMachineVersionARN != nil { + in, out := &in.StateMachineVersionARN, &out.StateMachineVersionARN + *out = new(string) + **out = **in + } if in.StopDate != nil { in, out := &in.StopDate, &out.StopDate *out = (*in).DeepCopy() @@ -335,6 +345,16 @@ func (in *ExecutionStartedEventDetails) DeepCopyInto(out *ExecutionStartedEventD *out = new(string) **out = **in } + if in.StateMachineAliasARN != nil { + in, out := &in.StateMachineAliasARN, &out.StateMachineAliasARN + *out = new(string) + **out = **in + } + if in.StateMachineVersionARN != nil { + in, out := &in.StateMachineVersionARN, &out.StateMachineVersionARN + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecutionStartedEventDetails. @@ -495,6 +515,26 @@ func (in *MapRunListItem) DeepCopy() *MapRunListItem { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoutingConfigurationListItem) DeepCopyInto(out *RoutingConfigurationListItem) { + *out = *in + if in.StateMachineVersionARN != nil { + in, out := &in.StateMachineVersionARN, &out.StateMachineVersionARN + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingConfigurationListItem. +func (in *RoutingConfigurationListItem) DeepCopy() *RoutingConfigurationListItem { + if in == nil { + return nil + } + out := new(RoutingConfigurationListItem) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StateEnteredEventDetails) DeepCopyInto(out *StateEnteredEventDetails) { *out = *in @@ -562,6 +602,25 @@ func (in *StateMachine) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StateMachineAliasListItem) DeepCopyInto(out *StateMachineAliasListItem) { + *out = *in + if in.CreationDate != nil { + in, out := &in.CreationDate, &out.CreationDate + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StateMachineAliasListItem. +func (in *StateMachineAliasListItem) DeepCopy() *StateMachineAliasListItem { + if in == nil { + return nil + } + out := new(StateMachineAliasListItem) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StateMachineList) DeepCopyInto(out *StateMachineList) { *out = *in @@ -640,6 +699,11 @@ func (in *StateMachineObservation) DeepCopyInto(out *StateMachineObservation) { *out = new(string) **out = **in } + if in.StateMachineVersionARN != nil { + in, out := &in.StateMachineVersionARN, &out.StateMachineVersionARN + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StateMachineObservation. @@ -670,6 +734,11 @@ func (in *StateMachineParameters) DeepCopyInto(out *StateMachineParameters) { *out = new(string) **out = **in } + if in.Publish != nil { + in, out := &in.Publish, &out.Publish + *out = new(bool) + **out = **in + } if in.Tags != nil { in, out := &in.Tags, &out.Tags *out = make([]*Tag, len(*in)) @@ -686,6 +755,11 @@ func (in *StateMachineParameters) DeepCopyInto(out *StateMachineParameters) { *out = new(TracingConfiguration) (*in).DeepCopyInto(*out) } + if in.VersionDescription != nil { + in, out := &in.VersionDescription, &out.VersionDescription + *out = new(string) + **out = **in + } in.CustomStateMachineParameters.DeepCopyInto(&out.CustomStateMachineParameters) } @@ -733,6 +807,25 @@ func (in *StateMachineStatus) DeepCopy() *StateMachineStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StateMachineVersionListItem) DeepCopyInto(out *StateMachineVersionListItem) { + *out = *in + if in.CreationDate != nil { + in, out := &in.CreationDate, &out.CreationDate + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StateMachineVersionListItem. +func (in *StateMachineVersionListItem) DeepCopy() *StateMachineVersionListItem { + if in == nil { + return nil + } + out := new(StateMachineVersionListItem) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Tag) DeepCopyInto(out *Tag) { *out = *in diff --git a/apis/sfn/v1alpha1/zz_state_machine.go b/apis/sfn/v1alpha1/zz_state_machine.go index 42f42cdeb4..8700a267c1 100644 --- a/apis/sfn/v1alpha1/zz_state_machine.go +++ b/apis/sfn/v1alpha1/zz_state_machine.go @@ -57,6 +57,9 @@ type StateMachineParameters struct { // A-Z, a-z, - and _. // +kubebuilder:validation:Required Name *string `json:"name"` + // Set to true to publish the first version of the state machine during creation. + // The default is false. + Publish *bool `json:"publish,omitempty"` // Tags to be added when creating a state machine. // // An array of key-value pairs. For more information, see Using Cost Allocation @@ -68,7 +71,11 @@ type StateMachineParameters struct { // _ . : / = + - @. Tags []*Tag `json:"tags,omitempty"` // Selects whether X-Ray tracing is enabled. 
- TracingConfiguration *TracingConfiguration `json:"tracingConfiguration,omitempty"` + TracingConfiguration *TracingConfiguration `json:"tracingConfiguration,omitempty"` + // Sets the description for the state machine version. You can only set the description + // if the publish parameter is set to true. Otherwise, if you set versionDescription + // but set publish to false, this API action throws ValidationException. + VersionDescription *string `json:"versionDescription,omitempty"` CustomStateMachineParameters `json:",inline"` } @@ -84,6 +91,10 @@ type StateMachineObservation struct { CreationDate *metav1.Time `json:"creationDate,omitempty"` // The Amazon Resource Name (ARN) that identifies the created state machine. StateMachineARN *string `json:"stateMachineARN,omitempty"` + // The Amazon Resource Name (ARN) that identifies the created state machine + // version. If you do not set the publish parameter to true, this field returns + // a null value. + StateMachineVersionARN *string `json:"stateMachineVersionARN,omitempty"` } // StateMachineStatus defines the observed state of StateMachine. diff --git a/apis/sfn/v1alpha1/zz_types.go b/apis/sfn/v1alpha1/zz_types.go index 9bea99fd29..691647aa15 100644 --- a/apis/sfn/v1alpha1/zz_types.go +++ b/apis/sfn/v1alpha1/zz_types.go @@ -54,14 +54,22 @@ type ExecutionListItem struct { StartDate *metav1.Time `json:"startDate,omitempty"` + StateMachineAliasARN *string `json:"stateMachineAliasARN,omitempty"` + StateMachineARN *string `json:"stateMachineARN,omitempty"` + StateMachineVersionARN *string `json:"stateMachineVersionARN,omitempty"` + StopDate *metav1.Time `json:"stopDate,omitempty"` } // +kubebuilder:skipversion type ExecutionStartedEventDetails struct { RoleARN *string `json:"roleARN,omitempty"` + + StateMachineAliasARN *string `json:"stateMachineAliasARN,omitempty"` + + StateMachineVersionARN *string `json:"stateMachineVersionARN,omitempty"` } // +kubebuilder:skipversion @@ -104,6 +112,11 @@ type MapRunListItem struct { StopDate *metav1.Time `json:"stopDate,omitempty"` } +// +kubebuilder:skipversion +type RoutingConfigurationListItem struct { + StateMachineVersionARN *string `json:"stateMachineVersionARN,omitempty"` +} + // +kubebuilder:skipversion type StateEnteredEventDetails struct { Name *string `json:"name,omitempty"` @@ -114,6 +127,11 @@ type StateExitedEventDetails struct { Name *string `json:"name,omitempty"` } +// +kubebuilder:skipversion +type StateMachineAliasListItem struct { + CreationDate *metav1.Time `json:"creationDate,omitempty"` +} + // +kubebuilder:skipversion type StateMachineListItem struct { CreationDate *metav1.Time `json:"creationDate,omitempty"` @@ -125,6 +143,11 @@ type StateMachineListItem struct { Type *string `json:"type_,omitempty"` } +// +kubebuilder:skipversion +type StateMachineVersionListItem struct { + CreationDate *metav1.Time `json:"creationDate,omitempty"` +} + // +kubebuilder:skipversion type Tag struct { Key *string `json:"key,omitempty"` diff --git a/apis/transfer/v1alpha1/zz_enums.go b/apis/transfer/v1alpha1/zz_enums.go index 32dab87c08..052afc1d8d 100644 --- a/apis/transfer/v1alpha1/zz_enums.go +++ b/apis/transfer/v1alpha1/zz_enums.go @@ -183,6 +183,15 @@ const ( SetStatOption_ENABLE_NO_OP SetStatOption = "ENABLE_NO_OP" ) +type SftpAuthenticationMethods string + +const ( + SftpAuthenticationMethods_PASSWORD SftpAuthenticationMethods = "PASSWORD" + SftpAuthenticationMethods_PUBLIC_KEY SftpAuthenticationMethods = "PUBLIC_KEY" + SftpAuthenticationMethods_PUBLIC_KEY_OR_PASSWORD SftpAuthenticationMethods =
"PUBLIC_KEY_OR_PASSWORD" + SftpAuthenticationMethods_PUBLIC_KEY_AND_PASSWORD SftpAuthenticationMethods = "PUBLIC_KEY_AND_PASSWORD" +) + type SigningAlg string const ( diff --git a/apis/transfer/v1alpha1/zz_generated.deepcopy.go b/apis/transfer/v1alpha1/zz_generated.deepcopy.go index 57b445dfb6..c7c7bc04c6 100644 --- a/apis/transfer/v1alpha1/zz_generated.deepcopy.go +++ b/apis/transfer/v1alpha1/zz_generated.deepcopy.go @@ -614,6 +614,17 @@ func (in *DescribedServer) DeepCopyInto(out *DescribedServer) { *out = new(string) **out = **in } + if in.StructuredLogDestinations != nil { + in, out := &in.StructuredLogDestinations, &out.StructuredLogDestinations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.Tags != nil { in, out := &in.Tags, &out.Tags *out = make([]*Tag, len(*in)) @@ -867,6 +878,11 @@ func (in *IdentityProviderDetails) DeepCopyInto(out *IdentityProviderDetails) { *out = new(string) **out = **in } + if in.SftpAuthenticationMethods != nil { + in, out := &in.SftpAuthenticationMethods, &out.SftpAuthenticationMethods + *out = new(string) + **out = **in + } if in.URL != nil { in, out := &in.URL, &out.URL *out = new(string) @@ -1393,6 +1409,17 @@ func (in *ServerParameters) DeepCopyInto(out *ServerParameters) { *out = new(string) **out = **in } + if in.StructuredLogDestinations != nil { + in, out := &in.StructuredLogDestinations, &out.StructuredLogDestinations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.Tags != nil { in, out := &in.Tags, &out.Tags *out = make([]*Tag, len(*in)) diff --git a/apis/transfer/v1alpha1/zz_server.go b/apis/transfer/v1alpha1/zz_server.go index be46ce44c1..6fe0b4ca5f 100644 --- a/apis/transfer/v1alpha1/zz_server.go +++ b/apis/transfer/v1alpha1/zz_server.go @@ -83,14 +83,14 @@ type ServerParameters struct { // server to a new server, don't update the host key. Accidentally changing // a server's host key can be disruptive. // - // For more information, see Update host keys for your SFTP-enabled server (https://docs.aws.amazon.com/transfer/latest/userguide/edit-server-config.html#configuring-servers-change-host-key) + // For more information, see Manage host keys for your SFTP-enabled server (https://docs.aws.amazon.com/transfer/latest/userguide/edit-server-config.html#configuring-servers-change-host-key) // in the Transfer Family User Guide. HostKey *string `json:"hostKey,omitempty"` - // Required when IdentityProviderType is set to AWS_DIRECTORY_SERVICE or API_GATEWAY. - // Accepts an array containing all of the information required to use a directory - // in AWS_DIRECTORY_SERVICE or invoke a customer-supplied authentication API, - // including the API Gateway URL. Not required when IdentityProviderType is - // set to SERVICE_MANAGED. + // Required when IdentityProviderType is set to AWS_DIRECTORY_SERVICE, Amazon + // Web Services_LAMBDA or API_GATEWAY. Accepts an array containing all of the + // information required to use a directory in AWS_DIRECTORY_SERVICE or invoke + // a customer-supplied authentication API, including the API Gateway URL. Not + // required when IdentityProviderType is set to SERVICE_MANAGED. IdentityProviderDetails *IdentityProviderDetails `json:"identityProviderDetails,omitempty"` // The mode of authentication for a server. 
The default value is SERVICE_MANAGED, // which allows you to store and access user credentials within the Transfer @@ -109,7 +109,7 @@ type ServerParameters struct { // // Use the AWS_LAMBDA value to directly use a Lambda function as your identity // provider. If you choose this value, you must specify the ARN for the Lambda - // function in the Function parameter or the IdentityProviderDetails data type. + // function in the Function parameter for the IdentityProviderDetails data type. IdentityProviderType *string `json:"identityProviderType,omitempty"` // Specifies a string to display when users connect to a server. This string // is displayed after the user authenticates. @@ -166,27 +166,44 @@ type ServerParameters struct { // to it over FTPS. // // * If Protocol includes either FTP or FTPS, then the EndpointType must - // be VPC and the IdentityProviderType must be AWS_DIRECTORY_SERVICE or API_GATEWAY. + // be VPC and the IdentityProviderType must be either AWS_DIRECTORY_SERVICE, + // AWS_LAMBDA, or API_GATEWAY. // // * If Protocol includes FTP, then AddressAllocationIds cannot be associated. // // * If Protocol is set only to SFTP, the EndpointType can be set to PUBLIC - // and the IdentityProviderType can be set to SERVICE_MANAGED. + // and the IdentityProviderType can be set to any of the supported identity + // types: SERVICE_MANAGED, AWS_DIRECTORY_SERVICE, AWS_LAMBDA, or API_GATEWAY. // // * If Protocol includes AS2, then the EndpointType must be VPC, and domain // must be Amazon S3. Protocols []*string `json:"protocols,omitempty"` // Specifies the name of the security policy that is attached to the server. SecurityPolicyName *string `json:"securityPolicyName,omitempty"` + // Specifies the log groups to which your server logs are sent. + // + // To specify a log group, you must provide the ARN for an existing log group. + // In this case, the format of the log group is as follows: + // + // arn:aws:logs:region-name:amazon-account-id:log-group:log-group-name:* + // + // For example, arn:aws:logs:us-east-1:111122223333:log-group:mytestgroup:* + // + // If you have previously specified a log group for a server, you can clear + // it, and in effect turn off structured logging, by providing an empty value + // for this parameter in an update-server call. For example: + // + // update-server --server-id s-1234567890abcdef0 --structured-log-destinations + StructuredLogDestinations []*string `json:"structuredLogDestinations,omitempty"` // Key-value pairs that can be used to group and search for servers. Tags []*Tag `json:"tags,omitempty"` // Specifies the workflow ID for the workflow to assign and the execution role // that's used for executing the workflow. // - // In additon to a workflow to execute when a file is uploaded completely, WorkflowDeatails - // can also contain a workflow ID (and execution role) for a workflow to execute - // on partial upload. A partial upload occurs when a file is open when the session - // disconnects. + // In addition to a workflow to execute when a file is uploaded completely, + // WorkflowDetails can also contain a workflow ID (and execution role) for a + // workflow to execute on partial upload. A partial upload occurs when the server + // session disconnects while the file is still being uploaded.
WorkflowDetails *WorkflowDetails `json:"workflowDetails,omitempty"` CustomServerParameters `json:",inline"` } diff --git a/apis/transfer/v1alpha1/zz_types.go b/apis/transfer/v1alpha1/zz_types.go index 8780db9975..4242416877 100644 --- a/apis/transfer/v1alpha1/zz_types.go +++ b/apis/transfer/v1alpha1/zz_types.go @@ -142,12 +142,24 @@ type DescribedServer struct { // use for a file transfer protocol-enabled server's users. A server can have // only one method of authentication. IdentityProviderDetails *IdentityProviderDetails `json:"identityProviderDetails,omitempty"` - // Returns information related to the type of user authentication that is in - // use for a file transfer protocol-enabled server's users. For AWS_DIRECTORY_SERVICE - // or SERVICE_MANAGED authentication, the Secure Shell (SSH) public keys are - // stored with a user on the server instance. For API_GATEWAY authentication, - // your custom authentication method is implemented by using an API call. The - // server can have only one method of authentication. + // The mode of authentication for a server. The default value is SERVICE_MANAGED, + // which allows you to store and access user credentials within the Transfer + // Family service. + // + // Use AWS_DIRECTORY_SERVICE to provide access to Active Directory groups in + // Directory Service for Microsoft Active Directory or Microsoft Active Directory + // in your on-premises environment or in Amazon Web Services using AD Connector. + // This option also requires you to provide a Directory ID by using the IdentityProviderDetails + // parameter. + // + // Use the API_GATEWAY value to integrate with an identity provider of your + // choosing. The API_GATEWAY setting requires you to provide an Amazon API Gateway + // endpoint URL to call for authentication by using the IdentityProviderDetails + // parameter. + // + // Use the AWS_LAMBDA value to directly use a Lambda function as your identity + // provider. If you choose this value, you must specify the ARN for the Lambda + // function in the Function parameter for the IdentityProviderDetails data type. IdentityProviderType *string `json:"identityProviderType,omitempty"` LoggingRole *string `json:"loggingRole,omitempty"` @@ -175,6 +187,8 @@ type DescribedServer struct { // operational. Both START_FAILED and STOP_FAILED are error conditions. State *string `json:"state,omitempty"` + StructuredLogDestinations []*string `json:"structuredLogDestinations,omitempty"` + Tags []*Tag `json:"tags,omitempty"` UserCount *int64 `json:"userCount,omitempty"` @@ -247,6 +261,8 @@ type IdentityProviderDetails struct { InvocationRole *string `json:"invocationRole,omitempty"` + SftpAuthenticationMethods *string `json:"sftpAuthenticationMethods,omitempty"` + URL *string `json:"url,omitempty"` } @@ -299,12 +315,24 @@ type ListedServer struct { Domain *string `json:"domain,omitempty"` EndpointType *string `json:"endpointType,omitempty"` - // Returns information related to the type of user authentication that is in - // use for a file transfer protocol-enabled server's users. For AWS_DIRECTORY_SERVICE - // or SERVICE_MANAGED authentication, the Secure Shell (SSH) public keys are - // stored with a user on the server instance. For API_GATEWAY authentication, - // your custom authentication method is implemented by using an API call. The - // server can have only one method of authentication. + // The mode of authentication for a server.
The default value is SERVICE_MANAGED, + // which allows you to store and access user credentials within the Transfer + // Family service. + // + // Use AWS_DIRECTORY_SERVICE to provide access to Active Directory groups in + // Directory Service for Microsoft Active Directory or Microsoft Active Directory + // in your on-premises environment or in Amazon Web Services using AD Connector. + // This option also requires you to provide a Directory ID by using the IdentityProviderDetails + // parameter. + // + // Use the API_GATEWAY value to integrate with an identity provider of your + // choosing. The API_GATEWAY setting requires you to provide an Amazon API Gateway + // endpoint URL to call for authentication by using the IdentityProviderDetails + // parameter. + // + // Use the AWS_LAMBDA value to directly use a Lambda function as your identity + // provider. If you choose this value, you must specify the ARN for the Lambda + // function in the Function parameter for the IdentityProviderDetails data type. IdentityProviderType *string `json:"identityProviderType,omitempty"` LoggingRole *string `json:"loggingRole,omitempty"` diff --git a/apis/transfer/v1alpha1/zz_user.go b/apis/transfer/v1alpha1/zz_user.go index 6e4a737e56..3cdf2466ac 100644 --- a/apis/transfer/v1alpha1/zz_user.go +++ b/apis/transfer/v1alpha1/zz_user.go @@ -48,7 +48,8 @@ type UserParameters struct { // // In most cases, you can use this value instead of the session policy to lock // your user down to the designated home directory ("chroot"). To do this, you - // can set Entry to / and set Target to the HomeDirectory parameter value. + // can set Entry to / and set Target to the value the user should see for their + // home directory when they log in. // // The following is an Entry and Target pair example for chroot. // @@ -88,7 +89,17 @@ type UserParameters struct { // The public portion of the Secure Shell (SSH) key used to authenticate the // user to the server. // + // The three standard SSH public key format elements are <key type>, <body base64>, + // and an optional <comment>, with spaces between each element. + // // Transfer Family accepts RSA, ECDSA, and ED25519 keys. + // + // * For RSA keys, the key type is ssh-rsa. + // + // * For ED25519 keys, the key type is ssh-ed25519. + // + // * For ECDSA keys, the key type is either ecdsa-sha2-nistp256, ecdsa-sha2-nistp384, + // or ecdsa-sha2-nistp521, depending on the size of the key you generated. SshPublicKeyBody *string `json:"sshPublicKeyBody,omitempty"` // Key-value pairs that can be used to group and search for users. Tags are // metadata attached to users for any purpose. @@ -106,7 +117,7 @@ type UserSpec struct { type UserObservation struct { // The identifier of the server that the user is attached to. ServerID *string `json:"serverID,omitempty"` - // A unique string that identifies a user account associated with a server. + // A unique string that identifies a Transfer Family user.
UserName *string `json:"userName,omitempty"` } diff --git a/go.mod b/go.mod index eb865de76b..3920e8c8f9 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.19 require ( github.com/aws-controllers-k8s/code-generator v0.26.1 - github.com/aws/aws-sdk-go v1.44.174 + github.com/aws/aws-sdk-go v1.44.334 github.com/aws/aws-sdk-go-v2 v1.16.16 github.com/aws/aws-sdk-go-v2/config v1.11.1 github.com/aws/aws-sdk-go-v2/credentials v1.6.5 @@ -46,7 +46,7 @@ require ( k8s.io/api v0.27.3 k8s.io/apimachinery v0.27.3 k8s.io/client-go v0.27.3 - k8s.io/utils v0.0.0-20230505201702-9f6742963106 + k8s.io/utils v0.0.0-20230726121419-3b25d923346b sigs.k8s.io/controller-runtime v0.15.0 sigs.k8s.io/controller-tools v0.12.1 sigs.k8s.io/yaml v1.3.0 diff --git a/go.sum b/go.sum index b1590aa4e7..10145331b5 100644 --- a/go.sum +++ b/go.sum @@ -92,8 +92,8 @@ github.com/aws-controllers-k8s/code-generator v0.26.1/go.mod h1:JP9b2NiBqKN/Xxj3 github.com/aws-controllers-k8s/pkg v0.0.4 h1:fQX18NZZG6eVKdG3WWp/oE7QJgFe7Dz/Ublu+ua4PW8= github.com/aws-controllers-k8s/pkg v0.0.4/go.mod h1:LC/9DlYrXu8FWNwLquZLq1WhcyRo7qXb7upRLAEosQk= github.com/aws-controllers-k8s/runtime v0.26.0 h1:XKqygFzHSBtM74Ov9IroZbyCVeYei9Eskp4aKbJ2SFw= -github.com/aws/aws-sdk-go v1.44.174 h1:9lR4a6MKQW/t6YCG0ZKAt1GAkjdEPP8sWch/pfcuR0c= -github.com/aws/aws-sdk-go v1.44.174/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.334 h1:h2bdbGb//fez6Sv6PaYv868s9liDeoYM6hYsAqTB4MU= +github.com/aws/aws-sdk-go v1.44.334/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go-v2 v1.10.0/go.mod h1:U/EyyVvKtzmFeQQcca7eBotKdlpcP2zzU6bXBYcf7CE= github.com/aws/aws-sdk-go-v2 v1.11.2/go.mod h1:SQfA+m2ltnu1cA0soUkj4dRSsmITiVQUJvBIZjzfPyQ= github.com/aws/aws-sdk-go-v2 v1.16.2/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU= @@ -1319,8 +1319,8 @@ k8s.io/kube-openapi v0.0.0-20230525220651-2546d827e515 h1:OmK1d0WrkD3IPfkskvroRy k8s.io/kube-openapi v0.0.0-20230525220651-2546d827e515/go.mod h1:kzo02I3kQ4BTtEfVLaPbjvCkX97YqGve33wzlb3fofQ= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20230505201702-9f6742963106 h1:EObNQ3TW2D+WptiYXlApGNLVy0zm/JIBVY9i+M4wpAU= -k8s.io/utils v0.0.0-20230505201702-9f6742963106/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/package/crds/apigateway.aws.crossplane.io_apikeys.yaml b/package/crds/apigateway.aws.crossplane.io_apikeys.yaml index 25f605aaa1..2ae508686f 100644 --- a/package/crds/apigateway.aws.crossplane.io_apikeys.yaml +++ b/package/crds/apigateway.aws.crossplane.io_apikeys.yaml @@ -68,8 +68,8 @@ spec: description: APIKeyParameters defines the desired state of APIKey properties: customerID: - description: An AWS Marketplace customer identifier , when integrating - with the AWS SaaS Marketplace. + description: An Amazon Web Services Marketplace customer identifier, + when integrating with the Amazon Web Services SaaS Marketplace. 
type: string description: description: The description of the ApiKey. diff --git a/package/crds/apigateway.aws.crossplane.io_domainnames.yaml b/package/crds/apigateway.aws.crossplane.io_domainnames.yaml index 88dabfa849..1a24a42487 100644 --- a/package/crds/apigateway.aws.crossplane.io_domainnames.yaml +++ b/package/crds/apigateway.aws.crossplane.io_domainnames.yaml @@ -68,9 +68,9 @@ spec: description: DomainNameParameters defines the desired state of DomainName properties: certificateARN: - description: The reference to an AWS-managed certificate that - will be used by edge-optimized endpoint for this domain name. - AWS Certificate Manager is the only supported source. + description: The reference to an Amazon Web Services-managed certificate + that will be used by edge-optimized endpoint for this domain + name. Certificate Manager is the only supported source. type: string certificateBody: description: '[Deprecated] The body of the server certificate @@ -128,8 +128,8 @@ spec: description: Region is which region the DomainName will be created. type: string regionalCertificateARN: - description: The reference to an AWS-managed certificate that - will be used by regional endpoint for this domain name. AWS + description: The reference to an Amazon Web Services-managed certificate + that will be used by regional endpoint for this domain name. Certificate Manager is the only supported source. type: string regionalCertificateName: diff --git a/package/crds/apigateway.aws.crossplane.io_restapis.yaml b/package/crds/apigateway.aws.crossplane.io_restapis.yaml index fd25eb0013..9bf438c98e 100644 --- a/package/crds/apigateway.aws.crossplane.io_restapis.yaml +++ b/package/crds/apigateway.aws.crossplane.io_restapis.yaml @@ -69,7 +69,7 @@ spec: properties: apiKeySource: description: 'The source of the API key for metering requests - according to a usage plan. Valid values are: >HEADER to read + according to a usage plan. Valid values are: HEADER to read the API key from the X-API-Key header of a request. AUTHORIZER to read the API key from the UsageIdentifierKey from a custom authorizer.' @@ -351,6 +351,9 @@ spec: description: The API's identifier. This identifier is unique across all of your APIs in API Gateway. type: string + rootResourceID: + description: The API's root resource ID. + type: string warnings: description: The warning messages reported when failonwarnings is turned on during API import. diff --git a/package/crds/apigateway.aws.crossplane.io_usageplans.yaml b/package/crds/apigateway.aws.crossplane.io_usageplans.yaml index d9012bfcd3..786ffd7269 100644 --- a/package/crds/apigateway.aws.crossplane.io_usageplans.yaml +++ b/package/crds/apigateway.aws.crossplane.io_usageplans.yaml @@ -437,8 +437,9 @@ spec: description: The identifier of a UsagePlan resource. type: string productCode: - description: The AWS Markeplace product identifier to associate - with the usage plan as a SaaS product on AWS Marketplace. + description: The Amazon Web Services Marketplace product identifier + to associate with the usage plan as a SaaS product on the Amazon + Web Services Marketplace. 
type: string type: object conditions: diff --git a/package/crds/apigateway.aws.crossplane.io_vpclinks.yaml b/package/crds/apigateway.aws.crossplane.io_vpclinks.yaml index 3ef31eb41a..eb961d5807 100644 --- a/package/crds/apigateway.aws.crossplane.io_vpclinks.yaml +++ b/package/crds/apigateway.aws.crossplane.io_vpclinks.yaml @@ -87,7 +87,7 @@ spec: targetARNs: description: The ARN of the network load balancer of the VPC targeted by the VPC link. The network load balancer must be owned by - the same AWS account of the API owner. + the same Amazon Web Services account of the API owner. items: type: string type: array diff --git a/package/crds/athena.aws.crossplane.io_workgroups.yaml b/package/crds/athena.aws.crossplane.io_workgroups.yaml index 3ca9578192..220d1213e1 100644 --- a/package/crds/athena.aws.crossplane.io_workgroups.yaml +++ b/package/crds/athena.aws.crossplane.io_workgroups.yaml @@ -69,13 +69,14 @@ spec: properties: configuration: description: Contains configuration information for creating an - Athena SQL workgroup, which includes the location in Amazon - S3 where query results are stored, the encryption configuration, - if any, used for encrypting query results, whether the Amazon - CloudWatch Metrics are enabled for the workgroup, the limit - for the amount of bytes scanned (cutoff) per query, if it is - specified, and whether workgroup's settings (specified with - EnforceWorkGroupConfiguration) in the WorkGroupConfiguration + Athena SQL workgroup or Spark enabled Athena workgroup. Athena + SQL workgroup configuration includes the location in Amazon + S3 where query and calculation results are stored, the encryption + configuration, if any, used for encrypting query results, whether + the Amazon CloudWatch Metrics are enabled for the workgroup, + the limit for the amount of bytes scanned (cutoff) per query, + if it is specified, and whether workgroup's settings (specified + with EnforceWorkGroupConfiguration) in the WorkGroupConfiguration override client-side settings. See WorkGroupConfiguration$EnforceWorkGroupConfiguration. properties: additionalConfiguration: @@ -85,11 +86,14 @@ spec: type: integer customerContentEncryptionConfiguration: description: Specifies the KMS key that is used to encrypt - the user's data stores in Athena. + the user's data stores in Athena. This setting does not + apply to Athena SQL workgroups. properties: kmsKey: type: string type: object + enableMinimumEncryptionConfiguration: + type: boolean enforceWorkGroupConfiguration: type: boolean engineVersion: @@ -108,11 +112,11 @@ spec: requesterPaysEnabled: type: boolean resultConfiguration: - description: The location in Amazon S3 where query results - are stored and the encryption option, if any, used for query - results. These are known as "client-side settings". If workgroup - settings override client-side settings, then the query uses - the workgroup settings. + description: The location in Amazon S3 where query and calculation + results are stored and the encryption option, if any, used + for query and calculation results. These are known as "client-side + settings". If workgroup settings override client-side settings, + then the query uses the workgroup settings. properties: aclConfiguration: description: Indicates that an Amazon S3 canned ACL should @@ -127,9 +131,9 @@ spec: type: string type: object encryptionConfiguration: - description: If query results are encrypted in Amazon - S3, indicates the encryption option used (for example, - SSE_KMS or CSE_KMS) and key information. 
+ description: If query and calculation results are encrypted + in Amazon S3, indicates the encryption option used (for + example, SSE_KMS or CSE_KMS) and key information. properties: encryptionOption: type: string diff --git a/package/crds/autoscaling.aws.crossplane.io_autoscalinggroups.yaml b/package/crds/autoscaling.aws.crossplane.io_autoscalinggroups.yaml index 547d397b4d..856f4e5495 100644 --- a/package/crds/autoscaling.aws.crossplane.io_autoscalinggroups.yaml +++ b/package/crds/autoscaling.aws.crossplane.io_autoscalinggroups.yaml @@ -102,24 +102,24 @@ spec: format: int64 type: integer defaultInstanceWarmup: - description: "The amount of time, in seconds, until a newly launched - instance can contribute to the Amazon CloudWatch metrics. This - delay lets an instance finish initializing before Amazon EC2 - Auto Scaling aggregates instance metrics, resulting in more - reliable usage data. Set this value equal to the amount of time - that it takes for resource consumption to become stable after - an instance reaches the InService state. For more information, - see Set the default instance warmup for an Auto Scaling group - (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-default-instance-warmup.html) - in the Amazon EC2 Auto Scaling User Guide. \n To manage your + description: "The amount of time, in seconds, until a new instance + is considered to have finished initializing and resource consumption + to become stable after it enters the InService state. \n During + an instance refresh, Amazon EC2 Auto Scaling waits for the warm-up + period after it replaces an instance before it moves on to replacing + the next instance. Amazon EC2 Auto Scaling also waits for the + warm-up period before aggregating the metrics for new instances + with existing instances in the Amazon CloudWatch metrics that + are used for scaling, resulting in more reliable usage data. + For more information, see Set the default instance warmup for + an Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-default-instance-warmup.html) + in the Amazon EC2 Auto Scaling User Guide. \n To manage various warm-up settings at the group level, we recommend that you set - the default instance warmup, even if its value is set to 0 seconds. - This also optimizes the performance of scaling policies that - scale continuously, such as target tracking and step scaling - policies. \n If you need to remove a value that you previously - set, include the property but specify -1 for the value. However, - we strongly recommend keeping the default instance warmup enabled - by specifying a minimum value of 0. \n Default: None" + the default instance warmup, even if it is set to 0 seconds. + To remove a value that you previously set, include the property + but specify -1 for the value. However, we strongly recommend + keeping the default instance warmup enabled by specifying a + value of 0 or other nominal value. \n Default: None" format: int64 type: integer desiredCapacity: @@ -154,14 +154,13 @@ spec: format: int64 type: integer healthCheckType: - description: "Determines whether any additional health checks - are performed on the instances in this group. Amazon EC2 health - checks are always on. For more information, see Health checks - for Auto Scaling instances (https://docs.aws.amazon.com/autoscaling/ec2/userguide/healthcheck.html) - in the Amazon EC2 Auto Scaling User Guide. \n The valid values - are EC2 (default), ELB, and VPC_LATTICE. 
The VPC_LATTICE health - check type is reserved for use with VPC Lattice, which is in - preview release and is subject to change." + description: "A comma-separated value string of one or more health + check types. \n The valid values are EC2, ELB, and VPC_LATTICE. + EC2 is the default health check and cannot be disabled. For + more information, see Health checks for Auto Scaling instances + (https://docs.aws.amazon.com/autoscaling/ec2/userguide/healthcheck.html) + in the Amazon EC2 Auto Scaling User Guide. \n Only specify EC2 + if you must clear a value that was previously set." type: string instanceID: description: The ID of the instance used to base the launch configuration @@ -221,7 +220,7 @@ spec: loadBalancerNames: description: A list of Classic Load Balancers associated with this Auto Scaling group. For Application Load Balancers, Network - Load Balancers, and Gateway Load Balancer, specify the TargetGroupARNs + Load Balancers, and Gateway Load Balancers, specify the TargetGroupARNs property instead. items: type: string @@ -594,22 +593,6 @@ spec: items: type: string type: array - trafficSources: - description: "Reserved for use with Amazon VPC Lattice, which - is in preview release and is subject to change. Do not use this - parameter for production workloads. It is also subject to change. - \n The unique identifiers of one or more traffic sources. \n - Currently, you must specify an Amazon Resource Name (ARN) for - an existing VPC Lattice target group. Amazon EC2 Auto Scaling - registers the running instances with the attached target groups. - The target groups receive incoming traffic and route requests - to one or more registered targets." - items: - properties: - identifier: - type: string - type: object - type: array vpcZoneIdentifier: description: A comma-separated list of subnet IDs for a virtual private cloud (VPC) where instances in the Auto Scaling group @@ -873,12 +856,8 @@ spec: format: int64 type: integer healthCheckType: - description: "Determines whether any additional health checks - are performed on the instances in this group. Amazon EC2 health - checks are always on. \n The valid values are EC2 (default), - ELB, and VPC_LATTICE. The VPC_LATTICE health check type is reserved - for use with VPC Lattice, which is in preview release and is - subject to change." + description: A comma-separated value string of one or more health + check types. type: string instances: description: The EC2 instances associated with the group. @@ -1272,11 +1251,14 @@ spec: type: string type: array trafficSources: - description: The unique identifiers of the traffic sources. + description: The traffic sources associated with this Auto Scaling + group. items: properties: identifier: type: string + type_: + type: string type: object type: array vpcZoneIdentifier: diff --git a/package/crds/batch.aws.crossplane.io_computeenvironments.yaml b/package/crds/batch.aws.crossplane.io_computeenvironments.yaml index c01ff39397..eace51e779 100644 --- a/package/crds/batch.aws.crossplane.io_computeenvironments.yaml +++ b/package/crds/batch.aws.crossplane.io_computeenvironments.yaml @@ -912,8 +912,15 @@ spec: the state is DISABLED, then the Batch scheduler doesn't attempt to place jobs within the environment. Jobs in a STARTING or RUNNING state continue to progress normally. Managed compute - environments in the DISABLED state don't scale out. However, - they scale in to minvCpus value after instances become idle." + environments in the DISABLED state don't scale out. 
\n Compute + environments in a DISABLED state may continue to incur billing + charges. To prevent additional charges, turn off and then delete + the compute environment. For more information, see State (https://docs.aws.amazon.com/batch/latest/userguide/compute_environment_parameters.html#compute_environment_state) + in the Batch User Guide. \n When an instance is idle, the instance + scales down to the minvCpus value. However, the instance size + doesn't change. For example, consider a c5.8xlarge instance + with a minvCpus value of 4 and a desiredvCpus value of 36. This + instance doesn't scale down to a c5.large instance." type: string status: description: The current status of the compute environment (for diff --git a/package/crds/cloudfront.aws.crossplane.io_cachepolicies.yaml b/package/crds/cloudfront.aws.crossplane.io_cachepolicies.yaml index b39e959f8a..29a6082a99 100644 --- a/package/crds/cloudfront.aws.crossplane.io_cachepolicies.yaml +++ b/package/crds/cloudfront.aws.crossplane.io_cachepolicies.yaml @@ -89,18 +89,18 @@ spec: headers, cookies, and URL query strings. CloudFront uses the cache key to find an object in its cache that it can return to the viewer. \n The headers, cookies, and query - strings that are included in the cache key are automatically - included in requests that CloudFront sends to the origin. - CloudFront sends a request when it can't find an object - in its cache that matches the request's cache key. If you - want to send values to the origin but not include them in - the cache key, use OriginRequestPolicy." + strings that are included in the cache key are also included + in requests that CloudFront sends to the origin. CloudFront + sends a request when it can't find an object in its cache + that matches the request's cache key. If you want to send + values to the origin but not include them in the cache key, + use OriginRequestPolicy." properties: cookiesConfig: description: An object that determines whether any cookies in viewer requests (and if so, which cookies) are included - in the cache key and automatically included in requests - that CloudFront sends to the origin. + in the cache key and in requests that CloudFront sends + to the origin. properties: cookieBehavior: type: string @@ -123,8 +123,8 @@ spec: headersConfig: description: An object that determines whether any HTTP headers (and if so, which headers) are included in the - cache key and automatically included in requests that - CloudFront sends to the origin. + cache key and in requests that CloudFront sends to the + origin. properties: headerBehavior: type: string @@ -140,8 +140,8 @@ spec: queryStringsConfig: description: An object that determines whether any URL query strings in viewer requests (and if so, which query - strings) are included in the cache key and automatically - included in requests that CloudFront sends to the origin. + strings) are included in the cache key and in requests + that CloudFront sends to the origin. properties: queryStringBehavior: type: string @@ -382,12 +382,12 @@ spec: return to the viewer. \n * The default, minimum, and maximum time to live (TTL) values that you want objects to stay in the CloudFront cache. \n The headers, cookies, and query - strings that are included in the cache key are automatically - included in requests that CloudFront sends to the origin. - CloudFront sends a request when it can't find a valid object - in its cache that matches the request's cache key. 
If you - want to send values to the origin but not include them in - the cache key, use OriginRequestPolicy." + strings that are included in the cache key are also included + in requests that CloudFront sends to the origin. CloudFront + sends a request when it can't find a valid object in its + cache that matches the request's cache key. If you want + to send values to the origin but not include them in the + cache key, use OriginRequestPolicy." properties: comment: type: string @@ -409,18 +409,17 @@ spec: uses the cache key to find an object in its cache that it can return to the viewer. \n The headers, cookies, and query strings that are included in the cache key - are automatically included in requests that CloudFront - sends to the origin. CloudFront sends a request when - it can't find an object in its cache that matches the - request's cache key. If you want to send values to the - origin but not include them in the cache key, use OriginRequestPolicy." + are also included in requests that CloudFront sends + to the origin. CloudFront sends a request when it can't + find an object in its cache that matches the request's + cache key. If you want to send values to the origin + but not include them in the cache key, use OriginRequestPolicy." properties: cookiesConfig: description: An object that determines whether any cookies in viewer requests (and if so, which cookies) - are included in the cache key and automatically - included in requests that CloudFront sends to the - origin. + are included in the cache key and in requests that + CloudFront sends to the origin. properties: cookieBehavior: type: string @@ -443,8 +442,8 @@ spec: headersConfig: description: An object that determines whether any HTTP headers (and if so, which headers) are included - in the cache key and automatically included in requests - that CloudFront sends to the origin. + in the cache key and in requests that CloudFront + sends to the origin. properties: headerBehavior: type: string @@ -461,8 +460,7 @@ spec: description: An object that determines whether any URL query strings in viewer requests (and if so, which query strings) are included in the cache key - and automatically included in requests that CloudFront - sends to the origin. + and in requests that CloudFront sends to the origin. properties: queryStringBehavior: type: string diff --git a/package/crds/cognitoidentityprovider.aws.crossplane.io_groups.yaml b/package/crds/cognitoidentityprovider.aws.crossplane.io_groups.yaml index 8a795e92bb..7d2c17334a 100644 --- a/package/crds/cognitoidentityprovider.aws.crossplane.io_groups.yaml +++ b/package/crds/cognitoidentityprovider.aws.crossplane.io_groups.yaml @@ -452,14 +452,16 @@ spec: description: GroupObservation defines the observed state of Group properties: creationDate: - description: The date the group was created. + description: The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) + format, when the item was created. format: date-time type: string groupName: description: The name of the group. type: string lastModifiedDate: - description: The date the group was last modified. + description: The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) + format, when the item was modified. 
format: date-time type: string roleARN: diff --git a/package/crds/cognitoidentityprovider.aws.crossplane.io_identityproviders.yaml b/package/crds/cognitoidentityprovider.aws.crossplane.io_identityproviders.yaml index fa1decba74..3be890c827 100644 --- a/package/crds/cognitoidentityprovider.aws.crossplane.io_identityproviders.yaml +++ b/package/crds/cognitoidentityprovider.aws.crossplane.io_identityproviders.yaml @@ -399,11 +399,13 @@ spec: of IdentityProvider properties: creationDate: - description: The date the IdP was created. + description: The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) + format, when the item was created. format: date-time type: string lastModifiedDate: - description: The date the IdP was last modified. + description: The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) + format, when the item was modified. format: date-time type: string providerName: diff --git a/package/crds/cognitoidentityprovider.aws.crossplane.io_userpoolclients.yaml b/package/crds/cognitoidentityprovider.aws.crossplane.io_userpoolclients.yaml index f1a67e7896..f7ed95d406 100644 --- a/package/crds/cognitoidentityprovider.aws.crossplane.io_userpoolclients.yaml +++ b/package/crds/cognitoidentityprovider.aws.crossplane.io_userpoolclients.yaml @@ -94,8 +94,18 @@ spec: type: string type: array allowedOAuthFlowsUserPoolClient: - description: Set to true if the client is allowed to follow the - OAuth protocol when interacting with Amazon Cognito user pools. + description: "Set to true to use OAuth 2.0 features in your user + pool app client. \n AllowedOAuthFlowsUserPoolClient must be + true before you can configure the following features in your + app client. \n * CallBackURLs: Callback URLs. \n * LogoutURLs: + Sign-out redirect URLs. \n * AllowedOAuthScopes: OAuth 2.0 scopes. + \n * AllowedOAuthFlows: Support for authorization code, implicit, + and client credentials OAuth 2.0 grants. \n To use OAuth 2.0 + features, configure one of these features in the Amazon Cognito + console or set AllowedOAuthFlowsUserPoolClient to true in a + CreateUserPoolClient or UpdateUserPoolClient API request. If + you don't set a value for AllowedOAuthFlowsUserPoolClient in + a request with the CLI or SDKs, it defaults to false." type: boolean allowedOAuthScopes: description: The allowed OAuth scopes. Possible values provided @@ -213,7 +223,7 @@ spec: a TokenValidityUnits value in your API request. \n For example, when you set IdTokenValidity as 10 and TokenValidityUnits as hours, your user can authenticate their session with their ID - token for 10 hours. \n The default time unit for AccessTokenValidity + token for 10 hours. \n The default time unit for IdTokenValidity in an API request is hours. Valid range is displayed below in seconds. \n If you don't specify otherwise in the configuration of your app client, your ID tokens are valid for one hour." @@ -590,11 +600,13 @@ spec: client type. type: string creationDate: - description: The date the user pool client was created. + description: The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) + format, when the item was created. format: date-time type: string lastModifiedDate: - description: The date the user pool client was last modified. + description: The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) + format, when the item was modified. 
format: date-time type: string userPoolID: diff --git a/package/crds/cognitoidentityprovider.aws.crossplane.io_userpools.yaml b/package/crds/cognitoidentityprovider.aws.crossplane.io_userpools.yaml index e96dd69e24..2a83bdfd6e 100644 --- a/package/crds/cognitoidentityprovider.aws.crossplane.io_userpools.yaml +++ b/package/crds/cognitoidentityprovider.aws.crossplane.io_userpools.yaml @@ -321,8 +321,12 @@ spec: type: array type: object userPoolAddOns: - description: Enables advanced security risk detection. Set the - key AdvancedSecurityMode to the value "AUDIT". + description: "User pool add-ons. Contains settings for activation + of advanced security features. To log user security information + but take no action, set to AUDIT. To configure automatic security + responses to risky traffic to your user pool, set to ENFORCED. + \n For more information, see Adding advanced security to a user + pool (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-advanced-security.html)." properties: advancedSecurityMode: type: string @@ -342,11 +346,18 @@ spec: type: string type: array usernameConfiguration: - description: Case sensitivity on the username input for the selected - sign-in option. For example, when case sensitivity is set to - False, users can sign in using either "username" or "Username". - This configuration is immutable once it has been set. For more - information, see UsernameConfigurationType (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UsernameConfigurationType.html). + description: "Case sensitivity on the username input for the selected + sign-in option. When case sensitivity is set to False (case + insensitive), users can sign in with any combination of capital + and lowercase letters. For example, username, USERNAME, or UserName, + or for email, email@example.com or EMaiL@eXamplE.Com. For most + use cases, set case sensitivity to False (case insensitive) + as a best practice. When usernames and email addresses are case + insensitive, Amazon Cognito treats any variation in case as + the same user, and prevents a case variation from being assigned + to the same attribute for a different user. \n This configuration + is immutable after you set it. For more information, see UsernameConfigurationType + (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UsernameConfigurationType.html)." properties: caseSensitive: type: boolean @@ -580,7 +591,8 @@ spec: description: The Amazon Resource Name (ARN) for the user pool. type: string creationDate: - description: The date the user pool was created. + description: The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) + format, when the item was created. format: date-time type: string customDomain: @@ -608,15 +620,23 @@ spec: description: The ID of the user pool. type: string lastModifiedDate: - description: The date the user pool was last modified. + description: The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) + format, when the item was modified. format: date-time type: string name: description: The name of the user pool. type: string schemaAttributes: - description: A container with the schema attributes of a user - pool. + description: "A list of the user attributes and their properties + in your user pool. The attribute schema contains standard attributes, + custom attributes with a custom: prefix, and developer attributes + with a dev: prefix. 
For more information, see User pool attributes + (https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-attributes.html). + \n Developer-only attributes are a legacy feature of user pools, and + are read-only to all app clients. You can create and update + developer-only attributes only with IAM-authenticated API operations. + Use app client read/write permissions instead." items: properties: attributeDataType: @@ -658,10 +678,10 @@ spec: information, see SmsConfigurationType (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_SmsConfigurationType.html). \n SNSSandbox \n The Amazon Web Services account is in the SNS SMS Sandbox and messages will only reach verified end users. - This parameter won’t get populated with SNSSandbox if the IAM - user creating the user pool doesn’t have SNS permissions. To - learn how to move your Amazon Web Services account out of the - sandbox, see Moving out of the SMS sandbox (https://docs.aws.amazon.com/sns/latest/dg/sns-sms-sandbox-moving-to-production.html)." + This parameter won’t get populated with SNSSandbox if the user + creating the user pool doesn’t have SNS permissions. To learn + how to move your Amazon Web Services account out of the sandbox, + see Moving out of the SMS sandbox (https://docs.aws.amazon.com/sns/latest/dg/sns-sms-sandbox-moving-to-production.html)." type: string status: description: The status of a user pool. diff --git a/package/crds/dynamodb.aws.crossplane.io_tables.yaml b/package/crds/dynamodb.aws.crossplane.io_tables.yaml index eca7fc804e..78d5ea59ee 100644 --- a/package/crds/dynamodb.aws.crossplane.io_tables.yaml +++ b/package/crds/dynamodb.aws.crossplane.io_tables.yaml @@ -88,6 +88,10 @@ spec: unpredictable workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand)." type: string + deletionProtectionEnabled: + description: Indicates whether deletion protection is to be enabled + (true) or disabled (false) on the table. + type: boolean globalSecondaryIndexes: description: "One or more global secondary indexes (the maximum is 20) to be created on the table. Each global secondary index diff --git a/package/crds/ec2.aws.crossplane.io_flowlogs.yaml b/package/crds/ec2.aws.crossplane.io_flowlogs.yaml index c894990a09..b909bd8b86 100644 --- a/package/crds/ec2.aws.crossplane.io_flowlogs.yaml +++ b/package/crds/ec2.aws.crossplane.io_flowlogs.yaml @@ -272,9 +272,7 @@ spec: in the Amazon VPC User Guide or Transit Gateway Flow Log records (https://docs.aws.amazon.com/vpc/latest/tgw/tgw-flow-logs.html#flow-log-records) in the Amazon Web Services Transit Gateway Guide. \n Specify - the fields using the ${field-id} format, separated by spaces. - For the CLI, surround this parameter value with single quotes - on Linux or double quotes on Windows." + the fields using the ${field-id} format, separated by spaces." type: string logGroupName: description: "The name of a new or existing CloudWatch Logs log diff --git a/package/crds/ec2.aws.crossplane.io_launchtemplates.yaml b/package/crds/ec2.aws.crossplane.io_launchtemplates.yaml index d666117ec5..f6c8d0ce5f 100644 --- a/package/crds/ec2.aws.crossplane.io_launchtemplates.yaml +++ b/package/crds/ec2.aws.crossplane.io_launchtemplates.yaml @@ -132,6 +132,8 @@ spec: description: The CPU options for the instance. Both the core count and threads per core must be specified in the request.
properties: + amdSevSnp: + type: string coreCount: format: int64 type: integer @@ -222,21 +224,29 @@ spec: instanceRequirements: description: "The attributes for the instance types. When you specify instance attributes, Amazon EC2 will identify - instance types with these attributes. \n When you specify - multiple attributes, you get instance types that satisfy - all of the specified attributes. If you specify multiple - values for an attribute, you get instance types that satisfy - any of the specified values. \n To limit the list of instance - types from which Amazon EC2 can identify matching instance - types, you can use one of the following parameters, but - not both in the same request: \n * AllowedInstanceTypes + instance types with these attributes. \n You must specify + VCpuCount and MemoryMiB. All other attributes are optional. + Any unspecified optional attribute is set to its default. + \n When you specify multiple attributes, you get instance + types that satisfy all of the specified attributes. If you + specify multiple values for an attribute, you get instance + types that satisfy any of the specified values. \n To limit + the list of instance types from which Amazon EC2 can identify + matching instance types, you can use one of the following + parameters, but not both in the same request: \n * AllowedInstanceTypes - The instance types to include in the list. All other instance types are ignored, even if they match your specified attributes. \n * ExcludedInstanceTypes - The instance types to exclude from the list, even if they match your specified attributes. - \n You must specify VCpuCount and MemoryMiB. All other attributes - are optional. Any unspecified optional attribute is set - to its default. \n For more information, see Attribute-based + \n If you specify InstanceRequirements, you can't specify + InstanceType. \n Attribute-based instance type selection + is only supported when using Auto Scaling groups, EC2 Fleet, + and Spot Fleet to launch instances. If you plan to use the + launch template in the launch instance wizard (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-instance-wizard.html), + or with the RunInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) + API or AWS::EC2::Instance (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-instance.html) + Amazon Web Services CloudFormation resource, you can't specify + InstanceRequirements. \n For more information, see Attribute-based instance type selection for EC2 Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-attribute-based-instance-type-selection.html), Attribute-based instance type selection for Spot Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-attribute-based-instance-type-selection.html), and Spot placement score (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-placement-score.html) @@ -489,6 +499,8 @@ spec: type: integer networkInterfaceID: type: string + primaryIPv6: + type: boolean privateIPAddress: type: string privateIPAddresses: diff --git a/package/crds/ec2.aws.crossplane.io_launchtemplateversions.yaml b/package/crds/ec2.aws.crossplane.io_launchtemplateversions.yaml index 1d60b54b92..ae0c7c8795 100644 --- a/package/crds/ec2.aws.crossplane.io_launchtemplateversions.yaml +++ b/package/crds/ec2.aws.crossplane.io_launchtemplateversions.yaml @@ -133,6 +133,8 @@ spec: description: The CPU options for the instance. 
Both the core count and threads per core must be specified in the request. properties: + amdSevSnp: + type: string coreCount: format: int64 type: integer @@ -223,21 +225,29 @@ spec: instanceRequirements: description: "The attributes for the instance types. When you specify instance attributes, Amazon EC2 will identify - instance types with these attributes. \n When you specify - multiple attributes, you get instance types that satisfy - all of the specified attributes. If you specify multiple - values for an attribute, you get instance types that satisfy - any of the specified values. \n To limit the list of instance - types from which Amazon EC2 can identify matching instance - types, you can use one of the following parameters, but - not both in the same request: \n * AllowedInstanceTypes + instance types with these attributes. \n You must specify + VCpuCount and MemoryMiB. All other attributes are optional. + Any unspecified optional attribute is set to its default. + \n When you specify multiple attributes, you get instance + types that satisfy all of the specified attributes. If you + specify multiple values for an attribute, you get instance + types that satisfy any of the specified values. \n To limit + the list of instance types from which Amazon EC2 can identify + matching instance types, you can use one of the following + parameters, but not both in the same request: \n * AllowedInstanceTypes - The instance types to include in the list. All other instance types are ignored, even if they match your specified attributes. \n * ExcludedInstanceTypes - The instance types to exclude from the list, even if they match your specified attributes. - \n You must specify VCpuCount and MemoryMiB. All other attributes - are optional. Any unspecified optional attribute is set - to its default. \n For more information, see Attribute-based + \n If you specify InstanceRequirements, you can't specify + InstanceType. \n Attribute-based instance type selection + is only supported when using Auto Scaling groups, EC2 Fleet, + and Spot Fleet to launch instances. If you plan to use the + launch template in the launch instance wizard (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-instance-wizard.html), + or with the RunInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) + API or AWS::EC2::Instance (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-instance.html) + Amazon Web Services CloudFormation resource, you can't specify + InstanceRequirements. \n For more information, see Attribute-based instance type selection for EC2 Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-attribute-based-instance-type-selection.html), Attribute-based instance type selection for Spot Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-attribute-based-instance-type-selection.html), and Spot placement score (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-placement-score.html) @@ -490,6 +500,8 @@ spec: type: integer networkInterfaceID: type: string + primaryIPv6: + type: boolean privateIPAddress: type: string privateIPAddresses: @@ -732,6 +744,14 @@ spec: description: Region is which region the LaunchTemplateVersion will be created. type: string + resolveAlias: + description: "If true, and if a Systems Manager parameter is specified + for ImageId, the AMI ID is displayed in the response for imageID. 
+ For more information, see Use a Systems Manager parameter instead + of an AMI ID (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#use-an-ssm-parameter-instead-of-an-ami-id) + in the Amazon Elastic Compute Cloud User Guide. \n Default: + false" + type: boolean sourceVersion: description: The version number of the launch template version on which to base the new version. The new version inherits the @@ -1019,6 +1039,8 @@ spec: cpuOptions: description: The CPU options for the instance. properties: + amdSevSnp: + type: string coreCount: format: int64 type: integer @@ -1107,23 +1129,31 @@ spec: instanceRequirements: description: "The attributes for the instance types. When you specify instance attributes, Amazon EC2 will identify - instance types with these attributes. \n When you specify - multiple attributes, you get instance types that satisfy - all of the specified attributes. If you specify multiple - values for an attribute, you get instance types that - satisfy any of the specified values. \n To limit the - list of instance types from which Amazon EC2 can identify - matching instance types, you can use one of the following - parameters, but not both in the same request: \n * AllowedInstanceTypes + instance types with these attributes. \n You must specify + VCpuCount and MemoryMiB. All other attributes are optional. + Any unspecified optional attribute is set to its default. + \n When you specify multiple attributes, you get instance + types that satisfy all of the specified attributes. + If you specify multiple values for an attribute, you + get instance types that satisfy any of the specified + values. \n To limit the list of instance types from + which Amazon EC2 can identify matching instance types, + you can use one of the following parameters, but not + both in the same request: \n * AllowedInstanceTypes - The instance types to include in the list. All other instance types are ignored, even if they match your specified attributes. \n * ExcludedInstanceTypes - The instance types to exclude from the list, even if they - match your specified attributes. \n You must specify - VCpuCount and MemoryMiB. All other attributes are optional. - Any unspecified optional attribute is set to its default. - \n For more information, see Attribute-based instance - type selection for EC2 Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-attribute-based-instance-type-selection.html), + match your specified attributes. \n If you specify InstanceRequirements, + you can't specify InstanceType. \n Attribute-based instance + type selection is only supported when using Auto Scaling + groups, EC2 Fleet, and Spot Fleet to launch instances. + If you plan to use the launch template in the launch + instance wizard (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-instance-wizard.html) + or with the RunInstances API (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html), + you can't specify InstanceRequirements. 
\n For more + information, see Attribute-based instance type selection + for EC2 Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-attribute-based-instance-type-selection.html), Attribute-based instance type selection for Spot Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-attribute-based-instance-type-selection.html), and Spot placement score (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-placement-score.html) @@ -1362,6 +1392,8 @@ spec: properties: ipv6Address: type: string + isPrimaryIPv6: + type: boolean type: object type: array ipv6PrefixCount: @@ -1379,6 +1411,8 @@ spec: type: integer networkInterfaceID: type: string + primaryIPv6: + type: boolean privateIPAddress: type: string privateIPAddresses: diff --git a/package/crds/ec2.aws.crossplane.io_volumes.yaml b/package/crds/ec2.aws.crossplane.io_volumes.yaml index c44891815c..fe6e3706e7 100644 --- a/package/crds/ec2.aws.crossplane.io_volumes.yaml +++ b/package/crds/ec2.aws.crossplane.io_volumes.yaml @@ -68,7 +68,8 @@ spec: description: VolumeParameters defines the desired state of Volume properties: availabilityZone: - description: The Availability Zone in which to create the volume. + description: The ID of the Availability Zone in which to create + the volume. For example, us-east-1a. type: string encrypted: description: "Indicates whether the volume should be encrypted. @@ -248,8 +249,9 @@ spec: description: "The volume type. This parameter can be one of the following values: \n * General Purpose SSD: gp2 | gp3 \n * Provisioned IOPS SSD: io1 | io2 \n * Throughput Optimized HDD: st1 \n * - Cold HDD: sc1 \n * Magnetic: standard \n For more information, - see Amazon EBS volume types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + Cold HDD: sc1 \n * Magnetic: standard \n Throughput Optimized + HDD (st1) and Cold HDD (sc1) volumes can't be used as boot volumes. + \n For more information, see Amazon EBS volume types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) in the Amazon Elastic Compute Cloud User Guide. \n Default: gp2" type: string @@ -492,6 +494,9 @@ spec: Service (KMS) KMS key that was used to protect the volume encryption key for the volume. type: string + sseType: + description: Reserved for future use. + type: string state: description: The volume state. type: string diff --git a/package/crds/ec2.aws.crossplane.io_vpcendpoints.yaml b/package/crds/ec2.aws.crossplane.io_vpcendpoints.yaml index a7c61717d3..dd0fa2eadc 100644 --- a/package/crds/ec2.aws.crossplane.io_vpcendpoints.yaml +++ b/package/crds/ec2.aws.crossplane.io_vpcendpoints.yaml @@ -72,6 +72,8 @@ spec: properties: dnsRecordIPType: type: string + privateDNSOnlyForInboundResolverEndpoint: + type: boolean type: object ipAddressType: description: The IP address type for the endpoint. @@ -267,10 +269,20 @@ spec: type: string type: array serviceName: - description: The service name. To get a list of available services, - use the DescribeVpcEndpointServices request, or get the name - from the service provider. + description: The name of the endpoint service. type: string + subnetConfigurations: + description: The subnet configurations for the endpoint. + items: + properties: + ipv4: + type: string + ipv6: + type: string + subnetID: + type: string + type: object + type: array subnetIdRefs: description: SubnetIDRefs is a list of references to Subnets used to set the SubnetIDs. 
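A minimal sketch of how the new subnetConfigurations field can be set on a VPCEndpoint manifest, assuming the v1alpha1 schema shown above and the usual spec.forProvider layout of these CRDs; the metadata name, region, service name, subnet ID, and address are placeholders:

apiVersion: ec2.aws.crossplane.io/v1alpha1
kind: VPCEndpoint
metadata:
  name: example-interface-endpoint          # placeholder name
spec:
  forProvider:
    region: eu-central-1                    # placeholder region
    serviceName: com.amazonaws.eu-central-1.s3   # placeholder endpoint service
    subnetConfigurations:                   # new field in this SDK bump
      - subnetID: subnet-0123456789abcdef0  # placeholder subnet
        ipv4: 10.0.0.8                      # optional fixed IPv4 for the endpoint ENI
  providerConfigRef:
    name: default

The shape follows the schema above: subnetConfigurations is a plain list of objects with ipv4, ipv6, and subnetID, so per-subnet IP pinning is expressed inline rather than through a separate reference type.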
@@ -703,8 +715,8 @@ spec: type: string type: object networkInterfaceIDs: - description: (Interface endpoint) One or more network interfaces - for the endpoint. + description: (Interface endpoint) The network interfaces for the + endpoint. items: type: string type: array @@ -717,7 +729,7 @@ spec: its service. type: boolean routeTableIDs: - description: (Gateway endpoint) One or more route tables associated + description: (Gateway endpoint) The IDs of the route tables associated with the endpoint. items: type: string @@ -731,7 +743,7 @@ spec: type: string type: array tags: - description: Any tags assigned to the endpoint. + description: The tags assigned to the endpoint. items: properties: key: diff --git a/package/crds/ec2.aws.crossplane.io_vpcpeeringconnections.yaml b/package/crds/ec2.aws.crossplane.io_vpcpeeringconnections.yaml index 4b3778f2fe..e4c3b42847 100644 --- a/package/crds/ec2.aws.crossplane.io_vpcpeeringconnections.yaml +++ b/package/crds/ec2.aws.crossplane.io_vpcpeeringconnections.yaml @@ -529,11 +529,7 @@ spec: ownerID: type: string peeringOptions: - description: "We are retiring EC2-Classic. We recommend that - you migrate from EC2-Classic to a VPC. For more information, - see Migrate from EC2-Classic to a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-migrate.html) - in the Amazon Elastic Compute Cloud User Guide. \n Describes - the VPC peering connection options." + description: Describes the VPC peering connection options. properties: allowDNSResolutionFromRemoteVPC: type: boolean @@ -575,11 +571,7 @@ spec: ownerID: type: string peeringOptions: - description: "We are retiring EC2-Classic. We recommend that - you migrate from EC2-Classic to a VPC. For more information, - see Migrate from EC2-Classic to a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-migrate.html) - in the Amazon Elastic Compute Cloud User Guide. \n Describes - the VPC peering connection options." + description: Describes the VPC peering connection options. properties: allowDNSResolutionFromRemoteVPC: type: boolean diff --git a/package/crds/ecs.aws.crossplane.io_clusters.yaml b/package/crds/ecs.aws.crossplane.io_clusters.yaml index 934a37376e..ca457219ba 100644 --- a/package/crds/ecs.aws.crossplane.io_clusters.yaml +++ b/package/crds/ecs.aws.crossplane.io_clusters.yaml @@ -72,15 +72,17 @@ spec: to associate with the cluster. A capacity provider must be associated with a cluster before it can be included as part of the default capacity provider strategy of the cluster or used in a capacity - provider strategy when calling the CreateService or RunTask + provider strategy when calling the CreateService (https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateService.html) + or RunTask (https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RunTask.html) actions. \n If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must be created but not associated with another cluster. New Auto Scaling group capacity - providers can be created with the CreateCapacityProvider API - operation. \n To use a Fargate capacity provider, specify either - the FARGATE or FARGATE_SPOT capacity providers. The Fargate + providers can be created with the CreateCapacityProvider (https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateCapacityProvider.html) + API operation. \n To use a Fargate capacity provider, specify + either the FARGATE or FARGATE_SPOT capacity providers. 
The Fargate capacity providers are available to all accounts and only need - to be associated with a cluster to be used. \n The PutClusterCapacityProviders + to be associated with a cluster to be used. \n The PutClusterCapacityProviders + (https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutClusterCapacityProviders.html) API operation is used to update the list of available capacity providers for a cluster after the cluster is created." items: @@ -123,7 +125,8 @@ spec: defaultCapacityProviderStrategy: description: "The capacity provider strategy to set as the default for the cluster. After a default capacity provider strategy - is set for a cluster, when you call the RunTask or CreateService + is set for a cluster, when you call the CreateService (https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateService.html) + or RunTask (https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RunTask.html) APIs with no capacity provider strategy or launch type specified, the default capacity provider strategy for the cluster is used. \n If a default capacity provider strategy isn't defined for diff --git a/package/crds/ecs.aws.crossplane.io_services.yaml b/package/crds/ecs.aws.crossplane.io_services.yaml index 1566b22333..615d7b1c37 100644 --- a/package/crds/ecs.aws.crossplane.io_services.yaml +++ b/package/crds/ecs.aws.crossplane.io_services.yaml @@ -194,15 +194,17 @@ spec: type: object deploymentCircuitBreaker: description: "The deployment circuit breaker can only be used - for services using the rolling update (ECS) deployment type - that aren't behind a Classic Load Balancer. \n The deployment - circuit breaker determines whether a service deployment - will fail if the service can't reach a steady state. If - enabled, a service deployment will transition to a failed - state and stop launching new tasks. You can also configure - Amazon ECS to roll back your service to the last completed - deployment after a failure. For more information, see Rolling - update (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-type-ecs.html) + for services using the rolling update (ECS) deployment type. + \n The deployment circuit breaker determines whether a service + deployment will fail if the service can't reach a steady + state. If it is turned on, a service deployment will transition + to a failed state and stop launching new tasks. You can + also configure Amazon ECS to roll back your service to the + last completed deployment after a failure. For more information, + see Rolling update (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-type-ecs.html) + in the Amazon Elastic Container Service Developer Guide. + \n For more information about API failure reasons, see API + failure reasons (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/api_failures_messages.html) in the Amazon Elastic Container Service Developer Guide." properties: enable: @@ -227,21 +229,23 @@ spec: type: object desiredCount: description: "The number of instantiations of the specified task - definition to place and keep running on your cluster. \n This + definition to place and keep running in your service. \n This is required if schedulingStrategy is REPLICA or isn't specified. If schedulingStrategy is DAEMON then this isn't required." format: int64 type: integer enableECSManagedTags: - description: Specifies whether to turn on Amazon ECS managed tags - for the tasks within the service.
For more information, see - Tagging your Amazon ECS resources (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html) - in the Amazon Elastic Container Service Developer Guide. + description: "Specifies whether to turn on Amazon ECS managed + tags for the tasks within the service. For more information, + see Tagging your Amazon ECS resources (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html) + in the Amazon Elastic Container Service Developer Guide. \n + When you use Amazon ECS managed tags, you need to set the propagateTags + request parameter." type: boolean enableExecuteCommand: description: Determines whether the execute command functionality - is enabled for the service. If true, this enables execute command - functionality on all containers in the service tasks. + is turned on for the service. If true, this enables execute + command functionality on all containers in the service tasks. type: boolean forceDeletion: description: Force Service to be deleted, even with task Running @@ -751,11 +755,12 @@ spec: in the Amazon Elastic Container Service Developer Guide. type: string propagateTags: - description: "Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the task during task creation. To add tags to a task after task creation, use - the TagResource API action. + the TagResource (https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_TagResource.html) + API action. \n The default is NONE." type: string region: description: Region is which region the Service will be created. type: string @@ -834,17 +839,20 @@ spec: in the Docker documentation. \n Understand the following when specifying a log configuration for your containers. \n * Amazon ECS currently supports a subset of the logging - drivers available to the Docker daemon (shown in the valid - values below). Additional log drivers may be available in - future releases of the Amazon ECS container agent. \n * - This parameter requires version 1.18 of the Docker Remote - API or greater on your container instance. \n * For tasks - that are hosted on Amazon EC2 instances, the Amazon ECS - container agent must register the available logging drivers - with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable - before containers placed on that instance can use these - log configuration options. For more information, see Amazon - ECS container agent configuration (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) + drivers available to the Docker daemon. Additional log drivers + may be available in future releases of the Amazon ECS container + agent. For tasks on Fargate, the supported log drivers are + awslogs, splunk, and awsfirelens. For tasks hosted on Amazon + EC2 instances, the supported log drivers are awslogs, fluentd, + gelf, json-file, journald, logentries, syslog, splunk, and + awsfirelens. \n * This parameter requires version 1.18 of + the Docker Remote API or greater on your container instance. + \n * For tasks that are hosted on Amazon EC2 instances, + the Amazon ECS container agent must register the available + logging drivers with the ECS_AVAILABLE_LOGGING_DRIVERS environment + variable before containers placed on that instance can use + these log configuration options.
For more information, see + Amazon ECS container agent configuration (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the Amazon Elastic Container Service Developer Guide. \n * For tasks that are on Fargate, because you don't have access to the underlying infrastructure your tasks are hosted @@ -1270,8 +1278,7 @@ spec: launchType: type: string networkConfiguration: - description: An object representing the network configuration - for a task or service. + description: The network configuration for a task or service. properties: awsVPCConfiguration: description: An object representing the networking details @@ -1335,19 +1342,22 @@ spec: in the Docker documentation. \n Understand the following when specifying a log configuration for your containers. \n * Amazon ECS currently supports a subset of the - logging drivers available to the Docker daemon (shown - in the valid values below). Additional log drivers - may be available in future releases of the Amazon - ECS container agent. \n * This parameter requires - version 1.18 of the Docker Remote API or greater on - your container instance. \n * For tasks that are hosted - on Amazon EC2 instances, the Amazon ECS container - agent must register the available logging drivers - with the ECS_AVAILABLE_LOGGING_DRIVERS environment - variable before containers placed on that instance - can use these log configuration options. For more - information, see Amazon ECS container agent configuration - (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) + logging drivers available to the Docker daemon. Additional + log drivers may be available in future releases of + the Amazon ECS container agent. For tasks on Fargate, + the supported log drivers are awslogs, splunk, and + awsfirelens. For tasks hosted on Amazon EC2 instances, + the supported log drivers are awslogs, fluentd, gelf, + json-file, journald, logentries, syslog, splunk, and + awsfirelens. \n * This parameter requires version + 1.18 of the Docker Remote API or greater on your container + instance. \n * For tasks that are hosted on Amazon + EC2 instances, the Amazon ECS container agent must + register the available logging drivers with the ECS_AVAILABLE_LOGGING_DRIVERS + environment variable before containers placed on that + instance can use these log configuration options. + For more information, see Amazon ECS container agent + configuration (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the Amazon Elastic Container Service Developer Guide. \n * For tasks that are on Fargate, because you don't have access to the underlying infrastructure @@ -1561,8 +1571,7 @@ spec: type: object type: array networkConfiguration: - description: An object representing the network configuration - for a task or service. + description: The network configuration for a task or service. properties: awsVPCConfiguration: description: An object representing the networking details diff --git a/package/crds/ecs.aws.crossplane.io_taskdefinitions.yaml b/package/crds/ecs.aws.crossplane.io_taskdefinitions.yaml index 58f5daac1b..8e33d0eeac 100644 --- a/package/crds/ecs.aws.crossplane.io_taskdefinitions.yaml +++ b/package/crds/ecs.aws.crossplane.io_taskdefinitions.yaml @@ -80,6 +80,10 @@ spec: cpu: format: int64 type: integer + credentialSpecs: + items: + type: string + type: array dependsOn: items: properties: @@ -159,7 +163,9 @@ spec: check.
Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image (such as those specified - in a parent image or from the image's Dockerfile). \n + in a parent image or from the image's Dockerfile). This + configuration maps to the HEALTHCHECK parameter of docker + run (https://docs.docker.com/engine/reference/run/). \n The Amazon ECS container agent only monitors and reports on the health checks specified in the task definition. Amazon ECS does not monitor Docker health checks that @@ -169,38 +175,38 @@ spec: health checks that exist in the container image. \n You can view the health status of both individual containers and a task with the DescribeTasks API operation or when - viewing the task details in the console. \n The following - describes the possible healthStatus values for a container: - \n * HEALTHY-The container health check has passed successfully. - \n * UNHEALTHY-The container health check has failed. - \n * UNKNOWN-The container health check is being evaluated - or there's no container health check defined. \n The following - describes the possible healthStatus values for a task. - The container health check status of nonessential containers - only affects the health status of a task if no essential - containers have health checks defined. \n * HEALTHY-All - essential containers within the task have passed their - health checks. \n * UNHEALTHY-One or more essential containers - have failed their health check. \n * UNKNOWN-The essential - containers within the task are still having their health - checks evaluated or there are only nonessential containers - with health checks defined. \n If a task is run manually, - and not as part of a service, the task will continue its - lifecycle regardless of its health status. For tasks that - are part of a service, if the task reports as unhealthy - then the task will be stopped and the service scheduler - will replace it. \n For tasks that are a part of a service - and the service uses the ECS rolling deployment type, - the deployment is paused while the new tasks have the - UNKNOWN task health check status. For example, tasks that - define health checks for nonessential containers when - no essential containers have health checks will have the - UNKNOWN health check status indefinitely which prevents - the deployment from completing. \n The following are notes - about container health check support: \n * Container health - checks require version 1.17.0 or greater of the Amazon - ECS container agent. For more information, see Updating - the Amazon ECS container agent (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html). + viewing the task details in the console. \n The health + check is designed to make sure that your containers survive + agent restarts, upgrades, or temporary unavailability. + \n The following describes the possible healthStatus values + for a container: \n * HEALTHY-The container health check + has passed successfully. \n * UNHEALTHY-The container + health check has failed. \n * UNKNOWN-The container health + check is being evaluated or there's no container health + check defined. \n The following describes the possible + healthStatus values for a task. The container health check + status of non-essential containers doesn't have an effect + on the health status of a task. \n * HEALTHY-All essential + containers within the task have passed their health checks.
+ \n * UNHEALTHY-One or more essential containers have failed + their health check. \n * UNKNOWN-The essential containers + within the task are still having their health checks evaluated, + there are only nonessential containers with health checks + defined, or there are no container health checks defined. + \n If a task is run manually, and not as part of a service, + the task will continue its lifecycle regardless of its + health status. For tasks that are part of a service, if + the task reports as unhealthy then the task will be stopped + and the service scheduler will replace it. \n The following + are notes about container health check support: \n * When + the Amazon ECS agent cannot connect to the Amazon ECS + service, the service reports the container as UNHEALTHY. + \n * The health check statuses are the \"last heard from\" + response from the Amazon ECS agent. There are no assumptions + made about the status of the container health checks. + \n * Container health checks require version 1.17.0 or + greater of the Amazon ECS container agent. For more information, + see Updating the Amazon ECS container agent (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html). \n * Container health checks are supported for Fargate tasks if you're using platform version 1.1.0 or greater. For more information, see Fargate platform versions (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html). @@ -236,8 +242,8 @@ spec: type: string type: array linuxParameters: - description: Linux-specific options that are applied to - the container, such as Linux KernelCapabilities. + description: The Linux-specific options that are applied + to the container, such as Linux KernelCapabilities (https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_KernelCapabilities.html). properties: capabilities: description: The Linux capabilities for the container @@ -315,18 +321,21 @@ spec: in the Docker documentation. \n Understand the following when specifying a log configuration for your containers. \n * Amazon ECS currently supports a subset of the logging - drivers available to the Docker daemon (shown in the valid - values below). Additional log drivers may be available - in future releases of the Amazon ECS container agent. - \n * This parameter requires version 1.18 of the Docker - Remote API or greater on your container instance. \n * - For tasks that are hosted on Amazon EC2 instances, the - Amazon ECS container agent must register the available - logging drivers with the ECS_AVAILABLE_LOGGING_DRIVERS - environment variable before containers placed on that - instance can use these log configuration options. For - more information, see Amazon ECS container agent configuration - (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) + drivers available to the Docker daemon. Additional log + drivers may be available in future releases of the Amazon + ECS container agent. For tasks on Fargate, the supported + log drivers are awslogs, splunk, and awsfirelens. For + tasks hosted on Amazon EC2 instances, the supported log + drivers are awslogs, fluentd, gelf, json-file, journald, + logentries, syslog, splunk, and + awsfirelens. \n * This + parameter requires version 1.18 of the Docker Remote API + or greater on your container instance.
\n * For tasks + that are hosted on Amazon EC2 instances, the Amazon ECS + container agent must register the available logging drivers + with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable + before containers placed on that instance can use these + log configuration options. For more information, see Amazon + ECS container agent configuration (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the Amazon Elastic Container Service Developer Guide. \n * For tasks that are on Fargate, because you don't have access to the underlying infrastructure your tasks @@ -499,9 +508,10 @@ spec: of ephemeral storage available, beyond the default amount, for tasks hosted on Fargate. For more information, see Fargate task storage (https://docs.aws.amazon.com/AmazonECS/latest/userguide/using_data_volumes.html) - in the Amazon ECS User Guide for Fargate. \n This parameter - is only supported for tasks hosted on Fargate using the following - platform versions: \n * Linux platform version 1.4.0 or later." + in the Amazon ECS User Guide for Fargate. \n For tasks using + the Fargate launch type, the task requires the following platforms: + \n * Linux platform version 1.4.0 or later. \n * Windows platform + version 1.0.0 or later." properties: sizeInGiB: format: int64 @@ -1380,6 +1390,10 @@ spec: cpu: format: int64 type: integer + credentialSpecs: + items: + type: string + type: array dependsOn: items: properties: @@ -1461,51 +1475,53 @@ spec: in a container definition override any Docker health checks that exist in the container image (such as those specified in a parent image or from the image's - Dockerfile). \n The Amazon ECS container agent only - monitors and reports on the health checks specified - in the task definition. Amazon ECS does not monitor - Docker health checks that are embedded in a container - image and not specified in the container definition. - Health check parameters that are specified in a container + Dockerfile). This configuration maps to the HEALTHCHECK + parameter of docker run (https://docs.docker.com/engine/reference/run/). + \n The Amazon ECS container agent only monitors and + reports on the health checks specified in the task + definition. Amazon ECS does not monitor Docker health + checks that are embedded in a container image and + not specified in the container definition. Health + check parameters that are specified in a container definition override any Docker health checks that exist in the container image. \n You can view the health status of both individual containers and a task with the DescribeTasks API operation or when - viewing the task details in the console. \n The following - describes the possible healthStatus values for a container: - \n * HEALTHY-The container health check has passed - successfully. \n * UNHEALTHY-The container health - check has failed. \n * UNKNOWN-The container health - check is being evaluated or there's no container health - check defined. \n The following describes the possible - healthStatus values for a task. The container health - check status of nonessential containers only affects - the health status of a task if no essential containers - have health checks defined. \n * HEALTHY-All essential - containers within the task have passed their health - checks. \n * UNHEALTHY-One or more essential containers - have failed their health check. 
\n * UNKNOWN-The essential - containers within the task are still having their - health checks evaluated or there are only nonessential - containers with health checks defined. \n If a task - is run manually, and not as part of a service, the - task will continue its lifecycle regardless of its - health status. For tasks that are part of a service, - if the task reports as unhealthy then the task will - be stopped and the service scheduler will replace - it. \n For tasks that are a part of a service and - the service uses the ECS rolling deployment type, - the deployment is paused while the new tasks have - the UNKNOWN task health check status. For example, - tasks that define health checks for nonessential containers - when no essential containers have health checks will - have the UNKNOWN health check status indefinitely - which prevents the deployment from completing. \n - The following are notes about container health check - support: \n * Container health checks require version - 1.17.0 or greater of the Amazon ECS container agent. - For more information, see Updating the Amazon ECS - container agent (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html). + viewing the task details in the console. \n The health + check is designed to make sure that your containers + survive agent restarts, upgrades, or temporary unavailability. + \n The following describes the possible healthStatus + values for a container: \n * HEALTHY-The container + health check has passed successfully. \n * UNHEALTHY-The + container health check has failed. \n * UNKNOWN-The + container health check is being evaluated or there's + no container health check defined. \n The following + describes the possible healthStatus values for a task. + The container health check status of non-essential + containers doesn't have an effect on the health status + of a task. \n * HEALTHY-All essential containers within + the task have passed their health checks. \n * UNHEALTHY-One + or more essential containers have failed their health + check. \n * UNKNOWN-The essential containers within + the task are still having their health checks evaluated, + there are only nonessential containers with health + checks defined, or there are no container health checks + defined. \n If a task is run manually, and not as + part of a service, the task will continue its lifecycle + regardless of its health status. For tasks that are + part of a service, if the task reports as unhealthy + then the task will be stopped and the service scheduler + will replace it. \n The following are notes about + container health check support: \n * When the Amazon + ECS agent cannot connect to the Amazon ECS service, + the service reports the container as UNHEALTHY. \n + * The health check statuses are the \"last heard from\" + response from the Amazon ECS agent. There are no assumptions + made about the status of the container health checks. + \n * Container health checks require version 1.17.0 + or greater of the Amazon ECS container agent. For + more information, see Updating the Amazon ECS container + agent (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html). \n * Container health checks are supported for Fargate tasks if you're using platform version 1.1.0 or greater. For more information, see Fargate platform versions @@ -1542,8 +1558,9 @@ spec: type: string type: array linuxParameters: - description: Linux-specific options that are applied - to the container, such as Linux KernelCapabilities.
+ description: The Linux-specific options that are applied + to the container, such as Linux KernelCapabilities + (https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_KernelCapabilities.html). properties: capabilities: description: The Linux capabilities for the container @@ -1621,19 +1638,22 @@ spec: in the Docker documentation. \n Understand the following when specifying a log configuration for your containers. \n * Amazon ECS currently supports a subset of the - logging drivers available to the Docker daemon (shown - in the valid values below). Additional log drivers - may be available in future releases of the Amazon - ECS container agent. \n * This parameter requires - version 1.18 of the Docker Remote API or greater on - your container instance. \n * For tasks that are hosted - on Amazon EC2 instances, the Amazon ECS container - agent must register the available logging drivers - with the ECS_AVAILABLE_LOGGING_DRIVERS environment - variable before containers placed on that instance - can use these log configuration options. For more - information, see Amazon ECS container agent configuration - (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) + logging drivers available to the Docker daemon. Additional + log drivers may be available in future releases of + the Amazon ECS container agent. For tasks on Fargate, + the supported log drivers are awslogs, splunk, and + awsfirelens. For tasks hosted on Amazon EC2 instances, + the supported log drivers are awslogs, fluentd, gelf, + json-file, journald, logentries, syslog, splunk, and + awsfirelens. \n * This parameter requires version + 1.18 of the Docker Remote API or greater on your container + instance. \n * For tasks that are hosted on Amazon + EC2 instances, the Amazon ECS container agent must + register the available logging drivers with the ECS_AVAILABLE_LOGGING_DRIVERS + environment variable before containers placed on that + instance can use these log configuration options. + For more information, see Amazon ECS container agent + configuration (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the Amazon Elastic Container Service Developer Guide. \n * For tasks that are on Fargate, because you don't have access to the underlying infrastructure @@ -1781,10 +1801,10 @@ spec: amount of ephemeral storage available, beyond the default amount, for tasks hosted on Fargate. For more information, see Fargate task storage (https://docs.aws.amazon.com/AmazonECS/latest/userguide/using_data_volumes.html) - in the Amazon ECS User Guide for Fargate. \n This parameter - is only supported for tasks hosted on Fargate using Linux - platform version 1.4.0 or later. This parameter is not supported - for Windows containers on Fargate." + in the Amazon ECS User Guide for Fargate. \n For tasks using + the Fargate launch type, the task requires the following + platforms: \n * Linux platform version 1.4.0 or later. \n + * Windows platform version 1.0.0 or later." properties: sizeInGiB: format: int64 diff --git a/package/crds/efs.aws.crossplane.io_accesspoints.yaml b/package/crds/efs.aws.crossplane.io_accesspoints.yaml index 09695ad9ed..cee95d6256 100644 --- a/package/crds/efs.aws.crossplane.io_accesspoints.yaml +++ b/package/crds/efs.aws.crossplane.io_accesspoints.yaml @@ -452,7 +452,7 @@ spec: the Name tag.
type: string ownerID: - description: Identified the Amazon Web Services account that owns + description: Identifies the Amazon Web Services account that owns the access point resource. type: string type: object diff --git a/package/crds/efs.aws.crossplane.io_filesystems.yaml b/package/crds/efs.aws.crossplane.io_filesystems.yaml index d9d2e11685..67b6ad15df 100644 --- a/package/crds/efs.aws.crossplane.io_filesystems.yaml +++ b/package/crds/efs.aws.crossplane.io_filesystems.yaml @@ -481,8 +481,7 @@ spec: type: integer ownerID: description: The Amazon Web Services account that created the - file system. If the file system was created by an IAM user, - the parent account to which the user belongs is the owner. + file system. type: string sizeInBytes: description: The latest known metered size (in bytes) of data diff --git a/package/crds/elasticache.aws.crossplane.io_cacheparametergroups.yaml b/package/crds/elasticache.aws.crossplane.io_cacheparametergroups.yaml index bf33f8c1a2..ccd4dca556 100644 --- a/package/crds/elasticache.aws.crossplane.io_cacheparametergroups.yaml +++ b/package/crds/elasticache.aws.crossplane.io_cacheparametergroups.yaml @@ -73,7 +73,7 @@ spec: description: "The name of the cache parameter group family that the cache parameter group can be used with. \n Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | - redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x" + redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x | redis7" type: string description: description: A user-specified description for the cache parameter diff --git a/package/crds/elbv2.aws.crossplane.io_loadbalancers.yaml b/package/crds/elbv2.aws.crossplane.io_loadbalancers.yaml index 7bc90ae363..dba94f5340 100644 --- a/package/crds/elbv2.aws.crossplane.io_loadbalancers.yaml +++ b/package/crds/elbv2.aws.crossplane.io_loadbalancers.yaml @@ -179,8 +179,8 @@ spec: type: object type: object securityGroups: - description: '[Application Load Balancers] The IDs of the security - groups for the load balancer.' + description: '[Application Load Balancers and Network Load Balancers] + The IDs of the security groups for the load balancer.' items: type: string type: array @@ -559,6 +559,11 @@ spec: dnsName: description: The public DNS name of the load balancer. type: string + enforceSecurityGroupInboundRulesOnPrivateLinkTraffic: + description: Indicates whether to evaluate inbound security group + rules for traffic sent to a Network Load Balancer through Amazon + Web Services PrivateLink. + type: string loadBalancerARN: description: The Amazon Resource Name (ARN) of the load balancer. type: string diff --git a/package/crds/elbv2.aws.crossplane.io_targetgroups.yaml b/package/crds/elbv2.aws.crossplane.io_targetgroups.yaml index c85ecb768b..3751f11e32 100644 --- a/package/crds/elbv2.aws.crossplane.io_targetgroups.yaml +++ b/package/crds/elbv2.aws.crossplane.io_targetgroups.yaml @@ -120,7 +120,7 @@ spec: required before considering a target healthy. The range is 2-10. If the target group protocol is TCP, TCP_UDP, UDP, TLS, HTTP or HTTPS, the default is 5. For target groups with a protocol - of GENEVE, the default is 3. If the target type is lambda, the + of GENEVE, the default is 5. If the target type is lambda, the default is 5. format: int64 type: integer @@ -202,7 +202,7 @@ spec: before considering a target unhealthy. The range is 2-10. If the target group protocol is TCP, TCP_UDP, UDP, TLS, HTTP or HTTPS, the default is 2. For target groups with a protocol of - GENEVE, the default is 3. 
If the target type is lambda, the + GENEVE, the default is 2. If the target type is lambda, the default is 5. format: int64 type: integer diff --git a/package/crds/emrcontainers.aws.crossplane.io_jobruns.yaml b/package/crds/emrcontainers.aws.crossplane.io_jobruns.yaml index f0bdb088d2..c90148d9ba 100644 --- a/package/crds/emrcontainers.aws.crossplane.io_jobruns.yaml +++ b/package/crds/emrcontainers.aws.crossplane.io_jobruns.yaml @@ -112,6 +112,13 @@ spec: description: The Amazon EMR release version to use for the job run. type: string + retryPolicyConfiguration: + description: The retry policy configuration for the job run. + properties: + maxAttempts: + format: int64 + type: integer + type: object tags: additionalProperties: type: string diff --git a/package/crds/emrcontainers.aws.crossplane.io_virtualclusters.yaml b/package/crds/emrcontainers.aws.crossplane.io_virtualclusters.yaml index f3a5af0d41..c96c01f36f 100644 --- a/package/crds/emrcontainers.aws.crossplane.io_virtualclusters.yaml +++ b/package/crds/emrcontainers.aws.crossplane.io_virtualclusters.yaml @@ -78,7 +78,7 @@ spec: a job run or a managed endpoint. properties: eksInfo: - description: The information about the EKS cluster. + description: The information about the Amazon EKS cluster. properties: namespace: type: string diff --git a/package/crds/glue.aws.crossplane.io_databases.yaml b/package/crds/glue.aws.crossplane.io_databases.yaml index 7cd9877680..b5a1c3038d 100644 --- a/package/crds/glue.aws.crossplane.io_databases.yaml +++ b/package/crds/glue.aws.crossplane.io_databases.yaml @@ -115,6 +115,8 @@ spec: type: string databaseName: type: string + region: + type: string type: object type: object region: diff --git a/package/crds/glue.aws.crossplane.io_jobs.yaml b/package/crds/glue.aws.crossplane.io_jobs.yaml index e94bdf9ba2..3a9e83c087 100644 --- a/package/crds/glue.aws.crossplane.io_jobs.yaml +++ b/package/crds/glue.aws.crossplane.io_jobs.yaml @@ -108,6 +108,280 @@ spec: name: type: string type: object + amazonRedshiftSource: + description: Specifies an Amazon Redshift source. + properties: + data: + description: Specifies an Amazon Redshift node. + properties: + accessType: + type: string + action: + type: string + advancedOptions: + items: + properties: + key: + type: string + value: + type: string + type: object + type: array + catalogDatabase: + description: Specifies an option value. + properties: + description: + type: string + label: + type: string + value: + type: string + type: object + catalogRedshiftSchema: + type: string + catalogRedshiftTable: + type: string + catalogTable: + description: Specifies an option value. + properties: + description: + type: string + label: + type: string + value: + type: string + type: object + connection: + description: Specifies an option value. + properties: + description: + type: string + label: + type: string + value: + type: string + type: object + crawlerConnection: + type: string + iamRole: + description: Specifies an option value. + properties: + description: + type: string + label: + type: string + value: + type: string + type: object + mergeAction: + type: string + mergeClause: + type: string + mergeWhenMatched: + type: string + mergeWhenNotMatched: + type: string + postAction: + type: string + preAction: + type: string + sampleQuery: + type: string + schema: + description: Specifies an option value. 
+ properties: + description: + type: string + label: + type: string + value: + type: string + type: object + selectedColumns: + items: + properties: + description: + type: string + label: + type: string + value: + type: string + type: object + type: array + sourceType: + type: string + stagingTable: + type: string + table: + description: Specifies an option value. + properties: + description: + type: string + label: + type: string + value: + type: string + type: object + tablePrefix: + type: string + tableSchema: + items: + properties: + description: + type: string + label: + type: string + value: + type: string + type: object + type: array + tempDir: + type: string + upsert: + type: boolean + type: object + name: + type: string + type: object + amazonRedshiftTarget: + description: Specifies an Amazon Redshift target. + properties: + data: + description: Specifies an Amazon Redshift node. + properties: + accessType: + type: string + action: + type: string + advancedOptions: + items: + properties: + key: + type: string + value: + type: string + type: object + type: array + catalogDatabase: + description: Specifies an option value. + properties: + description: + type: string + label: + type: string + value: + type: string + type: object + catalogRedshiftSchema: + type: string + catalogRedshiftTable: + type: string + catalogTable: + description: Specifies an option value. + properties: + description: + type: string + label: + type: string + value: + type: string + type: object + connection: + description: Specifies an option value. + properties: + description: + type: string + label: + type: string + value: + type: string + type: object + crawlerConnection: + type: string + iamRole: + description: Specifies an option value. + properties: + description: + type: string + label: + type: string + value: + type: string + type: object + mergeAction: + type: string + mergeClause: + type: string + mergeWhenMatched: + type: string + mergeWhenNotMatched: + type: string + postAction: + type: string + preAction: + type: string + sampleQuery: + type: string + schema: + description: Specifies an option value. + properties: + description: + type: string + label: + type: string + value: + type: string + type: object + selectedColumns: + items: + properties: + description: + type: string + label: + type: string + value: + type: string + type: object + type: array + sourceType: + type: string + stagingTable: + type: string + table: + description: Specifies an option value. + properties: + description: + type: string + label: + type: string + value: + type: string + type: object + tablePrefix: + type: string + tableSchema: + items: + properties: + description: + type: string + label: + type: string + value: + type: string + type: object + type: array + tempDir: + type: string + upsert: + type: boolean + type: object + inputs: + items: + type: string + type: array + name: + type: string + type: object applyMapping: description: Specifies a transform that maps data property keys in the data source to data property keys in the data @@ -169,6 +443,64 @@ spec: schemaName: type: string type: object + catalogDeltaSource: + description: Specifies a Delta Lake data source that is + registered in the Glue Data Catalog. 
+ properties: + additionalDeltaOptions: + additionalProperties: + type: string + type: object + database: + type: string + name: + type: string + outputSchemas: + items: + properties: + columns: + items: + properties: + name: + type: string + type_: + type: string + type: object + type: array + type: object + type: array + table: + type: string + type: object + catalogHudiSource: + description: Specifies a Hudi data source that is registered + in the Glue Data Catalog. + properties: + additionalHudiOptions: + additionalProperties: + type: string + type: object + database: + type: string + name: + type: string + outputSchemas: + items: + properties: + columns: + items: + properties: + name: + type: string + type_: + type: string + type: object + type: array + type: object + type: array + table: + type: string + type: object catalogKafkaSource: description: Specifies an Apache Kafka data store in the Data Catalog. @@ -193,6 +525,8 @@ spec: streamingOptions: description: Additional options for streaming. properties: + addRecordTimestamp: + type: string assign: type: string bootstrapServers: @@ -203,8 +537,12 @@ spec: type: string delimiter: type: string + emitConsumerLagMetrics: + type: string endingOffsets: type: string + includeHeaders: + type: boolean maxOffsetsPerTrigger: format: int64 type: integer @@ -224,6 +562,9 @@ spec: type: string startingOffsets: type: string + startingTimestamp: + format: date-time + type: string subscribePattern: type: string topicName: @@ -262,6 +603,8 @@ spec: properties: addIdleTimeBetweenReads: type: boolean + addRecordTimestamp: + type: string avoidEmptyBatches: type: boolean classification: @@ -271,6 +614,8 @@ spec: describeShardInterval: format: int64 type: integer + emitConsumerLagMetrics: + type: string endpointURL: type: string idleTimeBetweenReadsInMs: @@ -300,6 +645,9 @@ spec: type: string startingPosition: type: string + startingTimestamp: + format: date-time + type: string streamARN: type: string streamName: @@ -366,6 +714,22 @@ spec: type: object type: array type: object + directJDBCSource: + description: Specifies the direct JDBC source connection. + properties: + connectionName: + type: string + connectionType: + type: string + database: + type: string + name: + type: string + redshiftTmpDir: + type: string + table: + type: string + type: object directKafkaSource: description: Specifies an Apache Kafka data store. properties: @@ -387,6 +751,8 @@ spec: streamingOptions: description: Additional options for streaming. 
properties: + addRecordTimestamp: + type: string assign: type: string bootstrapServers: @@ -397,8 +763,12 @@ spec: type: string delimiter: type: string + emitConsumerLagMetrics: + type: string endingOffsets: type: string + includeHeaders: + type: boolean maxOffsetsPerTrigger: format: int64 type: integer @@ -418,6 +788,9 @@ spec: type: string startingOffsets: type: string + startingTimestamp: + format: date-time + type: string subscribePattern: type: string topicName: @@ -451,6 +824,8 @@ spec: properties: addIdleTimeBetweenReads: type: boolean + addRecordTimestamp: + type: string avoidEmptyBatches: type: boolean classification: @@ -460,6 +835,8 @@ spec: describeShardInterval: format: int64 type: integer + emitConsumerLagMetrics: + type: string endpointURL: type: string idleTimeBetweenReadsInMs: @@ -489,6 +866,9 @@ spec: type: string startingPosition: type: string + startingTimestamp: + format: date-time + type: string streamARN: type: string streamName: @@ -586,6 +966,20 @@ spec: type: array name: type: string + outputSchemas: + items: + properties: + columns: + items: + properties: + name: + type: string + type_: + type: string + type: object + type: array + type: object + type: array parameters: items: properties: @@ -659,24 +1053,64 @@ spec: type: string type: object type: object - fillMissingValues: - description: Specifies a transform that locates records - in the dataset that have missing values and adds a new - field with a value determined by imputation. The input - data set is used to train the machine learning model that - determines what the missing value should be. + evaluateDataQualityMultiFrame: + description: Specifies your data quality evaluation criteria. properties: - filledPath: - type: string - imputedPath: - type: string + additionalDataSources: + additionalProperties: + type: string + type: object + additionalOptions: + additionalProperties: + type: string + type: object inputs: items: type: string type: array name: type: string - type: object + publishingOptions: + description: Options to configure how your data quality + evaluation results are published. + properties: + cloudWatchMetricsEnabled: + type: boolean + evaluationContext: + type: string + resultsPublishingEnabled: + type: boolean + resultsS3Prefix: + type: string + type: object + ruleset: + type: string + stopJobOnFailureOptions: + description: Options to configure how your job will + stop if your data quality evaluation fails. + properties: + stopJobOnFailureTiming: + type: string + type: object + type: object + fillMissingValues: + description: Specifies a transform that locates records + in the dataset that have missing values and adds a new + field with a value determined by imputation. The input + data set is used to train the machine learning model that + determines what the missing value should be. + properties: + filledPath: + type: string + imputedPath: + type: string + inputs: + items: + type: string + type: array + name: + type: string + type: object filter: description: Specifies a transform that splits a dataset into two, based on a filter condition. @@ -1032,6 +1466,25 @@ spec: table: type: string type: object + recipe: + description: A Glue Studio node that uses a Glue DataBrew + recipe in Glue jobs. + properties: + inputs: + items: + type: string + type: array + name: + type: string + recipeReference: + description: A reference to a Glue DataBrew recipe. 
+ properties: + recipeARN: + type: string + recipeVersion: + type: string + type: object + type: object redshiftSource: description: Specifies an Amazon Redshift data store. properties: @@ -1107,6 +1560,66 @@ spec: type: string type: array type: object + s3CatalogDeltaSource: + description: Specifies a Delta Lake data source that is + registered in the Glue Data Catalog. The data source must + be stored in Amazon S3. + properties: + additionalDeltaOptions: + additionalProperties: + type: string + type: object + database: + type: string + name: + type: string + outputSchemas: + items: + properties: + columns: + items: + properties: + name: + type: string + type_: + type: string + type: object + type: array + type: object + type: array + table: + type: string + type: object + s3CatalogHudiSource: + description: Specifies a Hudi data source that is registered + in the Glue Data Catalog. The Hudi data source must be + stored in Amazon S3. + properties: + additionalHudiOptions: + additionalProperties: + type: string + type: object + database: + type: string + name: + type: string + outputSchemas: + items: + properties: + columns: + items: + properties: + name: + type: string + type_: + type: string + type: object + type: array + type: object + type: array + table: + type: string + type: object s3CatalogSource: description: Specifies an Amazon S3 data store in the Glue Data Catalog. @@ -1235,6 +1748,124 @@ spec: writeHeader: type: boolean type: object + s3DeltaCatalogTarget: + description: Specifies a target that writes to a Delta Lake + data source in the Glue Data Catalog. + properties: + additionalOptions: + additionalProperties: + type: string + type: object + database: + type: string + inputs: + items: + type: string + type: array + name: + type: string + partitionKeys: + items: + items: + type: string + type: array + type: array + schemaChangePolicy: + description: A policy that specifies update behavior + for the crawler. + properties: + enableUpdateCatalog: + type: boolean + updateBehavior: + type: string + type: object + table: + type: string + type: object + s3DeltaDirectTarget: + description: Specifies a target that writes to a Delta Lake + data source in Amazon S3. + properties: + additionalOptions: + additionalProperties: + type: string + type: object + compression: + type: string + format: + type: string + inputs: + items: + type: string + type: array + name: + type: string + partitionKeys: + items: + items: + type: string + type: array + type: array + path: + type: string + schemaChangePolicy: + description: A policy that specifies update behavior + for the crawler. + properties: + database: + type: string + enableUpdateCatalog: + type: boolean + table: + type: string + updateBehavior: + type: string + type: object + type: object + s3DeltaSource: + description: Specifies a Delta Lake data source stored in + Amazon S3. + properties: + additionalDeltaOptions: + additionalProperties: + type: string + type: object + additionalOptions: + description: Specifies additional connection options + for the Amazon S3 data store. 
+ properties: + boundedFiles: + format: int64 + type: integer + boundedSize: + format: int64 + type: integer + enableSamplePath: + type: boolean + samplePath: + type: string + type: object + name: + type: string + outputSchemas: + items: + properties: + columns: + items: + properties: + name: + type: string + type_: + type: string + type: object + type: array + type: object + type: array + paths: + items: + type: string + type: array + type: object s3DirectTarget: description: Specifies a data target that writes to Amazon S3. @@ -1305,6 +1936,124 @@ spec: type: string type: object type: object + s3HudiCatalogTarget: + description: Specifies a target that writes to a Hudi data + source in the Glue Data Catalog. + properties: + additionalOptions: + additionalProperties: + type: string + type: object + database: + type: string + inputs: + items: + type: string + type: array + name: + type: string + partitionKeys: + items: + items: + type: string + type: array + type: array + schemaChangePolicy: + description: A policy that specifies update behavior + for the crawler. + properties: + enableUpdateCatalog: + type: boolean + updateBehavior: + type: string + type: object + table: + type: string + type: object + s3HudiDirectTarget: + description: Specifies a target that writes to a Hudi data + source in Amazon S3. + properties: + additionalOptions: + additionalProperties: + type: string + type: object + compression: + type: string + format: + type: string + inputs: + items: + type: string + type: array + name: + type: string + partitionKeys: + items: + items: + type: string + type: array + type: array + path: + type: string + schemaChangePolicy: + description: A policy that specifies update behavior + for the crawler. + properties: + database: + type: string + enableUpdateCatalog: + type: boolean + table: + type: string + updateBehavior: + type: string + type: object + type: object + s3HudiSource: + description: Specifies a Hudi data source stored in Amazon + S3. + properties: + additionalHudiOptions: + additionalProperties: + type: string + type: object + additionalOptions: + description: Specifies additional connection options + for the Amazon S3 data store. + properties: + boundedFiles: + format: int64 + type: integer + boundedSize: + format: int64 + type: integer + enableSamplePath: + type: boolean + samplePath: + type: string + type: object + name: + type: string + outputSchemas: + items: + properties: + columns: + items: + properties: + name: + type: string + type_: + type: string + type: object + type: array + type: object + type: array + paths: + items: + type: string + type: array + type: object s3JSONSource: description: Specifies a JSON data store stored in Amazon S3. @@ -1457,6 +2206,202 @@ spec: name: type: string type: object + snowflakeSource: + description: Specifies a Snowflake data source. + properties: + data: + description: Specifies configuration for Snowflake nodes + in Glue Studio. + properties: + action: + type: string + additionalOptions: + additionalProperties: + type: string + type: object + autoPushdown: + type: boolean + connection: + description: Specifies an option value. + properties: + description: + type: string + label: + type: string + value: + type: string + type: object + database: + type: string + iamRole: + description: Specifies an option value. 
+ properties: + description: + type: string + label: + type: string + value: + type: string + type: object + mergeAction: + type: string + mergeClause: + type: string + mergeWhenMatched: + type: string + mergeWhenNotMatched: + type: string + postAction: + type: string + preAction: + type: string + sampleQuery: + type: string + schema: + type: string + selectedColumns: + items: + properties: + description: + type: string + label: + type: string + value: + type: string + type: object + type: array + sourceType: + type: string + stagingTable: + type: string + table: + type: string + tableSchema: + items: + properties: + description: + type: string + label: + type: string + value: + type: string + type: object + type: array + tempDir: + type: string + upsert: + type: boolean + type: object + name: + type: string + outputSchemas: + items: + properties: + columns: + items: + properties: + name: + type: string + type_: + type: string + type: object + type: array + type: object + type: array + type: object + snowflakeTarget: + description: Specifies a Snowflake target. + properties: + data: + description: Specifies configuration for Snowflake nodes + in Glue Studio. + properties: + action: + type: string + additionalOptions: + additionalProperties: + type: string + type: object + autoPushdown: + type: boolean + connection: + description: Specifies an option value. + properties: + description: + type: string + label: + type: string + value: + type: string + type: object + database: + type: string + iamRole: + description: Specifies an option value. + properties: + description: + type: string + label: + type: string + value: + type: string + type: object + mergeAction: + type: string + mergeClause: + type: string + mergeWhenMatched: + type: string + mergeWhenNotMatched: + type: string + postAction: + type: string + preAction: + type: string + sampleQuery: + type: string + schema: + type: string + selectedColumns: + items: + properties: + description: + type: string + label: + type: string + value: + type: string + type: object + type: array + sourceType: + type: string + stagingTable: + type: string + table: + type: string + tableSchema: + items: + properties: + description: + type: string + label: + type: string + value: + type: string + type: object + type: array + tempDir: + type: string + upsert: + type: boolean + type: object + inputs: + items: + type: string + type: array + name: + type: string + type: object sparkConnectorSource: description: Specifies a connector to an Apache Spark data source. @@ -1622,6 +2567,8 @@ spec: type: string pythonVersion: type: string + runtime: + type: string scriptLocation: type: string type: object @@ -1711,18 +2658,22 @@ spec: defaultArguments: additionalProperties: type: string - description: "The default arguments for this job. \n You can specify - arguments here that your own job-execution script consumes, - as well as arguments that Glue itself consumes. \n Job arguments - may be logged. Do not pass plaintext secrets as arguments. Retrieve + description: "The default arguments for every run of this job, + specified as name-value pairs. \n You can specify arguments + here that your own job-execution script consumes, as well as + arguments that Glue itself consumes. \n Job arguments may be + logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the Job. 
\n For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html) - topic in the developer guide. \n For information about the key-value - pairs that Glue consumes to set up your job, see the Special - Parameters Used by Glue (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html) - topic in the developer guide." + topic in the developer guide. \n For information about the arguments + you can provide to this field when configuring Spark jobs, see + the Special Parameters Used by Glue (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html) + topic in the developer guide. \n For information about the arguments + you can provide to this field when configuring Ray jobs, see + Using job parameters in Ray jobs (https://docs.aws.amazon.com/glue/latest/dg/author-job-ray-job-parameters.html) + in the developer guide." type: object description: description: Description of the job being defined. @@ -1747,11 +2698,15 @@ spec: type: integer type: object glueVersion: - description: "Glue version determines the versions of Apache Spark - and Python that Glue supports. The Python version indicates - the version supported for jobs of type Spark. \n For more information - about the available Glue versions and corresponding Spark and - Python versions, see Glue version (https://docs.aws.amazon.com/glue/latest/dg/add-job.html) + description: "In Spark jobs, GlueVersion determines the versions + of Apache Spark and Python that Glue makes available in a job. The + Python version indicates the version supported for jobs of type + Spark. \n Ray jobs should set GlueVersion to 4.0 or greater. + However, the versions of Ray, Python, and additional libraries + available in your Ray job are determined by the Runtime parameter + of the Job command. \n For more information about the available + Glue versions and corresponding Spark and Python versions, see + Glue version (https://docs.aws.amazon.com/glue/latest/dg/add-job.html) in the developer guide. \n Jobs that are created without specifying a Glue version default to Glue 0.9." type: string @@ -1765,18 +2720,18 @@ spec: relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page (https://aws.amazon.com/glue/pricing/). - \n Do not set Max Capacity if using WorkerType and NumberOfWorkers. - \n The value that can be allocated for MaxCapacity depends on - whether you are running a Python shell job or an Apache Spark - ETL job: \n * When you specify a Python shell job (JobCommand.Name=\"pythonshell\"), + \n For Glue version 2.0+ jobs, you cannot specify a Maximum + capacity. Instead, you should specify a Worker type and the + Number of workers. \n Do not set MaxCapacity if using WorkerType + and NumberOfWorkers. \n The value that can be allocated for + MaxCapacity depends on whether you are running a Python shell + job, an Apache Spark ETL job, or an Apache Spark streaming ETL + job: \n * When you specify a Python shell job (JobCommand.Name=\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. \n * When you specify an Apache Spark ETL job (JobCommand.Name=\"glueetl\") or Apache Spark streaming ETL job (JobCommand.Name=\"gluestreaming\"), - you can allocate a minimum of 2 DPUs. The default is 10 DPUs. - This job type cannot have a fractional DPU allocation.
\n For - Glue version 2.0 jobs, you cannot instead specify a Maximum - capacity. Instead, you should specify a Worker type and the - Number of workers." + you can allocate from 2 to 100 DPUs. The default is 10 DPUs. + This job type cannot have a fractional DPU allocation." type: number maxRetries: description: The maximum number of times to retry this job if @@ -1786,8 +2741,9 @@ spec: nonOverridableArguments: additionalProperties: type: string - description: Non-overridable arguments for this job, specified - as name-value pairs. + description: Arguments for this job that are not overridden when + providing job arguments in a job run, specified as name-value + pairs. type: object notificationProperty: description: Specifies configuration properties of a job notification. @@ -2001,20 +2957,43 @@ spec: type: integer workerType: description: "The type of predefined worker that is allocated - when a job runs. Accepts a value of Standard, G.1X, G.2X, or - G.025X. \n * For the Standard worker type, each worker provides - 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per - worker. \n * For the G.1X worker type, each worker maps to 1 - DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor - per worker. We recommend this worker type for memory-intensive - jobs. \n * For the G.2X worker type, each worker maps to 2 DPU - (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor - per worker. We recommend this worker type for memory-intensive - jobs. \n * For the G.025X worker type, each worker maps to 0.25 - DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor - per worker. We recommend this worker type for low volume streaming + when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X, or + G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. + \n * For the G.1X worker type, each worker maps to 1 DPU (4 + vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), + and provides 1 executor per worker. We recommend this worker + type for workloads such as data transforms, joins, and queries, + offering a scalable and cost-effective way to run most jobs. + \n * For the G.2X worker type, each worker maps to 2 DPU (8 + vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB + free), and provides 1 executor per worker. We recommend this + worker type for workloads such as data transforms, joins, and + queries, offering a scalable and cost-effective way to run + most jobs. \n * For the G.4X worker type, each worker maps to + 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately + 235GB free), and provides 1 executor per worker. We recommend + this worker type for jobs whose workloads contain your most + demanding transforms, aggregations, joins, and queries. This + worker type is available only for Glue version 3.0 or later + Spark ETL jobs in the following Amazon Web Services Regions: + US East (Ohio), US East (N. Virginia), US West (Oregon), Asia + Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), + Canada (Central), Europe (Frankfurt), Europe (Ireland), and + Europe (Stockholm). \n * For the G.8X worker type, each worker + maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately + 487GB free), and provides 1 executor per worker. We recommend + this worker type for jobs whose workloads contain your most + demanding transforms, aggregations, joins, and queries.
This + worker type is available only for Glue version 3.0 or later + Spark ETL jobs, in the same Amazon Web Services Regions as supported + for the G.4X worker type. \n * For the G.025X worker type, each + worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB + disk (approximately 34GB free), and provides 1 executor per + worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 - streaming jobs." + streaming jobs. \n * For the Z.2X worker type, each worker maps + to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk (approximately + 120GB free), and provides up to 8 Ray workers based on the autoscaler." type: string required: - command diff --git a/package/crds/iam.aws.crossplane.io_instanceprofiles.yaml b/package/crds/iam.aws.crossplane.io_instanceprofiles.yaml index e7ae8283a3..97d865a222 100644 --- a/package/crds/iam.aws.crossplane.io_instanceprofiles.yaml +++ b/package/crds/iam.aws.crossplane.io_instanceprofiles.yaml @@ -450,7 +450,7 @@ spec: within the last year. The role might have been used more than 400 days ago. For more information, see Regions where data is tracked (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#access-advisor_tracking-period) - in the IAM User Guide. \n This data type is returned as + in the IAM User Guide. \n This data type is returned as a response element in the GetRole and GetAccountAuthorizationDetails operations." properties: diff --git a/package/crds/iam.aws.crossplane.io_servicelinkedroles.yaml b/package/crds/iam.aws.crossplane.io_servicelinkedroles.yaml index 933bc8c36d..b0b26d5cfe 100644 --- a/package/crds/iam.aws.crossplane.io_servicelinkedroles.yaml +++ b/package/crds/iam.aws.crossplane.io_servicelinkedroles.yaml @@ -356,7 +356,7 @@ spec: began supporting these features within the last year. The role might have been used more than 400 days ago. For more information, see Regions where data is tracked (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#access-advisor_tracking-period) - in the IAM User Guide. + in the IAM User Guide. properties: lastUsedDate: format: date-time diff --git a/package/crds/kms.aws.crossplane.io_keys.yaml b/package/crds/kms.aws.crossplane.io_keys.yaml index a4998b889b..985613168c 100644 --- a/package/crds/kms.aws.crossplane.io_keys.yaml +++ b/package/crds/kms.aws.crossplane.io_keys.yaml @@ -68,16 +68,15 @@ spec: description: KeyParameters defines the desired state of Key properties: bypassPolicyLockoutSafetyCheck: - description: "A flag to indicate whether to bypass the key policy - lockout safety check. \n Setting this value to true increases - the risk that the KMS key becomes unmanageable. Do not set this - value to true indiscriminately. \n For more information, refer - to the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) - section in the Key Management Service Developer Guide . \n Use - this parameter only when you include a policy in the request - and you intend to prevent the principal that is making the request - from making a subsequent PutKeyPolicy request on the KMS key. - \n The default value is false." + description: "Skips (\"bypasses\") the key policy lockout safety + check. The default value is false. \n Setting this value to + true increases the risk that the KMS key becomes unmanageable. + Do not set this value to true indiscriminately.
\n For more + information, see Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key) + in the Key Management Service Developer Guide. \n Use this parameter + only when you intend to prevent the principal that is making + the request from making a subsequent PutKeyPolicy request on + the KMS key." type: boolean customKeyStoreID: description: "Creates the KMS key in the specified custom key @@ -101,11 +100,13 @@ spec: both parameters." type: string description: - description: "A description of the KMS key. \n Use a description + description: "A description of the KMS key. Use a description that helps you decide whether the KMS key is appropriate for a task. The default value is an empty string (no description). - \n To set or change the description after the key is created, - use UpdateKeyDescription." + \n Do not include confidential or sensitive information in this + field. This field may be displayed in plaintext in CloudTrail + logs and other output. \n To set or change the description after + the key is created, use UpdateKeyDescription." type: string enableKeyRotation: description: Specifies if key rotation is enabled for the corresponding @@ -210,25 +211,23 @@ spec: policy: description: "The key policy to attach to the KMS key. \n If you provide a key policy, it must meet the following criteria: \n - * If you don't set BypassPolicyLockoutSafetyCheck to true, the - key policy must allow the principal that is making the CreateKey - request to make a subsequent PutKeyPolicy request on the KMS - key. This reduces the risk that the KMS key becomes unmanageable. - For more information, refer to the scenario in the Default Key - Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) - section of the Key Management Service Developer Guide . \n * + * The key policy must allow the calling principal to make a + subsequent PutKeyPolicy request on the KMS key. This reduces + the risk that the KMS key becomes unmanageable. For more information, + see Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key) + in the Key Management Service Developer Guide. (To omit this + condition, set BypassPolicyLockoutSafetyCheck to true.) \n * Each statement in the key policy must contain one or more principals. The principals in the key policy must exist and be visible to - KMS. When you create a new Amazon Web Services principal (for - example, an IAM user or role), you might need to enforce a delay - before including the new principal in a key policy because the - new principal might not be immediately visible to KMS. For more - information, see Changes that I make are not always immediately - visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) + KMS. When you create a new Amazon Web Services principal, you + might need to enforce a delay before including the new principal + in a key policy because the new principal might not be immediately + visible to KMS. For more information, see Changes that I make + are not always immediately visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) in the Amazon Web Services Identity and Access Management User Guide. \n If you do not provide a key policy, KMS attaches a default key policy to the KMS key. 
For more information, see - Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default) + Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default) in the Key Management Service Developer Guide. \n The key policy size quota is 32 kilobytes (32768 bytes). \n For help writing and formatting a JSON policy document, see the IAM JSON Policy @@ -241,9 +240,11 @@ spec: tags: description: "Assigns one or more tags to the KMS key. Use this parameter to tag the KMS key when it is created. To tag an existing - KMS key, use the TagResource operation. \n Tagging or untagging - a KMS key can allow or deny permission to the KMS key. For details, - see ABAC for KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) + KMS key, use the TagResource operation. \n Do not include confidential + or sensitive information in this field. This field may be displayed + in plaintext in CloudTrail logs and other output. \n Tagging + or untagging a KMS key can allow or deny permission to the KMS + key. For details, see ABAC for KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) in the Key Management Service Developer Guide. \n To use this parameter, you must have kms:TagResource (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) permission in an IAM policy. \n Each tag consists of a tag key diff --git a/package/crds/lambda.aws.crossplane.io_functions.yaml b/package/crds/lambda.aws.crossplane.io_functions.yaml index b61c23357a..e307bd25b7 100644 --- a/package/crds/lambda.aws.crossplane.io_functions.yaml +++ b/package/crds/lambda.aws.crossplane.io_functions.yaml @@ -1158,9 +1158,17 @@ spec: type: string type: object kmsKeyARN: - description: The ARN of the Key Management Service (KMS) key that's - used to encrypt your function's environment variables. If it's - not provided, Lambda uses a default service key. + description: The ARN of the Key Management Service (KMS) customer + managed key that's used to encrypt your function's environment + variables (https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html#configuration-envvars-encryption). + When Lambda SnapStart (https://docs.aws.amazon.com/lambda/latest/dg/snapstart-security.html) + is activated, Lambda also uses this key to encrypt your function's + snapshot. If you deploy your function using a container image, + Lambda also uses this key to encrypt your function when it's + deployed. Note that this is not the same key that's used to + protect your container image in the Amazon Elastic Container + Registry (Amazon ECR). If you don't provide a customer managed + key, Lambda uses a default service key. type: string kmsKeyARNRef: description: KMSKeyARNRef is a reference to a kms key used to @@ -1341,9 +1349,10 @@ spec: type: object type: object runtime: - description: The identifier of the function's runtime (https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html). + description: "The identifier of the function's runtime (https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html). Runtime is required if the deployment package is a .zip file - archive. + archive. \n The following list includes deprecated runtimes. + For more information, see Runtime deprecation policy (https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html#runtime-support-policy)."
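# Editor's note: an illustrative sketch, not part of the generated CRD. Given
# the runtime guidance above, a Function manifest built from this schema pins
# the runtime explicitly when the deployment package is a .zip archive. Field
# names come from this CRD; the concrete values are hypothetical.
#
#   spec:
#     forProvider:
#       region: eu-central-1
#       runtime: python3.11
#       handler: index.handler
#       timeout: 60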
type: string snapStart: description: The function's SnapStart (https://docs.aws.amazon.com/lambda/latest/dg/snapstart.html) @@ -1823,6 +1832,21 @@ spec: role: description: The function's execution role. type: string + runtimeVersionConfig: + description: The ARN of the runtime and any errors that occurred. + properties: + error: + description: Any error returned when the runtime version information + for the function could not be retrieved. + properties: + errorCode: + type: string + message: + type: string + type: object + runtimeVersionARN: + type: string + type: object signingJobARN: description: The ARN of the signing job. type: string diff --git a/package/crds/lambda.aws.crossplane.io_functionurlconfigs.yaml b/package/crds/lambda.aws.crossplane.io_functionurlconfigs.yaml index 9448b73787..824b573f70 100644 --- a/package/crds/lambda.aws.crossplane.io_functionurlconfigs.yaml +++ b/package/crds/lambda.aws.crossplane.io_functionurlconfigs.yaml @@ -71,7 +71,7 @@ spec: authType: description: The type of authentication that your function URL uses. Set to AWS_IAM if you want to restrict access to authenticated - IAM users only. Set to NONE if you want to bypass IAM authentication + users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs (https://docs.aws.amazon.com/lambda/latest/dg/urls-auth.html). type: string @@ -178,6 +178,17 @@ spec: type: string type: object type: object + invokeMode: + description: "Use one of the following options: \n * BUFFERED + – This is the default option. Lambda invokes your function using + the Invoke API operation. Invocation results are available when + the payload is complete. The maximum payload size is 6 MB. \n + * RESPONSE_STREAM – Your function streams payload results as + they become available. Lambda invokes your function using the + InvokeWithResponseStream API operation. The maximum response + payload size is 20 MB; however, you can request a quota increase + (https://docs.aws.amazon.com/servicequotas/latest/userguide/request-quota-increase.html)." + type: string qualifier: description: The alias name. type: string diff --git a/package/crds/mq.aws.crossplane.io_brokers.yaml b/package/crds/mq.aws.crossplane.io_brokers.yaml index bc75ca1e28..ae85b154df 100644 --- a/package/crds/mq.aws.crossplane.io_brokers.yaml +++ b/package/crds/mq.aws.crossplane.io_brokers.yaml @@ -81,6 +81,10 @@ spec: type: object creatorRequestID: type: string + dataReplicationMode: + type: string + dataReplicationPrimaryBrokerARN: + type: string deploymentMode: type: string encryptionOptions: @@ -580,8 +584,7 @@ spec: configurations: properties: current: - description: "A list of information about the configuration. - \n Does not apply to RabbitMQ brokers." + description: A list of information about the configuration. properties: id: type: string @@ -600,8 +603,7 @@ spec: type: object type: array pending: - description: "A list of information about the configuration. - \n Does not apply to RabbitMQ brokers." + description: A list of information about the configuration. properties: id: type: string diff --git a/package/crds/mq.aws.crossplane.io_users.yaml b/package/crds/mq.aws.crossplane.io_users.yaml index 28894b169e..e59feec1dc 100644 --- a/package/crds/mq.aws.crossplane.io_users.yaml +++ b/package/crds/mq.aws.crossplane.io_users.yaml @@ -171,6 +171,8 @@ spec: region: description: Region is which region the User will be created.
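# Editor's note: an illustrative sketch, not part of the generated CRDs. The
# mq broker fields added above pair a replica broker with its primary for
# cross-region data replication; the mode value follows the AWS MQ API and
# the ARN below is hypothetical.
#
#   spec:
#     forProvider:
#       dataReplicationMode: CRDR
#       dataReplicationPrimaryBrokerARN: arn:aws:mq:us-east-1:123456789012:broker:primary-broker:b-1234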
type: string + replicationUser: + type: boolean required: - region type: object diff --git a/package/crds/mwaa.aws.crossplane.io_environments.yaml b/package/crds/mwaa.aws.crossplane.io_environments.yaml index f9f472a764..ad3d355470 100644 --- a/package/crds/mwaa.aws.crossplane.io_environments.yaml +++ b/package/crds/mwaa.aws.crossplane.io_environments.yaml @@ -72,24 +72,25 @@ spec: type: string description: A list of key-value pairs containing the Apache Airflow configuration options you want to attach to your environment. - To learn more, see Apache Airflow configuration options (https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-env-variables.html). + For more information, see Apache Airflow configuration options + (https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-env-variables.html). type: object airflowVersion: description: 'The Apache Airflow version for your environment. If no value is specified, it defaults to the latest version. - Valid values: 1.10.12, 2.0.2, 2.2.2, and 2.4.3. For more information, - see Apache Airflow versions on Amazon Managed Workflows for - Apache Airflow (MWAA) (https://docs.aws.amazon.com/mwaa/latest/userguide/airflow-versions.html).' + Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, and 2.5.1. For more + information, see Apache Airflow versions on Amazon Managed Workflows + for Apache Airflow (MWAA) (https://docs.aws.amazon.com/mwaa/latest/userguide/airflow-versions.html).' type: string dagS3Path: description: The relative path to the DAGs folder on your Amazon - S3 bucket. For example, dags. To learn more, see Adding or updating - DAGs (https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-folder.html). + S3 bucket. For example, dags. For more information, see Adding + or updating DAGs (https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-folder.html). type: string environmentClass: description: 'The environment class type. Valid values: mw1.small, - mw1.medium, mw1.large. To learn more, see Amazon MWAA environment - class (https://docs.aws.amazon.com/mwaa/latest/userguide/environment-class.html).' + mw1.medium, mw1.large. For more information, see Amazon MWAA + environment class (https://docs.aws.amazon.com/mwaa/latest/userguide/environment-class.html).' type: string executionRoleARN: description: "The Amazon Resource Name (ARN) of the execution @@ -509,29 +510,29 @@ spec: type: object pluginsS3ObjectVersion: description: The version of the plugins.zip file on your Amazon - S3 bucket. A version must be specified each time a plugins.zip - file is updated. To learn more, see How S3 Versioning works - (https://docs.aws.amazon.com/AmazonS3/latest/userguide/versioning-workflows.html). + S3 bucket. You must specify a version each time a plugins.zip + file is updated. For more information, see How S3 Versioning + works (https://docs.aws.amazon.com/AmazonS3/latest/userguide/versioning-workflows.html). type: string pluginsS3Path: description: The relative path to the plugins.zip file on your Amazon S3 bucket. For example, plugins.zip. If specified, then - the plugins.zip version is required. To learn more, see Installing - custom plugins (https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import-plugins.html). + the plugins.zip version is required. For more information, see + Installing custom plugins (https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import-plugins.html). type: string region: description: Region is which region the Environment will be created. 
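# Editor's note: an illustrative sketch, not part of the generated CRD. The
# MWAA fields documented above combine roughly as follows in an Environment
# manifest; bucket contents and values are hypothetical.
#
#   spec:
#     forProvider:
#       region: us-east-1
#       airflowVersion: "2.5.1"
#       environmentClass: mw1.small
#       dagS3Path: dags
#       pluginsS3Path: plugins.zip
#       pluginsS3ObjectVersion: exampleVersionId123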
type: string requirementsS3ObjectVersion: description: The version of the requirements.txt file on your - Amazon S3 bucket. A version must be specified each time a requirements.txt - file is updated. To learn more, see How S3 Versioning works - (https://docs.aws.amazon.com/AmazonS3/latest/userguide/versioning-workflows.html). + Amazon S3 bucket. You must specify a version each time a requirements.txt + file is updated. For more information, see How S3 Versioning + works (https://docs.aws.amazon.com/AmazonS3/latest/userguide/versioning-workflows.html). type: string requirementsS3Path: description: The relative path to the requirements.txt file on your Amazon S3 bucket. For example, requirements.txt. If specified, - then a file version is required. To learn more, see Installing + then a version is required. For more information, see Installing Python dependencies (https://docs.aws.amazon.com/mwaa/latest/userguide/working-dags-dependencies.html). type: string schedulers: @@ -623,16 +624,35 @@ spec: type: string type: object type: object + startupScriptS3ObjectVersion: + description: "The version of the startup shell script in your + Amazon S3 bucket. You must specify the version ID (https://docs.aws.amazon.com/AmazonS3/latest/userguide/versioning-workflows.html) + that Amazon S3 assigns to the file every time you update the + script. \n Version IDs are Unicode, UTF-8 encoded, URL-ready, + opaque strings that are no more than 1,024 bytes long. The following + is an example: \n 3sL4kqtJlcpXroDTDmJ+rmSpXd3dIbrHY+MTRCxf3vjVBH40Nr8X8gdRQBpUMLUo + \n For more information, see Using a startup script (https://docs.aws.amazon.com/mwaa/latest/userguide/using-startup-script.html)." + type: string + startupScriptS3Path: + description: "The relative path to the startup shell script in + your Amazon S3 bucket. For example, s3://mwaa-environment/startup.sh. + \n Amazon MWAA runs the script as your environment starts, and + before running the Apache Airflow process. You can use this + script to install dependencies, modify Apache Airflow configuration + options, and set environment variables. For more information, + see Using a startup script (https://docs.aws.amazon.com/mwaa/latest/userguide/using-startup-script.html)." + type: string tags: additionalProperties: type: string description: 'The key-value tag pairs you want to associate to - your environment. For example, "Environment": "Staging". To - learn more, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html).' + your environment. For example, "Environment": "Staging". For + more information, see Tagging Amazon Web Services resources + (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html).' type: object webserverAccessMode: - description: The Apache Airflow Web server access mode. To learn - more, see Apache Airflow access modes (https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-networking.html). + description: The Apache Airflow Web server access mode. For more + information, see Apache Airflow access modes (https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-networking.html). type: string weeklyMaintenanceWindowStart: description: 'The day and time of the week in Coordinated Universal @@ -874,12 +894,22 @@ spec: status: description: "The status of the Amazon MWAA environment. Valid values: \n * CREATING - Indicates the request to create the - environment is in progress. 
\n * CREATE_FAILED - Indicates the - request to create the environment failed, and the environment - could not be created. \n * AVAILABLE - Indicates the request - was successful and the environment is ready to use. \n * UPDATING - - Indicates the request to update the environment is in progress. - \n * DELETING - Indicates the request to delete the environment + environment is in progress. \n * CREATING_SNAPSHOT - Indicates + the request to update environment details, or upgrade the environment + version, is in progress and Amazon MWAA is creating a storage + volume snapshot of the Amazon RDS database cluster associated + with the environment. A database snapshot is a backup created + at a specific point in time. Amazon MWAA uses snapshots to recover + environment metadata if the process to update or upgrade an + environment fails. \n * CREATE_FAILED - Indicates the request + to create the environment failed, and the environment could + not be created. \n * AVAILABLE - Indicates the request was successful + and the environment is ready to use. \n * UPDATING - Indicates + the request to update the environment is in progress. \n * ROLLING_BACK + - Indicates the request to update environment details, or upgrade + the environment version, failed and Amazon MWAA is restoring + the environment using the latest storage volume snapshot. \n + * DELETING - Indicates the request to delete the environment is in progress. \n * DELETED - Indicates the request to delete the environment is complete, and the environment has been deleted. \n * UNAVAILABLE - Indicates the request failed, but the environment @@ -887,7 +917,7 @@ spec: - Indicates the request to update the environment failed, and the environment has rolled back successfully and is ready to use. \n We recommend reviewing our troubleshooting guide for - a list of common errors and their solutions. To learn more, + a list of common errors and their solutions. For more information, see Amazon MWAA troubleshooting (https://docs.aws.amazon.com/mwaa/latest/userguide/troubleshooting.html)." type: string type: object diff --git a/package/crds/neptune.aws.crossplane.io_dbclusters.yaml b/package/crds/neptune.aws.crossplane.io_dbclusters.yaml index 0aa6245de1..1c81fb61ae 100644 --- a/package/crds/neptune.aws.crossplane.io_dbclusters.yaml +++ b/package/crds/neptune.aws.crossplane.io_dbclusters.yaml @@ -578,6 +578,41 @@ spec: description: Specifies whether the DB cluster has instances in multiple Availability Zones. type: boolean + pendingModifiedValues: + description: This data type is used as a response element in the + ModifyDBCluster operation and contains changes that will be + applied during the next maintenance window. + properties: + allocatedStorage: + format: int64 + type: integer + backupRetentionPeriod: + format: int64 + type: integer + dbClusterIdentifier: + type: string + engineVersion: + type: string + iamDatabaseAuthenticationEnabled: + type: boolean + iops: + format: int64 + type: integer + pendingCloudwatchLogsExports: + description: A list of the log types whose configuration is + still pending. In other words, these log types are in the + process of being activated or deactivated. + properties: + logTypesToDisable: + items: + type: string + type: array + logTypesToEnable: + items: + type: string + type: array + type: object + type: object percentProgress: description: Specifies the progress of the operation as a percentage. 
type: string diff --git a/package/crds/opensearchservice.aws.crossplane.io_domains.yaml b/package/crds/opensearchservice.aws.crossplane.io_domains.yaml index 459dd31657..342cfec110 100644 --- a/package/crds/opensearchservice.aws.crossplane.io_domains.yaml +++ b/package/crds/opensearchservice.aws.crossplane.io_domains.yaml @@ -178,6 +178,8 @@ spec: type: string type: object type: array + useOffPeakWindow: + type: boolean type: object clusterConfig: description: Container for the cluster configuration of a domain. @@ -203,6 +205,8 @@ spec: type: integer instanceType: type: string + multiAZWithStandbyEnabled: + type: boolean warmCount: format: int64 type: integer @@ -374,7 +378,7 @@ spec: enabled: type: boolean type: object - description: Key-value pairs to configure slow log publishing. + description: Key-value pairs to configure log publishing. type: object nodeToNodeEncryptionOptions: description: Enables node-to-node encryption. @@ -382,6 +386,40 @@ spec: enabled: type: boolean type: object + offPeakWindowOptions: + description: Specifies a daily 10-hour time block during which + OpenSearch Service can perform configuration changes on the + domain, including service software updates and Auto-Tune enhancements + that require a blue/green deployment. If no options are specified, + the default start time of 10:00 P.M. local time (for the Region + that the domain is created in) is used. + properties: + enabled: + type: boolean + offPeakWindow: + description: "A custom 10-hour, low-traffic window during + which OpenSearch Service can perform mandatory configuration + changes on the domain. These actions can include scheduled + service software updates and blue/green Auto-Tune enhancements. + OpenSearch Service will schedule these actions during the + window that you specify. \n If you don't specify a window + start time, it defaults to 10:00 P.M. local time. \n For + more information, see Defining off-peak maintenance windows + for Amazon OpenSearch Service (https://docs.aws.amazon.com/opensearch-service/latest/developerguide/off-peak.html)." + properties: + windowStartTime: + description: The desired start time for an off-peak maintenance + window (https://docs.aws.amazon.com/opensearch-service/latest/APIReference/API_OffPeakWindow.html). + properties: + hours: + format: int64 + type: integer + minutes: + format: int64 + type: integer + type: object + type: object + type: object region: description: Region is which region the Domain will be created. type: string @@ -391,6 +429,12 @@ spec: format: int64 type: integer type: object + softwareUpdateOptions: + description: Software update options for the domain. + properties: + autoSoftwareUpdateEnabled: + type: boolean + type: object tags: description: List of tags to add to the domain upon creation. items: @@ -847,6 +891,8 @@ spec: description: The Auto-Tune state for the domain. For valid states see Auto-Tune for Amazon OpenSearch Service (https://docs.aws.amazon.com/opensearch-service/latest/developerguide/auto-tune.html). 
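# Editor's note: an illustrative sketch, not part of the generated CRD. The
# offPeakWindowOptions fields added above describe the daily 10-hour
# maintenance window; this fragment would start it at 02:30 local time
# (values are hypothetical).
#
#   spec:
#     forProvider:
#       offPeakWindowOptions:
#         enabled: true
#         offPeakWindow:
#           windowStartTime:
#             hours: 2
#             minutes: 30
#       softwareUpdateOptions:
#         autoSoftwareUpdateEnabled: true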
type: string + useOffPeakWindow: + type: boolean type: object changeProgressDetails: description: Information about a configuration change happening @@ -881,6 +927,8 @@ spec: type: integer instanceType: type: string + multiAZWithStandbyEnabled: + type: boolean warmCount: format: int64 type: integer diff --git a/package/crds/ram.aws.crossplane.io_resourceshares.yaml b/package/crds/ram.aws.crossplane.io_resourceshares.yaml index c5c3f7a35d..ddef38f54d 100644 --- a/package/crds/ram.aws.crossplane.io_resourceshares.yaml +++ b/package/crds/ram.aws.crossplane.io_resourceshares.yaml @@ -85,7 +85,9 @@ spec: same value for all other parameters. We recommend that you use a UUID type of value. (https://wikipedia.org/wiki/Universally_unique_identifier). \n If you don't provide this value, then Amazon Web Services - generates a random one for you." + generates a random one for you. \n If you retry the operation + with the same ClientToken, but with different parameters, the + retry fails with an IdempotentParameterMismatch error." type: string name: description: Specifies the name of the resource share. @@ -104,7 +106,7 @@ spec: description: "Specifies a list of one or more principals to associate with the resource share. \n You can include the following values: \n * An Amazon Web Services account ID, for example: 123456789012 - \n * An Amazon Resoure Name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + \n * An Amazon Resource Name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of an organization in Organizations, for example: organizations::123456789012:organization/o-exampleorgid \n * An ARN of an organizational unit (OU) in Organizations, for example: organizations::123456789012:ou/o-exampleorgid/ou-examplerootid-exampleouid123 @@ -126,6 +128,12 @@ spec: items: type: string type: array + sources: + description: Specifies from which source accounts the service + principal has access to the resources in this resource share. + items: + type: string + type: array tags: description: Specifies one or more tags to attach to the resource share itself. It doesn't attach the tags to the resources associated diff --git a/package/crds/rds.aws.crossplane.io_dbclusters.yaml b/package/crds/rds.aws.crossplane.io_dbclusters.yaml index 4fae4d0dec..4e3c99348b 100644 --- a/package/crds/rds.aws.crossplane.io_dbclusters.yaml +++ b/package/crds/rds.aws.crossplane.io_dbclusters.yaml @@ -69,17 +69,17 @@ spec: properties: allocatedStorage: description: "The amount of storage in gibibytes (GiB) to allocate - to each DB instance in the Multi-AZ DB cluster. \n This setting - is required to create a Multi-AZ DB cluster. \n Valid for: Multi-AZ - DB clusters only" + to each DB instance in the Multi-AZ DB cluster. \n Valid for + Cluster Type: Multi-AZ DB clusters only \n This setting is required + to create a Multi-AZ DB cluster." format: int64 type: integer allowMajorVersionUpgrade: - description: "A value that indicates whether major version upgrades - are allowed. \n Constraints: You must allow major version upgrades - when specifying a value for the EngineVersion parameter that - is a different major version than the DB cluster's current version. - \n Valid for: Aurora DB clusters only" + description: "Specifies whether major version upgrades are allowed. 
+ \n Valid for Cluster Type: Aurora DB clusters only \n Constraints: + \n * You must allow major version upgrades when specifying a + value for the EngineVersion parameter that is a different major + version than the DB cluster's current version." type: boolean applyImmediately: description: "A value that indicates whether the modifications @@ -96,10 +96,10 @@ spec: this parameter is disabled." type: boolean autoMinorVersionUpgrade: - description: "A value that indicates whether minor engine upgrades - are applied automatically to the DB cluster during the maintenance - window. By default, minor engine upgrades are applied automatically. - \n Valid for: Multi-AZ DB clusters only" + description: "Specifies whether minor engine upgrades are applied + automatically to the DB cluster during the maintenance window. + By default, minor engine upgrades are applied automatically. + \n Valid for Cluster Type: Multi-AZ DB clusters only" type: boolean autogeneratePassword: description: "AutogeneratePassword indicates whether the controller @@ -113,42 +113,42 @@ spec: in the DB cluster can be created. \n For information on Amazon Web Services Regions and Availability Zones, see Choosing the Regions and Availability Zones (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.RegionsAndAvailabilityZones.html) - in the Amazon Aurora User Guide. \n Valid for: Aurora DB clusters - only" + in the Amazon Aurora User Guide. \n Valid for Cluster Type: + Aurora DB clusters only" items: type: string type: array backtrackWindow: description: "The target backtrack window, in seconds. To disable - backtracking, set this value to 0. \n Default: 0 \n Constraints: + backtracking, set this value to 0. \n Valid for Cluster Type: + Aurora MySQL DB clusters only \n Default: 0 \n Constraints: \n * If specified, this value must be set to a number from 0 - to 259,200 (72 hours). \n Valid for: Aurora MySQL DB clusters - only" + to 259,200 (72 hours)." format: int64 type: integer backupRetentionPeriod: description: "The number of days for which automated backups are - retained. \n Default: 1 \n Constraints: \n * Must be a value - from 1 to 35 \n Valid for: Aurora DB clusters and Multi-AZ DB - clusters" + retained. \n Valid for Cluster Type: Aurora DB clusters and + Multi-AZ DB clusters \n Default: 1 \n Constraints: \n * Must + be a value from 1 to 35." format: int64 type: integer characterSetName: - description: "A value that indicates that the DB cluster should - be associated with the specified CharacterSet. \n Valid for: - Aurora DB clusters only" + description: "The name of the character set (CharacterSet) to + associate the DB cluster with. \n Valid for Cluster Type: Aurora + DB clusters only" type: string copyTagsToSnapshot: - description: "A value that indicates whether to copy all tags - from the DB cluster to snapshots of the DB cluster. The default - is not to copy them. \n Valid for: Aurora DB clusters and Multi-AZ - DB clusters" + description: "Specifies whether to copy all tags from the DB cluster + to snapshots of the DB cluster. The default is not to copy them. + \n Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB + clusters" type: boolean databaseName: description: "The name for your database of up to 64 alphanumeric - characters. If you do not provide a name, Amazon RDS doesn't + characters. If you don't provide a name, Amazon RDS doesn't create a database in the DB cluster you are creating. 
\n Valid - for: Aurora DB clusters and Multi-AZ DB clusters" + for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters" type: string dbClusterInstanceClass: description: "The compute and memory capacity of each DB instance @@ -158,16 +158,16 @@ spec: DB instance classes and availability for your engine, see DB instance class (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) in the Amazon RDS User Guide. \n This setting is required to - create a Multi-AZ DB cluster. \n Valid for: Multi-AZ DB clusters - only" + create a Multi-AZ DB cluster. \n Valid for Cluster Type: Multi-AZ + DB clusters only" type: string dbClusterParameterGroupName: description: "The name of the DB cluster parameter group to associate - with this DB cluster. If you do not specify a value, then the + with this DB cluster. If you don't specify a value, then the default DB cluster parameter group for the specified DB engine - and version is used. \n Constraints: \n * If supplied, must - match the name of an existing DB cluster parameter group. \n - Valid for: Aurora DB clusters and Multi-AZ DB clusters" + and version is used. \n Valid for Cluster Type: Aurora DB clusters + and Multi-AZ DB clusters \n Constraints: \n * If supplied, must + match the name of an existing DB cluster parameter group." type: string dbClusterParameterGroupNameRef: description: DBClusterParameterGroupNameRef is a reference to @@ -247,9 +247,9 @@ spec: dbSubnetGroupName: description: "A DB subnet group to associate with this DB cluster. \n This setting is required to create a Multi-AZ DB cluster. - \n Constraints: Must match the name of an existing DBSubnetGroup. - Must not be default. \n Example: mydbsubnetgroup \n Valid for: - Aurora DB clusters and Multi-AZ DB clusters" + \n Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB + clusters \n Constraints: \n * Must match the name of an existing + DB subnet group. \n * Must not be default. \n Example: mydbsubnetgroup" type: string dbSubnetGroupNameRef: description: DBSubnetGroupNameRef is a reference to a DBSubnetGroup @@ -330,11 +330,10 @@ spec: description: Reserved for future use. type: string deletionProtection: - description: "A value that indicates whether the DB cluster has - deletion protection enabled. The database can't be deleted when - deletion protection is enabled. By default, deletion protection - isn't enabled. \n Valid for: Aurora DB clusters and Multi-AZ - DB clusters" + description: "Specifies whether the DB cluster has deletion protection + enabled. The database can't be deleted when deletion protection + is enabled. By default, deletion protection isn't enabled. \n + Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters" type: boolean destinationRegion: description: DestinationRegion is used for presigning the request @@ -346,12 +345,12 @@ spec: can use Kerberos authentication to authenticate users that connect to the DB cluster. \n For more information, see Kerberos authentication (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/kerberos-authentication.html) - in the Amazon Aurora User Guide. \n Valid for: Aurora DB clusters - only" + in the Amazon Aurora User Guide. \n Valid for Cluster Type: + Aurora DB clusters only" type: string domainIAMRoleName: - description: "Specify the name of the IAM role to be used when - making API calls to the Directory Service. \n Valid for: Aurora + description: "The name of the IAM role to use when making API + calls to the Directory Service. 
\n Valid for Cluster Type: Aurora DB clusters only" type: string domainIAMRoleNameRef: @@ -431,94 +430,83 @@ spec: type: object enableCloudwatchLogsExports: description: "The list of log types that need to be enabled for - exporting to CloudWatch Logs. The values in the list depend - on the DB engine being used. \n RDS for MySQL \n Possible values - are error, general, and slowquery. \n RDS for PostgreSQL \n - Possible values are postgresql and upgrade. \n Aurora MySQL - \n Possible values are audit, error, general, and slowquery. - \n Aurora PostgreSQL \n Possible value is postgresql. \n For - more information about exporting CloudWatch Logs for Amazon - RDS, see Publishing Database Logs to Amazon CloudWatch Logs - (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) + exporting to CloudWatch Logs. \n Valid for Cluster Type: Aurora + DB clusters and Multi-AZ DB clusters \n The following values + are valid for each DB engine: \n * Aurora MySQL - audit | error + | general | slowquery \n * Aurora PostgreSQL - postgresql \n + * RDS for MySQL - error | general | slowquery \n * RDS for PostgreSQL + - postgresql | upgrade \n For more information about exporting + CloudWatch Logs for Amazon RDS, see Publishing Database Logs + to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) in the Amazon RDS User Guide. \n For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) - in the Amazon Aurora User Guide. \n Valid for: Aurora DB clusters - and Multi-AZ DB clusters" + in the Amazon Aurora User Guide." items: type: string type: array enableGlobalWriteForwarding: - description: "A value that indicates whether to enable this DB - cluster to forward write operations to the primary cluster of - an Aurora global database (GlobalCluster). By default, write - operations are not allowed on Aurora DB clusters that are secondary - clusters in an Aurora global database. \n You can set this value - only on Aurora DB clusters that are members of an Aurora global - database. With this parameter enabled, a secondary cluster can - forward writes to the current primary cluster and the resulting - changes are replicated back to this cluster. For the primary - DB cluster of an Aurora global database, this value is used - immediately if the primary is demoted by the FailoverGlobalCluster - API operation, but it does nothing until then. \n Valid for: - Aurora DB clusters only" + description: "Specifies whether to enable this DB cluster to forward + write operations to the primary cluster of a global cluster + (Aurora global database). By default, write operations are not + allowed on Aurora DB clusters that are secondary clusters in + an Aurora global database. \n You can set this value only on + Aurora DB clusters that are members of an Aurora global database. + With this parameter enabled, a secondary cluster can forward + writes to the current primary cluster, and the resulting changes + are replicated back to this cluster. For the primary DB cluster + of an Aurora global database, this value is used immediately + if the primary is demoted by a global cluster API operation, + but it does nothing until then. 
\n Valid for Cluster Type: Aurora + DB clusters only" type: boolean enableHTTPEndpoint: - description: "A value that indicates whether to enable the HTTP - endpoint for an Aurora Serverless v1 DB cluster. By default, - the HTTP endpoint is disabled. \n When enabled, the HTTP endpoint - provides a connectionless web service API for running SQL queries - on the Aurora Serverless v1 DB cluster. You can also query your - database from inside the RDS console with the query editor. - \n For more information, see Using the Data API for Aurora Serverless - v1 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) - in the Amazon Aurora User Guide. \n Valid for: Aurora DB clusters - only" + description: "Specifies whether to enable the HTTP endpoint for + an Aurora Serverless v1 DB cluster. By default, the HTTP endpoint + is disabled. \n When enabled, the HTTP endpoint provides a connectionless + web service API for running SQL queries on the Aurora Serverless + v1 DB cluster. You can also query your database from inside + the RDS console with the query editor. \n For more information, + see Using the Data API for Aurora Serverless v1 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) + in the Amazon Aurora User Guide. \n Valid for Cluster Type: + Aurora DB clusters only" type: boolean enableIAMDatabaseAuthentication: - description: "A value that indicates whether to enable mapping - of Amazon Web Services Identity and Access Management (IAM) - accounts to database accounts. By default, mapping isn't enabled. - \n For more information, see IAM Database Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html) - in the Amazon Aurora User Guide. \n Valid for: Aurora DB clusters - only" + description: "Specifies whether to enable mapping of Amazon Web + Services Identity and Access Management (IAM) accounts to database + accounts. By default, mapping isn't enabled. \n For more information, + see IAM Database Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html) + in the Amazon Aurora User Guide. \n Valid for Cluster Type: + Aurora DB clusters only" + type: boolean + enableLocalWriteForwarding: + description: "Specifies whether read replicas can forward write + operations to the writer DB instance in the DB cluster. By default, + write operations aren't allowed on reader DB instances. \n Valid + for: Aurora DB clusters only" type: boolean enablePerformanceInsights: - description: "A value that indicates whether to turn on Performance - Insights for the DB cluster. \n For more information, see Using - Amazon Performance Insights (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) - in the Amazon RDS User Guide. \n Valid for: Multi-AZ DB clusters - only" + description: "Specifies whether to turn on Performance Insights + for the DB cluster. \n For more information, see Using Amazon + Performance Insights (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) + in the Amazon RDS User Guide. \n Valid for Cluster Type: Multi-AZ + DB clusters only" type: boolean engine: - description: "The name of the database engine to be used for this - DB cluster. 
\n Valid Values: \n * aurora (for MySQL 5.6-compatible - Aurora) \n * aurora-mysql (for MySQL 5.7-compatible and MySQL - 8.0-compatible Aurora) \n * aurora-postgresql \n * mysql \n - * postgres \n Valid for: Aurora DB clusters and Multi-AZ DB - clusters" + description: "The database engine to use for this DB cluster. + \n Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB + clusters \n Valid Values: aurora-mysql | aurora-postgresql | + mysql | postgres" type: string engineMode: - description: "The DB engine mode of the DB cluster, either provisioned, - serverless, parallelquery, global, or multimaster. \n The parallelquery - engine mode isn't required for Aurora MySQL version 1.23 and - higher 1.x versions, and version 2.09 and higher 2.x versions. - \n The global engine mode isn't required for Aurora MySQL version - 1.22 and higher 1.x versions, and global engine mode isn't required - for any 2.x versions. \n The multimaster engine mode only applies - for DB clusters created with Aurora MySQL version 5.6.10a. \n - The serverless engine mode only applies for Aurora Serverless - v1 DB clusters. \n For Aurora PostgreSQL, the global engine - mode isn't required, and both the parallelquery and the multimaster - engine modes currently aren't supported. \n Limitations and - requirements apply to some DB engine modes. For more information, - see the following sections in the Amazon Aurora User Guide: - \n * Limitations of Aurora Serverless v1 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations) + description: "The DB engine mode of the DB cluster, either provisioned + or serverless. \n The serverless engine mode only applies for + Aurora Serverless v1 DB clusters. \n For information about limitations + and requirements for Serverless DB clusters, see the following + sections in the Amazon Aurora User Guide: \n * Limitations of + Aurora Serverless v1 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations) \n * Requirements for Aurora Serverless v2 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.requirements.html) - \n * Limitations of Parallel Query (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-mysql-parallel-query.html#aurora-mysql-parallel-query-limitations) - \n * Limitations of Aurora Global Databases (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html#aurora-global-database.limitations) - \n * Limitations of Multi-Master Clusters (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-multi-master.html#aurora-multi-master-limitations) - \n Valid for: Aurora DB clusters only" + \n Valid for Cluster Type: Aurora DB clusters only" type: string engineVersion: description: "The version number of the database engine to use. @@ -563,17 +551,17 @@ spec: globalClusterIdentifier: description: "The global cluster ID of an Aurora cluster that becomes the primary cluster in the new global database cluster. - \n Valid for: Aurora DB clusters only" + \n Valid for Cluster Type: Aurora DB clusters only" type: string iops: description: "The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster. 
\n For information about valid IOPS - values, see Amazon RDS Provisioned IOPS storage (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#USER_PIOPS) + values, see Provisioned IOPS storage (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#USER_PIOPS) in the Amazon RDS User Guide. \n This setting is required to - create a Multi-AZ DB cluster. \n Constraints: Must be a multiple - between .5 and 50 of the storage amount for the DB cluster. - \n Valid for: Multi-AZ DB clusters only" + create a Multi-AZ DB cluster. \n Valid for Cluster Type: Multi-AZ + DB clusters only \n Constraints: \n * Must be a multiple between + .5 and 50 of the storage amount for the DB cluster." format: int64 type: integer kmsKeyID: @@ -583,19 +571,19 @@ spec: key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. \n When a KMS key isn't specified in KmsKeyId: \n * If ReplicationSourceIdentifier identifies - an encrypted source, then Amazon RDS will use the KMS key used - to encrypt the source. Otherwise, Amazon RDS will use your default + an encrypted source, then Amazon RDS uses the KMS key used to + encrypt the source. Otherwise, Amazon RDS uses your default KMS key. \n * If the StorageEncrypted parameter is enabled and ReplicationSourceIdentifier isn't specified, then Amazon RDS - will use your default KMS key. \n There is a default KMS key - for your Amazon Web Services account. Your Amazon Web Services - account has a different default KMS key for each Amazon Web - Services Region. \n If you create a read replica of an encrypted - DB cluster in another Amazon Web Services Region, you must set - KmsKeyId to a KMS key identifier that is valid in the destination - Amazon Web Services Region. This KMS key is used to encrypt - the read replica in that Amazon Web Services Region. \n Valid - for: Aurora DB clusters and Multi-AZ DB clusters" + uses your default KMS key. \n There is a default KMS key for + your Amazon Web Services account. Your Amazon Web Services account + has a different default KMS key for each Amazon Web Services + Region. \n If you create a read replica of an encrypted DB cluster + in another Amazon Web Services Region, make sure to set KmsKeyId + to a KMS key identifier that is valid in the destination Amazon + Web Services Region. This KMS key is used to encrypt the read + replica in that Amazon Web Services Region. \n Valid for Cluster + Type: Aurora DB clusters and Multi-AZ DB clusters" type: string kmsKeyIDRef: description: KMSKeyIDRef is a reference to a KMS Key used to set @@ -673,16 +661,16 @@ spec: type: object type: object manageMasterUserPassword: - description: "A value that indicates whether to manage the master - user password with Amazon Web Services Secrets Manager. \n For - more information, see Password management with Amazon Web Services - Secrets Manager (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) + description: "Specifies whether to manage the master user password + with Amazon Web Services Secrets Manager. \n For more information, + see Password management with Amazon Web Services Secrets Manager + (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) in the Amazon RDS User Guide and Password management with Amazon Web Services Secrets Manager (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-secrets-manager.html) - in the Amazon Aurora User Guide. 
\n Constraints: \n * Can't - manage the master user password with Amazon Web Services Secrets - Manager if MasterUserPassword is specified. \n Valid for: Aurora - DB clusters and Multi-AZ DB clusters" + in the Amazon Aurora User Guide. \n Valid for Cluster Type: + Aurora DB clusters and Multi-AZ DB clusters \n Constraints: + \n * Can't manage the master user password with Amazon Web Services + Secrets Manager if MasterUserPassword is specified." type: boolean masterUserPasswordSecretRef: description: "The password for the master database user. This @@ -725,23 +713,24 @@ spec: use a customer managed KMS key. \n There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web - Services Region. \n Valid for: Aurora DB clusters and Multi-AZ - DB clusters" + Services Region. \n Valid for Cluster Type: Aurora DB clusters + and Multi-AZ DB clusters" type: string masterUsername: description: "The name of the master user for the DB cluster. - \n Constraints: \n * Must be 1 to 16 letters or numbers. \n - * First character must be a letter. \n * Can't be a reserved - word for the chosen database engine. \n Valid for: Aurora DB - clusters and Multi-AZ DB clusters" + \n Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB + clusters \n Constraints: \n * Must be 1 to 16 letters or numbers. + \n * First character must be a letter. \n * Can't be a reserved + word for the chosen database engine." type: string monitoringInterval: description: "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn - off collecting Enhanced Monitoring metrics, specify 0. The default - is 0. \n If MonitoringRoleArn is specified, also set MonitoringInterval - to a value other than 0. \n Valid Values: 0, 1, 5, 10, 15, 30, - 60 \n Valid for: Multi-AZ DB clusters only" + off collecting Enhanced Monitoring metrics, specify 0. \n If + MonitoringRoleArn is specified, also set MonitoringInterval + to a value other than 0. \n Valid for Cluster Type: Multi-AZ + DB clusters only \n Valid Values: 0 | 1 | 5 | 10 | 15 | 30 | + 60 \n Default: 0" format: int64 type: integer monitoringRoleARN: @@ -752,22 +741,21 @@ spec: and enabling Enhanced Monitoring (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.OS.html#USER_Monitoring.OS.Enabling) in the Amazon RDS User Guide. \n If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value. \n - Valid for: Multi-AZ DB clusters only" + Valid for Cluster Type: Multi-AZ DB clusters only" type: string networkType: - description: "The network type of the DB cluster. \n Valid values: - \n * IPV4 \n * DUAL \n The network type is determined by the - DBSubnetGroup specified for the DB cluster. A DBSubnetGroup - can support only the IPv4 protocol or the IPv4 and the IPv6 - protocols (DUAL). \n For more information, see Working with - a DB instance in a VPC (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html) - in the Amazon Aurora User Guide. \n Valid for: Aurora DB clusters - only" + description: "The network type of the DB cluster. \n The network + type is determined by the DBSubnetGroup specified for the DB + cluster. A DBSubnetGroup can support only the IPv4 protocol + or the IPv4 and the IPv6 protocols (DUAL). 
\n For more information, + see Working with a DB instance in a VPC (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html) + in the Amazon Aurora User Guide. \n Valid for Cluster Type: + Aurora DB clusters only \n Valid Values: IPV4 | DUAL" type: string optionGroupName: - description: "A value that indicates that the DB cluster should - be associated with the specified option group. \n DB clusters - are associated with a default option group that can't be modified." + description: "The option group to associate the DB cluster with. + \n DB clusters are associated with a default option group that + can't be modified." type: string performanceInsightsKMSKeyID: description: "The Amazon Web Services KMS key identifier for encryption @@ -777,25 +765,25 @@ spec: then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon - Web Services Region. \n Valid for: Multi-AZ DB clusters only" + Web Services Region. \n Valid for Cluster Type: Multi-AZ DB + clusters only" type: string performanceInsightsRetentionPeriod: description: "The number of days to retain Performance Insights - data. The default is 7 days. The following values are valid: - \n * 7 \n * month * 31, where month is a number of months from - 1-23 \n * 731 \n For example, the following values are valid: - \n * 93 (3 months * 31) \n * 341 (11 months * 31) \n * 589 (19 - months * 31) \n * 731 \n If you specify a retention period such - as 94, which isn't a valid value, RDS issues an error. \n Valid - for: Multi-AZ DB clusters only" + data. \n Valid for Cluster Type: Multi-AZ DB clusters only \n + Valid Values: \n * 7 \n * month * 31, where month is a number + of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months + * 31), 589 (19 months * 31) \n * 731 \n Default: 7 days \n If + you specify a retention period that isn't valid, such as 94, + Amazon RDS issues an error." format: int64 type: integer port: description: "The port number on which the instances in the DB - cluster accept connections. \n RDS for MySQL and Aurora MySQL - \n Default: 3306 \n Valid values: 1150-65535 \n RDS for PostgreSQL - and Aurora PostgreSQL \n Default: 5432 \n Valid values: 1150-65535 - \n Valid for: Aurora DB clusters and Multi-AZ DB clusters" + cluster accept connections. \n Valid for Cluster Type: Aurora + DB clusters and Multi-AZ DB clusters \n Valid Values: 1150-65535 + \n Default: \n * RDS for MySQL and Aurora MySQL - 3306 \n * + RDS for PostgreSQL and Aurora PostgreSQL - 5432" format: int64 type: integer preSignedURL: @@ -832,57 +820,58 @@ spec: instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a presigned URL that is a valid request for the operation that can run in the source Amazon Web Services Region. - \n Valid for: Aurora DB clusters only" + \n Valid for Cluster Type: Aurora DB clusters only" type: string preferredBackupWindow: description: "The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod - parameter. \n The default is a 30-minute window selected at - random from an 8-hour block of time for each Amazon Web Services + parameter. \n Valid for Cluster Type: Aurora DB clusters and + Multi-AZ DB clusters \n The default is a 30-minute window selected + at random from an 8-hour block of time for each Amazon Web Services Region. 
To view the time blocks available, see Backup window (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Backups.html#Aurora.Managing.Backups.BackupWindow) in the Amazon Aurora User Guide. \n Constraints: \n * Must be in the format hh24:mi-hh24:mi. \n * Must be in Universal Coordinated Time (UTC). \n * Must not conflict with the preferred maintenance - window. \n * Must be at least 30 minutes. \n Valid for: Aurora - DB clusters and Multi-AZ DB clusters" + window. \n * Must be at least 30 minutes." type: string preferredMaintenanceWindow: description: "The weekly time range during which system maintenance - can occur, in Universal Coordinated Time (UTC). \n Format: ddd:hh24:mi-ddd:hh24:mi - \n The default is a 30-minute window selected at random from - an 8-hour block of time for each Amazon Web Services Region, - occurring on a random day of the week. To see the time blocks - available, see Adjusting the Preferred DB Cluster Maintenance + can occur. \n Valid for Cluster Type: Aurora DB clusters and + Multi-AZ DB clusters \n The default is a 30-minute window selected + at random from an 8-hour block of time for each Amazon Web Services + Region, occurring on a random day of the week. To see the time + blocks available, see Adjusting the Preferred DB Cluster Maintenance Window (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow.Aurora) - in the Amazon Aurora User Guide. \n Valid Days: Mon, Tue, Wed, - Thu, Fri, Sat, Sun. \n Constraints: Minimum 30-minute window. - \n Valid for: Aurora DB clusters and Multi-AZ DB clusters" + in the Amazon Aurora User Guide. \n Constraints: \n * Must be + in the format ddd:hh24:mi-ddd:hh24:mi. \n * Days must be one + of Mon | Tue | Wed | Thu | Fri | Sat | Sun. \n * Must be in + Universal Coordinated Time (UTC). \n * Must be at least 30 minutes." type: string publiclyAccessible: - description: "A value that indicates whether the DB cluster is - publicly accessible. \n When the DB cluster is publicly accessible, - its Domain Name System (DNS) endpoint resolves to the private - IP address from within the DB cluster's virtual private cloud - (VPC). It resolves to the public IP address from outside of - the DB cluster's VPC. Access to the DB cluster is ultimately - controlled by the security group it uses. That public access - isn't permitted if the security group assigned to the DB cluster - doesn't permit it. \n When the DB cluster isn't publicly accessible, - it is an internal DB cluster with a DNS name that resolves to - a private IP address. \n Default: The default behavior varies - depending on whether DBSubnetGroupName is specified. \n If DBSubnetGroupName - isn't specified, and PubliclyAccessible isn't specified, the - following applies: \n * If the default VPC in the target Region - doesn’t have an internet gateway attached to it, the DB cluster - is private. \n * If the default VPC in the target Region has - an internet gateway attached to it, the DB cluster is public. - \n If DBSubnetGroupName is specified, and PubliclyAccessible - isn't specified, the following applies: \n * If the subnets - are part of a VPC that doesn’t have an internet gateway attached - to it, the DB cluster is private. \n * If the subnets are part - of a VPC that has an internet gateway attached to it, the DB - cluster is public. \n Valid for: Multi-AZ DB clusters only" + description: "Specifies whether the DB cluster is publicly accessible. 
+ \n When the DB cluster is publicly accessible, its Domain Name + System (DNS) endpoint resolves to the private IP address from + within the DB cluster's virtual private cloud (VPC). It resolves + to the public IP address from outside of the DB cluster's VPC. + Access to the DB cluster is ultimately controlled by the security + group it uses. That public access isn't permitted if the security + group assigned to the DB cluster doesn't permit it. \n When + the DB cluster isn't publicly accessible, it is an internal + DB cluster with a DNS name that resolves to a private IP address. + \n Valid for Cluster Type: Multi-AZ DB clusters only \n Default: + The default behavior varies depending on whether DBSubnetGroupName + is specified. \n If DBSubnetGroupName isn't specified, and PubliclyAccessible + isn't specified, the following applies: \n * If the default + VPC in the target Region doesn’t have an internet gateway attached + to it, the DB cluster is private. \n * If the default VPC in + the target Region has an internet gateway attached to it, the + DB cluster is public. \n If DBSubnetGroupName is specified, + and PubliclyAccessible isn't specified, the following applies: + \n * If the subnets are part of a VPC that doesn’t have an internet + gateway attached to it, the DB cluster is private. \n * If the + subnets are part of a VPC that has an internet gateway attached + to it, the DB cluster is public." type: boolean region: description: Region is which region the DBCluster will be created. @@ -890,7 +879,8 @@ spec: replicationSourceIdentifier: description: "The Amazon Resource Name (ARN) of the source DB instance or DB cluster if this DB cluster is created as a read - replica. \n Valid for: Aurora DB clusters only" + replica. \n Valid for Cluster Type: Aurora DB clusters and Multi-AZ + DB clusters" type: string restoreFrom: description: RestoreFrom specifies the details of the backup to @@ -1000,8 +990,8 @@ spec: type: object scalingConfiguration: description: "For DB clusters in serverless DB engine mode, the - scaling properties of the DB cluster. \n Valid for: Aurora DB - clusters only" + scaling properties of the DB cluster. \n Valid for Cluster Type: + Aurora DB clusters only" properties: autoPause: type: boolean @@ -1044,20 +1034,29 @@ spec: the source ARN. type: string storageEncrypted: - description: "A value that indicates whether the DB cluster is - encrypted. \n Valid for: Aurora DB clusters and Multi-AZ DB - clusters" + description: "Specifies whether the DB cluster is encrypted. \n + Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters" type: boolean storageType: - description: "Specifies the storage type to be associated with - the DB cluster. \n This setting is required to create a Multi-AZ - DB cluster. \n Valid values: io1 \n When specified, a value - for the Iops parameter is required. \n Default: io1 \n Valid - for: Multi-AZ DB clusters only" + description: "The storage type to associate with the DB cluster. + \n For information on storage types for Aurora DB clusters, + see Storage configurations for Amazon Aurora DB clusters (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Overview.StorageReliability.html#aurora-storage-type). + For information on storage types for Multi-AZ DB clusters, see + Settings for creating Multi-AZ DB clusters (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/create-multi-az-db-cluster.html#create-multi-az-db-cluster-settings). + \n This setting is required to create a Multi-AZ DB cluster. 
+ \n When specified for a Multi-AZ DB cluster, a value for the + Iops parameter is required. \n Valid for Cluster Type: Aurora + DB clusters and Multi-AZ DB clusters \n Valid Values: \n * Aurora + DB clusters - aurora | aurora-iopt1 \n * Multi-AZ DB clusters + - io1 \n Default: \n * Aurora DB clusters - aurora \n * Multi-AZ + DB clusters - io1 \n When you create an Aurora DB cluster with + the storage type set to aurora-iopt1, the storage type is returned + in the response. The storage type isn't returned when you set + it to aurora." type: string tags: - description: "Tags to assign to the DB cluster. \n Valid for: - Aurora DB clusters and Multi-AZ DB clusters" + description: "Tags to assign to the DB cluster. \n Valid for Cluster + Type: Aurora DB clusters and Multi-AZ DB clusters" items: properties: key: @@ -1378,11 +1377,11 @@ spec: description: The status of the database activity stream. type: string associatedRoles: - description: Provides a list of the Amazon Web Services Identity - and Access Management (IAM) roles that are associated with the - DB cluster. IAM roles that are associated with a DB cluster - grant permission for the DB cluster to access other Amazon Web - Services on your behalf. + description: A list of the Amazon Web Services Identity and Access + Management (IAM) roles that are associated with the DB cluster. + IAM roles that are associated with a DB cluster grant permission + for the DB cluster to access other Amazon Web Services on your + behalf. items: properties: featureName: @@ -1410,21 +1409,20 @@ spec: format: int64 type: integer cloneGroupID: - description: Identifies the clone group to which the DB cluster + description: The ID of the clone group with which the DB cluster is associated. type: string clusterCreateTime: - description: Specifies the time when the DB cluster was created, - in Universal Coordinated Time (UTC). + description: The time when the DB cluster was created, in Universal + Coordinated Time (UTC). format: date-time type: string crossAccountClone: - description: Specifies whether the DB cluster is a clone of a + description: Indicates whether the DB cluster is a clone of a DB cluster owned by a different Amazon Web Services account. type: boolean customEndpoints: - description: Identifies all custom endpoints associated with the - cluster. + description: The custom endpoints associated with the DB cluster. items: type: string type: array @@ -1432,12 +1430,11 @@ spec: description: The Amazon Resource Name (ARN) for the DB cluster. type: string dbClusterIdentifier: - description: Contains a user-supplied DB cluster identifier. This - identifier is the unique key that identifies a DB cluster. + description: The user-supplied identifier for the DB cluster. + This identifier is the unique key that identifies a DB cluster. type: string dbClusterMembers: - description: Provides the list of instances that make up the DB - cluster. + description: The list of DB instances that make up the DB cluster. items: properties: dbClusterParameterGroupStatus: @@ -1452,8 +1449,8 @@ spec: type: object type: array dbClusterOptionGroupMemberships: - description: Provides the list of option group memberships for - this DB cluster. + description: The list of option group memberships for this DB + cluster. items: properties: dbClusterOptionGroupName: @@ -1463,8 +1460,8 @@ spec: type: object type: array dbClusterParameterGroup: - description: Specifies the name of the DB cluster parameter group - for the DB cluster. 
+ description: The name of the DB cluster parameter group for the + DB cluster. type: string dbClusterResourceID: description: The Amazon Web Services Region-unique, immutable @@ -1473,8 +1470,8 @@ spec: the DB cluster is accessed. type: string dbSubnetGroup: - description: Specifies information on the subnet group associated - with the DB cluster, including the name, description, and subnets + description: Information about the subnet group associated with + the DB cluster, including the name, description, and subnets in the subnet group. type: string domainMemberships: @@ -1482,12 +1479,20 @@ spec: with the DB cluster. items: properties: + authSecretARN: + type: string + dnsIPs: + items: + type: string + type: array domain: type: string fQDN: type: string iamRoleName: type: string + oU: + type: string status: type: string type: object @@ -1511,54 +1516,63 @@ spec: type: string type: array endpoint: - description: Specifies the connection endpoint for the primary - instance of the DB cluster. + description: The connection endpoint for the primary instance + of the DB cluster. type: string engineVersion: - description: Indicates the database engine version. + description: The version of the database engine. type: string globalWriteForwardingRequested: - description: Specifies whether you have requested to enable write - forwarding for a secondary cluster in an Aurora global database. - Because write forwarding takes time to enable, check the value - of GlobalWriteForwardingStatus to confirm that the request has - completed before using the write forwarding feature for this - cluster. + description: Specifies whether write forwarding is enabled for + a secondary cluster in an Aurora global database. Because write + forwarding takes time to enable, check the value of GlobalWriteForwardingStatus + to confirm that the request has completed before using the write + forwarding feature for this cluster. type: boolean globalWriteForwardingStatus: - description: Specifies whether a secondary cluster in an Aurora - global database has write forwarding enabled, not enabled, or - is in the process of enabling it. + description: The status of write forwarding for a secondary cluster + in an Aurora global database. type: string hostedZoneID: - description: Specifies the ID that Amazon Route 53 assigns when - you create a hosted zone. + description: The ID that Amazon Route 53 assigns when you create + a hosted zone. type: string httpEndpointEnabled: - description: "A value that indicates whether the HTTP endpoint - for an Aurora Serverless v1 DB cluster is enabled. \n When enabled, - the HTTP endpoint provides a connectionless web service API - for running SQL queries on the Aurora Serverless v1 DB cluster. - You can also query your database from inside the RDS console - with the query editor. \n For more information, see Using the - Data API for Aurora Serverless v1 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) + description: "Indicates whether the HTTP endpoint for an Aurora + Serverless v1 DB cluster is enabled. \n When enabled, the HTTP + endpoint provides a connectionless web service API for running + SQL queries on the Aurora Serverless v1 DB cluster. You can + also query your database from inside the RDS console with the + query editor. \n For more information, see Using the Data API + for Aurora Serverless v1 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) in the Amazon Aurora User Guide." 
type: boolean + iOOptimizedNextAllowedModificationTime: + description: "The next time you can modify the DB cluster to use + the aurora-iopt1 storage type. \n This setting is only for Aurora + DB clusters." + format: date-time + type: string iamDatabaseAuthenticationEnabled: - description: A value that indicates whether the mapping of Amazon - Web Services Identity and Access Management (IAM) accounts to - database accounts is enabled. + description: Indicates whether the mapping of Amazon Web Services + Identity and Access Management (IAM) accounts to database accounts + is enabled. type: boolean latestRestorableTime: - description: Specifies the latest time to which a database can - be restored with point-in-time restore. + description: The latest time to which a database can be restored + with point-in-time restore. format: date-time type: string + localWriteForwardingStatus: + description: Specifies whether an Aurora DB cluster has in-cluster + write forwarding enabled, not enabled, requested, or is in the + process of enabling it. + type: string masterUserSecret: - description: "Contains the secret managed by RDS in Amazon Web - Services Secrets Manager for the master user password. \n For - more information, see Password management with Amazon Web Services - Secrets Manager (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) + description: "The secret managed by RDS in Amazon Web Services + Secrets Manager for the master user password. \n For more information, + see Password management with Amazon Web Services Secrets Manager + (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) in the Amazon RDS User Guide and Password management with Amazon Web Services Secrets Manager (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-secrets-manager.html) in the Amazon Aurora User Guide." @@ -1571,16 +1585,16 @@ spec: type: string type: object multiAZ: - description: Specifies whether the DB cluster has instances in + description: Indicates whether the DB cluster has instances in multiple Availability Zones. type: boolean percentProgress: - description: Specifies the progress of the operation as a percentage. + description: The progress of the operation as a percentage. type: string performanceInsightsEnabled: - description: "True if Performance Insights is enabled for the - DB cluster, and otherwise false. \n This setting is only for - non-Aurora Multi-AZ DB clusters." + description: "Indicates whether Performance Insights is enabled + for the DB cluster. \n This setting is only for non-Aurora Multi-AZ + DB clusters." type: boolean readReplicaIdentifiers: description: Contains one or more identifiers of the read replicas @@ -1622,7 +1636,7 @@ spec: type: string type: object status: - description: Specifies the current state of this DB cluster. + description: The current state of this DB cluster. type: string tagList: items: @@ -1634,8 +1648,8 @@ spec: type: object type: array vpcSecurityGroups: - description: Provides a list of VPC security groups that the DB - cluster belongs to. + description: The list of VPC security groups that the DB cluster + belongs to. 
items: properties: status: diff --git a/package/crds/rds.aws.crossplane.io_dbinstances.yaml b/package/crds/rds.aws.crossplane.io_dbinstances.yaml index 20f1fdb09c..f410342809 100644 --- a/package/crds/rds.aws.crossplane.io_dbinstances.yaml +++ b/package/crds/rds.aws.crossplane.io_dbinstances.yaml @@ -69,57 +69,57 @@ spec: properties: allocatedStorage: description: "The amount of storage in gibibytes (GiB) to allocate - for the DB instance. \n Type: Integer \n Amazon Aurora \n Not - applicable. Aurora cluster volumes automatically grow as the - amount of data in your database increases, though you are only - charged for the space that you use in an Aurora cluster volume. - \n Amazon RDS Custom \n Constraints to the amount of storage - for each storage type are the following: \n * General Purpose - (SSD) storage (gp2, gp3): Must be an integer from 40 to 65536 - for RDS Custom for Oracle, 16384 for RDS Custom for SQL Server. - \n * Provisioned IOPS storage (io1): Must be an integer from - 40 to 65536 for RDS Custom for Oracle, 16384 for RDS Custom - for SQL Server. \n MySQL \n Constraints to the amount of storage - for each storage type are the following: \n * General Purpose - (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536. - \n * Provisioned IOPS storage (io1): Must be an integer from - 100 to 65536. \n * Magnetic storage (standard): Must be an integer - from 5 to 3072. \n MariaDB \n Constraints to the amount of storage - for each storage type are the following: \n * General Purpose - (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536. - \n * Provisioned IOPS storage (io1): Must be an integer from - 100 to 65536. \n * Magnetic storage (standard): Must be an integer - from 5 to 3072. \n PostgreSQL \n Constraints to the amount of + for the DB instance. \n This setting doesn't apply to Amazon + Aurora DB instances. Aurora cluster volumes automatically grow + as the amount of data in your database increases, though you + are only charged for the space that you use in an Aurora cluster + volume. \n Amazon RDS Custom \n Constraints to the amount of storage for each storage type are the following: \n * General - Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 - to 65536. \n * Provisioned IOPS storage (io1): Must be an integer - from 100 to 65536. \n * Magnetic storage (standard): Must be - an integer from 5 to 3072. \n Oracle \n Constraints to the amount - of storage for each storage type are the following: \n * General - Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 - to 65536. \n * Provisioned IOPS storage (io1): Must be an integer - from 100 to 65536. \n * Magnetic storage (standard): Must be - an integer from 10 to 3072. \n SQL Server \n Constraints to - the amount of storage for each storage type are the following: - \n * General Purpose (SSD) storage (gp2, gp3): Enterprise and - Standard editions: Must be an integer from 20 to 16384. Web - and Express editions: Must be an integer from 20 to 16384. \n - * Provisioned IOPS storage (io1): Enterprise and Standard editions: - Must be an integer from 100 to 16384. Web and Express editions: - Must be an integer from 100 to 16384. \n * Magnetic storage - (standard): Enterprise and Standard editions: Must be an integer - from 20 to 1024. Web and Express editions: Must be an integer - from 20 to 1024." + Purpose (SSD) storage (gp2, gp3): Must be an integer from 40 + to 65536 for RDS Custom for Oracle, 16384 for RDS Custom for + SQL Server. 
\n * Provisioned IOPS storage (io1): Must be an + integer from 40 to 65536 for RDS Custom for Oracle, 16384 for + RDS Custom for SQL Server. \n RDS for MariaDB \n Constraints + to the amount of storage for each storage type are the following: + \n * General Purpose (SSD) storage (gp2, gp3): Must be an integer + from 20 to 65536. \n * Provisioned IOPS storage (io1): Must + be an integer from 100 to 65536. \n * Magnetic storage (standard): + Must be an integer from 5 to 3072. \n RDS for MySQL \n Constraints + to the amount of storage for each storage type are the following: + \n * General Purpose (SSD) storage (gp2, gp3): Must be an integer + from 20 to 65536. \n * Provisioned IOPS storage (io1): Must + be an integer from 100 to 65536. \n * Magnetic storage (standard): + Must be an integer from 5 to 3072. \n RDS for Oracle \n Constraints + to the amount of storage for each storage type are the following: + \n * General Purpose (SSD) storage (gp2, gp3): Must be an integer + from 20 to 65536. \n * Provisioned IOPS storage (io1): Must + be an integer from 100 to 65536. \n * Magnetic storage (standard): + Must be an integer from 10 to 3072. \n RDS for PostgreSQL \n + Constraints to the amount of storage for each storage type are + the following: \n * General Purpose (SSD) storage (gp2, gp3): + Must be an integer from 20 to 65536. \n * Provisioned IOPS storage + (io1): Must be an integer from 100 to 65536. \n * Magnetic storage + (standard): Must be an integer from 5 to 3072. \n RDS for SQL + Server \n Constraints to the amount of storage for each storage + type are the following: \n * General Purpose (SSD) storage (gp2, + gp3): Enterprise and Standard editions: Must be an integer from + 20 to 16384. Web and Express editions: Must be an integer from + 20 to 16384. \n * Provisioned IOPS storage (io1): Enterprise + and Standard editions: Must be an integer from 100 to 16384. + Web and Express editions: Must be an integer from 100 to 16384. + \n * Magnetic storage (standard): Enterprise and Standard editions: + Must be an integer from 20 to 1024. Web and Express editions: + Must be an integer from 20 to 1024." format: int64 type: integer allowMajorVersionUpgrade: - description: "A value that indicates whether major version upgrades - are allowed. Changing this parameter doesn't result in an outage - and the change is asynchronously applied as soon as possible. - \n This setting doesn't apply to RDS Custom. \n Constraints: - Major version upgrades must be allowed when specifying a value - for the EngineVersion parameter that is a different major version - than the DB instance's current version." + description: "Specifies whether major version upgrades are allowed. + Changing this parameter doesn't result in an outage and the + change is asynchronously applied as soon as possible. \n This + setting doesn't apply to RDS Custom DB instances. \n Constraints: + \n * Major version upgrades must be allowed when specifying + a value for the EngineVersion parameter that's a different major + version than the DB instance's current version." type: boolean applyImmediately: description: "A value that indicates whether the modifications @@ -136,9 +136,9 @@ spec: to determine when the changes are applied." type: boolean autoMinorVersionUpgrade: - description: "A value that indicates whether minor engine upgrades - are applied automatically to the DB instance during the maintenance - window. By default, minor engine upgrades are applied automatically. 
+ description: "Specifies whether minor engine upgrades are applied + automatically to the DB instance during the maintenance window. + By default, minor engine upgrades are applied automatically. \n If you create an RDS Custom DB instance, you must set AutoMinorVersionUpgrade to false." type: boolean @@ -153,75 +153,77 @@ spec: description: "The Availability Zone (AZ) where the database will be created. For information on Amazon Web Services Regions and Availability Zones, see Regions and Availability Zones (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html). - \n Amazon Aurora \n Each Aurora DB cluster hosts copies of its - storage in three separate Availability Zones. Specify one of - these Availability Zones. Aurora automatically chooses an appropriate - Availability Zone if you don't specify one. \n Default: A random, - system-chosen Availability Zone in the endpoint's Amazon Web - Services Region. \n Example: us-east-1d \n Constraint: The AvailabilityZone + \n For Amazon Aurora, each Aurora DB cluster hosts copies of + its storage in three separate Availability Zones. Specify one + of these Availability Zones. Aurora automatically chooses an + appropriate Availability Zone if you don't specify one. \n Default: + A random, system-chosen Availability Zone in the endpoint's + Amazon Web Services Region. \n Constraints: \n * The AvailabilityZone parameter can't be specified if the DB instance is a Multi-AZ - deployment. The specified Availability Zone must be in the same - Amazon Web Services Region as the current endpoint. + deployment. \n * The specified Availability Zone must be in + the same Amazon Web Services Region as the current endpoint. \n Example: us-east-1d" type: string backupRetentionPeriod: description: "The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups. - \n Amazon Aurora \n Not applicable. The retention period for - automated backups is managed by the DB cluster. \n Default: - 1 \n Constraints: \n * Must be a value from 0 to 35 \n * Can't - be set to 0 if the DB instance is a source to read replicas - \n * Can't be set to 0 for an RDS Custom for Oracle DB instance" + \n This setting doesn't apply to Amazon Aurora DB instances. + The retention period for automated backups is managed by the + DB cluster. \n Default: 1 \n Constraints: \n * Must be a value + from 0 to 35. \n * Can't be set to 0 if the DB instance is a + source to read replicas. \n * Can't be set to 0 for an RDS Custom + for Oracle DB instance." format: int64 type: integer backupTarget: - description: "Specifies where automated backups and manual snapshots - are stored. \n Possible values are outposts (Amazon Web Services - Outposts) and region (Amazon Web Services Region). The default - is region. \n For more information, see Working with Amazon - RDS on Amazon Web Services Outposts (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-on-outposts.html) + description: "The location for storing automated backups and manual + snapshots. \n Valid Values: \n * outposts (Amazon Web Services + Outposts) \n * region (Amazon Web Services Region) \n Default: + region \n For more information, see Working with Amazon RDS + on Amazon Web Services Outposts (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-on-outposts.html) in the Amazon RDS User Guide."
type: string caCertificateIdentifier: - description: "Specifies the CA certificate identifier to use for - the DB instance’s server certificate. \n This setting doesn't - apply to RDS Custom. \n For more information, see Using SSL/TLS - to encrypt a connection to a DB instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL.html) + description: "The CA certificate identifier to use for the DB + instance's server certificate. \n This setting doesn't apply + to RDS Custom DB instances. \n For more information, see Using + SSL/TLS to encrypt a connection to a DB instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL.html) in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.SSL.html) in the Amazon Aurora User Guide." type: string characterSetName: - description: "For supported engines, this value indicates that - the DB instance should be associated with the specified CharacterSet. - \n This setting doesn't apply to RDS Custom. However, if you - need to change the character set, you can change it on the database - itself. \n Amazon Aurora \n Not applicable. The character set - is managed by the DB cluster. For more information, see CreateDBCluster." + description: "For supported engines, the character set (CharacterSet) + to associate the DB instance with. \n This setting doesn't apply + to the following DB instances: \n * Amazon Aurora - The character + set is managed by the DB cluster. For more information, see + CreateDBCluster. \n * RDS Custom - However, if you need to change + the character set, you can change it on the database itself." type: string copyTagsToSnapshot: - description: "A value that indicates whether to copy tags from - the DB instance to snapshots of the DB instance. By default, - tags are not copied. \n Amazon Aurora \n Not applicable. Copying - tags to snapshots is managed by the DB cluster. Setting this - value for an Aurora DB instance has no effect on the DB cluster - setting." + description: "Specifies whether to copy tags from the DB instance + to snapshots of the DB instance. By default, tags are not copied. + \n This setting doesn't apply to Amazon Aurora DB instances. + Copying tags to snapshots is managed by the DB cluster. Setting + this value for an Aurora DB instance has no effect on the DB + cluster setting." type: boolean customIAMInstanceProfile: description: "The instance profile associated with the underlying - Amazon EC2 instance of an RDS Custom DB instance. The instance - profile must meet the following requirements: \n * The profile + Amazon EC2 instance of an RDS Custom DB instance. \n This setting + is required for RDS Custom. \n Constraints: \n * The profile must exist in your account. \n * The profile must have an IAM role that Amazon EC2 has permissions to assume. \n * The instance profile name and the associated IAM role name must start with the prefix AWSRDSCustom. \n For the list of permissions required for the IAM role, see Configure IAM and your VPC (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/custom-setup-orcl.html#custom-setup-orcl.iam-vpc) - in the Amazon RDS User Guide. \n This setting is required for - RDS Custom." + in the Amazon RDS User Guide." type: string dbClusterIdentifier: - description: "The identifier of the DB cluster that the instance - will belong to. \n This setting doesn't apply to RDS Custom." 
+ description: "The identifier of the DB cluster that this DB instance + will belong to. \n This setting doesn't apply to RDS Custom + DB instances." type: string dbClusterIdentifierRef: description: DBClusterIdentifierRef is a reference to a DBCluster @@ -329,40 +331,41 @@ spec: \n * Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9). \n * Can't be a word reserved by the specified database engine \n Oracle \n The Oracle - System ID (SID) of the created DB instance. If you specify null, - the default value ORCL is used. You can't specify the string - NULL, or any other reserved word, for DBName. \n Default: ORCL + System ID (SID) of the created DB instance. If you don't specify + a value, the default value is ORCL. You can't specify the string + null, or any other reserved word, for DBName. \n Default: ORCL \n Constraints: \n * Can't be longer than 8 characters \n Amazon RDS Custom for Oracle \n The Oracle System ID (SID) of the created RDS Custom DB instance. If you don't specify a value, the default - value is ORCL. \n Default: ORCL \n Constraints: \n * It must - contain 1 to 8 alphanumeric characters. \n * It must contain - a letter. \n * It can't be a word reserved by the database engine. - \n Amazon RDS Custom for SQL Server \n Not applicable. Must - be null. \n SQL Server \n Not applicable. Must be null. \n Amazon - Aurora MySQL \n The name of the database to create when the - primary DB instance of the Aurora MySQL DB cluster is created. - If this parameter isn't specified for an Aurora MySQL DB cluster, - no database is created in the DB cluster. \n Constraints: \n - * It must contain 1 to 64 alphanumeric characters. \n * It can't - be a word reserved by the database engine. \n Amazon Aurora - PostgreSQL \n The name of the database to create when the primary - DB instance of the Aurora PostgreSQL DB cluster is created. - If this parameter isn't specified for an Aurora PostgreSQL DB - cluster, a database named postgres is created in the DB cluster. - \n Constraints: \n * It must contain 1 to 63 alphanumeric characters. - \n * It must begin with a letter. Subsequent characters can - be letters, underscores, or digits (0 to 9). \n * It can't be - a word reserved by the database engine." + value is ORCL for non-CDBs and RDSCDB for CDBs. \n Default: + ORCL \n Constraints: \n * It must contain 1 to 8 alphanumeric + characters. \n * It must contain a letter. \n * It can't be + a word reserved by the database engine. \n Amazon RDS Custom + for SQL Server \n Not applicable. Must be null. \n SQL Server + \n Not applicable. Must be null. \n Amazon Aurora MySQL \n The + name of the database to create when the primary DB instance + of the Aurora MySQL DB cluster is created. If this parameter + isn't specified for an Aurora MySQL DB cluster, no database + is created in the DB cluster. \n Constraints: \n * It must contain + 1 to 64 alphanumeric characters. \n * It can't be a word reserved + by the database engine. \n Amazon Aurora PostgreSQL \n The name + of the database to create when the primary DB instance of the + Aurora PostgreSQL DB cluster is created. If this parameter isn't + specified for an Aurora PostgreSQL DB cluster, a database named + postgres is created in the DB cluster. \n Constraints: \n * + It must contain 1 to 63 alphanumeric characters. \n * It must + begin with a letter. Subsequent characters can be letters, underscores, + or digits (0 to 9). \n * It can't be a word reserved by the + database engine." 
type: string dbParameterGroupName: description: "The name of the DB parameter group to associate - with this DB instance. If you do not specify a value, then the - default DB parameter group for the specified DB engine and version - is used. \n This setting doesn't apply to RDS Custom. \n Constraints: - \n * It must be 1 to 255 letters, numbers, or hyphens. \n * - The first character must be a letter. \n * It can't end with - a hyphen or contain two consecutive hyphens." + with this DB instance. If you don't specify a value, then Amazon + RDS uses the default DB parameter group for the specified DB + engine and version. \n This setting doesn't apply to RDS Custom + DB instances. \n Constraints: \n * Must be 1 to 255 letters, + numbers, or hyphens. \n * The first character must be a letter. + \n * Can't end with a hyphen or contain two consecutive hyphens." type: string dbParameterGroupNameRef: description: DBParameterGroupNameRef is a reference to a DBParameterGroup @@ -447,8 +450,8 @@ spec: type: array dbSubnetGroupName: description: "A DB subnet group to associate with this DB instance. - \n Constraints: Must match the name of an existing DBSubnetGroup. - Must not be default. \n Example: mydbsubnetgroup" + \n Constraints: \n * Must match the name of an existing DB subnet + group. \n * Must not be default. \n Example: mydbsubnetgroup" type: string dbSubnetGroupNameRef: description: DBSubnetGroupNameRef is a reference to a DBSubnetGroup @@ -525,6 +528,15 @@ spec: type: string type: object type: object + dbSystemID: + description: The Oracle system identifier (SID), which is the + name of the Oracle database instance that manages your database + files. In this context, the term "Oracle database instance" + refers exclusively to the system global area (SGA) and Oracle + background processes. If you don't specify a SID, the value + defaults to RDSCDB. The Oracle SID is also the name of your + CDB. + type: string deleteAutomatedBackups: description: DeleteAutomatedBackups indicates whether to remove automated backups immediately after the DB instance is deleted. @@ -532,31 +544,49 @@ spec: the DB instance is deleted. type: boolean deletionProtection: - description: "A value that indicates whether the DB instance has - deletion protection enabled. The database can't be deleted when - deletion protection is enabled. By default, deletion protection - isn't enabled. For more information, see Deleting a DB Instance - (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). - \n Amazon Aurora \n Not applicable. You can enable or disable - deletion protection for the DB cluster. For more information, - see CreateDBCluster. DB instances in a DB cluster can be deleted - even when deletion protection is enabled for the DB cluster." + description: "Specifies whether the DB instance has deletion protection + enabled. The database can't be deleted when deletion protection + is enabled. By default, deletion protection isn't enabled. For + more information, see Deleting a DB Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). + \n This setting doesn't apply to Amazon Aurora DB instances. + You can enable or disable deletion protection for the DB cluster. + For more information, see CreateDBCluster. DB instances in a + DB cluster can be deleted even when deletion protection is enabled + for the DB cluster." type: boolean domain: description: "The Active Directory directory ID to create the - DB instance in. 
Currently, only MySQL, Microsoft SQL Server, + DB instance in. Currently, only Microsoft SQL Server, MySQL, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain. \n For more information, see Kerberos Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/kerberos-authentication.html) in the Amazon RDS User Guide. \n This setting doesn't apply - to RDS Custom. \n Amazon Aurora \n Not applicable. The domain - is managed by the DB cluster." + to the following DB instances: \n * Amazon Aurora (The domain + is managed by the DB cluster.) \n * RDS Custom" + type: string + domainAuthSecretARN: + description: "The ARN for the Secrets Manager secret with the + credentials for the user joining the domain. \n Example: arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456" + type: string + domainDNSIPs: + description: "The IPv4 DNS IP addresses of your primary and secondary + Active Directory domain controllers. \n Constraints: \n * Two + IP addresses must be provided. If there isn't a secondary domain + controller, use the IP address of the primary domain controller + for both entries in the list. \n Example: 123.124.125.126,234.235.236.237" + items: + type: string + type: array + domainFqdn: + description: "The fully qualified domain name (FQDN) of an Active + Directory domain. \n Constraints: \n * Can't be longer than + 64 characters. \n Example: mymanagedADtest.mymanagedAD.mydomain" type: string domainIAMRoleName: - description: "Specify the name of the IAM role to be used when - making API calls to the Directory Service. \n This setting doesn't - apply to RDS Custom. \n Amazon Aurora \n Not applicable. The - domain is managed by the DB cluster." + description: "The name of the IAM role to use when making API + calls to the Directory Service. \n This setting doesn't apply + to the following DB instances: \n * Amazon Aurora (The domain + is managed by the DB cluster.) \n * RDS Custom" type: string domainIAMRoleNameRef: description: DomainIAMRoleNameRef is a reference to an IAMRole @@ -633,25 +663,31 @@ spec: type: string type: object type: object + domainOu: + description: "The Active Directory organizational unit for your + DB instance to join. \n Constraints: \n * Must be in the distinguished + name format. \n * Can't be longer than 64 characters. \n Example: + OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain" + type: string enableCloudwatchLogsExports: description: "The list of log types that need to be enabled for - exporting to CloudWatch Logs. The values in the list depend - on the DB engine. For more information, see Publishing Database - Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) - in the Amazon RDS User Guide. \n Amazon Aurora \n Not applicable. - CloudWatch Logs exports are managed by the DB cluster. \n RDS - Custom \n Not applicable. \n MariaDB \n Possible values are - audit, error, general, and slowquery. \n Microsoft SQL Server - \n Possible values are agent and error. \n MySQL \n Possible - values are audit, error, general, and slowquery. \n Oracle \n - Possible values are alert, audit, listener, trace, and oemagent. - \n PostgreSQL \n Possible values are postgresql and upgrade." + exporting to CloudWatch Logs. 
For more information, see Publishing + Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) + in the Amazon RDS User Guide. \n This setting doesn't apply + to the following DB instances: \n * Amazon Aurora (CloudWatch + Logs exports are managed by the DB cluster.) \n * RDS Custom + \n The following values are valid for each DB engine: \n * RDS + for MariaDB - audit | error | general | slowquery \n * RDS for + Microsoft SQL Server - agent | error \n * RDS for MySQL - audit + | error | general | slowquery \n * RDS for Oracle - alert | + audit | listener | trace | oemagent \n * RDS for PostgreSQL + - postgresql | upgrade" items: type: string type: array enableCustomerOwnedIP: - description: "A value that indicates whether to enable a customer-owned - IP address (CoIP) for an RDS on Outposts DB instance. \n A CoIP + description: "Specifies whether to enable a customer-owned IP + address (CoIP) for an RDS on Outposts DB instance. \n A CoIP provides local or external connectivity to resources in your Outpost subnets through your on-premises network. For some use cases, a CoIP can provide lower latency for connections to the @@ -664,36 +700,36 @@ spec: in the Amazon Web Services Outposts User Guide." type: boolean enableIAMDatabaseAuthentication: - description: "A value that indicates whether to enable mapping - of Amazon Web Services Identity and Access Management (IAM) - accounts to database accounts. By default, mapping isn't enabled. - \n For more information, see IAM Database Authentication for - MySQL and PostgreSQL (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html) + description: "Specifies whether to enable mapping of Amazon Web + Services Identity and Access Management (IAM) accounts to database + accounts. By default, mapping isn't enabled. \n For more information, + see IAM Database Authentication for MySQL and PostgreSQL (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html) in the Amazon RDS User Guide. \n This setting doesn't apply - to RDS Custom. \n Amazon Aurora \n Not applicable. Mapping Amazon + to the following DB instances: \n * Amazon Aurora (Mapping Amazon Web Services IAM accounts to database accounts is managed by - the DB cluster." + the DB cluster.) \n * RDS Custom" type: boolean enablePerformanceInsights: - description: "A value that indicates whether to enable Performance - Insights for the DB instance. For more information, see Using - Amazon Performance Insights (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) + description: "Specifies whether to enable Performance Insights + for the DB instance. For more information, see Using Amazon + Performance Insights (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) in the Amazon RDS User Guide. \n This setting doesn't apply - to RDS Custom." + to RDS Custom DB instances." type: boolean engine: - description: "The name of the database engine to be used for this - instance. \n Not every database engine is available for every - Amazon Web Services Region. 
\n Valid Values: \n * aurora (for - MySQL 5.6-compatible Aurora) \n * aurora-mysql (for MySQL 5.7-compatible - and MySQL 8.0-compatible Aurora) \n * aurora-postgresql \n * - custom-oracle-ee (for RDS Custom for Oracle instances) \n * - custom-sqlserver-ee (for RDS Custom for SQL Server instances) - \n * custom-sqlserver-se (for RDS Custom for SQL Server instances) - \n * custom-sqlserver-web (for RDS Custom for SQL Server instances) - \n * mariadb \n * mysql \n * oracle-ee \n * oracle-ee-cdb \n - * oracle-se2 \n * oracle-se2-cdb \n * postgres \n * sqlserver-ee - \n * sqlserver-se \n * sqlserver-ex \n * sqlserver-web" + description: "The database engine to use for this DB instance. + \n Not every database engine is available in every Amazon Web + Services Region. \n Valid Values: \n * aurora-mysql (for Aurora + MySQL DB instances) \n * aurora-postgresql (for Aurora PostgreSQL + DB instances) \n * custom-oracle-ee (for RDS Custom for Oracle + DB instances) \n * custom-oracle-ee-cdb (for RDS Custom for + Oracle DB instances) \n * custom-sqlserver-ee (for RDS Custom + for SQL Server DB instances) \n * custom-sqlserver-se (for RDS + Custom for SQL Server DB instances) \n * custom-sqlserver-web + (for RDS Custom for SQL Server DB instances) \n * mariadb \n + * mysql \n * oracle-ee \n * oracle-ee-cdb \n * oracle-se2 \n + * oracle-se2-cdb \n * postgres \n * sqlserver-ee \n * sqlserver-se + \n * sqlserver-ex \n * sqlserver-web" type: string engineVersion: description: "The version number of the database engine to use. @@ -736,15 +772,16 @@ spec: type: string iops: description: "The amount of Provisioned IOPS (input/output operations - per second) to be initially allocated for the DB instance. For - information about valid IOPS values, see Amazon RDS DB instance - storage (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html) - in the Amazon RDS User Guide. \n Constraints: For MariaDB, MySQL, - Oracle, and PostgreSQL DB instances, must be a multiple between - .5 and 50 of the storage amount for the DB instance. For SQL - Server DB instances, must be a multiple between 1 and 50 of - the storage amount for the DB instance. \n Amazon Aurora \n - Not applicable. Storage is managed by the DB cluster." + per second) to initially allocate for the DB instance. For information + about valid IOPS values, see Amazon RDS DB instance storage + (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html) + in the Amazon RDS User Guide. \n This setting doesn't apply + to Amazon Aurora DB instances. Storage is managed by the DB + cluster. \n Constraints: \n * For RDS for MariaDB, MySQL, Oracle, + and PostgreSQL - Must be a multiple between .5 and 50 of the + storage amount for the DB instance. \n * For RDS for SQL Server + - Must be a multiple between 1 and 50 of the storage amount + for the DB instance." format: int64 type: integer kmsKeyID: @@ -752,19 +789,19 @@ spec: encrypted DB instance. \n The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, - specify the key ARN or alias ARN. \n Amazon Aurora \n Not applicable. - The Amazon Web Services KMS key identifier is managed by the - DB cluster. For more information, see CreateDBCluster. \n If - StorageEncrypted is enabled, and you do not specify a value - for the KmsKeyId parameter, then Amazon RDS uses your default - KMS key. There is a default KMS key for your Amazon Web Services - account. 
Your Amazon Web Services account has a different default - KMS key for each Amazon Web Services Region. \n Amazon RDS Custom - \n A KMS key is required for RDS Custom instances. For most - RDS engines, if you leave this parameter empty while enabling - StorageEncrypted, the engine uses the default KMS key. However, - RDS Custom doesn't use the default key when this parameter is - empty. You must explicitly specify a key." + specify the key ARN or alias ARN. \n This setting doesn't apply + to Amazon Aurora DB instances. The Amazon Web Services KMS key + identifier is managed by the DB cluster. For more information, + see CreateDBCluster. \n If StorageEncrypted is enabled, and + you do not specify a value for the KmsKeyId parameter, then + Amazon RDS uses your default KMS key. There is a default KMS + key for your Amazon Web Services account. Your Amazon Web Services + account has a different default KMS key for each Amazon Web + Services Region. \n For Amazon RDS Custom, a KMS key is required + for DB instances. For most RDS engines, if you leave this parameter + empty while enabling StorageEncrypted, the engine uses the default + KMS key. However, RDS Custom doesn't use the default key when + this parameter is empty. You must explicitly specify a key." type: string kmsKeyIDRef: description: KMSKeyIDRef is a reference to a KMS Key used to set @@ -842,16 +879,18 @@ spec: type: object type: object licenseModel: - description: "License model information for this DB instance. - \n Valid values: license-included | bring-your-own-license | - general-public-license \n This setting doesn't apply to RDS - Custom. \n Amazon Aurora \n Not applicable." + description: "The license model information for this DB instance. + \n This setting doesn't apply to Amazon Aurora or RDS Custom + DB instances. \n Valid Values: \n * RDS for MariaDB - general-public-license + \n * RDS for Microsoft SQL Server - license-included \n * RDS + for MySQL - general-public-license \n * RDS for Oracle - bring-your-own-license + | license-included \n * RDS for PostgreSQL - postgresql-license" type: string manageMasterUserPassword: - description: "A value that indicates whether to manage the master - user password with Amazon Web Services Secrets Manager. \n For - more information, see Password management with Amazon Web Services - Secrets Manager (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) + description: "Specifies whether to manage the master user password + with Amazon Web Services Secrets Manager. \n For more information, + see Password management with Amazon Web Services Secrets Manager + (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) in the Amazon RDS User Guide. \n Constraints: \n * Can't manage the master user password with Amazon Web Services Secrets Manager if MasterUserPassword is specified." @@ -895,12 +934,12 @@ spec: Services Region." type: string masterUsername: - description: "The name for the master user. \n Amazon Aurora \n - Not applicable. The name for the master user is managed by the - DB cluster. \n Amazon RDS \n Constraints: \n * Required. \n - * Must be 1 to 16 letters, numbers, or underscores. \n * First - character must be a letter. \n * Can't be a reserved word for - the chosen database engine." + description: "The name for the master user. \n This setting doesn't + apply to Amazon Aurora DB instances. The name for the master + user is managed by the DB cluster. \n This setting is required + for RDS DB instances. 
\n Constraints: \n * Must be 1 to 16 letters, + numbers, or underscores. \n * First character must be a letter. + \n * Can't be a reserved word for the chosen database engine." type: string maxAllocatedStorage: description: "The upper limit in gibibytes (GiB) to which Amazon @@ -909,18 +948,18 @@ spec: that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PIOPS.StorageTypes.html#USER_PIOPS.Autoscaling) in the Amazon RDS User Guide. \n This setting doesn't apply - to RDS Custom. \n Amazon Aurora \n Not applicable. Storage is - managed by the DB cluster." + to the following DB instances: \n * Amazon Aurora (Storage is + managed by the DB cluster.) \n * RDS Custom" format: int64 type: integer monitoringInterval: description: "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable - collection of Enhanced Monitoring metrics, specify 0. The default - is 0. \n If MonitoringRoleArn is specified, then you must set - MonitoringInterval to a value other than 0. \n This setting - doesn't apply to RDS Custom. \n Valid Values: 0, 1, 5, 10, 15, - 30, 60" + collection of Enhanced Monitoring metrics, specify 0. \n If + MonitoringRoleArn is specified, then you must set MonitoringInterval + to a value other than 0. \n This setting doesn't apply to RDS + Custom DB instances. \n Valid Values: 0 | 1 | 5 | 10 | 15 | + 30 | 60 \n Default: 0" format: int64 type: integer monitoringRoleARN: @@ -931,7 +970,7 @@ spec: (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.OS.html#USER_Monitoring.OS.Enabling) in the Amazon RDS User Guide. \n If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn - value. \n This setting doesn't apply to RDS Custom." + value. \n This setting doesn't apply to RDS Custom DB instances." type: string monitoringRoleArnRef: description: MonitoringRoleARNRef is a reference to an IAMRole @@ -1009,65 +1048,64 @@ spec: type: object type: object multiAZ: - description: "A value that indicates whether the DB instance is - a Multi-AZ deployment. You can't set the AvailabilityZone parameter - if the DB instance is a Multi-AZ deployment. \n This setting - doesn't apply to RDS Custom. \n Amazon Aurora \n Not applicable. - DB instance Availability Zones (AZs) are managed by the DB cluster." + description: "Specifies whether the DB instance is a Multi-AZ + deployment. You can't set the AvailabilityZone parameter if + the DB instance is a Multi-AZ deployment. \n This setting doesn't + apply to the following DB instances: \n * Amazon Aurora (DB + instance Availability Zones (AZs) are managed by the DB cluster.) + \n * RDS Custom" type: boolean ncharCharacterSetName: description: "The name of the NCHAR character set for the Oracle - DB instance. \n This parameter doesn't apply to RDS Custom." + DB instance. \n This setting doesn't apply to RDS Custom DB + instances." type: string networkType: - description: "The network type of the DB instance. \n Valid values: - \n * IPV4 \n * DUAL \n The network type is determined by the - DBSubnetGroup specified for the DB instance. A DBSubnetGroup - can support only the IPv4 protocol or the IPv4 and the IPv6 - protocols (DUAL). \n For more information, see Working with - a DB instance in a VPC (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html) - in the Amazon RDS User Guide." 
+ description: "The network type of the DB instance. \n The network + type is determined by the DBSubnetGroup specified for the DB + instance. A DBSubnetGroup can support only the IPv4 protocol + or the IPv4 and the IPv6 protocols (DUAL). \n For more information, + see Working with a DB instance in a VPC (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html) + in the Amazon RDS User Guide. \n Valid Values: IPV4 | DUAL" type: string optionGroupName: - description: "A value that indicates that the DB instance should - be associated with the specified option group. \n Permanent - options, such as the TDE option for Oracle Advanced Security - TDE, can't be removed from an option group. Also, that option - group can't be removed from a DB instance after it is associated - with a DB instance. \n This setting doesn't apply to RDS Custom. - \n Amazon Aurora \n Not applicable." + description: "The option group to associate the DB instance with. + \n Permanent options, such as the TDE option for Oracle Advanced + Security TDE, can't be removed from an option group. Also, that + option group can't be removed from a DB instance after it is + associated with a DB instance. \n This setting doesn't apply + to Amazon Aurora or RDS Custom DB instances." type: string performanceInsightsKMSKeyID: description: "The Amazon Web Services KMS key identifier for encryption of Performance Insights data. \n The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name - for the KMS key. \n If you do not specify a value for PerformanceInsightsKMSKeyId, + for the KMS key. \n If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon - Web Services Region. \n This setting doesn't apply to RDS Custom." + Web Services Region. \n This setting doesn't apply to RDS Custom + DB instances." type: string performanceInsightsRetentionPeriod: description: "The number of days to retain Performance Insights - data. The default is 7 days. The following values are valid: - \n * 7 \n * month * 31, where month is a number of months from - 1-23 \n * 731 \n For example, the following values are valid: - \n * 93 (3 months * 31) \n * 341 (11 months * 31) \n * 589 (19 - months * 31) \n * 731 \n If you specify a retention period such - as 94, which isn't a valid value, RDS issues an error. \n This - setting doesn't apply to RDS Custom." + data. \n This setting doesn't apply to RDS Custom DB instances. + \n Valid Values: \n * 7 \n * month * 31, where month is a number + of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months + * 31), 589 (19 months * 31) \n * 731 \n Default: 7 days \n If + you specify a retention period that isn't valid, such as 94, + Amazon RDS returns an error." format: int64 type: integer port: description: "The port number on which the database accepts connections. - \n MySQL \n Default: 3306 \n Valid values: 1150-65535 \n Type: - Integer \n MariaDB \n Default: 3306 \n Valid values: 1150-65535 - \n Type: Integer \n PostgreSQL \n Default: 5432 \n Valid values: - 1150-65535 \n Type: Integer \n Oracle \n Default: 1521 \n Valid - values: 1150-65535 \n SQL Server \n Default: 1433 \n Valid values: - 1150-65535 except 1234, 1434, 3260, 3343, 3389, 47001, and 49152-49156. 
- \n Amazon Aurora \n Default: 3306 \n Valid values: 1150-65535 - \n Type: Integer" + \n This setting doesn't apply to Aurora DB instances. The port + number is managed by the cluster. \n Valid Values: 1150-65535 + \n Default: \n * RDS for MariaDB - 3306 \n * RDS for Microsoft + SQL Server - 1433 \n * RDS for MySQL - 3306 \n * RDS for Oracle + - 1521 \n * RDS for PostgreSQL - 5432 \n Constraints: \n * For + RDS for Microsoft SQL Server, the value can't be 1234, 1434, + 3260, 3343, 3389, 47001, or 49152-49156." format: int64 type: integer preferredBackupWindow: @@ -1076,28 +1114,30 @@ spec: parameter. The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. For more information, see Backup window (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithAutomatedBackups.html#USER_WorkingWithAutomatedBackups.BackupWindow) - in the Amazon RDS User Guide. \n Amazon Aurora \n Not applicable. - The daily time range for creating automated backups is managed - by the DB cluster. \n Constraints: \n * Must be in the format - hh24:mi-hh24:mi. \n * Must be in Universal Coordinated Time - (UTC). \n * Must not conflict with the preferred maintenance - window. \n * Must be at least 30 minutes." + in the Amazon RDS User Guide. \n This setting doesn't apply + to Amazon Aurora DB instances. The daily time range for creating + automated backups is managed by the DB cluster. \n Constraints: + \n * Must be in the format hh24:mi-hh24:mi. \n * Must be in + Universal Coordinated Time (UTC). \n * Must not conflict with + the preferred maintenance window. \n * Must be at least 30 minutes." type: string preferredMaintenanceWindow: description: "The time range each week during which system maintenance - can occur, in Universal Coordinated Time (UTC). For more information, - see Amazon RDS Maintenance Window (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html#Concepts.DBMaintenance). - \n Format: ddd:hh24:mi-ddd:hh24:mi \n The default is a 30-minute + can occur. For more information, see Amazon RDS Maintenance + Window (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html#Concepts.DBMaintenance) + in the Amazon RDS User Guide. \n The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the - week. \n Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun. \n Constraints: - Minimum 30-minute window." + week. \n Constraints: \n * Must be in the format ddd:hh24:mi-ddd:hh24:mi. + \n * The day values must be mon | tue | wed | thu | fri | sat + | sun. \n * Must be in Universal Coordinated Time (UTC). \n + * Must not conflict with the preferred backup window. \n * Must + be at least 30 minutes." type: string processorFeatures: description: "The number of CPU cores and the number of threads per core for the DB instance class of the DB instance. \n This - setting doesn't apply to RDS Custom. \n Amazon Aurora \n Not - applicable." + setting doesn't apply to Amazon Aurora or RDS Custom DB instances." items: properties: name: @@ -1107,32 +1147,33 @@ spec: type: object type: array promotionTier: - description: "A value that specifies the order in which an Aurora - Replica is promoted to the primary instance after a failure - of the existing primary instance. 
For more information, see - Fault Tolerance for an Aurora DB Cluster (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Backups.html#Aurora.Managing.FaultTolerance) + description: "The order of priority in which an Aurora Replica + is promoted to the primary instance after a failure of the existing + primary instance. For more information, see Fault Tolerance + for an Aurora DB Cluster (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.AuroraHighAvailability.html#Aurora.Managing.FaultTolerance) in the Amazon Aurora User Guide. \n This setting doesn't apply - to RDS Custom. \n Default: 1 \n Valid Values: 0 - 15" + to RDS Custom DB instances. \n Default: 1 \n Valid Values: 0 + - 15" format: int64 type: integer publiclyAccessible: - description: "A value that indicates whether the DB instance is - publicly accessible. \n When the DB instance is publicly accessible, - its Domain Name System (DNS) endpoint resolves to the private - IP address from within the DB instance's virtual private cloud - (VPC). It resolves to the public IP address from outside of - the DB instance's VPC. Access to the DB instance is ultimately - controlled by the security group it uses. That public access - is not permitted if the security group assigned to the DB instance - doesn't permit it. \n When the DB instance isn't publicly accessible, - it is an internal DB instance with a DNS name that resolves - to a private IP address. \n Default: The default behavior varies - depending on whether DBSubnetGroupName is specified. \n If DBSubnetGroupName - isn't specified, and PubliclyAccessible isn't specified, the - following applies: \n * If the default VPC in the target Region - doesn’t have an internet gateway attached to it, the DB instance - is private. \n * If the default VPC in the target Region has - an internet gateway attached to it, the DB instance is public. + description: "Specifies whether the DB instance is publicly accessible. + \n When the DB instance is publicly accessible, its Domain Name + System (DNS) endpoint resolves to the private IP address from + within the DB instance's virtual private cloud (VPC). It resolves + to the public IP address from outside of the DB instance's VPC. + Access to the DB instance is ultimately controlled by the security + group it uses. That public access is not permitted if the security + group assigned to the DB instance doesn't permit it. \n When + the DB instance isn't publicly accessible, it is an internal + DB instance with a DNS name that resolves to a private IP address. + \n Default: The default behavior varies depending on whether + DBSubnetGroupName is specified. \n If DBSubnetGroupName isn't + specified, and PubliclyAccessible isn't specified, the following + applies: \n * If the default VPC in the target Region doesn’t + have an internet gateway attached to it, the DB instance is + private. \n * If the default VPC in the target Region has an + internet gateway attached to it, the DB instance is public. \n If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies: \n * If the subnets are part of a VPC that doesn’t have an internet gateway attached @@ -1244,26 +1285,26 @@ spec: parameter if SkipFinalSnapshot is disabled." type: boolean storageEncrypted: - description: "A value that indicates whether the DB instance is - encrypted. By default, it isn't encrypted. \n For RDS Custom - instances, either set this parameter to true or leave it unset. 
-        If you set this parameter to false, RDS reports an error. \n
-        Amazon Aurora \n Not applicable. The encryption for DB instances
-        is managed by the DB cluster."
+      description: "Specifies whether the DB instance is encrypted. By
+        default, it isn't encrypted. \n For RDS Custom DB instances,
+        either enable this setting or leave it unset. Otherwise, Amazon
+        RDS reports an error. \n This setting doesn't apply to Amazon
+        Aurora DB instances. The encryption for DB instances is managed
+        by the DB cluster."
       type: boolean
     storageThroughput:
-      description: "Specifies the storage throughput value for the DB
-        instance. \n This setting applies only to the gp3 storage type.
-        \n This setting doesn't apply to RDS Custom or Amazon Aurora."
+      description: "The storage throughput value for the DB instance.
+        \n This setting applies only to the gp3 storage type. \n This
+        setting doesn't apply to Amazon Aurora or RDS Custom DB instances."
       format: int64
       type: integer
     storageType:
-      description: "Specifies the storage type to be associated with
-        the DB instance. \n Valid values: gp2 | gp3 | io1 | standard
+      description: "The storage type to associate with the DB instance.
         \n If you specify io1 or gp3, you must also include a value
-        for the Iops parameter. \n Default: io1 if the Iops parameter
-        is specified, otherwise gp2 \n Amazon Aurora \n Not applicable.
-        Storage is managed by the DB cluster."
+        for the Iops parameter. \n This setting doesn't apply to Amazon
+        Aurora DB instances. Storage is managed by the DB cluster. \n
+        Valid Values: gp2 | gp3 | io1 | standard \n Default: io1, if
+        the Iops parameter is specified. Otherwise, gp2."
       type: string
     tags:
       description: Tags to assign to the DB instance.
@@ -1278,12 +1319,12 @@ spec:
     tdeCredentialARN:
       description: "The ARN from the key store with which to associate
         the instance for TDE encryption. \n This setting doesn't apply
-        to RDS Custom. \n Amazon Aurora \n Not applicable."
+        to Amazon Aurora or RDS Custom DB instances."
       type: string
     tdeCredentialPassword:
       description: "The password for the given ARN from the key store
         in order to access the device. \n This setting doesn't apply
-        to RDS Custom."
+        to RDS Custom DB instances."
       type: string
     timezone:
       description: The time zone of the DB instance. The time zone parameter
@@ -1649,7 +1690,7 @@ spec:
           type: string
       type: object
     customerOwnedIPEnabled:
-      description: "Specifies whether a customer-owned IP address (CoIP)
+      description: "Indicates whether a customer-owned IP address (CoIP)
         is enabled for an RDS on Outposts DB instance. \n A CoIP provides
         local or external connectivity to resources in your Outpost
         subnets through your on-premises network. For some use cases,
@@ -1674,24 +1715,23 @@ spec:
       type: object
       type: array
     dbInstanceIdentifier:
-      description: Contains a user-supplied database identifier. This
-        identifier is the unique key that identifies a DB instance.
+      description: The user-supplied database identifier. This identifier
+        is the unique key that identifies a DB instance.
       type: string
     dbInstancePort:
-      description: Specifies the port that the DB instance listens on.
-        If the DB instance is part of a DB cluster, this can be a different
+      description: The port that the DB instance listens on. If the
+        DB instance is part of a DB cluster, this can be a different
        port than the DB cluster port.
       format: int64
       type: integer
     dbInstanceStatus:
-      description: "Specifies the current state of this database. 
\n - For information about DB instance statuses, see Viewing DB instance - status (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/accessing-monitoring.html#Overview.DBInstance.Status) + description: "The current state of this database. \n For information + about DB instance statuses, see Viewing DB instance status (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/accessing-monitoring.html#Overview.DBInstance.Status) in the Amazon RDS User Guide." type: string dbParameterGroups: - description: Provides the list of DB parameter groups applied - to this DB instance. + description: The list of DB parameter groups applied to this DB + instance. items: properties: dbParameterGroupName: @@ -1712,8 +1752,8 @@ spec: type: object type: array dbSubnetGroup: - description: Specifies information on the subnet group associated - with the DB instance, including the name, description, and subnets + description: Information about the subnet group associated with + the DB instance, including the name, description, and subnets in the subnet group. properties: dbSubnetGroupARN: @@ -1757,11 +1797,6 @@ spec: vpcID: type: string type: object - dbSystemID: - description: The Oracle system ID (Oracle SID) for a container - database (CDB). The Oracle SID is also the name of the CDB. - This setting is valid for RDS Custom only. - type: string dbiResourceID: description: The Amazon Web Services Region-unique, immutable identifier for the DB instance. This identifier is found in @@ -1773,12 +1808,20 @@ spec: with the DB instance. items: properties: + authSecretARN: + type: string + dnsIPs: + items: + type: string + type: array domain: type: string fQDN: type: string iamRoleName: type: string + oU: + type: string status: type: string type: object @@ -1787,14 +1830,15 @@ spec: description: "A list of log types that this DB instance is configured to export to CloudWatch Logs. \n Log types vary by DB engine. For information about the log types for each DB engine, see - Amazon RDS Database Log Files (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html) + Monitoring Amazon RDS log files (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html) in the Amazon RDS User Guide." items: type: string type: array endpoint: - description: "Specifies the connection endpoint. \n The endpoint - might not be shown for instances whose status is creating." + description: "The connection endpoint for the DB instance. \n + The endpoint might not be shown for instances with the status + of creating." properties: address: type: string @@ -1805,7 +1849,7 @@ spec: type: integer type: object engineVersion: - description: Indicates the database engine version. + description: The version of the database engine. type: string enhancedMonitoringResourceARN: description: The Amazon Resource Name (ARN) of the Amazon CloudWatch @@ -1813,26 +1857,27 @@ spec: data for the DB instance. type: string iamDatabaseAuthenticationEnabled: - description: "True if mapping of Amazon Web Services Identity - and Access Management (IAM) accounts to database accounts is - enabled, and otherwise false. \n IAM database authentication - can be enabled for the following database engines \n * For MySQL - 5.6, minor version 5.6.34 or higher \n * For MySQL 5.7, minor - version 5.7.16 or higher \n * Aurora 5.6 or higher. To enable - IAM database authentication for Aurora, see DBCluster Type." 
+ description: "Indicates whether mapping of Amazon Web Services + Identity and Access Management (IAM) accounts to database accounts + is enabled for the DB instance. \n For a list of engine versions + that support IAM database authentication, see IAM database authentication + (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RDS_Fea_Regions_DB-eng.Feature.IamDatabaseAuthentication.html) + in the Amazon RDS User Guide and IAM database authentication + in Aurora (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.Aurora_Fea_Regions_DB-eng.Feature.IAMdbauth.html) + in the Amazon Aurora User Guide." type: boolean instanceCreateTime: - description: Provides the date and time the DB instance was created. + description: The date and time when the DB instance was created. format: date-time type: string latestRestorableTime: - description: Specifies the latest time to which a database can - be restored with point-in-time restore. + description: The latest time to which a database in this DB instance + can be restored with point-in-time restore. format: date-time type: string listenerEndpoint: - description: Specifies the listener connection endpoint for SQL - Server Always On. + description: The listener connection endpoint for SQL Server Always + On. properties: address: type: string @@ -1843,10 +1888,10 @@ spec: type: integer type: object masterUserSecret: - description: "Contains the secret managed by RDS in Amazon Web - Services Secrets Manager for the master user password. \n For - more information, see Password management with Amazon Web Services - Secrets Manager (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) + description: "The secret managed by RDS in Amazon Web Services + Secrets Manager for the master user password. \n For more information, + see Password management with Amazon Web Services Secrets Manager + (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) in the Amazon RDS User Guide." properties: kmsKeyID: @@ -1857,8 +1902,8 @@ spec: type: string type: object optionGroupMemberships: - description: Provides the list of option group memberships for - this DB instance. + description: The list of option group memberships for this DB + instance. items: properties: optionGroupName: @@ -1868,9 +1913,9 @@ spec: type: object type: array pendingModifiedValues: - description: A value that specifies that changes to the DB instance - are pending. This element is only included when changes are - pending. Specific changes are identified by subelements. + description: Information about pending changes to the DB instance. + This information is returned only when there are pending changes. + Specific changes are identified by subelements. properties: allocatedStorage: format: int64 @@ -1888,6 +1933,8 @@ spec: type: string dbSubnetGroupName: type: string + engine: + type: string engineVersion: type: string iamDatabaseAuthenticationEnabled: @@ -1936,30 +1983,38 @@ spec: storageType: type: string type: object + percentProgress: + description: The progress of the storage optimization operation + as a percentage. + type: string performanceInsightsEnabled: - description: True if Performance Insights is enabled for the DB - instance, and otherwise false. + description: Indicates whether Performance Insights is enabled + for the DB instance. 
type: boolean readReplicaDBClusterIdentifiers: - description: "Contains one or more identifiers of Aurora DB clusters - to which the RDS DB instance is replicated as a read replica. - For example, when you create an Aurora read replica of an RDS - for MySQL DB instance, the Aurora MySQL DB cluster for the Aurora - read replica is shown. This output doesn't contain information - about cross-Region Aurora read replicas. \n Currently, each - RDS DB instance can have only one Aurora read replica." + description: "The identifiers of Aurora DB clusters to which the + RDS DB instance is replicated as a read replica. For example, + when you create an Aurora read replica of an RDS for MySQL DB + instance, the Aurora MySQL DB cluster for the Aurora read replica + is shown. This output doesn't contain information about cross-Region + Aurora read replicas. \n Currently, each RDS DB instance can + have only one Aurora read replica." items: type: string type: array readReplicaDBInstanceIdentifiers: - description: Contains one or more identifiers of the read replicas - associated with this DB instance. + description: The identifiers of the read replicas associated with + this DB instance. items: type: string type: array + readReplicaSourceDBClusterIdentifier: + description: The identifier of the source DB cluster if this DB + instance is a read replica. + type: string readReplicaSourceDBInstanceIdentifier: - description: Contains the identifier of the source DB instance - if this DB instance is a read replica. + description: The identifier of the source DB instance if this + DB instance is a read replica. type: string replicaMode: description: "The open mode of an Oracle read replica. The default @@ -1979,8 +2034,8 @@ spec: Zone for a DB instance with multi-AZ support. type: string statusInfos: - description: The status of a read replica. If the instance isn't - a read replica, this is blank. + description: The status of a read replica. If the DB instance + isn't a read replica, the value is blank. items: properties: message: @@ -2003,8 +2058,8 @@ spec: type: object type: array vpcSecurityGroups: - description: Provides a list of VPC security group elements that - the DB instance belongs to. + description: The list of Amazon EC2 VPC security groups that the + DB instance belongs to. items: properties: status: diff --git a/package/crds/rds.aws.crossplane.io_globalclusters.yaml b/package/crds/rds.aws.crossplane.io_globalclusters.yaml index 92c0c186b2..a02bc46aee 100644 --- a/package/crds/rds.aws.crossplane.io_globalclusters.yaml +++ b/package/crds/rds.aws.crossplane.io_globalclusters.yaml @@ -69,30 +69,42 @@ spec: GlobalCluster properties: databaseName: - description: The name for your database of up to 64 alphanumeric - characters. If you do not provide a name, Amazon Aurora will - not create a database in the global database cluster you are - creating. + description: "The name for your database of up to 64 alphanumeric + characters. If you don't specify a name, Amazon Aurora doesn't + create a database in the global database cluster. \n Constraints: + \n * Can't be specified if SourceDBClusterIdentifier is specified. + In this case, Amazon Aurora uses the database name from the + source DB cluster." type: string deletionProtection: - description: The deletion protection setting for the new global - database. The global database can't be deleted when deletion - protection is enabled. + description: Specifies whether to enable deletion protection for + the new global database cluster. 
The global database can't be + deleted when deletion protection is enabled. type: boolean engine: - description: The name of the database engine to be used for this - DB cluster. + description: "The database engine to use for this global database + cluster. \n Valid Values: aurora-mysql | aurora-postgresql \n + Constraints: \n * Can't be specified if SourceDBClusterIdentifier + is specified. In this case, Amazon Aurora uses the engine of + the source DB cluster." type: string engineVersion: - description: The engine version of the Aurora global database. + description: "The engine version to use for this global database + cluster. \n Constraints: \n * Can't be specified if SourceDBClusterIdentifier + is specified. In this case, Amazon Aurora uses the engine version + of the source DB cluster." type: string region: description: Region is which region the GlobalCluster will be created. type: string sourceDBClusterIdentifier: - description: The Amazon Resource Name (ARN) to use as the primary - cluster of the global database. This parameter is optional. + description: "The Amazon Resource Name (ARN) to use as the primary + cluster of the global database. \n If you provide a value for + this parameter, don't specify values for the following settings + because Amazon Aurora uses the values from the specified source + DB cluster: \n * DatabaseName \n * Engine \n * EngineVersion + \n * StorageEncrypted" type: string sourceDBClusterIdentifierRef: description: SourceDBClusterIdentifierRef is a reference to a @@ -170,8 +182,10 @@ spec: type: object type: object storageEncrypted: - description: The storage encryption setting for the new global - database cluster. + description: "Specifies whether to enable storage encryption for + the new global database cluster. \n Constraints: \n * Can't + be specified if SourceDBClusterIdentifier is specified. In this + case, Amazon Aurora uses the setting from the source DB cluster." type: boolean required: - region @@ -382,13 +396,15 @@ spec: properties: failoverState: description: A data object containing all properties for the current - state of an in-process or pending failover process for this - Aurora global database. This object is empty unless the FailoverGlobalCluster - API operation has been called on this Aurora global database - (GlobalCluster). + state of an in-process or pending switchover or failover process + for this global cluster (Aurora global database). This object + is empty unless the SwitchoverGlobalCluster or FailoverGlobalCluster + operation was called on this global cluster. properties: fromDBClusterARN: type: string + isDataLossAllowed: + type: boolean status: type: string toDBClusterARN: @@ -404,8 +420,8 @@ spec: a global database cluster. type: string globalClusterMembers: - description: The list of cluster IDs for secondary clusters within - the global database cluster. Currently limited to 1 item. + description: The list of primary and secondary clusters within + the global database cluster. 
items: properties: dbClusterARN: @@ -418,6 +434,8 @@ spec: items: type: string type: array + synchronizationStatus: + type: string type: object type: array globalClusterResourceID: diff --git a/package/crds/route53resolver.aws.crossplane.io_resolverendpoints.yaml b/package/crds/route53resolver.aws.crossplane.io_resolverendpoints.yaml index 106ea276e5..6d129edb6c 100644 --- a/package/crds/route53resolver.aws.crossplane.io_resolverendpoints.yaml +++ b/package/crds/route53resolver.aws.crossplane.io_resolverendpoints.yaml @@ -173,10 +173,24 @@ spec: description: A friendly name that lets you easily find a configuration in the Resolver dashboard in the Route 53 console. type: string + outpostARN: + description: The Amazon Resource Name (ARN) of the Outpost. If + you specify this, you must also specify a value for the PreferredInstanceType. + type: string + preferredInstanceType: + description: The instance type. If you specify this, you must + also specify a value for the OutpostArn. + type: string region: description: Region is which region the ResolverEndpoint will be created. type: string + resolverEndpointType: + description: For the endpoint type you can choose either IPv4, + IPv6, or dual-stack. A dual-stack endpoint means that it will + resolve via both IPv4 and IPv6. This endpoint type is applied + to all IP addresses. + type: string securityGroupIdRefs: description: SecurityGroupIDRefs is a list of references to SecurityGroups used to set the SecurityGroupIDs. diff --git a/package/crds/route53resolver.aws.crossplane.io_resolverrules.yaml b/package/crds/route53resolver.aws.crossplane.io_resolverrules.yaml index fcd32e7750..a22df7c5ec 100644 --- a/package/crds/route53resolver.aws.crossplane.io_resolverrules.yaml +++ b/package/crds/route53resolver.aws.crossplane.io_resolverrules.yaml @@ -187,13 +187,15 @@ spec: type: array targetIPs: description: "The IPs that you want Resolver to forward DNS queries - to. You can specify only IPv4 addresses. Separate IP addresses - with a space. \n TargetIps is available only when the value - of Rule type is FORWARD." + to. You can specify either Ipv4 or Ipv6 addresses but not both + in the same rule. Separate IP addresses with a space. \n TargetIps + is available only when the value of Rule type is FORWARD." items: properties: ip: type: string + ipv6: + type: string port: format: int64 type: integer diff --git a/package/crds/secretsmanager.aws.crossplane.io_secrets.yaml b/package/crds/secretsmanager.aws.crossplane.io_secrets.yaml index f446d0076e..63e327674e 100644 --- a/package/crds/secretsmanager.aws.crossplane.io_secrets.yaml +++ b/package/crds/secretsmanager.aws.crossplane.io_secrets.yaml @@ -649,7 +649,8 @@ spec: type: boolean forceOverwriteReplicaSecret: description: Specifies whether to overwrite a secret with the - same name in the destination Region. + same name in the destination Region. By default, secrets aren't + overwritten. type: boolean kmsKeyID: description: "The ARN, key ID, or alias of the KMS key that Secrets diff --git a/package/crds/sfn.aws.crossplane.io_statemachines.yaml b/package/crds/sfn.aws.crossplane.io_statemachines.yaml index 9b68c534f9..51131b8364 100644 --- a/package/crds/sfn.aws.crossplane.io_statemachines.yaml +++ b/package/crds/sfn.aws.crossplane.io_statemachines.yaml @@ -100,6 +100,10 @@ spec: To enable logging with CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and _." type: string + publish: + description: Set to true to publish the first version of the state + machine during creation. 
The default is false.
+      type: boolean
     region:
       description: Region is which region the StateMachine will be created.
       type: string
@@ -213,6 +217,12 @@ spec:
         - STANDARD
         - EXPRESS
       type: string
+    versionDescription:
+      description: Sets the description of the state machine version.
+        You can only set the description if the publish parameter is
+        set to true. Otherwise, if you set versionDescription but set
+        publish to false, this API action throws ValidationException.
+      type: string
     required:
     - definition
     - name
@@ -430,6 +440,11 @@ spec:
       description: The Amazon Resource Name (ARN) that identifies the
         created state machine.
       type: string
+    stateMachineVersionARN:
+      description: The Amazon Resource Name (ARN) that identifies the
+        created state machine version. If you do not set the publish
+        parameter to true, this field returns a null value.
+      type: string
     type: object
   conditions:
     description: Conditions of the resource.
diff --git a/package/crds/transfer.aws.crossplane.io_servers.yaml b/package/crds/transfer.aws.crossplane.io_servers.yaml
index 63a7d2f86f..7bd164427f 100644
--- a/package/crds/transfer.aws.crossplane.io_servers.yaml
+++ b/package/crds/transfer.aws.crossplane.io_servers.yaml
@@ -641,16 +641,17 @@ spec:
         a string of your choice. \n If you aren't planning to migrate
         existing users from an existing SFTP-enabled server to a new
         server, don't update the host key. Accidentally changing a server's
-        host key can be disruptive. \n For more information, see Update
+        host key can be disruptive. \n For more information, see Manage
         host keys for your SFTP-enabled server (https://docs.aws.amazon.com/transfer/latest/userguide/edit-server-config.html#configuring-servers-change-host-key)
         in the Transfer Family User Guide."
       type: string
     identityProviderDetails:
-      description: Required when IdentityProviderType is set to AWS_DIRECTORY_SERVICE
-        or API_GATEWAY. Accepts an array containing all of the information
-        required to use a directory in AWS_DIRECTORY_SERVICE or invoke
-        a customer-supplied authentication API, including the API Gateway
-        URL. Not required when IdentityProviderType is set to SERVICE_MANAGED.
+      description: Required when IdentityProviderType is set to AWS_DIRECTORY_SERVICE,
+        AWS_LAMBDA, or API_GATEWAY. Accepts an array
+        containing all of the information required to use a directory
+        in AWS_DIRECTORY_SERVICE or invoke a customer-supplied authentication
+        API, including the API Gateway URL. Not required when IdentityProviderType
+        is set to SERVICE_MANAGED.
       properties:
         directoryID:
           type: string
@@ -658,6 +659,8 @@ spec:
           type: string
         invocationRole:
           type: string
+        sftpAuthenticationMethods:
+          type: string
         url:
           type: string
       type: object
@@ -677,7 +680,7 @@ spec:
         parameter. \n Use the AWS_LAMBDA value to directly use an Lambda
         function as your identity provider. If you choose this value,
         you must specify the ARN for the Lambda function in the Function
-        parameter or the IdentityProviderDetails data type."
+        parameter for the IdentityProviderDetails data type."
       type: string
     loggingRole:
       description: Allows the service to write your users' activity
@@ -815,12 +818,13 @@ spec:
         Manager (ACM) which is used to identify your server when clients
         connect to it over FTPS. \n * If Protocol includes either FTP
         or FTPS, then the EndpointType must be VPC and the IdentityProviderType
-        must be AWS_DIRECTORY_SERVICE or API_GATEWAY. \n * If Protocol
-        includes FTP, then AddressAllocationIds cannot be associated.
-        \n * If Protocol is set only to SFTP, the EndpointType can be
-        set to PUBLIC and the IdentityProviderType can be set to SERVICE_MANAGED.
-        \n * If Protocol includes AS2, then the EndpointType must be
-        VPC, and domain must be Amazon S3."
+        must be either AWS_DIRECTORY_SERVICE, AWS_LAMBDA, or API_GATEWAY.
+        \n * If Protocol includes FTP, then AddressAllocationIds cannot
+        be associated. \n * If Protocol is set only to SFTP, the EndpointType
+        can be set to PUBLIC and the IdentityProviderType can be set
+        to any of the supported identity types: SERVICE_MANAGED, AWS_DIRECTORY_SERVICE,
+        AWS_LAMBDA, or API_GATEWAY. \n * If Protocol includes AS2, then
+        the EndpointType must be VPC, and domain must be Amazon S3."
       items:
         type: string
       type: array
@@ -831,6 +835,20 @@ spec:
       description: Specifies the name of the security policy that is
         attached to the server.
       type: string
+    structuredLogDestinations:
+      description: "Specifies the log groups to which your server logs
+        are sent. \n To specify a log group, you must provide the ARN
+        for an existing log group. In this case, the format of the log
+        group is as follows: \n arn:aws:logs:region-name:amazon-account-id:log-group:log-group-name:*
+        \n For example, arn:aws:logs:us-east-1:111122223333:log-group:mytestgroup:*
+        \n If you have previously specified a log group for a server,
+        you can clear it, and in effect turn off structured logging,
+        by providing an empty value for this parameter in an update-server
+        call. For example: \n update-server --server-id s-1234567890abcdef0
+        --structured-log-destinations"
+      items:
+        type: string
+      type: array
     tags:
       description: Key-value pairs that can be used to group and search
         for servers.
@@ -845,11 +863,11 @@ spec:
     workflowDetails:
       description: "Specifies the workflow ID for the workflow to assign
         and the execution role that's used for executing the workflow.
-        \n In additon to a workflow to execute when a file is uploaded
-        completely, WorkflowDeatails can also contain a workflow ID
-        (and execution role) for a workflow to execute on partial upload.
-        A partial upload occurs when a file is open when the session
-        disconnects."
+        \n In addition to a workflow to execute when a file is uploaded
+        completely, WorkflowDetails can also contain a workflow ID (and
+        execution role) for a workflow to execute on partial upload.
+        A partial upload occurs when the server session disconnects
+        while the file is still being uploaded."
       properties:
         onPartialUpload:
           items:
diff --git a/package/crds/transfer.aws.crossplane.io_users.yaml b/package/crds/transfer.aws.crossplane.io_users.yaml
index c412134b1b..0a21088314 100644
--- a/package/crds/transfer.aws.crossplane.io_users.yaml
+++ b/package/crds/transfer.aws.crossplane.io_users.yaml
@@ -86,9 +86,10 @@ spec:
         \"Target\": \"/bucket_name/home/mydirectory\" } ] \n In most
         cases, you can use this value instead of the session policy
         to lock your user down to the designated home directory (\"chroot\").
-        To do this, you can set Entry to / and set Target to the HomeDirectory
-        parameter value. \n The following is an Entry and Target pair
-        example for chroot. \n [ { \"Entry\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\"
+        To do this, you can set Entry to / and set Target to the value
+        the user should see for their home directory when they log in.
+        \n The following is an Entry and Target pair example for chroot. 
+ \n [ { \"Entry\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]" items: properties: @@ -309,8 +310,14 @@ spec: type: object sshPublicKeyBody: description: "The public portion of the Secure Shell (SSH) key - used to authenticate the user to the server. \n Transfer Family - accepts RSA, ECDSA, and ED25519 keys." + used to authenticate the user to the server. \n The three standard + SSH public key format elements are , , + and an optional , with spaces between each element. + \n Transfer Family accepts RSA, ECDSA, and ED25519 keys. \n + * For RSA keys, the key type is ssh-rsa. \n * For ED25519 keys, + the key type is ssh-ed25519. \n * For ECDSA keys, the key type + is either ecdsa-sha2-nistp256, ecdsa-sha2-nistp384, or ecdsa-sha2-nistp521, + depending on the size of the key you generated." type: string tags: description: Key-value pairs that can be used to group and search @@ -534,8 +541,8 @@ spec: to. type: string userName: - description: A unique string that identifies a user account associated - with a server. + description: A unique string that identifies a Transfer Family + user. type: string type: object conditions: diff --git a/pkg/clients/database/rds_test.go b/pkg/clients/database/rds_test.go index 9803a118a0..a90d82cd99 100644 --- a/pkg/clients/database/rds_test.go +++ b/pkg/clients/database/rds_test.go @@ -33,7 +33,7 @@ import ( kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" @@ -349,12 +349,12 @@ func TestIsUpToDate(t *testing.T) { "EngineVersionUpgrade": { args: args{ db: rdstypes.DBInstance{ - EngineVersion: pointer.String("12.3"), + EngineVersion: ptr.To("12.3"), }, r: v1beta1.RDSInstance{ Spec: v1beta1.RDSInstanceSpec{ ForProvider: v1beta1.RDSInstanceParameters{ - EngineVersion: pointer.String("12.7"), + EngineVersion: ptr.To("12.7"), }, }, }, @@ -364,12 +364,12 @@ func TestIsUpToDate(t *testing.T) { "EngineVersionUpgradeMajorVersion": { args: args{ db: rdstypes.DBInstance{ - EngineVersion: pointer.String("12.3"), + EngineVersion: ptr.To("12.3"), }, r: v1beta1.RDSInstance{ Spec: v1beta1.RDSInstanceSpec{ ForProvider: v1beta1.RDSInstanceParameters{ - EngineVersion: pointer.String("13.7"), + EngineVersion: ptr.To("13.7"), }, }, }, @@ -379,12 +379,12 @@ func TestIsUpToDate(t *testing.T) { "EngineVersionMajorVersionOnly": { args: args{ db: rdstypes.DBInstance{ - EngineVersion: pointer.String("12.3"), + EngineVersion: ptr.To("12.3"), }, r: v1beta1.RDSInstance{ Spec: v1beta1.RDSInstanceSpec{ ForProvider: v1beta1.RDSInstanceParameters{ - EngineVersion: pointer.String("12"), + EngineVersion: ptr.To("12"), }, }, }, @@ -394,12 +394,12 @@ func TestIsUpToDate(t *testing.T) { "EngineVersionDowngrade": { args: args{ db: rdstypes.DBInstance{ - EngineVersion: pointer.String("12.3"), + EngineVersion: ptr.To("12.3"), }, r: v1beta1.RDSInstance{ Spec: v1beta1.RDSInstanceSpec{ ForProvider: v1beta1.RDSInstanceParameters{ - EngineVersion: pointer.String("12.1"), + EngineVersion: ptr.To("12.1"), }, }, }, diff --git a/pkg/clients/ec2/tags_test.go b/pkg/clients/ec2/tags_test.go index 6cfee8b2c6..98faacaf17 100644 --- a/pkg/clients/ec2/tags_test.go +++ b/pkg/clients/ec2/tags_test.go @@ -24,7 +24,7 @@ import ( "github.com/aws/smithy-go/document" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" 
) func TestDiffEC2Tags(t *testing.T) { @@ -239,7 +239,7 @@ func TestDiffEC2Tags(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { tagCmp := cmpopts.SortSlices(func(i, j ec2types.Tag) bool { - return pointer.StringDeref(i.Key, "") < pointer.StringDeref(j.Key, "") + return ptr.Deref(i.Key, "") < ptr.Deref(j.Key, "") }) add, remove := DiffEC2Tags(tc.args.local, tc.args.remote) if diff := cmp.Diff(tc.want.add, add, tagCmp, cmpopts.IgnoreTypes(document.NoSerde{})); diff != "" { diff --git a/pkg/controller/apigateway/restapi/zz_controller.go b/pkg/controller/apigateway/restapi/zz_controller.go index 20b37c63cc..032b2a48ff 100644 --- a/pkg/controller/apigateway/restapi/zz_controller.go +++ b/pkg/controller/apigateway/restapi/zz_controller.go @@ -191,14 +191,19 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E } else { cr.Spec.ForProvider.Policy = nil } + if resp.RootResourceId != nil { + cr.Status.AtProvider.RootResourceID = resp.RootResourceId + } else { + cr.Status.AtProvider.RootResourceID = nil + } if resp.Tags != nil { - f10 := map[string]*string{} - for f10key, f10valiter := range resp.Tags { - var f10val string - f10val = *f10valiter - f10[f10key] = &f10val + f11 := map[string]*string{} + for f11key, f11valiter := range resp.Tags { + var f11val string + f11val = *f11valiter + f11[f11key] = &f11val } - cr.Spec.ForProvider.Tags = f10 + cr.Spec.ForProvider.Tags = f11 } else { cr.Spec.ForProvider.Tags = nil } @@ -208,13 +213,13 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E cr.Spec.ForProvider.Version = nil } if resp.Warnings != nil { - f12 := []*string{} - for _, f12iter := range resp.Warnings { - var f12elem string - f12elem = *f12iter - f12 = append(f12, &f12elem) + f13 := []*string{} + for _, f13iter := range resp.Warnings { + var f13elem string + f13elem = *f13iter + f13 = append(f13, &f13elem) } - cr.Status.AtProvider.Warnings = f12 + cr.Status.AtProvider.Warnings = f13 } else { cr.Status.AtProvider.Warnings = nil } diff --git a/pkg/controller/apigateway/restapi/zz_conversions.go b/pkg/controller/apigateway/restapi/zz_conversions.go index 443ae625c8..f71420cc05 100644 --- a/pkg/controller/apigateway/restapi/zz_conversions.go +++ b/pkg/controller/apigateway/restapi/zz_conversions.go @@ -116,14 +116,19 @@ func GenerateRestAPI(resp *svcsdk.RestApi) *svcapitypes.RestAPI { } else { cr.Spec.ForProvider.Policy = nil } + if resp.RootResourceId != nil { + cr.Status.AtProvider.RootResourceID = resp.RootResourceId + } else { + cr.Status.AtProvider.RootResourceID = nil + } if resp.Tags != nil { - f10 := map[string]*string{} - for f10key, f10valiter := range resp.Tags { - var f10val string - f10val = *f10valiter - f10[f10key] = &f10val + f11 := map[string]*string{} + for f11key, f11valiter := range resp.Tags { + var f11val string + f11val = *f11valiter + f11[f11key] = &f11val } - cr.Spec.ForProvider.Tags = f10 + cr.Spec.ForProvider.Tags = f11 } else { cr.Spec.ForProvider.Tags = nil } @@ -133,13 +138,13 @@ func GenerateRestAPI(resp *svcsdk.RestApi) *svcapitypes.RestAPI { cr.Spec.ForProvider.Version = nil } if resp.Warnings != nil { - f12 := []*string{} - for _, f12iter := range resp.Warnings { - var f12elem string - f12elem = *f12iter - f12 = append(f12, &f12elem) + f13 := []*string{} + for _, f13iter := range resp.Warnings { + var f13elem string + f13elem = *f13iter + f13 = append(f13, &f13elem) } - cr.Status.AtProvider.Warnings = f12 + cr.Status.AtProvider.Warnings = f13 } else { 
cr.Status.AtProvider.Warnings = nil } diff --git a/pkg/controller/athena/workgroup/zz_conversions.go b/pkg/controller/athena/workgroup/zz_conversions.go index df8ca4d38b..98168acebb 100644 --- a/pkg/controller/athena/workgroup/zz_conversions.go +++ b/pkg/controller/athena/workgroup/zz_conversions.go @@ -55,18 +55,21 @@ func GenerateWorkGroup(resp *svcsdk.GetWorkGroupOutput) *svcapitypes.WorkGroup { } f0.CustomerContentEncryptionConfiguration = f0f2 } + if resp.WorkGroup.Configuration.EnableMinimumEncryptionConfiguration != nil { + f0.EnableMinimumEncryptionConfiguration = resp.WorkGroup.Configuration.EnableMinimumEncryptionConfiguration + } if resp.WorkGroup.Configuration.EnforceWorkGroupConfiguration != nil { f0.EnforceWorkGroupConfiguration = resp.WorkGroup.Configuration.EnforceWorkGroupConfiguration } if resp.WorkGroup.Configuration.EngineVersion != nil { - f0f4 := &svcapitypes.EngineVersion{} + f0f5 := &svcapitypes.EngineVersion{} if resp.WorkGroup.Configuration.EngineVersion.EffectiveEngineVersion != nil { - f0f4.EffectiveEngineVersion = resp.WorkGroup.Configuration.EngineVersion.EffectiveEngineVersion + f0f5.EffectiveEngineVersion = resp.WorkGroup.Configuration.EngineVersion.EffectiveEngineVersion } if resp.WorkGroup.Configuration.EngineVersion.SelectedEngineVersion != nil { - f0f4.SelectedEngineVersion = resp.WorkGroup.Configuration.EngineVersion.SelectedEngineVersion + f0f5.SelectedEngineVersion = resp.WorkGroup.Configuration.EngineVersion.SelectedEngineVersion } - f0.EngineVersion = f0f4 + f0.EngineVersion = f0f5 } if resp.WorkGroup.Configuration.ExecutionRole != nil { f0.ExecutionRole = resp.WorkGroup.Configuration.ExecutionRole @@ -78,31 +81,31 @@ func GenerateWorkGroup(resp *svcsdk.GetWorkGroupOutput) *svcapitypes.WorkGroup { f0.RequesterPaysEnabled = resp.WorkGroup.Configuration.RequesterPaysEnabled } if resp.WorkGroup.Configuration.ResultConfiguration != nil { - f0f8 := &svcapitypes.ResultConfiguration{} + f0f9 := &svcapitypes.ResultConfiguration{} if resp.WorkGroup.Configuration.ResultConfiguration.AclConfiguration != nil { - f0f8f0 := &svcapitypes.ACLConfiguration{} + f0f9f0 := &svcapitypes.ACLConfiguration{} if resp.WorkGroup.Configuration.ResultConfiguration.AclConfiguration.S3AclOption != nil { - f0f8f0.S3ACLOption = resp.WorkGroup.Configuration.ResultConfiguration.AclConfiguration.S3AclOption + f0f9f0.S3ACLOption = resp.WorkGroup.Configuration.ResultConfiguration.AclConfiguration.S3AclOption } - f0f8.ACLConfiguration = f0f8f0 + f0f9.ACLConfiguration = f0f9f0 } if resp.WorkGroup.Configuration.ResultConfiguration.EncryptionConfiguration != nil { - f0f8f1 := &svcapitypes.EncryptionConfiguration{} + f0f9f1 := &svcapitypes.EncryptionConfiguration{} if resp.WorkGroup.Configuration.ResultConfiguration.EncryptionConfiguration.EncryptionOption != nil { - f0f8f1.EncryptionOption = resp.WorkGroup.Configuration.ResultConfiguration.EncryptionConfiguration.EncryptionOption + f0f9f1.EncryptionOption = resp.WorkGroup.Configuration.ResultConfiguration.EncryptionConfiguration.EncryptionOption } if resp.WorkGroup.Configuration.ResultConfiguration.EncryptionConfiguration.KmsKey != nil { - f0f8f1.KMSKey = resp.WorkGroup.Configuration.ResultConfiguration.EncryptionConfiguration.KmsKey + f0f9f1.KMSKey = resp.WorkGroup.Configuration.ResultConfiguration.EncryptionConfiguration.KmsKey } - f0f8.EncryptionConfiguration = f0f8f1 + f0f9.EncryptionConfiguration = f0f9f1 } if resp.WorkGroup.Configuration.ResultConfiguration.ExpectedBucketOwner != nil { - f0f8.ExpectedBucketOwner = 
resp.WorkGroup.Configuration.ResultConfiguration.ExpectedBucketOwner + f0f9.ExpectedBucketOwner = resp.WorkGroup.Configuration.ResultConfiguration.ExpectedBucketOwner } if resp.WorkGroup.Configuration.ResultConfiguration.OutputLocation != nil { - f0f8.OutputLocation = resp.WorkGroup.Configuration.ResultConfiguration.OutputLocation + f0f9.OutputLocation = resp.WorkGroup.Configuration.ResultConfiguration.OutputLocation } - f0.ResultConfiguration = f0f8 + f0.ResultConfiguration = f0f9 } cr.Spec.ForProvider.Configuration = f0 } else { @@ -136,18 +139,21 @@ func GenerateCreateWorkGroupInput(cr *svcapitypes.WorkGroup) *svcsdk.CreateWorkG } f0.SetCustomerContentEncryptionConfiguration(f0f2) } + if cr.Spec.ForProvider.Configuration.EnableMinimumEncryptionConfiguration != nil { + f0.SetEnableMinimumEncryptionConfiguration(*cr.Spec.ForProvider.Configuration.EnableMinimumEncryptionConfiguration) + } if cr.Spec.ForProvider.Configuration.EnforceWorkGroupConfiguration != nil { f0.SetEnforceWorkGroupConfiguration(*cr.Spec.ForProvider.Configuration.EnforceWorkGroupConfiguration) } if cr.Spec.ForProvider.Configuration.EngineVersion != nil { - f0f4 := &svcsdk.EngineVersion{} + f0f5 := &svcsdk.EngineVersion{} if cr.Spec.ForProvider.Configuration.EngineVersion.EffectiveEngineVersion != nil { - f0f4.SetEffectiveEngineVersion(*cr.Spec.ForProvider.Configuration.EngineVersion.EffectiveEngineVersion) + f0f5.SetEffectiveEngineVersion(*cr.Spec.ForProvider.Configuration.EngineVersion.EffectiveEngineVersion) } if cr.Spec.ForProvider.Configuration.EngineVersion.SelectedEngineVersion != nil { - f0f4.SetSelectedEngineVersion(*cr.Spec.ForProvider.Configuration.EngineVersion.SelectedEngineVersion) + f0f5.SetSelectedEngineVersion(*cr.Spec.ForProvider.Configuration.EngineVersion.SelectedEngineVersion) } - f0.SetEngineVersion(f0f4) + f0.SetEngineVersion(f0f5) } if cr.Spec.ForProvider.Configuration.ExecutionRole != nil { f0.SetExecutionRole(*cr.Spec.ForProvider.Configuration.ExecutionRole) @@ -159,31 +165,31 @@ func GenerateCreateWorkGroupInput(cr *svcapitypes.WorkGroup) *svcsdk.CreateWorkG f0.SetRequesterPaysEnabled(*cr.Spec.ForProvider.Configuration.RequesterPaysEnabled) } if cr.Spec.ForProvider.Configuration.ResultConfiguration != nil { - f0f8 := &svcsdk.ResultConfiguration{} + f0f9 := &svcsdk.ResultConfiguration{} if cr.Spec.ForProvider.Configuration.ResultConfiguration.ACLConfiguration != nil { - f0f8f0 := &svcsdk.AclConfiguration{} + f0f9f0 := &svcsdk.AclConfiguration{} if cr.Spec.ForProvider.Configuration.ResultConfiguration.ACLConfiguration.S3ACLOption != nil { - f0f8f0.SetS3AclOption(*cr.Spec.ForProvider.Configuration.ResultConfiguration.ACLConfiguration.S3ACLOption) + f0f9f0.SetS3AclOption(*cr.Spec.ForProvider.Configuration.ResultConfiguration.ACLConfiguration.S3ACLOption) } - f0f8.SetAclConfiguration(f0f8f0) + f0f9.SetAclConfiguration(f0f9f0) } if cr.Spec.ForProvider.Configuration.ResultConfiguration.EncryptionConfiguration != nil { - f0f8f1 := &svcsdk.EncryptionConfiguration{} + f0f9f1 := &svcsdk.EncryptionConfiguration{} if cr.Spec.ForProvider.Configuration.ResultConfiguration.EncryptionConfiguration.EncryptionOption != nil { - f0f8f1.SetEncryptionOption(*cr.Spec.ForProvider.Configuration.ResultConfiguration.EncryptionConfiguration.EncryptionOption) + f0f9f1.SetEncryptionOption(*cr.Spec.ForProvider.Configuration.ResultConfiguration.EncryptionConfiguration.EncryptionOption) } if cr.Spec.ForProvider.Configuration.ResultConfiguration.EncryptionConfiguration.KMSKey != nil { - 
f0f8f1.SetKmsKey(*cr.Spec.ForProvider.Configuration.ResultConfiguration.EncryptionConfiguration.KMSKey) + f0f9f1.SetKmsKey(*cr.Spec.ForProvider.Configuration.ResultConfiguration.EncryptionConfiguration.KMSKey) } - f0f8.SetEncryptionConfiguration(f0f8f1) + f0f9.SetEncryptionConfiguration(f0f9f1) } if cr.Spec.ForProvider.Configuration.ResultConfiguration.ExpectedBucketOwner != nil { - f0f8.SetExpectedBucketOwner(*cr.Spec.ForProvider.Configuration.ResultConfiguration.ExpectedBucketOwner) + f0f9.SetExpectedBucketOwner(*cr.Spec.ForProvider.Configuration.ResultConfiguration.ExpectedBucketOwner) } if cr.Spec.ForProvider.Configuration.ResultConfiguration.OutputLocation != nil { - f0f8.SetOutputLocation(*cr.Spec.ForProvider.Configuration.ResultConfiguration.OutputLocation) + f0f9.SetOutputLocation(*cr.Spec.ForProvider.Configuration.ResultConfiguration.OutputLocation) } - f0.SetResultConfiguration(f0f8) + f0.SetResultConfiguration(f0f9) } res.SetConfiguration(f0) } diff --git a/pkg/controller/autoscaling/autoscalinggroup/zz_conversions.go b/pkg/controller/autoscaling/autoscalinggroup/zz_conversions.go index b0f831b807..d3bd276f68 100644 --- a/pkg/controller/autoscaling/autoscalinggroup/zz_conversions.go +++ b/pkg/controller/autoscaling/autoscalinggroup/zz_conversions.go @@ -569,11 +569,14 @@ func GenerateAutoScalingGroup(resp *svcsdk.DescribeAutoScalingGroupsOutput) *svc if f30iter.Identifier != nil { f30elem.Identifier = f30iter.Identifier } + if f30iter.Type != nil { + f30elem.Type = f30iter.Type + } f30 = append(f30, f30elem) } - cr.Spec.ForProvider.TrafficSources = f30 + cr.Status.AtProvider.TrafficSources = f30 } else { - cr.Spec.ForProvider.TrafficSources = nil + cr.Status.AtProvider.TrafficSources = nil } if elem.VPCZoneIdentifier != nil { cr.Spec.ForProvider.VPCZoneIdentifier = elem.VPCZoneIdentifier @@ -1027,17 +1030,6 @@ func GenerateCreateAutoScalingGroupInput(cr *svcapitypes.AutoScalingGroup) *svcs } res.SetTerminationPolicies(f23) } - if cr.Spec.ForProvider.TrafficSources != nil { - f24 := []*svcsdk.TrafficSourceIdentifier{} - for _, f24iter := range cr.Spec.ForProvider.TrafficSources { - f24elem := &svcsdk.TrafficSourceIdentifier{} - if f24iter.Identifier != nil { - f24elem.SetIdentifier(*f24iter.Identifier) - } - f24 = append(f24, f24elem) - } - res.SetTrafficSources(f24) - } if cr.Spec.ForProvider.VPCZoneIdentifier != nil { res.SetVPCZoneIdentifier(*cr.Spec.ForProvider.VPCZoneIdentifier) } diff --git a/pkg/controller/docdb/dbcluster/setup_test.go b/pkg/controller/docdb/dbcluster/setup_test.go index 195f96dbfc..4a142e9032 100644 --- a/pkg/controller/docdb/dbcluster/setup_test.go +++ b/pkg/controller/docdb/dbcluster/setup_test.go @@ -26,7 +26,7 @@ import ( "github.com/pkg/errors" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/aws/aws-sdk-go/aws/request" @@ -2229,7 +2229,7 @@ func TestCreate(t *testing.T) { {Key: awsclient.String(testTagKey), Value: awsclient.String(testTagValue)}, {Key: awsclient.String(testOtherTagKey), Value: awsclient.String(testOtherTagValue)}, }, - SnapshotIdentifier: pointer.String("abcd"), + SnapshotIdentifier: ptr.To("abcd"), }, }, }, @@ -2372,9 +2372,9 @@ func TestCreate(t *testing.T) { withMasterPasswordSecretRef(testMasterPasswordSecretNamespace, testMasterPasswordSecretName, testMasterPasswordSecretKey), withRestoreToPointInTime(&svcapitypes.RestorePointInTimeConfiguration{ RestoreTime: &metav1.Time{Time: timeNow}, - 
UseLatestRestorableTime: pointer.Bool(true), + UseLatestRestorableTime: ptr.To(true), SourceDBClusterIdentifier: "abcd", - RestoreType: pointer.String("full-copy"), + RestoreType: ptr.To("full-copy"), }), ), }, @@ -2419,9 +2419,9 @@ func TestCreate(t *testing.T) { withStatusDBClusterParameterGroupName(testDBClusterParameterGroupName), withRestoreToPointInTime(&svcapitypes.RestorePointInTimeConfiguration{ RestoreTime: &metav1.Time{Time: timeNow}, - UseLatestRestorableTime: pointer.Bool(true), + UseLatestRestorableTime: ptr.To(true), SourceDBClusterIdentifier: "abcd", - RestoreType: pointer.String("full-copy"), + RestoreType: ptr.To("full-copy"), }), ), result: managed.ExternalCreation{ @@ -2456,9 +2456,9 @@ func TestCreate(t *testing.T) { {Key: awsclient.String(testOtherTagKey), Value: awsclient.String(testOtherTagValue)}, }, RestoreToTime: &timeNow, - UseLatestRestorableTime: pointer.Bool(true), - SourceDBClusterIdentifier: pointer.String("abcd"), - RestoreType: pointer.String("full-copy"), + UseLatestRestorableTime: ptr.To(true), + SourceDBClusterIdentifier: ptr.To("abcd"), + RestoreType: ptr.To("full-copy"), }, }, }, diff --git a/pkg/controller/docdb/dbcluster/zz_conversions.go b/pkg/controller/docdb/dbcluster/zz_conversions.go index ad609270d8..eedd42ecca 100644 --- a/pkg/controller/docdb/dbcluster/zz_conversions.go +++ b/pkg/controller/docdb/dbcluster/zz_conversions.go @@ -393,13 +393,13 @@ func GenerateModifyDBClusterInput(cr *svcapitypes.DBCluster) *svcsdk.ModifyDBClu res.SetPreferredMaintenanceWindow(*cr.Spec.ForProvider.PreferredMaintenanceWindow) } if cr.Spec.ForProvider.VPCSecurityGroupIDs != nil { - f10 := []*string{} - for _, f10iter := range cr.Spec.ForProvider.VPCSecurityGroupIDs { - var f10elem string - f10elem = *f10iter - f10 = append(f10, &f10elem) + f11 := []*string{} + for _, f11iter := range cr.Spec.ForProvider.VPCSecurityGroupIDs { + var f11elem string + f11elem = *f11iter + f11 = append(f11, &f11elem) } - res.SetVpcSecurityGroupIds(f10) + res.SetVpcSecurityGroupIds(f11) } return res diff --git a/pkg/controller/dynamodb/table/hooks.go b/pkg/controller/dynamodb/table/hooks.go index 3b49bb9400..3b3f013ff7 100644 --- a/pkg/controller/dynamodb/table/hooks.go +++ b/pkg/controller/dynamodb/table/hooks.go @@ -26,7 +26,7 @@ import ( svcsdkapi "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" "github.com/google/go-cmp/cmp" "github.com/pkg/errors" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -106,7 +106,7 @@ func (e *updateClient) postUpdate(_ context.Context, cr *svcapitypes.Table, obj pitrStatusBool := pitrStatusToBool(pitrStatus) if !isPitrUpToDate(cr, pitrStatusBool) { - pitrSpecEnabled := pointer.BoolDeref(cr.Spec.ForProvider.PointInTimeRecoveryEnabled, false) + pitrSpecEnabled := ptr.Deref(cr.Spec.ForProvider.PointInTimeRecoveryEnabled, false) pitrInput := &svcsdk.UpdateContinuousBackupsInput{ TableName: aws.String(meta.GetExternalName(cr)), @@ -451,7 +451,7 @@ func (e *updateClient) isUpToDate(ctx context.Context, cr *svcapitypes.Table, re } func pitrStatusToBool(pitrStatus *string) bool { - return pointer.StringDeref(pitrStatus, "") == string(svcapitypes.PointInTimeRecoveryStatus_ENABLED) + return ptr.Deref(pitrStatus, "") == string(svcapitypes.PointInTimeRecoveryStatus_ENABLED) } type updateClient struct { diff --git a/pkg/controller/dynamodb/table/zz_controller.go b/pkg/controller/dynamodb/table/zz_controller.go index 0e28bb7156..9581a98304 100644 --- 
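The hunks above in docdb's setup_test.go and dynamodb's hooks.go move from the deprecated per-type helpers in k8s.io/utils/pointer to the generics-based k8s.io/utils/ptr package: ptr.To replaces pointer.String, pointer.Bool and friends, and ptr.Deref replaces pointer.StringDeref and pointer.BoolDeref. A minimal standalone sketch of the equivalence (illustration only, not part of the patch):

package main

import (
	"fmt"

	"k8s.io/utils/ptr"
)

func main() {
	// pointer.String("full-copy")  -> ptr.To("full-copy")
	restoreType := ptr.To("full-copy") // *string
	// pointer.Bool(true)           -> ptr.To(true)
	useLatest := ptr.To(true) // *bool
	// pointer.BoolDeref(p, false)  -> ptr.Deref(p, false)
	fmt.Println(*restoreType, ptr.Deref(useLatest, false))
}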
a/pkg/controller/dynamodb/table/zz_controller.go +++ b/pkg/controller/dynamodb/table/zz_controller.go @@ -164,56 +164,61 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E } else { cr.Status.AtProvider.CreationDateTime = nil } + if resp.TableDescription.DeletionProtectionEnabled != nil { + cr.Spec.ForProvider.DeletionProtectionEnabled = resp.TableDescription.DeletionProtectionEnabled + } else { + cr.Spec.ForProvider.DeletionProtectionEnabled = nil + } if resp.TableDescription.GlobalSecondaryIndexes != nil { - f4 := []*svcapitypes.GlobalSecondaryIndex{} - for _, f4iter := range resp.TableDescription.GlobalSecondaryIndexes { - f4elem := &svcapitypes.GlobalSecondaryIndex{} - if f4iter.IndexName != nil { - f4elem.IndexName = f4iter.IndexName + f5 := []*svcapitypes.GlobalSecondaryIndex{} + for _, f5iter := range resp.TableDescription.GlobalSecondaryIndexes { + f5elem := &svcapitypes.GlobalSecondaryIndex{} + if f5iter.IndexName != nil { + f5elem.IndexName = f5iter.IndexName } - if f4iter.KeySchema != nil { - f4elemf6 := []*svcapitypes.KeySchemaElement{} - for _, f4elemf6iter := range f4iter.KeySchema { - f4elemf6elem := &svcapitypes.KeySchemaElement{} - if f4elemf6iter.AttributeName != nil { - f4elemf6elem.AttributeName = f4elemf6iter.AttributeName + if f5iter.KeySchema != nil { + f5elemf6 := []*svcapitypes.KeySchemaElement{} + for _, f5elemf6iter := range f5iter.KeySchema { + f5elemf6elem := &svcapitypes.KeySchemaElement{} + if f5elemf6iter.AttributeName != nil { + f5elemf6elem.AttributeName = f5elemf6iter.AttributeName } - if f4elemf6iter.KeyType != nil { - f4elemf6elem.KeyType = f4elemf6iter.KeyType + if f5elemf6iter.KeyType != nil { + f5elemf6elem.KeyType = f5elemf6iter.KeyType } - f4elemf6 = append(f4elemf6, f4elemf6elem) + f5elemf6 = append(f5elemf6, f5elemf6elem) } - f4elem.KeySchema = f4elemf6 + f5elem.KeySchema = f5elemf6 } - if f4iter.Projection != nil { - f4elemf7 := &svcapitypes.Projection{} - if f4iter.Projection.NonKeyAttributes != nil { - f4elemf7f0 := []*string{} - for _, f4elemf7f0iter := range f4iter.Projection.NonKeyAttributes { - var f4elemf7f0elem string - f4elemf7f0elem = *f4elemf7f0iter - f4elemf7f0 = append(f4elemf7f0, &f4elemf7f0elem) + if f5iter.Projection != nil { + f5elemf7 := &svcapitypes.Projection{} + if f5iter.Projection.NonKeyAttributes != nil { + f5elemf7f0 := []*string{} + for _, f5elemf7f0iter := range f5iter.Projection.NonKeyAttributes { + var f5elemf7f0elem string + f5elemf7f0elem = *f5elemf7f0iter + f5elemf7f0 = append(f5elemf7f0, &f5elemf7f0elem) } - f4elemf7.NonKeyAttributes = f4elemf7f0 + f5elemf7.NonKeyAttributes = f5elemf7f0 } - if f4iter.Projection.ProjectionType != nil { - f4elemf7.ProjectionType = f4iter.Projection.ProjectionType + if f5iter.Projection.ProjectionType != nil { + f5elemf7.ProjectionType = f5iter.Projection.ProjectionType } - f4elem.Projection = f4elemf7 + f5elem.Projection = f5elemf7 } - if f4iter.ProvisionedThroughput != nil { - f4elemf8 := &svcapitypes.ProvisionedThroughput{} - if f4iter.ProvisionedThroughput.ReadCapacityUnits != nil { - f4elemf8.ReadCapacityUnits = f4iter.ProvisionedThroughput.ReadCapacityUnits + if f5iter.ProvisionedThroughput != nil { + f5elemf8 := &svcapitypes.ProvisionedThroughput{} + if f5iter.ProvisionedThroughput.ReadCapacityUnits != nil { + f5elemf8.ReadCapacityUnits = f5iter.ProvisionedThroughput.ReadCapacityUnits } - if f4iter.ProvisionedThroughput.WriteCapacityUnits != nil { - f4elemf8.WriteCapacityUnits = f4iter.ProvisionedThroughput.WriteCapacityUnits + if 
f5iter.ProvisionedThroughput.WriteCapacityUnits != nil { + f5elemf8.WriteCapacityUnits = f5iter.ProvisionedThroughput.WriteCapacityUnits } - f4elem.ProvisionedThroughput = f4elemf8 + f5elem.ProvisionedThroughput = f5elemf8 } - f4 = append(f4, f4elem) + f5 = append(f5, f5elem) } - cr.Spec.ForProvider.GlobalSecondaryIndexes = f4 + cr.Spec.ForProvider.GlobalSecondaryIndexes = f5 } else { cr.Spec.ForProvider.GlobalSecondaryIndexes = nil } @@ -228,18 +233,18 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E cr.Status.AtProvider.ItemCount = nil } if resp.TableDescription.KeySchema != nil { - f7 := []*svcapitypes.KeySchemaElement{} - for _, f7iter := range resp.TableDescription.KeySchema { - f7elem := &svcapitypes.KeySchemaElement{} - if f7iter.AttributeName != nil { - f7elem.AttributeName = f7iter.AttributeName + f8 := []*svcapitypes.KeySchemaElement{} + for _, f8iter := range resp.TableDescription.KeySchema { + f8elem := &svcapitypes.KeySchemaElement{} + if f8iter.AttributeName != nil { + f8elem.AttributeName = f8iter.AttributeName } - if f7iter.KeyType != nil { - f7elem.KeyType = f7iter.KeyType + if f8iter.KeyType != nil { + f8elem.KeyType = f8iter.KeyType } - f7 = append(f7, f7elem) + f8 = append(f8, f8elem) } - cr.Spec.ForProvider.KeySchema = f7 + cr.Spec.ForProvider.KeySchema = f8 } else { cr.Spec.ForProvider.KeySchema = nil } @@ -254,168 +259,168 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E cr.Status.AtProvider.LatestStreamLabel = nil } if resp.TableDescription.LocalSecondaryIndexes != nil { - f10 := []*svcapitypes.LocalSecondaryIndex{} - for _, f10iter := range resp.TableDescription.LocalSecondaryIndexes { - f10elem := &svcapitypes.LocalSecondaryIndex{} - if f10iter.IndexName != nil { - f10elem.IndexName = f10iter.IndexName + f11 := []*svcapitypes.LocalSecondaryIndex{} + for _, f11iter := range resp.TableDescription.LocalSecondaryIndexes { + f11elem := &svcapitypes.LocalSecondaryIndex{} + if f11iter.IndexName != nil { + f11elem.IndexName = f11iter.IndexName } - if f10iter.KeySchema != nil { - f10elemf4 := []*svcapitypes.KeySchemaElement{} - for _, f10elemf4iter := range f10iter.KeySchema { - f10elemf4elem := &svcapitypes.KeySchemaElement{} - if f10elemf4iter.AttributeName != nil { - f10elemf4elem.AttributeName = f10elemf4iter.AttributeName + if f11iter.KeySchema != nil { + f11elemf4 := []*svcapitypes.KeySchemaElement{} + for _, f11elemf4iter := range f11iter.KeySchema { + f11elemf4elem := &svcapitypes.KeySchemaElement{} + if f11elemf4iter.AttributeName != nil { + f11elemf4elem.AttributeName = f11elemf4iter.AttributeName } - if f10elemf4iter.KeyType != nil { - f10elemf4elem.KeyType = f10elemf4iter.KeyType + if f11elemf4iter.KeyType != nil { + f11elemf4elem.KeyType = f11elemf4iter.KeyType } - f10elemf4 = append(f10elemf4, f10elemf4elem) + f11elemf4 = append(f11elemf4, f11elemf4elem) } - f10elem.KeySchema = f10elemf4 + f11elem.KeySchema = f11elemf4 } - if f10iter.Projection != nil { - f10elemf5 := &svcapitypes.Projection{} - if f10iter.Projection.NonKeyAttributes != nil { - f10elemf5f0 := []*string{} - for _, f10elemf5f0iter := range f10iter.Projection.NonKeyAttributes { - var f10elemf5f0elem string - f10elemf5f0elem = *f10elemf5f0iter - f10elemf5f0 = append(f10elemf5f0, &f10elemf5f0elem) + if f11iter.Projection != nil { + f11elemf5 := &svcapitypes.Projection{} + if f11iter.Projection.NonKeyAttributes != nil { + f11elemf5f0 := []*string{} + for _, f11elemf5f0iter := range f11iter.Projection.NonKeyAttributes { + var 
f11elemf5f0elem string + f11elemf5f0elem = *f11elemf5f0iter + f11elemf5f0 = append(f11elemf5f0, &f11elemf5f0elem) } - f10elemf5.NonKeyAttributes = f10elemf5f0 + f11elemf5.NonKeyAttributes = f11elemf5f0 } - if f10iter.Projection.ProjectionType != nil { - f10elemf5.ProjectionType = f10iter.Projection.ProjectionType + if f11iter.Projection.ProjectionType != nil { + f11elemf5.ProjectionType = f11iter.Projection.ProjectionType } - f10elem.Projection = f10elemf5 + f11elem.Projection = f11elemf5 } - f10 = append(f10, f10elem) + f11 = append(f11, f11elem) } - cr.Spec.ForProvider.LocalSecondaryIndexes = f10 + cr.Spec.ForProvider.LocalSecondaryIndexes = f11 } else { cr.Spec.ForProvider.LocalSecondaryIndexes = nil } if resp.TableDescription.ProvisionedThroughput != nil { - f11 := &svcapitypes.ProvisionedThroughput{} + f12 := &svcapitypes.ProvisionedThroughput{} if resp.TableDescription.ProvisionedThroughput.ReadCapacityUnits != nil { - f11.ReadCapacityUnits = resp.TableDescription.ProvisionedThroughput.ReadCapacityUnits + f12.ReadCapacityUnits = resp.TableDescription.ProvisionedThroughput.ReadCapacityUnits } if resp.TableDescription.ProvisionedThroughput.WriteCapacityUnits != nil { - f11.WriteCapacityUnits = resp.TableDescription.ProvisionedThroughput.WriteCapacityUnits + f12.WriteCapacityUnits = resp.TableDescription.ProvisionedThroughput.WriteCapacityUnits } - cr.Spec.ForProvider.ProvisionedThroughput = f11 + cr.Spec.ForProvider.ProvisionedThroughput = f12 } else { cr.Spec.ForProvider.ProvisionedThroughput = nil } if resp.TableDescription.Replicas != nil { - f12 := []*svcapitypes.ReplicaDescription{} - for _, f12iter := range resp.TableDescription.Replicas { - f12elem := &svcapitypes.ReplicaDescription{} - if f12iter.GlobalSecondaryIndexes != nil { - f12elemf0 := []*svcapitypes.ReplicaGlobalSecondaryIndexDescription{} - for _, f12elemf0iter := range f12iter.GlobalSecondaryIndexes { - f12elemf0elem := &svcapitypes.ReplicaGlobalSecondaryIndexDescription{} - if f12elemf0iter.IndexName != nil { - f12elemf0elem.IndexName = f12elemf0iter.IndexName + f13 := []*svcapitypes.ReplicaDescription{} + for _, f13iter := range resp.TableDescription.Replicas { + f13elem := &svcapitypes.ReplicaDescription{} + if f13iter.GlobalSecondaryIndexes != nil { + f13elemf0 := []*svcapitypes.ReplicaGlobalSecondaryIndexDescription{} + for _, f13elemf0iter := range f13iter.GlobalSecondaryIndexes { + f13elemf0elem := &svcapitypes.ReplicaGlobalSecondaryIndexDescription{} + if f13elemf0iter.IndexName != nil { + f13elemf0elem.IndexName = f13elemf0iter.IndexName } - if f12elemf0iter.ProvisionedThroughputOverride != nil { - f12elemf0elemf1 := &svcapitypes.ProvisionedThroughputOverride{} - if f12elemf0iter.ProvisionedThroughputOverride.ReadCapacityUnits != nil { - f12elemf0elemf1.ReadCapacityUnits = f12elemf0iter.ProvisionedThroughputOverride.ReadCapacityUnits + if f13elemf0iter.ProvisionedThroughputOverride != nil { + f13elemf0elemf1 := &svcapitypes.ProvisionedThroughputOverride{} + if f13elemf0iter.ProvisionedThroughputOverride.ReadCapacityUnits != nil { + f13elemf0elemf1.ReadCapacityUnits = f13elemf0iter.ProvisionedThroughputOverride.ReadCapacityUnits } - f12elemf0elem.ProvisionedThroughputOverride = f12elemf0elemf1 + f13elemf0elem.ProvisionedThroughputOverride = f13elemf0elemf1 } - f12elemf0 = append(f12elemf0, f12elemf0elem) + f13elemf0 = append(f13elemf0, f13elemf0elem) } - f12elem.GlobalSecondaryIndexes = f12elemf0 + f13elem.GlobalSecondaryIndexes = f13elemf0 } - if f12iter.KMSMasterKeyId != nil { - f12elem.KMSMasterKeyID = 
f12iter.KMSMasterKeyId + if f13iter.KMSMasterKeyId != nil { + f13elem.KMSMasterKeyID = f13iter.KMSMasterKeyId } - if f12iter.ProvisionedThroughputOverride != nil { - f12elemf2 := &svcapitypes.ProvisionedThroughputOverride{} - if f12iter.ProvisionedThroughputOverride.ReadCapacityUnits != nil { - f12elemf2.ReadCapacityUnits = f12iter.ProvisionedThroughputOverride.ReadCapacityUnits + if f13iter.ProvisionedThroughputOverride != nil { + f13elemf2 := &svcapitypes.ProvisionedThroughputOverride{} + if f13iter.ProvisionedThroughputOverride.ReadCapacityUnits != nil { + f13elemf2.ReadCapacityUnits = f13iter.ProvisionedThroughputOverride.ReadCapacityUnits } - f12elem.ProvisionedThroughputOverride = f12elemf2 + f13elem.ProvisionedThroughputOverride = f13elemf2 } - if f12iter.RegionName != nil { - f12elem.RegionName = f12iter.RegionName + if f13iter.RegionName != nil { + f13elem.RegionName = f13iter.RegionName } - if f12iter.ReplicaInaccessibleDateTime != nil { - f12elem.ReplicaInaccessibleDateTime = &metav1.Time{*f12iter.ReplicaInaccessibleDateTime} + if f13iter.ReplicaInaccessibleDateTime != nil { + f13elem.ReplicaInaccessibleDateTime = &metav1.Time{*f13iter.ReplicaInaccessibleDateTime} } - if f12iter.ReplicaStatus != nil { - f12elem.ReplicaStatus = f12iter.ReplicaStatus + if f13iter.ReplicaStatus != nil { + f13elem.ReplicaStatus = f13iter.ReplicaStatus } - if f12iter.ReplicaStatusDescription != nil { - f12elem.ReplicaStatusDescription = f12iter.ReplicaStatusDescription + if f13iter.ReplicaStatusDescription != nil { + f13elem.ReplicaStatusDescription = f13iter.ReplicaStatusDescription } - if f12iter.ReplicaStatusPercentProgress != nil { - f12elem.ReplicaStatusPercentProgress = f12iter.ReplicaStatusPercentProgress + if f13iter.ReplicaStatusPercentProgress != nil { + f13elem.ReplicaStatusPercentProgress = f13iter.ReplicaStatusPercentProgress } - if f12iter.ReplicaTableClassSummary != nil { - f12elemf8 := &svcapitypes.TableClassSummary{} - if f12iter.ReplicaTableClassSummary.LastUpdateDateTime != nil { - f12elemf8.LastUpdateDateTime = &metav1.Time{*f12iter.ReplicaTableClassSummary.LastUpdateDateTime} + if f13iter.ReplicaTableClassSummary != nil { + f13elemf8 := &svcapitypes.TableClassSummary{} + if f13iter.ReplicaTableClassSummary.LastUpdateDateTime != nil { + f13elemf8.LastUpdateDateTime = &metav1.Time{*f13iter.ReplicaTableClassSummary.LastUpdateDateTime} } - if f12iter.ReplicaTableClassSummary.TableClass != nil { - f12elemf8.TableClass = f12iter.ReplicaTableClassSummary.TableClass + if f13iter.ReplicaTableClassSummary.TableClass != nil { + f13elemf8.TableClass = f13iter.ReplicaTableClassSummary.TableClass } - f12elem.ReplicaTableClassSummary = f12elemf8 + f13elem.ReplicaTableClassSummary = f13elemf8 } - f12 = append(f12, f12elem) + f13 = append(f13, f13elem) } - cr.Status.AtProvider.Replicas = f12 + cr.Status.AtProvider.Replicas = f13 } else { cr.Status.AtProvider.Replicas = nil } if resp.TableDescription.RestoreSummary != nil { - f13 := &svcapitypes.RestoreSummary{} + f14 := &svcapitypes.RestoreSummary{} if resp.TableDescription.RestoreSummary.RestoreDateTime != nil { - f13.RestoreDateTime = &metav1.Time{*resp.TableDescription.RestoreSummary.RestoreDateTime} + f14.RestoreDateTime = &metav1.Time{*resp.TableDescription.RestoreSummary.RestoreDateTime} } if resp.TableDescription.RestoreSummary.RestoreInProgress != nil { - f13.RestoreInProgress = resp.TableDescription.RestoreSummary.RestoreInProgress + f14.RestoreInProgress = resp.TableDescription.RestoreSummary.RestoreInProgress } if 
resp.TableDescription.RestoreSummary.SourceBackupArn != nil { - f13.SourceBackupARN = resp.TableDescription.RestoreSummary.SourceBackupArn + f14.SourceBackupARN = resp.TableDescription.RestoreSummary.SourceBackupArn } if resp.TableDescription.RestoreSummary.SourceTableArn != nil { - f13.SourceTableARN = resp.TableDescription.RestoreSummary.SourceTableArn + f14.SourceTableARN = resp.TableDescription.RestoreSummary.SourceTableArn } - cr.Status.AtProvider.RestoreSummary = f13 + cr.Status.AtProvider.RestoreSummary = f14 } else { cr.Status.AtProvider.RestoreSummary = nil } if resp.TableDescription.SSEDescription != nil { - f14 := &svcapitypes.SSEDescription{} + f15 := &svcapitypes.SSEDescription{} if resp.TableDescription.SSEDescription.InaccessibleEncryptionDateTime != nil { - f14.InaccessibleEncryptionDateTime = &metav1.Time{*resp.TableDescription.SSEDescription.InaccessibleEncryptionDateTime} + f15.InaccessibleEncryptionDateTime = &metav1.Time{*resp.TableDescription.SSEDescription.InaccessibleEncryptionDateTime} } if resp.TableDescription.SSEDescription.KMSMasterKeyArn != nil { - f14.KMSMasterKeyARN = resp.TableDescription.SSEDescription.KMSMasterKeyArn + f15.KMSMasterKeyARN = resp.TableDescription.SSEDescription.KMSMasterKeyArn } if resp.TableDescription.SSEDescription.SSEType != nil { - f14.SSEType = resp.TableDescription.SSEDescription.SSEType + f15.SSEType = resp.TableDescription.SSEDescription.SSEType } if resp.TableDescription.SSEDescription.Status != nil { - f14.Status = resp.TableDescription.SSEDescription.Status + f15.Status = resp.TableDescription.SSEDescription.Status } - cr.Status.AtProvider.SSEDescription = f14 + cr.Status.AtProvider.SSEDescription = f15 } else { cr.Status.AtProvider.SSEDescription = nil } if resp.TableDescription.StreamSpecification != nil { - f15 := &svcapitypes.StreamSpecification{} + f16 := &svcapitypes.StreamSpecification{} if resp.TableDescription.StreamSpecification.StreamEnabled != nil { - f15.StreamEnabled = resp.TableDescription.StreamSpecification.StreamEnabled + f16.StreamEnabled = resp.TableDescription.StreamSpecification.StreamEnabled } if resp.TableDescription.StreamSpecification.StreamViewType != nil { - f15.StreamViewType = resp.TableDescription.StreamSpecification.StreamViewType + f16.StreamViewType = resp.TableDescription.StreamSpecification.StreamViewType } - cr.Spec.ForProvider.StreamSpecification = f15 + cr.Spec.ForProvider.StreamSpecification = f16 } else { cr.Spec.ForProvider.StreamSpecification = nil } @@ -425,14 +430,14 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E cr.Status.AtProvider.TableARN = nil } if resp.TableDescription.TableClassSummary != nil { - f17 := &svcapitypes.TableClassSummary{} + f18 := &svcapitypes.TableClassSummary{} if resp.TableDescription.TableClassSummary.LastUpdateDateTime != nil { - f17.LastUpdateDateTime = &metav1.Time{*resp.TableDescription.TableClassSummary.LastUpdateDateTime} + f18.LastUpdateDateTime = &metav1.Time{*resp.TableDescription.TableClassSummary.LastUpdateDateTime} } if resp.TableDescription.TableClassSummary.TableClass != nil { - f17.TableClass = resp.TableDescription.TableClassSummary.TableClass + f18.TableClass = resp.TableDescription.TableClassSummary.TableClass } - cr.Status.AtProvider.TableClassSummary = f17 + cr.Status.AtProvider.TableClassSummary = f18 } else { cr.Status.AtProvider.TableClassSummary = nil } diff --git a/pkg/controller/dynamodb/table/zz_conversions.go b/pkg/controller/dynamodb/table/zz_conversions.go index 8767567323..cebae8a038 100644 
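The Create hunks above pick up DynamoDB's new DeletionProtectionEnabled field from TableDescription; every later fN temporary (f4 -> f5, f7 -> f8, f12 -> f13, and so on) is renumbered by the generator to make room for it, with no behavioural change. A minimal sketch of the create path for the new field, assuming only the SetDeletionProtectionEnabled setter that this diff itself introduces (the helper below is hypothetical, not part of the provider):

package dynamodbexample

import (
	svcsdk "github.com/aws/aws-sdk-go/service/dynamodb"
)

// newCreateInput mirrors what GenerateCreateTableInput emits for the
// new field when cr.Spec.ForProvider.DeletionProtectionEnabled is set.
func newCreateInput(table string, protect bool) *svcsdk.CreateTableInput {
	res := &svcsdk.CreateTableInput{}
	res.SetTableName(table)
	// New in aws-sdk-go v1.44.x: guards the table against deletion.
	res.SetDeletionProtectionEnabled(protect)
	return res
}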
--- a/pkg/controller/dynamodb/table/zz_conversions.go +++ b/pkg/controller/dynamodb/table/zz_conversions.go @@ -93,56 +93,61 @@ func GenerateTable(resp *svcsdk.DescribeTableOutput) *svcapitypes.Table { } else { cr.Status.AtProvider.CreationDateTime = nil } + if resp.Table.DeletionProtectionEnabled != nil { + cr.Spec.ForProvider.DeletionProtectionEnabled = resp.Table.DeletionProtectionEnabled + } else { + cr.Spec.ForProvider.DeletionProtectionEnabled = nil + } if resp.Table.GlobalSecondaryIndexes != nil { - f4 := []*svcapitypes.GlobalSecondaryIndex{} - for _, f4iter := range resp.Table.GlobalSecondaryIndexes { - f4elem := &svcapitypes.GlobalSecondaryIndex{} - if f4iter.IndexName != nil { - f4elem.IndexName = f4iter.IndexName - } - if f4iter.KeySchema != nil { - f4elemf6 := []*svcapitypes.KeySchemaElement{} - for _, f4elemf6iter := range f4iter.KeySchema { - f4elemf6elem := &svcapitypes.KeySchemaElement{} - if f4elemf6iter.AttributeName != nil { - f4elemf6elem.AttributeName = f4elemf6iter.AttributeName + f5 := []*svcapitypes.GlobalSecondaryIndex{} + for _, f5iter := range resp.Table.GlobalSecondaryIndexes { + f5elem := &svcapitypes.GlobalSecondaryIndex{} + if f5iter.IndexName != nil { + f5elem.IndexName = f5iter.IndexName + } + if f5iter.KeySchema != nil { + f5elemf6 := []*svcapitypes.KeySchemaElement{} + for _, f5elemf6iter := range f5iter.KeySchema { + f5elemf6elem := &svcapitypes.KeySchemaElement{} + if f5elemf6iter.AttributeName != nil { + f5elemf6elem.AttributeName = f5elemf6iter.AttributeName } - if f4elemf6iter.KeyType != nil { - f4elemf6elem.KeyType = f4elemf6iter.KeyType + if f5elemf6iter.KeyType != nil { + f5elemf6elem.KeyType = f5elemf6iter.KeyType } - f4elemf6 = append(f4elemf6, f4elemf6elem) + f5elemf6 = append(f5elemf6, f5elemf6elem) } - f4elem.KeySchema = f4elemf6 - } - if f4iter.Projection != nil { - f4elemf7 := &svcapitypes.Projection{} - if f4iter.Projection.NonKeyAttributes != nil { - f4elemf7f0 := []*string{} - for _, f4elemf7f0iter := range f4iter.Projection.NonKeyAttributes { - var f4elemf7f0elem string - f4elemf7f0elem = *f4elemf7f0iter - f4elemf7f0 = append(f4elemf7f0, &f4elemf7f0elem) + f5elem.KeySchema = f5elemf6 + } + if f5iter.Projection != nil { + f5elemf7 := &svcapitypes.Projection{} + if f5iter.Projection.NonKeyAttributes != nil { + f5elemf7f0 := []*string{} + for _, f5elemf7f0iter := range f5iter.Projection.NonKeyAttributes { + var f5elemf7f0elem string + f5elemf7f0elem = *f5elemf7f0iter + f5elemf7f0 = append(f5elemf7f0, &f5elemf7f0elem) } - f4elemf7.NonKeyAttributes = f4elemf7f0 + f5elemf7.NonKeyAttributes = f5elemf7f0 } - if f4iter.Projection.ProjectionType != nil { - f4elemf7.ProjectionType = f4iter.Projection.ProjectionType + if f5iter.Projection.ProjectionType != nil { + f5elemf7.ProjectionType = f5iter.Projection.ProjectionType } - f4elem.Projection = f4elemf7 + f5elem.Projection = f5elemf7 } - if f4iter.ProvisionedThroughput != nil { - f4elemf8 := &svcapitypes.ProvisionedThroughput{} - if f4iter.ProvisionedThroughput.ReadCapacityUnits != nil { - f4elemf8.ReadCapacityUnits = f4iter.ProvisionedThroughput.ReadCapacityUnits + if f5iter.ProvisionedThroughput != nil { + f5elemf8 := &svcapitypes.ProvisionedThroughput{} + if f5iter.ProvisionedThroughput.ReadCapacityUnits != nil { + f5elemf8.ReadCapacityUnits = f5iter.ProvisionedThroughput.ReadCapacityUnits } - if f4iter.ProvisionedThroughput.WriteCapacityUnits != nil { - f4elemf8.WriteCapacityUnits = f4iter.ProvisionedThroughput.WriteCapacityUnits + if f5iter.ProvisionedThroughput.WriteCapacityUnits != nil { + 
f5elemf8.WriteCapacityUnits = f5iter.ProvisionedThroughput.WriteCapacityUnits } - f4elem.ProvisionedThroughput = f4elemf8 + f5elem.ProvisionedThroughput = f5elemf8 } - f4 = append(f4, f4elem) + f5 = append(f5, f5elem) } - cr.Spec.ForProvider.GlobalSecondaryIndexes = f4 + cr.Spec.ForProvider.GlobalSecondaryIndexes = f5 } else { cr.Spec.ForProvider.GlobalSecondaryIndexes = nil } @@ -157,18 +162,18 @@ func GenerateTable(resp *svcsdk.DescribeTableOutput) *svcapitypes.Table { cr.Status.AtProvider.ItemCount = nil } if resp.Table.KeySchema != nil { - f7 := []*svcapitypes.KeySchemaElement{} - for _, f7iter := range resp.Table.KeySchema { - f7elem := &svcapitypes.KeySchemaElement{} - if f7iter.AttributeName != nil { - f7elem.AttributeName = f7iter.AttributeName + f8 := []*svcapitypes.KeySchemaElement{} + for _, f8iter := range resp.Table.KeySchema { + f8elem := &svcapitypes.KeySchemaElement{} + if f8iter.AttributeName != nil { + f8elem.AttributeName = f8iter.AttributeName } - if f7iter.KeyType != nil { - f7elem.KeyType = f7iter.KeyType + if f8iter.KeyType != nil { + f8elem.KeyType = f8iter.KeyType } - f7 = append(f7, f7elem) + f8 = append(f8, f8elem) } - cr.Spec.ForProvider.KeySchema = f7 + cr.Spec.ForProvider.KeySchema = f8 } else { cr.Spec.ForProvider.KeySchema = nil } @@ -183,168 +188,168 @@ func GenerateTable(resp *svcsdk.DescribeTableOutput) *svcapitypes.Table { cr.Status.AtProvider.LatestStreamLabel = nil } if resp.Table.LocalSecondaryIndexes != nil { - f10 := []*svcapitypes.LocalSecondaryIndex{} - for _, f10iter := range resp.Table.LocalSecondaryIndexes { - f10elem := &svcapitypes.LocalSecondaryIndex{} - if f10iter.IndexName != nil { - f10elem.IndexName = f10iter.IndexName - } - if f10iter.KeySchema != nil { - f10elemf4 := []*svcapitypes.KeySchemaElement{} - for _, f10elemf4iter := range f10iter.KeySchema { - f10elemf4elem := &svcapitypes.KeySchemaElement{} - if f10elemf4iter.AttributeName != nil { - f10elemf4elem.AttributeName = f10elemf4iter.AttributeName + f11 := []*svcapitypes.LocalSecondaryIndex{} + for _, f11iter := range resp.Table.LocalSecondaryIndexes { + f11elem := &svcapitypes.LocalSecondaryIndex{} + if f11iter.IndexName != nil { + f11elem.IndexName = f11iter.IndexName + } + if f11iter.KeySchema != nil { + f11elemf4 := []*svcapitypes.KeySchemaElement{} + for _, f11elemf4iter := range f11iter.KeySchema { + f11elemf4elem := &svcapitypes.KeySchemaElement{} + if f11elemf4iter.AttributeName != nil { + f11elemf4elem.AttributeName = f11elemf4iter.AttributeName } - if f10elemf4iter.KeyType != nil { - f10elemf4elem.KeyType = f10elemf4iter.KeyType + if f11elemf4iter.KeyType != nil { + f11elemf4elem.KeyType = f11elemf4iter.KeyType } - f10elemf4 = append(f10elemf4, f10elemf4elem) + f11elemf4 = append(f11elemf4, f11elemf4elem) } - f10elem.KeySchema = f10elemf4 - } - if f10iter.Projection != nil { - f10elemf5 := &svcapitypes.Projection{} - if f10iter.Projection.NonKeyAttributes != nil { - f10elemf5f0 := []*string{} - for _, f10elemf5f0iter := range f10iter.Projection.NonKeyAttributes { - var f10elemf5f0elem string - f10elemf5f0elem = *f10elemf5f0iter - f10elemf5f0 = append(f10elemf5f0, &f10elemf5f0elem) + f11elem.KeySchema = f11elemf4 + } + if f11iter.Projection != nil { + f11elemf5 := &svcapitypes.Projection{} + if f11iter.Projection.NonKeyAttributes != nil { + f11elemf5f0 := []*string{} + for _, f11elemf5f0iter := range f11iter.Projection.NonKeyAttributes { + var f11elemf5f0elem string + f11elemf5f0elem = *f11elemf5f0iter + f11elemf5f0 = append(f11elemf5f0, &f11elemf5f0elem) } - 
f10elemf5.NonKeyAttributes = f10elemf5f0 + f11elemf5.NonKeyAttributes = f11elemf5f0 } - if f10iter.Projection.ProjectionType != nil { - f10elemf5.ProjectionType = f10iter.Projection.ProjectionType + if f11iter.Projection.ProjectionType != nil { + f11elemf5.ProjectionType = f11iter.Projection.ProjectionType } - f10elem.Projection = f10elemf5 + f11elem.Projection = f11elemf5 } - f10 = append(f10, f10elem) + f11 = append(f11, f11elem) } - cr.Spec.ForProvider.LocalSecondaryIndexes = f10 + cr.Spec.ForProvider.LocalSecondaryIndexes = f11 } else { cr.Spec.ForProvider.LocalSecondaryIndexes = nil } if resp.Table.ProvisionedThroughput != nil { - f11 := &svcapitypes.ProvisionedThroughput{} + f12 := &svcapitypes.ProvisionedThroughput{} if resp.Table.ProvisionedThroughput.ReadCapacityUnits != nil { - f11.ReadCapacityUnits = resp.Table.ProvisionedThroughput.ReadCapacityUnits + f12.ReadCapacityUnits = resp.Table.ProvisionedThroughput.ReadCapacityUnits } if resp.Table.ProvisionedThroughput.WriteCapacityUnits != nil { - f11.WriteCapacityUnits = resp.Table.ProvisionedThroughput.WriteCapacityUnits + f12.WriteCapacityUnits = resp.Table.ProvisionedThroughput.WriteCapacityUnits } - cr.Spec.ForProvider.ProvisionedThroughput = f11 + cr.Spec.ForProvider.ProvisionedThroughput = f12 } else { cr.Spec.ForProvider.ProvisionedThroughput = nil } if resp.Table.Replicas != nil { - f12 := []*svcapitypes.ReplicaDescription{} - for _, f12iter := range resp.Table.Replicas { - f12elem := &svcapitypes.ReplicaDescription{} - if f12iter.GlobalSecondaryIndexes != nil { - f12elemf0 := []*svcapitypes.ReplicaGlobalSecondaryIndexDescription{} - for _, f12elemf0iter := range f12iter.GlobalSecondaryIndexes { - f12elemf0elem := &svcapitypes.ReplicaGlobalSecondaryIndexDescription{} - if f12elemf0iter.IndexName != nil { - f12elemf0elem.IndexName = f12elemf0iter.IndexName + f13 := []*svcapitypes.ReplicaDescription{} + for _, f13iter := range resp.Table.Replicas { + f13elem := &svcapitypes.ReplicaDescription{} + if f13iter.GlobalSecondaryIndexes != nil { + f13elemf0 := []*svcapitypes.ReplicaGlobalSecondaryIndexDescription{} + for _, f13elemf0iter := range f13iter.GlobalSecondaryIndexes { + f13elemf0elem := &svcapitypes.ReplicaGlobalSecondaryIndexDescription{} + if f13elemf0iter.IndexName != nil { + f13elemf0elem.IndexName = f13elemf0iter.IndexName } - if f12elemf0iter.ProvisionedThroughputOverride != nil { - f12elemf0elemf1 := &svcapitypes.ProvisionedThroughputOverride{} - if f12elemf0iter.ProvisionedThroughputOverride.ReadCapacityUnits != nil { - f12elemf0elemf1.ReadCapacityUnits = f12elemf0iter.ProvisionedThroughputOverride.ReadCapacityUnits + if f13elemf0iter.ProvisionedThroughputOverride != nil { + f13elemf0elemf1 := &svcapitypes.ProvisionedThroughputOverride{} + if f13elemf0iter.ProvisionedThroughputOverride.ReadCapacityUnits != nil { + f13elemf0elemf1.ReadCapacityUnits = f13elemf0iter.ProvisionedThroughputOverride.ReadCapacityUnits } - f12elemf0elem.ProvisionedThroughputOverride = f12elemf0elemf1 + f13elemf0elem.ProvisionedThroughputOverride = f13elemf0elemf1 } - f12elemf0 = append(f12elemf0, f12elemf0elem) + f13elemf0 = append(f13elemf0, f13elemf0elem) } - f12elem.GlobalSecondaryIndexes = f12elemf0 + f13elem.GlobalSecondaryIndexes = f13elemf0 } - if f12iter.KMSMasterKeyId != nil { - f12elem.KMSMasterKeyID = f12iter.KMSMasterKeyId + if f13iter.KMSMasterKeyId != nil { + f13elem.KMSMasterKeyID = f13iter.KMSMasterKeyId } - if f12iter.ProvisionedThroughputOverride != nil { - f12elemf2 := &svcapitypes.ProvisionedThroughputOverride{} - if 
f12iter.ProvisionedThroughputOverride.ReadCapacityUnits != nil { - f12elemf2.ReadCapacityUnits = f12iter.ProvisionedThroughputOverride.ReadCapacityUnits + if f13iter.ProvisionedThroughputOverride != nil { + f13elemf2 := &svcapitypes.ProvisionedThroughputOverride{} + if f13iter.ProvisionedThroughputOverride.ReadCapacityUnits != nil { + f13elemf2.ReadCapacityUnits = f13iter.ProvisionedThroughputOverride.ReadCapacityUnits } - f12elem.ProvisionedThroughputOverride = f12elemf2 + f13elem.ProvisionedThroughputOverride = f13elemf2 } - if f12iter.RegionName != nil { - f12elem.RegionName = f12iter.RegionName + if f13iter.RegionName != nil { + f13elem.RegionName = f13iter.RegionName } - if f12iter.ReplicaInaccessibleDateTime != nil { - f12elem.ReplicaInaccessibleDateTime = &metav1.Time{*f12iter.ReplicaInaccessibleDateTime} + if f13iter.ReplicaInaccessibleDateTime != nil { + f13elem.ReplicaInaccessibleDateTime = &metav1.Time{*f13iter.ReplicaInaccessibleDateTime} } - if f12iter.ReplicaStatus != nil { - f12elem.ReplicaStatus = f12iter.ReplicaStatus + if f13iter.ReplicaStatus != nil { + f13elem.ReplicaStatus = f13iter.ReplicaStatus } - if f12iter.ReplicaStatusDescription != nil { - f12elem.ReplicaStatusDescription = f12iter.ReplicaStatusDescription + if f13iter.ReplicaStatusDescription != nil { + f13elem.ReplicaStatusDescription = f13iter.ReplicaStatusDescription } - if f12iter.ReplicaStatusPercentProgress != nil { - f12elem.ReplicaStatusPercentProgress = f12iter.ReplicaStatusPercentProgress + if f13iter.ReplicaStatusPercentProgress != nil { + f13elem.ReplicaStatusPercentProgress = f13iter.ReplicaStatusPercentProgress } - if f12iter.ReplicaTableClassSummary != nil { - f12elemf8 := &svcapitypes.TableClassSummary{} - if f12iter.ReplicaTableClassSummary.LastUpdateDateTime != nil { - f12elemf8.LastUpdateDateTime = &metav1.Time{*f12iter.ReplicaTableClassSummary.LastUpdateDateTime} + if f13iter.ReplicaTableClassSummary != nil { + f13elemf8 := &svcapitypes.TableClassSummary{} + if f13iter.ReplicaTableClassSummary.LastUpdateDateTime != nil { + f13elemf8.LastUpdateDateTime = &metav1.Time{*f13iter.ReplicaTableClassSummary.LastUpdateDateTime} } - if f12iter.ReplicaTableClassSummary.TableClass != nil { - f12elemf8.TableClass = f12iter.ReplicaTableClassSummary.TableClass + if f13iter.ReplicaTableClassSummary.TableClass != nil { + f13elemf8.TableClass = f13iter.ReplicaTableClassSummary.TableClass } - f12elem.ReplicaTableClassSummary = f12elemf8 + f13elem.ReplicaTableClassSummary = f13elemf8 } - f12 = append(f12, f12elem) + f13 = append(f13, f13elem) } - cr.Status.AtProvider.Replicas = f12 + cr.Status.AtProvider.Replicas = f13 } else { cr.Status.AtProvider.Replicas = nil } if resp.Table.RestoreSummary != nil { - f13 := &svcapitypes.RestoreSummary{} + f14 := &svcapitypes.RestoreSummary{} if resp.Table.RestoreSummary.RestoreDateTime != nil { - f13.RestoreDateTime = &metav1.Time{*resp.Table.RestoreSummary.RestoreDateTime} + f14.RestoreDateTime = &metav1.Time{*resp.Table.RestoreSummary.RestoreDateTime} } if resp.Table.RestoreSummary.RestoreInProgress != nil { - f13.RestoreInProgress = resp.Table.RestoreSummary.RestoreInProgress + f14.RestoreInProgress = resp.Table.RestoreSummary.RestoreInProgress } if resp.Table.RestoreSummary.SourceBackupArn != nil { - f13.SourceBackupARN = resp.Table.RestoreSummary.SourceBackupArn + f14.SourceBackupARN = resp.Table.RestoreSummary.SourceBackupArn } if resp.Table.RestoreSummary.SourceTableArn != nil { - f13.SourceTableARN = resp.Table.RestoreSummary.SourceTableArn + f14.SourceTableARN = 
resp.Table.RestoreSummary.SourceTableArn } - cr.Status.AtProvider.RestoreSummary = f13 + cr.Status.AtProvider.RestoreSummary = f14 } else { cr.Status.AtProvider.RestoreSummary = nil } if resp.Table.SSEDescription != nil { - f14 := &svcapitypes.SSEDescription{} + f15 := &svcapitypes.SSEDescription{} if resp.Table.SSEDescription.InaccessibleEncryptionDateTime != nil { - f14.InaccessibleEncryptionDateTime = &metav1.Time{*resp.Table.SSEDescription.InaccessibleEncryptionDateTime} + f15.InaccessibleEncryptionDateTime = &metav1.Time{*resp.Table.SSEDescription.InaccessibleEncryptionDateTime} } if resp.Table.SSEDescription.KMSMasterKeyArn != nil { - f14.KMSMasterKeyARN = resp.Table.SSEDescription.KMSMasterKeyArn + f15.KMSMasterKeyARN = resp.Table.SSEDescription.KMSMasterKeyArn } if resp.Table.SSEDescription.SSEType != nil { - f14.SSEType = resp.Table.SSEDescription.SSEType + f15.SSEType = resp.Table.SSEDescription.SSEType } if resp.Table.SSEDescription.Status != nil { - f14.Status = resp.Table.SSEDescription.Status + f15.Status = resp.Table.SSEDescription.Status } - cr.Status.AtProvider.SSEDescription = f14 + cr.Status.AtProvider.SSEDescription = f15 } else { cr.Status.AtProvider.SSEDescription = nil } if resp.Table.StreamSpecification != nil { - f15 := &svcapitypes.StreamSpecification{} + f16 := &svcapitypes.StreamSpecification{} if resp.Table.StreamSpecification.StreamEnabled != nil { - f15.StreamEnabled = resp.Table.StreamSpecification.StreamEnabled + f16.StreamEnabled = resp.Table.StreamSpecification.StreamEnabled } if resp.Table.StreamSpecification.StreamViewType != nil { - f15.StreamViewType = resp.Table.StreamSpecification.StreamViewType + f16.StreamViewType = resp.Table.StreamSpecification.StreamViewType } - cr.Spec.ForProvider.StreamSpecification = f15 + cr.Spec.ForProvider.StreamSpecification = f16 } else { cr.Spec.ForProvider.StreamSpecification = nil } @@ -354,14 +359,14 @@ func GenerateTable(resp *svcsdk.DescribeTableOutput) *svcapitypes.Table { cr.Status.AtProvider.TableARN = nil } if resp.Table.TableClassSummary != nil { - f17 := &svcapitypes.TableClassSummary{} + f18 := &svcapitypes.TableClassSummary{} if resp.Table.TableClassSummary.LastUpdateDateTime != nil { - f17.LastUpdateDateTime = &metav1.Time{*resp.Table.TableClassSummary.LastUpdateDateTime} + f18.LastUpdateDateTime = &metav1.Time{*resp.Table.TableClassSummary.LastUpdateDateTime} } if resp.Table.TableClassSummary.TableClass != nil { - f17.TableClass = resp.Table.TableClassSummary.TableClass + f18.TableClass = resp.Table.TableClassSummary.TableClass } - cr.Status.AtProvider.TableClassSummary = f17 + cr.Status.AtProvider.TableClassSummary = f18 } else { cr.Status.AtProvider.TableClassSummary = nil } @@ -410,161 +415,164 @@ func GenerateCreateTableInput(cr *svcapitypes.Table) *svcsdk.CreateTableInput { if cr.Spec.ForProvider.BillingMode != nil { res.SetBillingMode(*cr.Spec.ForProvider.BillingMode) } + if cr.Spec.ForProvider.DeletionProtectionEnabled != nil { + res.SetDeletionProtectionEnabled(*cr.Spec.ForProvider.DeletionProtectionEnabled) + } if cr.Spec.ForProvider.GlobalSecondaryIndexes != nil { - f2 := []*svcsdk.GlobalSecondaryIndex{} - for _, f2iter := range cr.Spec.ForProvider.GlobalSecondaryIndexes { - f2elem := &svcsdk.GlobalSecondaryIndex{} - if f2iter.IndexName != nil { - f2elem.SetIndexName(*f2iter.IndexName) - } - if f2iter.KeySchema != nil { - f2elemf1 := []*svcsdk.KeySchemaElement{} - for _, f2elemf1iter := range f2iter.KeySchema { - f2elemf1elem := &svcsdk.KeySchemaElement{} - if f2elemf1iter.AttributeName != nil 
{ - f2elemf1elem.SetAttributeName(*f2elemf1iter.AttributeName) + f3 := []*svcsdk.GlobalSecondaryIndex{} + for _, f3iter := range cr.Spec.ForProvider.GlobalSecondaryIndexes { + f3elem := &svcsdk.GlobalSecondaryIndex{} + if f3iter.IndexName != nil { + f3elem.SetIndexName(*f3iter.IndexName) + } + if f3iter.KeySchema != nil { + f3elemf1 := []*svcsdk.KeySchemaElement{} + for _, f3elemf1iter := range f3iter.KeySchema { + f3elemf1elem := &svcsdk.KeySchemaElement{} + if f3elemf1iter.AttributeName != nil { + f3elemf1elem.SetAttributeName(*f3elemf1iter.AttributeName) } - if f2elemf1iter.KeyType != nil { - f2elemf1elem.SetKeyType(*f2elemf1iter.KeyType) + if f3elemf1iter.KeyType != nil { + f3elemf1elem.SetKeyType(*f3elemf1iter.KeyType) } - f2elemf1 = append(f2elemf1, f2elemf1elem) + f3elemf1 = append(f3elemf1, f3elemf1elem) } - f2elem.SetKeySchema(f2elemf1) - } - if f2iter.Projection != nil { - f2elemf2 := &svcsdk.Projection{} - if f2iter.Projection.NonKeyAttributes != nil { - f2elemf2f0 := []*string{} - for _, f2elemf2f0iter := range f2iter.Projection.NonKeyAttributes { - var f2elemf2f0elem string - f2elemf2f0elem = *f2elemf2f0iter - f2elemf2f0 = append(f2elemf2f0, &f2elemf2f0elem) + f3elem.SetKeySchema(f3elemf1) + } + if f3iter.Projection != nil { + f3elemf2 := &svcsdk.Projection{} + if f3iter.Projection.NonKeyAttributes != nil { + f3elemf2f0 := []*string{} + for _, f3elemf2f0iter := range f3iter.Projection.NonKeyAttributes { + var f3elemf2f0elem string + f3elemf2f0elem = *f3elemf2f0iter + f3elemf2f0 = append(f3elemf2f0, &f3elemf2f0elem) } - f2elemf2.SetNonKeyAttributes(f2elemf2f0) + f3elemf2.SetNonKeyAttributes(f3elemf2f0) } - if f2iter.Projection.ProjectionType != nil { - f2elemf2.SetProjectionType(*f2iter.Projection.ProjectionType) + if f3iter.Projection.ProjectionType != nil { + f3elemf2.SetProjectionType(*f3iter.Projection.ProjectionType) } - f2elem.SetProjection(f2elemf2) + f3elem.SetProjection(f3elemf2) } - if f2iter.ProvisionedThroughput != nil { - f2elemf3 := &svcsdk.ProvisionedThroughput{} - if f2iter.ProvisionedThroughput.ReadCapacityUnits != nil { - f2elemf3.SetReadCapacityUnits(*f2iter.ProvisionedThroughput.ReadCapacityUnits) + if f3iter.ProvisionedThroughput != nil { + f3elemf3 := &svcsdk.ProvisionedThroughput{} + if f3iter.ProvisionedThroughput.ReadCapacityUnits != nil { + f3elemf3.SetReadCapacityUnits(*f3iter.ProvisionedThroughput.ReadCapacityUnits) } - if f2iter.ProvisionedThroughput.WriteCapacityUnits != nil { - f2elemf3.SetWriteCapacityUnits(*f2iter.ProvisionedThroughput.WriteCapacityUnits) + if f3iter.ProvisionedThroughput.WriteCapacityUnits != nil { + f3elemf3.SetWriteCapacityUnits(*f3iter.ProvisionedThroughput.WriteCapacityUnits) } - f2elem.SetProvisionedThroughput(f2elemf3) + f3elem.SetProvisionedThroughput(f3elemf3) } - f2 = append(f2, f2elem) + f3 = append(f3, f3elem) } - res.SetGlobalSecondaryIndexes(f2) + res.SetGlobalSecondaryIndexes(f3) } if cr.Spec.ForProvider.KeySchema != nil { - f3 := []*svcsdk.KeySchemaElement{} - for _, f3iter := range cr.Spec.ForProvider.KeySchema { - f3elem := &svcsdk.KeySchemaElement{} - if f3iter.AttributeName != nil { - f3elem.SetAttributeName(*f3iter.AttributeName) + f4 := []*svcsdk.KeySchemaElement{} + for _, f4iter := range cr.Spec.ForProvider.KeySchema { + f4elem := &svcsdk.KeySchemaElement{} + if f4iter.AttributeName != nil { + f4elem.SetAttributeName(*f4iter.AttributeName) } - if f3iter.KeyType != nil { - f3elem.SetKeyType(*f3iter.KeyType) + if f4iter.KeyType != nil { + f4elem.SetKeyType(*f4iter.KeyType) } - f3 = append(f3, f3elem) + f4 = 
append(f4, f4elem) } - res.SetKeySchema(f3) + res.SetKeySchema(f4) } if cr.Spec.ForProvider.LocalSecondaryIndexes != nil { - f4 := []*svcsdk.LocalSecondaryIndex{} - for _, f4iter := range cr.Spec.ForProvider.LocalSecondaryIndexes { - f4elem := &svcsdk.LocalSecondaryIndex{} - if f4iter.IndexName != nil { - f4elem.SetIndexName(*f4iter.IndexName) - } - if f4iter.KeySchema != nil { - f4elemf1 := []*svcsdk.KeySchemaElement{} - for _, f4elemf1iter := range f4iter.KeySchema { - f4elemf1elem := &svcsdk.KeySchemaElement{} - if f4elemf1iter.AttributeName != nil { - f4elemf1elem.SetAttributeName(*f4elemf1iter.AttributeName) + f5 := []*svcsdk.LocalSecondaryIndex{} + for _, f5iter := range cr.Spec.ForProvider.LocalSecondaryIndexes { + f5elem := &svcsdk.LocalSecondaryIndex{} + if f5iter.IndexName != nil { + f5elem.SetIndexName(*f5iter.IndexName) + } + if f5iter.KeySchema != nil { + f5elemf1 := []*svcsdk.KeySchemaElement{} + for _, f5elemf1iter := range f5iter.KeySchema { + f5elemf1elem := &svcsdk.KeySchemaElement{} + if f5elemf1iter.AttributeName != nil { + f5elemf1elem.SetAttributeName(*f5elemf1iter.AttributeName) } - if f4elemf1iter.KeyType != nil { - f4elemf1elem.SetKeyType(*f4elemf1iter.KeyType) + if f5elemf1iter.KeyType != nil { + f5elemf1elem.SetKeyType(*f5elemf1iter.KeyType) } - f4elemf1 = append(f4elemf1, f4elemf1elem) + f5elemf1 = append(f5elemf1, f5elemf1elem) } - f4elem.SetKeySchema(f4elemf1) - } - if f4iter.Projection != nil { - f4elemf2 := &svcsdk.Projection{} - if f4iter.Projection.NonKeyAttributes != nil { - f4elemf2f0 := []*string{} - for _, f4elemf2f0iter := range f4iter.Projection.NonKeyAttributes { - var f4elemf2f0elem string - f4elemf2f0elem = *f4elemf2f0iter - f4elemf2f0 = append(f4elemf2f0, &f4elemf2f0elem) + f5elem.SetKeySchema(f5elemf1) + } + if f5iter.Projection != nil { + f5elemf2 := &svcsdk.Projection{} + if f5iter.Projection.NonKeyAttributes != nil { + f5elemf2f0 := []*string{} + for _, f5elemf2f0iter := range f5iter.Projection.NonKeyAttributes { + var f5elemf2f0elem string + f5elemf2f0elem = *f5elemf2f0iter + f5elemf2f0 = append(f5elemf2f0, &f5elemf2f0elem) } - f4elemf2.SetNonKeyAttributes(f4elemf2f0) + f5elemf2.SetNonKeyAttributes(f5elemf2f0) } - if f4iter.Projection.ProjectionType != nil { - f4elemf2.SetProjectionType(*f4iter.Projection.ProjectionType) + if f5iter.Projection.ProjectionType != nil { + f5elemf2.SetProjectionType(*f5iter.Projection.ProjectionType) } - f4elem.SetProjection(f4elemf2) + f5elem.SetProjection(f5elemf2) } - f4 = append(f4, f4elem) + f5 = append(f5, f5elem) } - res.SetLocalSecondaryIndexes(f4) + res.SetLocalSecondaryIndexes(f5) } if cr.Spec.ForProvider.ProvisionedThroughput != nil { - f5 := &svcsdk.ProvisionedThroughput{} + f6 := &svcsdk.ProvisionedThroughput{} if cr.Spec.ForProvider.ProvisionedThroughput.ReadCapacityUnits != nil { - f5.SetReadCapacityUnits(*cr.Spec.ForProvider.ProvisionedThroughput.ReadCapacityUnits) + f6.SetReadCapacityUnits(*cr.Spec.ForProvider.ProvisionedThroughput.ReadCapacityUnits) } if cr.Spec.ForProvider.ProvisionedThroughput.WriteCapacityUnits != nil { - f5.SetWriteCapacityUnits(*cr.Spec.ForProvider.ProvisionedThroughput.WriteCapacityUnits) + f6.SetWriteCapacityUnits(*cr.Spec.ForProvider.ProvisionedThroughput.WriteCapacityUnits) } - res.SetProvisionedThroughput(f5) + res.SetProvisionedThroughput(f6) } if cr.Spec.ForProvider.SSESpecification != nil { - f6 := &svcsdk.SSESpecification{} + f7 := &svcsdk.SSESpecification{} if cr.Spec.ForProvider.SSESpecification.Enabled != nil { - 
f6.SetEnabled(*cr.Spec.ForProvider.SSESpecification.Enabled) + f7.SetEnabled(*cr.Spec.ForProvider.SSESpecification.Enabled) } if cr.Spec.ForProvider.SSESpecification.KMSMasterKeyID != nil { - f6.SetKMSMasterKeyId(*cr.Spec.ForProvider.SSESpecification.KMSMasterKeyID) + f7.SetKMSMasterKeyId(*cr.Spec.ForProvider.SSESpecification.KMSMasterKeyID) } if cr.Spec.ForProvider.SSESpecification.SSEType != nil { - f6.SetSSEType(*cr.Spec.ForProvider.SSESpecification.SSEType) + f7.SetSSEType(*cr.Spec.ForProvider.SSESpecification.SSEType) } - res.SetSSESpecification(f6) + res.SetSSESpecification(f7) } if cr.Spec.ForProvider.StreamSpecification != nil { - f7 := &svcsdk.StreamSpecification{} + f8 := &svcsdk.StreamSpecification{} if cr.Spec.ForProvider.StreamSpecification.StreamEnabled != nil { - f7.SetStreamEnabled(*cr.Spec.ForProvider.StreamSpecification.StreamEnabled) + f8.SetStreamEnabled(*cr.Spec.ForProvider.StreamSpecification.StreamEnabled) } if cr.Spec.ForProvider.StreamSpecification.StreamViewType != nil { - f7.SetStreamViewType(*cr.Spec.ForProvider.StreamSpecification.StreamViewType) + f8.SetStreamViewType(*cr.Spec.ForProvider.StreamSpecification.StreamViewType) } - res.SetStreamSpecification(f7) + res.SetStreamSpecification(f8) } if cr.Spec.ForProvider.TableClass != nil { res.SetTableClass(*cr.Spec.ForProvider.TableClass) } if cr.Spec.ForProvider.Tags != nil { - f9 := []*svcsdk.Tag{} - for _, f9iter := range cr.Spec.ForProvider.Tags { - f9elem := &svcsdk.Tag{} - if f9iter.Key != nil { - f9elem.SetKey(*f9iter.Key) + f10 := []*svcsdk.Tag{} + for _, f10iter := range cr.Spec.ForProvider.Tags { + f10elem := &svcsdk.Tag{} + if f10iter.Key != nil { + f10elem.SetKey(*f10iter.Key) } - if f9iter.Value != nil { - f9elem.SetValue(*f9iter.Value) + if f10iter.Value != nil { + f10elem.SetValue(*f10iter.Value) } - f9 = append(f9, f9elem) + f10 = append(f10, f10elem) } - res.SetTags(f9) + res.SetTags(f10) } return res @@ -591,38 +599,41 @@ func GenerateUpdateTableInput(cr *svcapitypes.Table) *svcsdk.UpdateTableInput { if cr.Spec.ForProvider.BillingMode != nil { res.SetBillingMode(*cr.Spec.ForProvider.BillingMode) } + if cr.Spec.ForProvider.DeletionProtectionEnabled != nil { + res.SetDeletionProtectionEnabled(*cr.Spec.ForProvider.DeletionProtectionEnabled) + } if cr.Spec.ForProvider.ProvisionedThroughput != nil { - f3 := &svcsdk.ProvisionedThroughput{} + f4 := &svcsdk.ProvisionedThroughput{} if cr.Spec.ForProvider.ProvisionedThroughput.ReadCapacityUnits != nil { - f3.SetReadCapacityUnits(*cr.Spec.ForProvider.ProvisionedThroughput.ReadCapacityUnits) + f4.SetReadCapacityUnits(*cr.Spec.ForProvider.ProvisionedThroughput.ReadCapacityUnits) } if cr.Spec.ForProvider.ProvisionedThroughput.WriteCapacityUnits != nil { - f3.SetWriteCapacityUnits(*cr.Spec.ForProvider.ProvisionedThroughput.WriteCapacityUnits) + f4.SetWriteCapacityUnits(*cr.Spec.ForProvider.ProvisionedThroughput.WriteCapacityUnits) } - res.SetProvisionedThroughput(f3) + res.SetProvisionedThroughput(f4) } if cr.Spec.ForProvider.SSESpecification != nil { - f5 := &svcsdk.SSESpecification{} + f6 := &svcsdk.SSESpecification{} if cr.Spec.ForProvider.SSESpecification.Enabled != nil { - f5.SetEnabled(*cr.Spec.ForProvider.SSESpecification.Enabled) + f6.SetEnabled(*cr.Spec.ForProvider.SSESpecification.Enabled) } if cr.Spec.ForProvider.SSESpecification.KMSMasterKeyID != nil { - f5.SetKMSMasterKeyId(*cr.Spec.ForProvider.SSESpecification.KMSMasterKeyID) + f6.SetKMSMasterKeyId(*cr.Spec.ForProvider.SSESpecification.KMSMasterKeyID) } if 
cr.Spec.ForProvider.SSESpecification.SSEType != nil { - f5.SetSSEType(*cr.Spec.ForProvider.SSESpecification.SSEType) + f6.SetSSEType(*cr.Spec.ForProvider.SSESpecification.SSEType) } - res.SetSSESpecification(f5) + res.SetSSESpecification(f6) } if cr.Spec.ForProvider.StreamSpecification != nil { - f6 := &svcsdk.StreamSpecification{} + f7 := &svcsdk.StreamSpecification{} if cr.Spec.ForProvider.StreamSpecification.StreamEnabled != nil { - f6.SetStreamEnabled(*cr.Spec.ForProvider.StreamSpecification.StreamEnabled) + f7.SetStreamEnabled(*cr.Spec.ForProvider.StreamSpecification.StreamEnabled) } if cr.Spec.ForProvider.StreamSpecification.StreamViewType != nil { - f6.SetStreamViewType(*cr.Spec.ForProvider.StreamSpecification.StreamViewType) + f7.SetStreamViewType(*cr.Spec.ForProvider.StreamSpecification.StreamViewType) } - res.SetStreamSpecification(f6) + res.SetStreamSpecification(f7) } if cr.Spec.ForProvider.TableClass != nil { res.SetTableClass(*cr.Spec.ForProvider.TableClass) diff --git a/pkg/controller/ec2/launchtemplate/zz_conversions.go b/pkg/controller/ec2/launchtemplate/zz_conversions.go index f7d7963d96..0558a8b4d5 100644 --- a/pkg/controller/ec2/launchtemplate/zz_conversions.go +++ b/pkg/controller/ec2/launchtemplate/zz_conversions.go @@ -133,6 +133,9 @@ func GenerateCreateLaunchTemplateInput(cr *svcapitypes.LaunchTemplate) *svcsdk.C } if cr.Spec.ForProvider.LaunchTemplateData.CPUOptions != nil { f0f2 := &svcsdk.LaunchTemplateCpuOptionsRequest{} + if cr.Spec.ForProvider.LaunchTemplateData.CPUOptions.AmdSevSnp != nil { + f0f2.SetAmdSevSnp(*cr.Spec.ForProvider.LaunchTemplateData.CPUOptions.AmdSevSnp) + } if cr.Spec.ForProvider.LaunchTemplateData.CPUOptions.CoreCount != nil { f0f2.SetCoreCount(*cr.Spec.ForProvider.LaunchTemplateData.CPUOptions.CoreCount) } @@ -554,22 +557,25 @@ func GenerateCreateLaunchTemplateInput(cr *svcapitypes.LaunchTemplate) *svcsdk.C if f0f23iter.NetworkInterfaceID != nil { f0f23elem.SetNetworkInterfaceId(*f0f23iter.NetworkInterfaceID) } + if f0f23iter.PrimaryIPv6 != nil { + f0f23elem.SetPrimaryIpv6(*f0f23iter.PrimaryIPv6) + } if f0f23iter.PrivateIPAddress != nil { f0f23elem.SetPrivateIpAddress(*f0f23iter.PrivateIPAddress) } if f0f23iter.PrivateIPAddresses != nil { - f0f23elemf16 := []*svcsdk.PrivateIpAddressSpecification{} - for _, f0f23elemf16iter := range f0f23iter.PrivateIPAddresses { - f0f23elemf16elem := &svcsdk.PrivateIpAddressSpecification{} - if f0f23elemf16iter.Primary != nil { - f0f23elemf16elem.SetPrimary(*f0f23elemf16iter.Primary) + f0f23elemf17 := []*svcsdk.PrivateIpAddressSpecification{} + for _, f0f23elemf17iter := range f0f23iter.PrivateIPAddresses { + f0f23elemf17elem := &svcsdk.PrivateIpAddressSpecification{} + if f0f23elemf17iter.Primary != nil { + f0f23elemf17elem.SetPrimary(*f0f23elemf17iter.Primary) } - if f0f23elemf16iter.PrivateIPAddress != nil { - f0f23elemf16elem.SetPrivateIpAddress(*f0f23elemf16iter.PrivateIPAddress) + if f0f23elemf17iter.PrivateIPAddress != nil { + f0f23elemf17elem.SetPrivateIpAddress(*f0f23elemf17iter.PrivateIPAddress) } - f0f23elemf16 = append(f0f23elemf16, f0f23elemf16elem) + f0f23elemf17 = append(f0f23elemf17, f0f23elemf17elem) } - f0f23elem.SetPrivateIpAddresses(f0f23elemf16) + f0f23elem.SetPrivateIpAddresses(f0f23elemf17) } if f0f23iter.SecondaryPrivateIPAddressCount != nil { f0f23elem.SetSecondaryPrivateIpAddressCount(*f0f23iter.SecondaryPrivateIPAddressCount) diff --git a/pkg/controller/ec2/launchtemplateversion/zz_controller.go b/pkg/controller/ec2/launchtemplateversion/zz_controller.go index 
db4b5e17a3..a5a54a26fe 100644 --- a/pkg/controller/ec2/launchtemplateversion/zz_controller.go +++ b/pkg/controller/ec2/launchtemplateversion/zz_controller.go @@ -197,6 +197,9 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E } if resp.LaunchTemplateVersion.LaunchTemplateData.CpuOptions != nil { f0f3f2 := &svcapitypes.LaunchTemplateCPUOptions{} + if resp.LaunchTemplateVersion.LaunchTemplateData.CpuOptions.AmdSevSnp != nil { + f0f3f2.AmdSevSnp = resp.LaunchTemplateVersion.LaunchTemplateData.CpuOptions.AmdSevSnp + } if resp.LaunchTemplateVersion.LaunchTemplateData.CpuOptions.CoreCount != nil { f0f3f2.CoreCount = resp.LaunchTemplateVersion.LaunchTemplateData.CpuOptions.CoreCount } @@ -597,6 +600,9 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E if f0f3f23elemf10iter.Ipv6Address != nil { f0f3f23elemf10elem.IPv6Address = f0f3f23elemf10iter.Ipv6Address } + if f0f3f23elemf10iter.IsPrimaryIpv6 != nil { + f0f3f23elemf10elem.IsPrimaryIPv6 = f0f3f23elemf10iter.IsPrimaryIpv6 + } f0f3f23elemf10 = append(f0f3f23elemf10, f0f3f23elemf10elem) } f0f3f23elem.IPv6Addresses = f0f3f23elemf10 @@ -621,22 +627,25 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E if f0f3f23iter.NetworkInterfaceId != nil { f0f3f23elem.NetworkInterfaceID = f0f3f23iter.NetworkInterfaceId } + if f0f3f23iter.PrimaryIpv6 != nil { + f0f3f23elem.PrimaryIPv6 = f0f3f23iter.PrimaryIpv6 + } if f0f3f23iter.PrivateIpAddress != nil { f0f3f23elem.PrivateIPAddress = f0f3f23iter.PrivateIpAddress } if f0f3f23iter.PrivateIpAddresses != nil { - f0f3f23elemf16 := []*svcapitypes.PrivateIPAddressSpecification{} - for _, f0f3f23elemf16iter := range f0f3f23iter.PrivateIpAddresses { - f0f3f23elemf16elem := &svcapitypes.PrivateIPAddressSpecification{} - if f0f3f23elemf16iter.Primary != nil { - f0f3f23elemf16elem.Primary = f0f3f23elemf16iter.Primary + f0f3f23elemf17 := []*svcapitypes.PrivateIPAddressSpecification{} + for _, f0f3f23elemf17iter := range f0f3f23iter.PrivateIpAddresses { + f0f3f23elemf17elem := &svcapitypes.PrivateIPAddressSpecification{} + if f0f3f23elemf17iter.Primary != nil { + f0f3f23elemf17elem.Primary = f0f3f23elemf17iter.Primary } - if f0f3f23elemf16iter.PrivateIpAddress != nil { - f0f3f23elemf16elem.PrivateIPAddress = f0f3f23elemf16iter.PrivateIpAddress + if f0f3f23elemf17iter.PrivateIpAddress != nil { + f0f3f23elemf17elem.PrivateIPAddress = f0f3f23elemf17iter.PrivateIpAddress } - f0f3f23elemf16 = append(f0f3f23elemf16, f0f3f23elemf16elem) + f0f3f23elemf17 = append(f0f3f23elemf17, f0f3f23elemf17elem) } - f0f3f23elem.PrivateIPAddresses = f0f3f23elemf16 + f0f3f23elem.PrivateIPAddresses = f0f3f23elemf17 } if f0f3f23iter.SecondaryPrivateIpAddressCount != nil { f0f3f23elem.SecondaryPrivateIPAddressCount = f0f3f23iter.SecondaryPrivateIpAddressCount diff --git a/pkg/controller/ec2/launchtemplateversion/zz_conversions.go b/pkg/controller/ec2/launchtemplateversion/zz_conversions.go index 56767d50e2..e80cee76c6 100644 --- a/pkg/controller/ec2/launchtemplateversion/zz_conversions.go +++ b/pkg/controller/ec2/launchtemplateversion/zz_conversions.go @@ -34,6 +34,10 @@ import ( func GenerateDescribeLaunchTemplateVersionsInput(cr *svcapitypes.LaunchTemplateVersion) *svcsdk.DescribeLaunchTemplateVersionsInput { res := &svcsdk.DescribeLaunchTemplateVersionsInput{} + if cr.Spec.ForProvider.ResolveAlias != nil { + res.SetResolveAlias(*cr.Spec.ForProvider.ResolveAlias) + } + return res } @@ -109,6 +113,9 @@ func GenerateLaunchTemplateVersion(resp 
*svcsdk.DescribeLaunchTemplateVersionsOu } if elem.LaunchTemplateData.CpuOptions != nil { f3f2 := &svcapitypes.LaunchTemplateCPUOptionsRequest{} + if elem.LaunchTemplateData.CpuOptions.AmdSevSnp != nil { + f3f2.AmdSevSnp = elem.LaunchTemplateData.CpuOptions.AmdSevSnp + } if elem.LaunchTemplateData.CpuOptions.CoreCount != nil { f3f2.CoreCount = elem.LaunchTemplateData.CpuOptions.CoreCount } @@ -530,22 +537,25 @@ func GenerateLaunchTemplateVersion(resp *svcsdk.DescribeLaunchTemplateVersionsOu if f3f23iter.NetworkInterfaceId != nil { f3f23elem.NetworkInterfaceID = f3f23iter.NetworkInterfaceId } + if f3f23iter.PrimaryIpv6 != nil { + f3f23elem.PrimaryIPv6 = f3f23iter.PrimaryIpv6 + } if f3f23iter.PrivateIpAddress != nil { f3f23elem.PrivateIPAddress = f3f23iter.PrivateIpAddress } if f3f23iter.PrivateIpAddresses != nil { - f3f23elemf16 := []*svcapitypes.PrivateIPAddressSpecification{} - for _, f3f23elemf16iter := range f3f23iter.PrivateIpAddresses { - f3f23elemf16elem := &svcapitypes.PrivateIPAddressSpecification{} - if f3f23elemf16iter.Primary != nil { - f3f23elemf16elem.Primary = f3f23elemf16iter.Primary + f3f23elemf17 := []*svcapitypes.PrivateIPAddressSpecification{} + for _, f3f23elemf17iter := range f3f23iter.PrivateIpAddresses { + f3f23elemf17elem := &svcapitypes.PrivateIPAddressSpecification{} + if f3f23elemf17iter.Primary != nil { + f3f23elemf17elem.Primary = f3f23elemf17iter.Primary } - if f3f23elemf16iter.PrivateIpAddress != nil { - f3f23elemf16elem.PrivateIPAddress = f3f23elemf16iter.PrivateIpAddress + if f3f23elemf17iter.PrivateIpAddress != nil { + f3f23elemf17elem.PrivateIPAddress = f3f23elemf17iter.PrivateIpAddress } - f3f23elemf16 = append(f3f23elemf16, f3f23elemf16elem) + f3f23elemf17 = append(f3f23elemf17, f3f23elemf17elem) } - f3f23elem.PrivateIPAddresses = f3f23elemf16 + f3f23elem.PrivateIPAddresses = f3f23elemf17 } if f3f23iter.SecondaryPrivateIpAddressCount != nil { f3f23elem.SecondaryPrivateIPAddressCount = f3f23iter.SecondaryPrivateIpAddressCount @@ -739,6 +749,9 @@ func GenerateCreateLaunchTemplateVersionInput(cr *svcapitypes.LaunchTemplateVers } if cr.Spec.ForProvider.LaunchTemplateData.CPUOptions != nil { f0f2 := &svcsdk.LaunchTemplateCpuOptionsRequest{} + if cr.Spec.ForProvider.LaunchTemplateData.CPUOptions.AmdSevSnp != nil { + f0f2.SetAmdSevSnp(*cr.Spec.ForProvider.LaunchTemplateData.CPUOptions.AmdSevSnp) + } if cr.Spec.ForProvider.LaunchTemplateData.CPUOptions.CoreCount != nil { f0f2.SetCoreCount(*cr.Spec.ForProvider.LaunchTemplateData.CPUOptions.CoreCount) } @@ -1160,22 +1173,25 @@ func GenerateCreateLaunchTemplateVersionInput(cr *svcapitypes.LaunchTemplateVers if f0f23iter.NetworkInterfaceID != nil { f0f23elem.SetNetworkInterfaceId(*f0f23iter.NetworkInterfaceID) } + if f0f23iter.PrimaryIPv6 != nil { + f0f23elem.SetPrimaryIpv6(*f0f23iter.PrimaryIPv6) + } if f0f23iter.PrivateIPAddress != nil { f0f23elem.SetPrivateIpAddress(*f0f23iter.PrivateIPAddress) } if f0f23iter.PrivateIPAddresses != nil { - f0f23elemf16 := []*svcsdk.PrivateIpAddressSpecification{} - for _, f0f23elemf16iter := range f0f23iter.PrivateIPAddresses { - f0f23elemf16elem := &svcsdk.PrivateIpAddressSpecification{} - if f0f23elemf16iter.Primary != nil { - f0f23elemf16elem.SetPrimary(*f0f23elemf16iter.Primary) + f0f23elemf17 := []*svcsdk.PrivateIpAddressSpecification{} + for _, f0f23elemf17iter := range f0f23iter.PrivateIPAddresses { + f0f23elemf17elem := &svcsdk.PrivateIpAddressSpecification{} + if f0f23elemf17iter.Primary != nil { + f0f23elemf17elem.SetPrimary(*f0f23elemf17iter.Primary) } - if 
f0f23elemf16iter.PrivateIPAddress != nil { - f0f23elemf16elem.SetPrivateIpAddress(*f0f23elemf16iter.PrivateIPAddress) + if f0f23elemf17iter.PrivateIPAddress != nil { + f0f23elemf17elem.SetPrivateIpAddress(*f0f23elemf17iter.PrivateIPAddress) } - f0f23elemf16 = append(f0f23elemf16, f0f23elemf16elem) + f0f23elemf17 = append(f0f23elemf17, f0f23elemf17elem) } - f0f23elem.SetPrivateIpAddresses(f0f23elemf16) + f0f23elem.SetPrivateIpAddresses(f0f23elemf17) } if f0f23iter.SecondaryPrivateIPAddressCount != nil { f0f23elem.SetSecondaryPrivateIpAddressCount(*f0f23iter.SecondaryPrivateIPAddressCount) @@ -1282,6 +1298,9 @@ func GenerateCreateLaunchTemplateVersionInput(cr *svcapitypes.LaunchTemplateVers } res.SetLaunchTemplateData(f0) } + if cr.Spec.ForProvider.ResolveAlias != nil { + res.SetResolveAlias(*cr.Spec.ForProvider.ResolveAlias) + } if cr.Spec.ForProvider.SourceVersion != nil { res.SetSourceVersion(*cr.Spec.ForProvider.SourceVersion) } diff --git a/pkg/controller/ec2/volume/zz_controller.go b/pkg/controller/ec2/volume/zz_controller.go index b47c07fc9f..e05d40bc09 100644 --- a/pkg/controller/ec2/volume/zz_controller.go +++ b/pkg/controller/ec2/volume/zz_controller.go @@ -198,24 +198,29 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E } else { cr.Spec.ForProvider.SnapshotID = nil } + if resp.SseType != nil { + cr.Status.AtProvider.SSEType = resp.SseType + } else { + cr.Status.AtProvider.SSEType = nil + } if resp.State != nil { cr.Status.AtProvider.State = resp.State } else { cr.Status.AtProvider.State = nil } if resp.Tags != nil { - f12 := []*svcapitypes.Tag{} - for _, f12iter := range resp.Tags { - f12elem := &svcapitypes.Tag{} - if f12iter.Key != nil { - f12elem.Key = f12iter.Key + f13 := []*svcapitypes.Tag{} + for _, f13iter := range resp.Tags { + f13elem := &svcapitypes.Tag{} + if f13iter.Key != nil { + f13elem.Key = f13iter.Key } - if f12iter.Value != nil { - f12elem.Value = f12iter.Value + if f13iter.Value != nil { + f13elem.Value = f13iter.Value } - f12 = append(f12, f12elem) + f13 = append(f13, f13elem) } - cr.Status.AtProvider.Tags = f12 + cr.Status.AtProvider.Tags = f13 } else { cr.Status.AtProvider.Tags = nil } diff --git a/pkg/controller/ec2/volume/zz_conversions.go b/pkg/controller/ec2/volume/zz_conversions.go index e0779c8627..6b26cd7dd2 100644 --- a/pkg/controller/ec2/volume/zz_conversions.go +++ b/pkg/controller/ec2/volume/zz_conversions.go @@ -127,24 +127,29 @@ func GenerateVolume(resp *svcsdk.DescribeVolumesOutput) *svcapitypes.Volume { } else { cr.Spec.ForProvider.SnapshotID = nil } + if elem.SseType != nil { + cr.Status.AtProvider.SSEType = elem.SseType + } else { + cr.Status.AtProvider.SSEType = nil + } if elem.State != nil { cr.Status.AtProvider.State = elem.State } else { cr.Status.AtProvider.State = nil } if elem.Tags != nil { - f12 := []*svcapitypes.Tag{} - for _, f12iter := range elem.Tags { - f12elem := &svcapitypes.Tag{} - if f12iter.Key != nil { - f12elem.Key = f12iter.Key + f13 := []*svcapitypes.Tag{} + for _, f13iter := range elem.Tags { + f13elem := &svcapitypes.Tag{} + if f13iter.Key != nil { + f13elem.Key = f13iter.Key } - if f12iter.Value != nil { - f12elem.Value = f12iter.Value + if f13iter.Value != nil { + f13elem.Value = f13iter.Value } - f12 = append(f12, f12elem) + f13 = append(f13, f13elem) } - cr.Status.AtProvider.Tags = f12 + cr.Status.AtProvider.Tags = f13 } else { cr.Status.AtProvider.Tags = nil } diff --git a/pkg/controller/ec2/vpcendpoint/setup_test.go b/pkg/controller/ec2/vpcendpoint/setup_test.go index 
5a78578831..6637306845 100644 --- a/pkg/controller/ec2/vpcendpoint/setup_test.go +++ b/pkg/controller/ec2/vpcendpoint/setup_test.go @@ -30,7 +30,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/pkg/errors" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/crossplane-contrib/provider-aws/apis/ec2/v1alpha1" @@ -585,7 +585,7 @@ func TestTagger(t *testing.T) { } tag := func(k, v string) *v1alpha1.Tag { - return &v1alpha1.Tag{Key: pointer.String(k), Value: pointer.String(v)} + return &v1alpha1.Tag{Key: ptr.To(k), Value: ptr.To(v)} } cases := map[string]struct { diff --git a/pkg/controller/ec2/vpcendpoint/zz_controller.go b/pkg/controller/ec2/vpcendpoint/zz_controller.go index 7a72f66963..61506228e6 100644 --- a/pkg/controller/ec2/vpcendpoint/zz_controller.go +++ b/pkg/controller/ec2/vpcendpoint/zz_controller.go @@ -146,6 +146,9 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E if resp.VpcEndpoint.DnsOptions.DnsRecordIpType != nil { f2.DNSRecordIPType = resp.VpcEndpoint.DnsOptions.DnsRecordIpType } + if resp.VpcEndpoint.DnsOptions.PrivateDnsOnlyForInboundResolverEndpoint != nil { + f2.PrivateDNSOnlyForInboundResolverEndpoint = resp.VpcEndpoint.DnsOptions.PrivateDnsOnlyForInboundResolverEndpoint + } cr.Spec.ForProvider.DNSOptions = f2 } else { cr.Spec.ForProvider.DNSOptions = nil diff --git a/pkg/controller/ec2/vpcendpoint/zz_conversions.go b/pkg/controller/ec2/vpcendpoint/zz_conversions.go index ed413c216c..a7c67309b4 100644 --- a/pkg/controller/ec2/vpcendpoint/zz_conversions.go +++ b/pkg/controller/ec2/vpcendpoint/zz_conversions.go @@ -75,6 +75,9 @@ func GenerateVPCEndpoint(resp *svcsdk.DescribeVpcEndpointsOutput) *svcapitypes.V if elem.DnsOptions.DnsRecordIpType != nil { f2.DNSRecordIPType = elem.DnsOptions.DnsRecordIpType } + if elem.DnsOptions.PrivateDnsOnlyForInboundResolverEndpoint != nil { + f2.PrivateDNSOnlyForInboundResolverEndpoint = elem.DnsOptions.PrivateDnsOnlyForInboundResolverEndpoint + } cr.Spec.ForProvider.DNSOptions = f2 } else { cr.Spec.ForProvider.DNSOptions = nil @@ -225,6 +228,9 @@ func GenerateCreateVpcEndpointInput(cr *svcapitypes.VPCEndpoint) *svcsdk.CreateV if cr.Spec.ForProvider.DNSOptions.DNSRecordIPType != nil { f0.SetDnsRecordIpType(*cr.Spec.ForProvider.DNSOptions.DNSRecordIPType) } + if cr.Spec.ForProvider.DNSOptions.PrivateDNSOnlyForInboundResolverEndpoint != nil { + f0.SetPrivateDnsOnlyForInboundResolverEndpoint(*cr.Spec.ForProvider.DNSOptions.PrivateDNSOnlyForInboundResolverEndpoint) + } res.SetDnsOptions(f0) } if cr.Spec.ForProvider.IPAddressType != nil { @@ -239,30 +245,47 @@ func GenerateCreateVpcEndpointInput(cr *svcapitypes.VPCEndpoint) *svcsdk.CreateV if cr.Spec.ForProvider.ServiceName != nil { res.SetServiceName(*cr.Spec.ForProvider.ServiceName) } + if cr.Spec.ForProvider.SubnetConfigurations != nil { + f5 := []*svcsdk.SubnetConfiguration{} + for _, f5iter := range cr.Spec.ForProvider.SubnetConfigurations { + f5elem := &svcsdk.SubnetConfiguration{} + if f5iter.IPv4 != nil { + f5elem.SetIpv4(*f5iter.IPv4) + } + if f5iter.IPv6 != nil { + f5elem.SetIpv6(*f5iter.IPv6) + } + if f5iter.SubnetID != nil { + f5elem.SetSubnetId(*f5iter.SubnetID) + } + f5 = append(f5, f5elem) + } + res.SetSubnetConfigurations(f5) + } if cr.Spec.ForProvider.TagSpecifications != nil { - f5 := []*svcsdk.TagSpecification{} - for _, f5iter := range cr.Spec.ForProvider.TagSpecifications { - f5elem := &svcsdk.TagSpecification{} - if f5iter.ResourceType != 
nil { - f5elem.SetResourceType(*f5iter.ResourceType) + f6 := []*svcsdk.TagSpecification{} + for _, f6iter := range cr.Spec.ForProvider.TagSpecifications { + f6elem := &svcsdk.TagSpecification{} + if f6iter.ResourceType != nil { + f6elem.SetResourceType(*f6iter.ResourceType) } - if f5iter.Tags != nil { - f5elemf1 := []*svcsdk.Tag{} - for _, f5elemf1iter := range f5iter.Tags { - f5elemf1elem := &svcsdk.Tag{} - if f5elemf1iter.Key != nil { - f5elemf1elem.SetKey(*f5elemf1iter.Key) + if f6iter.Tags != nil { + f6elemf1 := []*svcsdk.Tag{} + for _, f6elemf1iter := range f6iter.Tags { + f6elemf1elem := &svcsdk.Tag{} + if f6elemf1iter.Key != nil { + f6elemf1elem.SetKey(*f6elemf1iter.Key) } - if f5elemf1iter.Value != nil { - f5elemf1elem.SetValue(*f5elemf1iter.Value) + if f6elemf1iter.Value != nil { + f6elemf1elem.SetValue(*f6elemf1iter.Value) } - f5elemf1 = append(f5elemf1, f5elemf1elem) + f6elemf1 = append(f6elemf1, f6elemf1elem) } - f5elem.SetTags(f5elemf1) + f6elem.SetTags(f6elemf1) } - f5 = append(f5, f5elem) + f6 = append(f6, f6elem) } - res.SetTagSpecifications(f5) + res.SetTagSpecifications(f6) } if cr.Spec.ForProvider.VPCEndpointType != nil { res.SetVpcEndpointType(*cr.Spec.ForProvider.VPCEndpointType) @@ -280,6 +303,9 @@ func GenerateModifyVpcEndpointInput(cr *svcapitypes.VPCEndpoint) *svcsdk.ModifyV if cr.Spec.ForProvider.DNSOptions.DNSRecordIPType != nil { f3.SetDnsRecordIpType(*cr.Spec.ForProvider.DNSOptions.DNSRecordIPType) } + if cr.Spec.ForProvider.DNSOptions.PrivateDNSOnlyForInboundResolverEndpoint != nil { + f3.SetPrivateDnsOnlyForInboundResolverEndpoint(*cr.Spec.ForProvider.DNSOptions.PrivateDNSOnlyForInboundResolverEndpoint) + } res.SetDnsOptions(f3) } if cr.Spec.ForProvider.IPAddressType != nil { @@ -291,6 +317,23 @@ func GenerateModifyVpcEndpointInput(cr *svcapitypes.VPCEndpoint) *svcsdk.ModifyV if cr.Spec.ForProvider.PrivateDNSEnabled != nil { res.SetPrivateDnsEnabled(*cr.Spec.ForProvider.PrivateDNSEnabled) } + if cr.Spec.ForProvider.SubnetConfigurations != nil { + f12 := []*svcsdk.SubnetConfiguration{} + for _, f12iter := range cr.Spec.ForProvider.SubnetConfigurations { + f12elem := &svcsdk.SubnetConfiguration{} + if f12iter.IPv4 != nil { + f12elem.SetIpv4(*f12iter.IPv4) + } + if f12iter.IPv6 != nil { + f12elem.SetIpv6(*f12iter.IPv6) + } + if f12iter.SubnetID != nil { + f12elem.SetSubnetId(*f12iter.SubnetID) + } + f12 = append(f12, f12elem) + } + res.SetSubnetConfigurations(f12) + } if cr.Status.AtProvider.VPCEndpointID != nil { res.SetVpcEndpointId(*cr.Status.AtProvider.VPCEndpointID) } diff --git a/pkg/controller/ec2/vpcendpointserviceconfiguration/setup_test.go b/pkg/controller/ec2/vpcendpointserviceconfiguration/setup_test.go index 962fe7c03f..a2316c85b8 100644 --- a/pkg/controller/ec2/vpcendpointserviceconfiguration/setup_test.go +++ b/pkg/controller/ec2/vpcendpointserviceconfiguration/setup_test.go @@ -23,7 +23,7 @@ import ( cpresource "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/test" "github.com/google/go-cmp/cmp" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/crossplane-contrib/provider-aws/apis/ec2/v1alpha1" @@ -60,7 +60,7 @@ func TestTagger(t *testing.T) { } tag := func(k, v string) *v1alpha1.Tag { - return &v1alpha1.Tag{Key: pointer.String(k), Value: pointer.String(v)} + return &v1alpha1.Tag{Key: ptr.To(k), Value: ptr.To(v)} } cases := map[string]struct { diff --git a/pkg/controller/ec2/vpcpeeringconnection/setup_test.go 
b/pkg/controller/ec2/vpcpeeringconnection/setup_test.go index 97e19c8104..24a7750dab 100644 --- a/pkg/controller/ec2/vpcpeeringconnection/setup_test.go +++ b/pkg/controller/ec2/vpcpeeringconnection/setup_test.go @@ -23,7 +23,7 @@ import ( cpresource "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/test" "github.com/google/go-cmp/cmp" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/crossplane-contrib/provider-aws/apis/ec2/v1alpha1" @@ -61,7 +61,7 @@ func TestTagger(t *testing.T) { } tag := func(k, v string) *v1alpha1.Tag { - return &v1alpha1.Tag{Key: pointer.String(k), Value: pointer.String(v)} + return &v1alpha1.Tag{Key: ptr.To(k), Value: ptr.To(v)} } cases := map[string]struct { diff --git a/pkg/controller/ecs/taskdefinition/zz_controller.go b/pkg/controller/ecs/taskdefinition/zz_controller.go index 3db14827b5..cf08f90779 100644 --- a/pkg/controller/ecs/taskdefinition/zz_controller.go +++ b/pkg/controller/ecs/taskdefinition/zz_controller.go @@ -159,153 +159,162 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E if f1f1iter.Cpu != nil { f1f1elem.CPU = f1f1iter.Cpu } + if f1f1iter.CredentialSpecs != nil { + f1f1elemf2 := []*string{} + for _, f1f1elemf2iter := range f1f1iter.CredentialSpecs { + var f1f1elemf2elem string + f1f1elemf2elem = *f1f1elemf2iter + f1f1elemf2 = append(f1f1elemf2, &f1f1elemf2elem) + } + f1f1elem.CredentialSpecs = f1f1elemf2 + } if f1f1iter.DependsOn != nil { - f1f1elemf2 := []*svcapitypes.ContainerDependency{} - for _, f1f1elemf2iter := range f1f1iter.DependsOn { - f1f1elemf2elem := &svcapitypes.ContainerDependency{} - if f1f1elemf2iter.Condition != nil { - f1f1elemf2elem.Condition = f1f1elemf2iter.Condition + f1f1elemf3 := []*svcapitypes.ContainerDependency{} + for _, f1f1elemf3iter := range f1f1iter.DependsOn { + f1f1elemf3elem := &svcapitypes.ContainerDependency{} + if f1f1elemf3iter.Condition != nil { + f1f1elemf3elem.Condition = f1f1elemf3iter.Condition } - if f1f1elemf2iter.ContainerName != nil { - f1f1elemf2elem.ContainerName = f1f1elemf2iter.ContainerName + if f1f1elemf3iter.ContainerName != nil { + f1f1elemf3elem.ContainerName = f1f1elemf3iter.ContainerName } - f1f1elemf2 = append(f1f1elemf2, f1f1elemf2elem) + f1f1elemf3 = append(f1f1elemf3, f1f1elemf3elem) } - f1f1elem.DependsOn = f1f1elemf2 + f1f1elem.DependsOn = f1f1elemf3 } if f1f1iter.DisableNetworking != nil { f1f1elem.DisableNetworking = f1f1iter.DisableNetworking } if f1f1iter.DnsSearchDomains != nil { - f1f1elemf4 := []*string{} - for _, f1f1elemf4iter := range f1f1iter.DnsSearchDomains { - var f1f1elemf4elem string - f1f1elemf4elem = *f1f1elemf4iter - f1f1elemf4 = append(f1f1elemf4, &f1f1elemf4elem) - } - f1f1elem.DNSSearchDomains = f1f1elemf4 - } - if f1f1iter.DnsServers != nil { f1f1elemf5 := []*string{} - for _, f1f1elemf5iter := range f1f1iter.DnsServers { + for _, f1f1elemf5iter := range f1f1iter.DnsSearchDomains { var f1f1elemf5elem string f1f1elemf5elem = *f1f1elemf5iter f1f1elemf5 = append(f1f1elemf5, &f1f1elemf5elem) } - f1f1elem.DNSServers = f1f1elemf5 + f1f1elem.DNSSearchDomains = f1f1elemf5 } - if f1f1iter.DockerLabels != nil { - f1f1elemf6 := map[string]*string{} - for f1f1elemf6key, f1f1elemf6valiter := range f1f1iter.DockerLabels { - var f1f1elemf6val string - f1f1elemf6val = *f1f1elemf6valiter - f1f1elemf6[f1f1elemf6key] = &f1f1elemf6val + if f1f1iter.DnsServers != nil { + f1f1elemf6 := []*string{} + for _, f1f1elemf6iter := range 
f1f1iter.DnsServers { + var f1f1elemf6elem string + f1f1elemf6elem = *f1f1elemf6iter + f1f1elemf6 = append(f1f1elemf6, &f1f1elemf6elem) } - f1f1elem.DockerLabels = f1f1elemf6 + f1f1elem.DNSServers = f1f1elemf6 } - if f1f1iter.DockerSecurityOptions != nil { - f1f1elemf7 := []*string{} - for _, f1f1elemf7iter := range f1f1iter.DockerSecurityOptions { - var f1f1elemf7elem string - f1f1elemf7elem = *f1f1elemf7iter - f1f1elemf7 = append(f1f1elemf7, &f1f1elemf7elem) + if f1f1iter.DockerLabels != nil { + f1f1elemf7 := map[string]*string{} + for f1f1elemf7key, f1f1elemf7valiter := range f1f1iter.DockerLabels { + var f1f1elemf7val string + f1f1elemf7val = *f1f1elemf7valiter + f1f1elemf7[f1f1elemf7key] = &f1f1elemf7val } - f1f1elem.DockerSecurityOptions = f1f1elemf7 + f1f1elem.DockerLabels = f1f1elemf7 } - if f1f1iter.EntryPoint != nil { + if f1f1iter.DockerSecurityOptions != nil { f1f1elemf8 := []*string{} - for _, f1f1elemf8iter := range f1f1iter.EntryPoint { + for _, f1f1elemf8iter := range f1f1iter.DockerSecurityOptions { var f1f1elemf8elem string f1f1elemf8elem = *f1f1elemf8iter f1f1elemf8 = append(f1f1elemf8, &f1f1elemf8elem) } - f1f1elem.EntryPoint = f1f1elemf8 + f1f1elem.DockerSecurityOptions = f1f1elemf8 + } + if f1f1iter.EntryPoint != nil { + f1f1elemf9 := []*string{} + for _, f1f1elemf9iter := range f1f1iter.EntryPoint { + var f1f1elemf9elem string + f1f1elemf9elem = *f1f1elemf9iter + f1f1elemf9 = append(f1f1elemf9, &f1f1elemf9elem) + } + f1f1elem.EntryPoint = f1f1elemf9 } if f1f1iter.Environment != nil { - f1f1elemf9 := []*svcapitypes.KeyValuePair{} - for _, f1f1elemf9iter := range f1f1iter.Environment { - f1f1elemf9elem := &svcapitypes.KeyValuePair{} - if f1f1elemf9iter.Name != nil { - f1f1elemf9elem.Name = f1f1elemf9iter.Name + f1f1elemf10 := []*svcapitypes.KeyValuePair{} + for _, f1f1elemf10iter := range f1f1iter.Environment { + f1f1elemf10elem := &svcapitypes.KeyValuePair{} + if f1f1elemf10iter.Name != nil { + f1f1elemf10elem.Name = f1f1elemf10iter.Name } - if f1f1elemf9iter.Value != nil { - f1f1elemf9elem.Value = f1f1elemf9iter.Value + if f1f1elemf10iter.Value != nil { + f1f1elemf10elem.Value = f1f1elemf10iter.Value } - f1f1elemf9 = append(f1f1elemf9, f1f1elemf9elem) + f1f1elemf10 = append(f1f1elemf10, f1f1elemf10elem) } - f1f1elem.Environment = f1f1elemf9 + f1f1elem.Environment = f1f1elemf10 } if f1f1iter.EnvironmentFiles != nil { - f1f1elemf10 := []*svcapitypes.EnvironmentFile{} - for _, f1f1elemf10iter := range f1f1iter.EnvironmentFiles { - f1f1elemf10elem := &svcapitypes.EnvironmentFile{} - if f1f1elemf10iter.Type != nil { - f1f1elemf10elem.Type = f1f1elemf10iter.Type + f1f1elemf11 := []*svcapitypes.EnvironmentFile{} + for _, f1f1elemf11iter := range f1f1iter.EnvironmentFiles { + f1f1elemf11elem := &svcapitypes.EnvironmentFile{} + if f1f1elemf11iter.Type != nil { + f1f1elemf11elem.Type = f1f1elemf11iter.Type } - if f1f1elemf10iter.Value != nil { - f1f1elemf10elem.Value = f1f1elemf10iter.Value + if f1f1elemf11iter.Value != nil { + f1f1elemf11elem.Value = f1f1elemf11iter.Value } - f1f1elemf10 = append(f1f1elemf10, f1f1elemf10elem) + f1f1elemf11 = append(f1f1elemf11, f1f1elemf11elem) } - f1f1elem.EnvironmentFiles = f1f1elemf10 + f1f1elem.EnvironmentFiles = f1f1elemf11 } if f1f1iter.Essential != nil { f1f1elem.Essential = f1f1iter.Essential } if f1f1iter.ExtraHosts != nil { - f1f1elemf12 := []*svcapitypes.HostEntry{} - for _, f1f1elemf12iter := range f1f1iter.ExtraHosts { - f1f1elemf12elem := &svcapitypes.HostEntry{} - if f1f1elemf12iter.Hostname != nil { - f1f1elemf12elem.Hostname 
= f1f1elemf12iter.Hostname + f1f1elemf13 := []*svcapitypes.HostEntry{} + for _, f1f1elemf13iter := range f1f1iter.ExtraHosts { + f1f1elemf13elem := &svcapitypes.HostEntry{} + if f1f1elemf13iter.Hostname != nil { + f1f1elemf13elem.Hostname = f1f1elemf13iter.Hostname } - if f1f1elemf12iter.IpAddress != nil { - f1f1elemf12elem.IPAddress = f1f1elemf12iter.IpAddress + if f1f1elemf13iter.IpAddress != nil { + f1f1elemf13elem.IPAddress = f1f1elemf13iter.IpAddress } - f1f1elemf12 = append(f1f1elemf12, f1f1elemf12elem) + f1f1elemf13 = append(f1f1elemf13, f1f1elemf13elem) } - f1f1elem.ExtraHosts = f1f1elemf12 + f1f1elem.ExtraHosts = f1f1elemf13 } if f1f1iter.FirelensConfiguration != nil { - f1f1elemf13 := &svcapitypes.FirelensConfiguration{} + f1f1elemf14 := &svcapitypes.FirelensConfiguration{} if f1f1iter.FirelensConfiguration.Options != nil { - f1f1elemf13f0 := map[string]*string{} - for f1f1elemf13f0key, f1f1elemf13f0valiter := range f1f1iter.FirelensConfiguration.Options { - var f1f1elemf13f0val string - f1f1elemf13f0val = *f1f1elemf13f0valiter - f1f1elemf13f0[f1f1elemf13f0key] = &f1f1elemf13f0val + f1f1elemf14f0 := map[string]*string{} + for f1f1elemf14f0key, f1f1elemf14f0valiter := range f1f1iter.FirelensConfiguration.Options { + var f1f1elemf14f0val string + f1f1elemf14f0val = *f1f1elemf14f0valiter + f1f1elemf14f0[f1f1elemf14f0key] = &f1f1elemf14f0val } - f1f1elemf13.Options = f1f1elemf13f0 + f1f1elemf14.Options = f1f1elemf14f0 } if f1f1iter.FirelensConfiguration.Type != nil { - f1f1elemf13.Type = f1f1iter.FirelensConfiguration.Type + f1f1elemf14.Type = f1f1iter.FirelensConfiguration.Type } - f1f1elem.FirelensConfiguration = f1f1elemf13 + f1f1elem.FirelensConfiguration = f1f1elemf14 } if f1f1iter.HealthCheck != nil { - f1f1elemf14 := &svcapitypes.HealthCheck{} + f1f1elemf15 := &svcapitypes.HealthCheck{} if f1f1iter.HealthCheck.Command != nil { - f1f1elemf14f0 := []*string{} - for _, f1f1elemf14f0iter := range f1f1iter.HealthCheck.Command { - var f1f1elemf14f0elem string - f1f1elemf14f0elem = *f1f1elemf14f0iter - f1f1elemf14f0 = append(f1f1elemf14f0, &f1f1elemf14f0elem) + f1f1elemf15f0 := []*string{} + for _, f1f1elemf15f0iter := range f1f1iter.HealthCheck.Command { + var f1f1elemf15f0elem string + f1f1elemf15f0elem = *f1f1elemf15f0iter + f1f1elemf15f0 = append(f1f1elemf15f0, &f1f1elemf15f0elem) } - f1f1elemf14.Command = f1f1elemf14f0 + f1f1elemf15.Command = f1f1elemf15f0 } if f1f1iter.HealthCheck.Interval != nil { - f1f1elemf14.Interval = f1f1iter.HealthCheck.Interval + f1f1elemf15.Interval = f1f1iter.HealthCheck.Interval } if f1f1iter.HealthCheck.Retries != nil { - f1f1elemf14.Retries = f1f1iter.HealthCheck.Retries + f1f1elemf15.Retries = f1f1iter.HealthCheck.Retries } if f1f1iter.HealthCheck.StartPeriod != nil { - f1f1elemf14.StartPeriod = f1f1iter.HealthCheck.StartPeriod + f1f1elemf15.StartPeriod = f1f1iter.HealthCheck.StartPeriod } if f1f1iter.HealthCheck.Timeout != nil { - f1f1elemf14.Timeout = f1f1iter.HealthCheck.Timeout + f1f1elemf15.Timeout = f1f1iter.HealthCheck.Timeout } - f1f1elem.HealthCheck = f1f1elemf14 + f1f1elem.HealthCheck = f1f1elemf15 } if f1f1iter.Hostname != nil { f1f1elem.Hostname = f1f1iter.Hostname @@ -317,127 +326,127 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E f1f1elem.Interactive = f1f1iter.Interactive } if f1f1iter.Links != nil { - f1f1elemf18 := []*string{} - for _, f1f1elemf18iter := range f1f1iter.Links { - var f1f1elemf18elem string - f1f1elemf18elem = *f1f1elemf18iter - f1f1elemf18 = append(f1f1elemf18, &f1f1elemf18elem) 
+ f1f1elemf19 := []*string{} + for _, f1f1elemf19iter := range f1f1iter.Links { + var f1f1elemf19elem string + f1f1elemf19elem = *f1f1elemf19iter + f1f1elemf19 = append(f1f1elemf19, &f1f1elemf19elem) } - f1f1elem.Links = f1f1elemf18 + f1f1elem.Links = f1f1elemf19 } if f1f1iter.LinuxParameters != nil { - f1f1elemf19 := &svcapitypes.LinuxParameters{} + f1f1elemf20 := &svcapitypes.LinuxParameters{} if f1f1iter.LinuxParameters.Capabilities != nil { - f1f1elemf19f0 := &svcapitypes.KernelCapabilities{} + f1f1elemf20f0 := &svcapitypes.KernelCapabilities{} if f1f1iter.LinuxParameters.Capabilities.Add != nil { - f1f1elemf19f0f0 := []*string{} - for _, f1f1elemf19f0f0iter := range f1f1iter.LinuxParameters.Capabilities.Add { - var f1f1elemf19f0f0elem string - f1f1elemf19f0f0elem = *f1f1elemf19f0f0iter - f1f1elemf19f0f0 = append(f1f1elemf19f0f0, &f1f1elemf19f0f0elem) + f1f1elemf20f0f0 := []*string{} + for _, f1f1elemf20f0f0iter := range f1f1iter.LinuxParameters.Capabilities.Add { + var f1f1elemf20f0f0elem string + f1f1elemf20f0f0elem = *f1f1elemf20f0f0iter + f1f1elemf20f0f0 = append(f1f1elemf20f0f0, &f1f1elemf20f0f0elem) } - f1f1elemf19f0.Add = f1f1elemf19f0f0 + f1f1elemf20f0.Add = f1f1elemf20f0f0 } if f1f1iter.LinuxParameters.Capabilities.Drop != nil { - f1f1elemf19f0f1 := []*string{} - for _, f1f1elemf19f0f1iter := range f1f1iter.LinuxParameters.Capabilities.Drop { - var f1f1elemf19f0f1elem string - f1f1elemf19f0f1elem = *f1f1elemf19f0f1iter - f1f1elemf19f0f1 = append(f1f1elemf19f0f1, &f1f1elemf19f0f1elem) + f1f1elemf20f0f1 := []*string{} + for _, f1f1elemf20f0f1iter := range f1f1iter.LinuxParameters.Capabilities.Drop { + var f1f1elemf20f0f1elem string + f1f1elemf20f0f1elem = *f1f1elemf20f0f1iter + f1f1elemf20f0f1 = append(f1f1elemf20f0f1, &f1f1elemf20f0f1elem) } - f1f1elemf19f0.Drop = f1f1elemf19f0f1 + f1f1elemf20f0.Drop = f1f1elemf20f0f1 } - f1f1elemf19.Capabilities = f1f1elemf19f0 + f1f1elemf20.Capabilities = f1f1elemf20f0 } if f1f1iter.LinuxParameters.Devices != nil { - f1f1elemf19f1 := []*svcapitypes.Device{} - for _, f1f1elemf19f1iter := range f1f1iter.LinuxParameters.Devices { - f1f1elemf19f1elem := &svcapitypes.Device{} - if f1f1elemf19f1iter.ContainerPath != nil { - f1f1elemf19f1elem.ContainerPath = f1f1elemf19f1iter.ContainerPath + f1f1elemf20f1 := []*svcapitypes.Device{} + for _, f1f1elemf20f1iter := range f1f1iter.LinuxParameters.Devices { + f1f1elemf20f1elem := &svcapitypes.Device{} + if f1f1elemf20f1iter.ContainerPath != nil { + f1f1elemf20f1elem.ContainerPath = f1f1elemf20f1iter.ContainerPath } - if f1f1elemf19f1iter.HostPath != nil { - f1f1elemf19f1elem.HostPath = f1f1elemf19f1iter.HostPath + if f1f1elemf20f1iter.HostPath != nil { + f1f1elemf20f1elem.HostPath = f1f1elemf20f1iter.HostPath } - if f1f1elemf19f1iter.Permissions != nil { - f1f1elemf19f1elemf2 := []*string{} - for _, f1f1elemf19f1elemf2iter := range f1f1elemf19f1iter.Permissions { - var f1f1elemf19f1elemf2elem string - f1f1elemf19f1elemf2elem = *f1f1elemf19f1elemf2iter - f1f1elemf19f1elemf2 = append(f1f1elemf19f1elemf2, &f1f1elemf19f1elemf2elem) + if f1f1elemf20f1iter.Permissions != nil { + f1f1elemf20f1elemf2 := []*string{} + for _, f1f1elemf20f1elemf2iter := range f1f1elemf20f1iter.Permissions { + var f1f1elemf20f1elemf2elem string + f1f1elemf20f1elemf2elem = *f1f1elemf20f1elemf2iter + f1f1elemf20f1elemf2 = append(f1f1elemf20f1elemf2, &f1f1elemf20f1elemf2elem) } - f1f1elemf19f1elem.Permissions = f1f1elemf19f1elemf2 + f1f1elemf20f1elem.Permissions = f1f1elemf20f1elemf2 } - f1f1elemf19f1 = append(f1f1elemf19f1, 
f1f1elemf19f1elem) + f1f1elemf20f1 = append(f1f1elemf20f1, f1f1elemf20f1elem) } - f1f1elemf19.Devices = f1f1elemf19f1 + f1f1elemf20.Devices = f1f1elemf20f1 } if f1f1iter.LinuxParameters.InitProcessEnabled != nil { - f1f1elemf19.InitProcessEnabled = f1f1iter.LinuxParameters.InitProcessEnabled + f1f1elemf20.InitProcessEnabled = f1f1iter.LinuxParameters.InitProcessEnabled } if f1f1iter.LinuxParameters.MaxSwap != nil { - f1f1elemf19.MaxSwap = f1f1iter.LinuxParameters.MaxSwap + f1f1elemf20.MaxSwap = f1f1iter.LinuxParameters.MaxSwap } if f1f1iter.LinuxParameters.SharedMemorySize != nil { - f1f1elemf19.SharedMemorySize = f1f1iter.LinuxParameters.SharedMemorySize + f1f1elemf20.SharedMemorySize = f1f1iter.LinuxParameters.SharedMemorySize } if f1f1iter.LinuxParameters.Swappiness != nil { - f1f1elemf19.Swappiness = f1f1iter.LinuxParameters.Swappiness + f1f1elemf20.Swappiness = f1f1iter.LinuxParameters.Swappiness } if f1f1iter.LinuxParameters.Tmpfs != nil { - f1f1elemf19f6 := []*svcapitypes.Tmpfs{} - for _, f1f1elemf19f6iter := range f1f1iter.LinuxParameters.Tmpfs { - f1f1elemf19f6elem := &svcapitypes.Tmpfs{} - if f1f1elemf19f6iter.ContainerPath != nil { - f1f1elemf19f6elem.ContainerPath = f1f1elemf19f6iter.ContainerPath + f1f1elemf20f6 := []*svcapitypes.Tmpfs{} + for _, f1f1elemf20f6iter := range f1f1iter.LinuxParameters.Tmpfs { + f1f1elemf20f6elem := &svcapitypes.Tmpfs{} + if f1f1elemf20f6iter.ContainerPath != nil { + f1f1elemf20f6elem.ContainerPath = f1f1elemf20f6iter.ContainerPath } - if f1f1elemf19f6iter.MountOptions != nil { - f1f1elemf19f6elemf1 := []*string{} - for _, f1f1elemf19f6elemf1iter := range f1f1elemf19f6iter.MountOptions { - var f1f1elemf19f6elemf1elem string - f1f1elemf19f6elemf1elem = *f1f1elemf19f6elemf1iter - f1f1elemf19f6elemf1 = append(f1f1elemf19f6elemf1, &f1f1elemf19f6elemf1elem) + if f1f1elemf20f6iter.MountOptions != nil { + f1f1elemf20f6elemf1 := []*string{} + for _, f1f1elemf20f6elemf1iter := range f1f1elemf20f6iter.MountOptions { + var f1f1elemf20f6elemf1elem string + f1f1elemf20f6elemf1elem = *f1f1elemf20f6elemf1iter + f1f1elemf20f6elemf1 = append(f1f1elemf20f6elemf1, &f1f1elemf20f6elemf1elem) } - f1f1elemf19f6elem.MountOptions = f1f1elemf19f6elemf1 + f1f1elemf20f6elem.MountOptions = f1f1elemf20f6elemf1 } - if f1f1elemf19f6iter.Size != nil { - f1f1elemf19f6elem.Size = f1f1elemf19f6iter.Size + if f1f1elemf20f6iter.Size != nil { + f1f1elemf20f6elem.Size = f1f1elemf20f6iter.Size } - f1f1elemf19f6 = append(f1f1elemf19f6, f1f1elemf19f6elem) + f1f1elemf20f6 = append(f1f1elemf20f6, f1f1elemf20f6elem) } - f1f1elemf19.Tmpfs = f1f1elemf19f6 + f1f1elemf20.Tmpfs = f1f1elemf20f6 } - f1f1elem.LinuxParameters = f1f1elemf19 + f1f1elem.LinuxParameters = f1f1elemf20 } if f1f1iter.LogConfiguration != nil { - f1f1elemf20 := &svcapitypes.LogConfiguration{} + f1f1elemf21 := &svcapitypes.LogConfiguration{} if f1f1iter.LogConfiguration.LogDriver != nil { - f1f1elemf20.LogDriver = f1f1iter.LogConfiguration.LogDriver + f1f1elemf21.LogDriver = f1f1iter.LogConfiguration.LogDriver } if f1f1iter.LogConfiguration.Options != nil { - f1f1elemf20f1 := map[string]*string{} - for f1f1elemf20f1key, f1f1elemf20f1valiter := range f1f1iter.LogConfiguration.Options { - var f1f1elemf20f1val string - f1f1elemf20f1val = *f1f1elemf20f1valiter - f1f1elemf20f1[f1f1elemf20f1key] = &f1f1elemf20f1val + f1f1elemf21f1 := map[string]*string{} + for f1f1elemf21f1key, f1f1elemf21f1valiter := range f1f1iter.LogConfiguration.Options { + var f1f1elemf21f1val string + f1f1elemf21f1val = *f1f1elemf21f1valiter + 
f1f1elemf21f1[f1f1elemf21f1key] = &f1f1elemf21f1val } - f1f1elemf20.Options = f1f1elemf20f1 + f1f1elemf21.Options = f1f1elemf21f1 } if f1f1iter.LogConfiguration.SecretOptions != nil { - f1f1elemf20f2 := []*svcapitypes.Secret{} - for _, f1f1elemf20f2iter := range f1f1iter.LogConfiguration.SecretOptions { - f1f1elemf20f2elem := &svcapitypes.Secret{} - if f1f1elemf20f2iter.Name != nil { - f1f1elemf20f2elem.Name = f1f1elemf20f2iter.Name + f1f1elemf21f2 := []*svcapitypes.Secret{} + for _, f1f1elemf21f2iter := range f1f1iter.LogConfiguration.SecretOptions { + f1f1elemf21f2elem := &svcapitypes.Secret{} + if f1f1elemf21f2iter.Name != nil { + f1f1elemf21f2elem.Name = f1f1elemf21f2iter.Name } - if f1f1elemf20f2iter.ValueFrom != nil { - f1f1elemf20f2elem.ValueFrom = f1f1elemf20f2iter.ValueFrom + if f1f1elemf21f2iter.ValueFrom != nil { + f1f1elemf21f2elem.ValueFrom = f1f1elemf21f2iter.ValueFrom } - f1f1elemf20f2 = append(f1f1elemf20f2, f1f1elemf20f2elem) + f1f1elemf21f2 = append(f1f1elemf21f2, f1f1elemf21f2elem) } - f1f1elemf20.SecretOptions = f1f1elemf20f2 + f1f1elemf21.SecretOptions = f1f1elemf21f2 } - f1f1elem.LogConfiguration = f1f1elemf20 + f1f1elem.LogConfiguration = f1f1elemf21 } if f1f1iter.Memory != nil { f1f1elem.Memory = f1f1iter.Memory @@ -446,50 +455,50 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E f1f1elem.MemoryReservation = f1f1iter.MemoryReservation } if f1f1iter.MountPoints != nil { - f1f1elemf23 := []*svcapitypes.MountPoint{} - for _, f1f1elemf23iter := range f1f1iter.MountPoints { - f1f1elemf23elem := &svcapitypes.MountPoint{} - if f1f1elemf23iter.ContainerPath != nil { - f1f1elemf23elem.ContainerPath = f1f1elemf23iter.ContainerPath + f1f1elemf24 := []*svcapitypes.MountPoint{} + for _, f1f1elemf24iter := range f1f1iter.MountPoints { + f1f1elemf24elem := &svcapitypes.MountPoint{} + if f1f1elemf24iter.ContainerPath != nil { + f1f1elemf24elem.ContainerPath = f1f1elemf24iter.ContainerPath } - if f1f1elemf23iter.ReadOnly != nil { - f1f1elemf23elem.ReadOnly = f1f1elemf23iter.ReadOnly + if f1f1elemf24iter.ReadOnly != nil { + f1f1elemf24elem.ReadOnly = f1f1elemf24iter.ReadOnly } - if f1f1elemf23iter.SourceVolume != nil { - f1f1elemf23elem.SourceVolume = f1f1elemf23iter.SourceVolume + if f1f1elemf24iter.SourceVolume != nil { + f1f1elemf24elem.SourceVolume = f1f1elemf24iter.SourceVolume } - f1f1elemf23 = append(f1f1elemf23, f1f1elemf23elem) + f1f1elemf24 = append(f1f1elemf24, f1f1elemf24elem) } - f1f1elem.MountPoints = f1f1elemf23 + f1f1elem.MountPoints = f1f1elemf24 } if f1f1iter.Name != nil { f1f1elem.Name = f1f1iter.Name } if f1f1iter.PortMappings != nil { - f1f1elemf25 := []*svcapitypes.PortMapping{} - for _, f1f1elemf25iter := range f1f1iter.PortMappings { - f1f1elemf25elem := &svcapitypes.PortMapping{} - if f1f1elemf25iter.AppProtocol != nil { - f1f1elemf25elem.AppProtocol = f1f1elemf25iter.AppProtocol + f1f1elemf26 := []*svcapitypes.PortMapping{} + for _, f1f1elemf26iter := range f1f1iter.PortMappings { + f1f1elemf26elem := &svcapitypes.PortMapping{} + if f1f1elemf26iter.AppProtocol != nil { + f1f1elemf26elem.AppProtocol = f1f1elemf26iter.AppProtocol } - if f1f1elemf25iter.ContainerPort != nil { - f1f1elemf25elem.ContainerPort = f1f1elemf25iter.ContainerPort + if f1f1elemf26iter.ContainerPort != nil { + f1f1elemf26elem.ContainerPort = f1f1elemf26iter.ContainerPort } - if f1f1elemf25iter.ContainerPortRange != nil { - f1f1elemf25elem.ContainerPortRange = f1f1elemf25iter.ContainerPortRange + if f1f1elemf26iter.ContainerPortRange != nil { + 
f1f1elemf26elem.ContainerPortRange = f1f1elemf26iter.ContainerPortRange } - if f1f1elemf25iter.HostPort != nil { - f1f1elemf25elem.HostPort = f1f1elemf25iter.HostPort + if f1f1elemf26iter.HostPort != nil { + f1f1elemf26elem.HostPort = f1f1elemf26iter.HostPort } - if f1f1elemf25iter.Name != nil { - f1f1elemf25elem.Name = f1f1elemf25iter.Name + if f1f1elemf26iter.Name != nil { + f1f1elemf26elem.Name = f1f1elemf26iter.Name } - if f1f1elemf25iter.Protocol != nil { - f1f1elemf25elem.Protocol = f1f1elemf25iter.Protocol + if f1f1elemf26iter.Protocol != nil { + f1f1elemf26elem.Protocol = f1f1elemf26iter.Protocol } - f1f1elemf25 = append(f1f1elemf25, f1f1elemf25elem) + f1f1elemf26 = append(f1f1elemf26, f1f1elemf26elem) } - f1f1elem.PortMappings = f1f1elemf25 + f1f1elem.PortMappings = f1f1elemf26 } if f1f1iter.Privileged != nil { f1f1elem.Privileged = f1f1iter.Privileged @@ -501,39 +510,39 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E f1f1elem.ReadonlyRootFilesystem = f1f1iter.ReadonlyRootFilesystem } if f1f1iter.RepositoryCredentials != nil { - f1f1elemf29 := &svcapitypes.RepositoryCredentials{} + f1f1elemf30 := &svcapitypes.RepositoryCredentials{} if f1f1iter.RepositoryCredentials.CredentialsParameter != nil { - f1f1elemf29.CredentialsParameter = f1f1iter.RepositoryCredentials.CredentialsParameter + f1f1elemf30.CredentialsParameter = f1f1iter.RepositoryCredentials.CredentialsParameter } - f1f1elem.RepositoryCredentials = f1f1elemf29 + f1f1elem.RepositoryCredentials = f1f1elemf30 } if f1f1iter.ResourceRequirements != nil { - f1f1elemf30 := []*svcapitypes.ResourceRequirement{} - for _, f1f1elemf30iter := range f1f1iter.ResourceRequirements { - f1f1elemf30elem := &svcapitypes.ResourceRequirement{} - if f1f1elemf30iter.Type != nil { - f1f1elemf30elem.Type = f1f1elemf30iter.Type + f1f1elemf31 := []*svcapitypes.ResourceRequirement{} + for _, f1f1elemf31iter := range f1f1iter.ResourceRequirements { + f1f1elemf31elem := &svcapitypes.ResourceRequirement{} + if f1f1elemf31iter.Type != nil { + f1f1elemf31elem.Type = f1f1elemf31iter.Type } - if f1f1elemf30iter.Value != nil { - f1f1elemf30elem.Value = f1f1elemf30iter.Value + if f1f1elemf31iter.Value != nil { + f1f1elemf31elem.Value = f1f1elemf31iter.Value } - f1f1elemf30 = append(f1f1elemf30, f1f1elemf30elem) + f1f1elemf31 = append(f1f1elemf31, f1f1elemf31elem) } - f1f1elem.ResourceRequirements = f1f1elemf30 + f1f1elem.ResourceRequirements = f1f1elemf31 } if f1f1iter.Secrets != nil { - f1f1elemf31 := []*svcapitypes.Secret{} - for _, f1f1elemf31iter := range f1f1iter.Secrets { - f1f1elemf31elem := &svcapitypes.Secret{} - if f1f1elemf31iter.Name != nil { - f1f1elemf31elem.Name = f1f1elemf31iter.Name + f1f1elemf32 := []*svcapitypes.Secret{} + for _, f1f1elemf32iter := range f1f1iter.Secrets { + f1f1elemf32elem := &svcapitypes.Secret{} + if f1f1elemf32iter.Name != nil { + f1f1elemf32elem.Name = f1f1elemf32iter.Name } - if f1f1elemf31iter.ValueFrom != nil { - f1f1elemf31elem.ValueFrom = f1f1elemf31iter.ValueFrom + if f1f1elemf32iter.ValueFrom != nil { + f1f1elemf32elem.ValueFrom = f1f1elemf32iter.ValueFrom } - f1f1elemf31 = append(f1f1elemf31, f1f1elemf31elem) + f1f1elemf32 = append(f1f1elemf32, f1f1elemf32elem) } - f1f1elem.Secrets = f1f1elemf31 + f1f1elem.Secrets = f1f1elemf32 } if f1f1iter.StartTimeout != nil { f1f1elem.StartTimeout = f1f1iter.StartTimeout @@ -542,52 +551,52 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E f1f1elem.StopTimeout = f1f1iter.StopTimeout } if 
f1f1iter.SystemControls != nil { - f1f1elemf34 := []*svcapitypes.SystemControl{} - for _, f1f1elemf34iter := range f1f1iter.SystemControls { - f1f1elemf34elem := &svcapitypes.SystemControl{} - if f1f1elemf34iter.Namespace != nil { - f1f1elemf34elem.Namespace = f1f1elemf34iter.Namespace + f1f1elemf35 := []*svcapitypes.SystemControl{} + for _, f1f1elemf35iter := range f1f1iter.SystemControls { + f1f1elemf35elem := &svcapitypes.SystemControl{} + if f1f1elemf35iter.Namespace != nil { + f1f1elemf35elem.Namespace = f1f1elemf35iter.Namespace } - if f1f1elemf34iter.Value != nil { - f1f1elemf34elem.Value = f1f1elemf34iter.Value + if f1f1elemf35iter.Value != nil { + f1f1elemf35elem.Value = f1f1elemf35iter.Value } - f1f1elemf34 = append(f1f1elemf34, f1f1elemf34elem) + f1f1elemf35 = append(f1f1elemf35, f1f1elemf35elem) } - f1f1elem.SystemControls = f1f1elemf34 + f1f1elem.SystemControls = f1f1elemf35 } if f1f1iter.Ulimits != nil { - f1f1elemf35 := []*svcapitypes.Ulimit{} - for _, f1f1elemf35iter := range f1f1iter.Ulimits { - f1f1elemf35elem := &svcapitypes.Ulimit{} - if f1f1elemf35iter.HardLimit != nil { - f1f1elemf35elem.HardLimit = f1f1elemf35iter.HardLimit + f1f1elemf36 := []*svcapitypes.Ulimit{} + for _, f1f1elemf36iter := range f1f1iter.Ulimits { + f1f1elemf36elem := &svcapitypes.Ulimit{} + if f1f1elemf36iter.HardLimit != nil { + f1f1elemf36elem.HardLimit = f1f1elemf36iter.HardLimit } - if f1f1elemf35iter.Name != nil { - f1f1elemf35elem.Name = f1f1elemf35iter.Name + if f1f1elemf36iter.Name != nil { + f1f1elemf36elem.Name = f1f1elemf36iter.Name } - if f1f1elemf35iter.SoftLimit != nil { - f1f1elemf35elem.SoftLimit = f1f1elemf35iter.SoftLimit + if f1f1elemf36iter.SoftLimit != nil { + f1f1elemf36elem.SoftLimit = f1f1elemf36iter.SoftLimit } - f1f1elemf35 = append(f1f1elemf35, f1f1elemf35elem) + f1f1elemf36 = append(f1f1elemf36, f1f1elemf36elem) } - f1f1elem.Ulimits = f1f1elemf35 + f1f1elem.Ulimits = f1f1elemf36 } if f1f1iter.User != nil { f1f1elem.User = f1f1iter.User } if f1f1iter.VolumesFrom != nil { - f1f1elemf37 := []*svcapitypes.VolumeFrom{} - for _, f1f1elemf37iter := range f1f1iter.VolumesFrom { - f1f1elemf37elem := &svcapitypes.VolumeFrom{} - if f1f1elemf37iter.ReadOnly != nil { - f1f1elemf37elem.ReadOnly = f1f1elemf37iter.ReadOnly + f1f1elemf38 := []*svcapitypes.VolumeFrom{} + for _, f1f1elemf38iter := range f1f1iter.VolumesFrom { + f1f1elemf38elem := &svcapitypes.VolumeFrom{} + if f1f1elemf38iter.ReadOnly != nil { + f1f1elemf38elem.ReadOnly = f1f1elemf38iter.ReadOnly } - if f1f1elemf37iter.SourceContainer != nil { - f1f1elemf37elem.SourceContainer = f1f1elemf37iter.SourceContainer + if f1f1elemf38iter.SourceContainer != nil { + f1f1elemf38elem.SourceContainer = f1f1elemf38iter.SourceContainer } - f1f1elemf37 = append(f1f1elemf37, f1f1elemf37elem) + f1f1elemf38 = append(f1f1elemf38, f1f1elemf38elem) } - f1f1elem.VolumesFrom = f1f1elemf37 + f1f1elem.VolumesFrom = f1f1elemf38 } if f1f1iter.WorkingDirectory != nil { f1f1elem.WorkingDirectory = f1f1iter.WorkingDirectory diff --git a/pkg/controller/ecs/taskdefinition/zz_conversions.go b/pkg/controller/ecs/taskdefinition/zz_conversions.go index 7f98520ad2..cded58ba5e 100644 --- a/pkg/controller/ecs/taskdefinition/zz_conversions.go +++ b/pkg/controller/ecs/taskdefinition/zz_conversions.go @@ -84,153 +84,162 @@ func GenerateTaskDefinition(resp *svcsdk.DescribeTaskDefinitionOutput) *svcapity if f1f1iter.Cpu != nil { f1f1elem.CPU = f1f1iter.Cpu } + if f1f1iter.CredentialSpecs != nil { + f1f1elemf2 := []*string{} + for _, f1f1elemf2iter := range 
f1f1iter.CredentialSpecs { + var f1f1elemf2elem string + f1f1elemf2elem = *f1f1elemf2iter + f1f1elemf2 = append(f1f1elemf2, &f1f1elemf2elem) + } + f1f1elem.CredentialSpecs = f1f1elemf2 + } if f1f1iter.DependsOn != nil { - f1f1elemf2 := []*svcapitypes.ContainerDependency{} - for _, f1f1elemf2iter := range f1f1iter.DependsOn { - f1f1elemf2elem := &svcapitypes.ContainerDependency{} - if f1f1elemf2iter.Condition != nil { - f1f1elemf2elem.Condition = f1f1elemf2iter.Condition + f1f1elemf3 := []*svcapitypes.ContainerDependency{} + for _, f1f1elemf3iter := range f1f1iter.DependsOn { + f1f1elemf3elem := &svcapitypes.ContainerDependency{} + if f1f1elemf3iter.Condition != nil { + f1f1elemf3elem.Condition = f1f1elemf3iter.Condition } - if f1f1elemf2iter.ContainerName != nil { - f1f1elemf2elem.ContainerName = f1f1elemf2iter.ContainerName + if f1f1elemf3iter.ContainerName != nil { + f1f1elemf3elem.ContainerName = f1f1elemf3iter.ContainerName } - f1f1elemf2 = append(f1f1elemf2, f1f1elemf2elem) + f1f1elemf3 = append(f1f1elemf3, f1f1elemf3elem) } - f1f1elem.DependsOn = f1f1elemf2 + f1f1elem.DependsOn = f1f1elemf3 } if f1f1iter.DisableNetworking != nil { f1f1elem.DisableNetworking = f1f1iter.DisableNetworking } if f1f1iter.DnsSearchDomains != nil { - f1f1elemf4 := []*string{} - for _, f1f1elemf4iter := range f1f1iter.DnsSearchDomains { - var f1f1elemf4elem string - f1f1elemf4elem = *f1f1elemf4iter - f1f1elemf4 = append(f1f1elemf4, &f1f1elemf4elem) - } - f1f1elem.DNSSearchDomains = f1f1elemf4 - } - if f1f1iter.DnsServers != nil { f1f1elemf5 := []*string{} - for _, f1f1elemf5iter := range f1f1iter.DnsServers { + for _, f1f1elemf5iter := range f1f1iter.DnsSearchDomains { var f1f1elemf5elem string f1f1elemf5elem = *f1f1elemf5iter f1f1elemf5 = append(f1f1elemf5, &f1f1elemf5elem) } - f1f1elem.DNSServers = f1f1elemf5 + f1f1elem.DNSSearchDomains = f1f1elemf5 } - if f1f1iter.DockerLabels != nil { - f1f1elemf6 := map[string]*string{} - for f1f1elemf6key, f1f1elemf6valiter := range f1f1iter.DockerLabels { - var f1f1elemf6val string - f1f1elemf6val = *f1f1elemf6valiter - f1f1elemf6[f1f1elemf6key] = &f1f1elemf6val + if f1f1iter.DnsServers != nil { + f1f1elemf6 := []*string{} + for _, f1f1elemf6iter := range f1f1iter.DnsServers { + var f1f1elemf6elem string + f1f1elemf6elem = *f1f1elemf6iter + f1f1elemf6 = append(f1f1elemf6, &f1f1elemf6elem) } - f1f1elem.DockerLabels = f1f1elemf6 + f1f1elem.DNSServers = f1f1elemf6 } - if f1f1iter.DockerSecurityOptions != nil { - f1f1elemf7 := []*string{} - for _, f1f1elemf7iter := range f1f1iter.DockerSecurityOptions { - var f1f1elemf7elem string - f1f1elemf7elem = *f1f1elemf7iter - f1f1elemf7 = append(f1f1elemf7, &f1f1elemf7elem) + if f1f1iter.DockerLabels != nil { + f1f1elemf7 := map[string]*string{} + for f1f1elemf7key, f1f1elemf7valiter := range f1f1iter.DockerLabels { + var f1f1elemf7val string + f1f1elemf7val = *f1f1elemf7valiter + f1f1elemf7[f1f1elemf7key] = &f1f1elemf7val } - f1f1elem.DockerSecurityOptions = f1f1elemf7 + f1f1elem.DockerLabels = f1f1elemf7 } - if f1f1iter.EntryPoint != nil { + if f1f1iter.DockerSecurityOptions != nil { f1f1elemf8 := []*string{} - for _, f1f1elemf8iter := range f1f1iter.EntryPoint { + for _, f1f1elemf8iter := range f1f1iter.DockerSecurityOptions { var f1f1elemf8elem string f1f1elemf8elem = *f1f1elemf8iter f1f1elemf8 = append(f1f1elemf8, &f1f1elemf8elem) } - f1f1elem.EntryPoint = f1f1elemf8 + f1f1elem.DockerSecurityOptions = f1f1elemf8 + } + if f1f1iter.EntryPoint != nil { + f1f1elemf9 := []*string{} + for _, f1f1elemf9iter := range 
f1f1iter.EntryPoint { + var f1f1elemf9elem string + f1f1elemf9elem = *f1f1elemf9iter + f1f1elemf9 = append(f1f1elemf9, &f1f1elemf9elem) + } + f1f1elem.EntryPoint = f1f1elemf9 } if f1f1iter.Environment != nil { - f1f1elemf9 := []*svcapitypes.KeyValuePair{} - for _, f1f1elemf9iter := range f1f1iter.Environment { - f1f1elemf9elem := &svcapitypes.KeyValuePair{} - if f1f1elemf9iter.Name != nil { - f1f1elemf9elem.Name = f1f1elemf9iter.Name + f1f1elemf10 := []*svcapitypes.KeyValuePair{} + for _, f1f1elemf10iter := range f1f1iter.Environment { + f1f1elemf10elem := &svcapitypes.KeyValuePair{} + if f1f1elemf10iter.Name != nil { + f1f1elemf10elem.Name = f1f1elemf10iter.Name } - if f1f1elemf9iter.Value != nil { - f1f1elemf9elem.Value = f1f1elemf9iter.Value + if f1f1elemf10iter.Value != nil { + f1f1elemf10elem.Value = f1f1elemf10iter.Value } - f1f1elemf9 = append(f1f1elemf9, f1f1elemf9elem) + f1f1elemf10 = append(f1f1elemf10, f1f1elemf10elem) } - f1f1elem.Environment = f1f1elemf9 + f1f1elem.Environment = f1f1elemf10 } if f1f1iter.EnvironmentFiles != nil { - f1f1elemf10 := []*svcapitypes.EnvironmentFile{} - for _, f1f1elemf10iter := range f1f1iter.EnvironmentFiles { - f1f1elemf10elem := &svcapitypes.EnvironmentFile{} - if f1f1elemf10iter.Type != nil { - f1f1elemf10elem.Type = f1f1elemf10iter.Type + f1f1elemf11 := []*svcapitypes.EnvironmentFile{} + for _, f1f1elemf11iter := range f1f1iter.EnvironmentFiles { + f1f1elemf11elem := &svcapitypes.EnvironmentFile{} + if f1f1elemf11iter.Type != nil { + f1f1elemf11elem.Type = f1f1elemf11iter.Type } - if f1f1elemf10iter.Value != nil { - f1f1elemf10elem.Value = f1f1elemf10iter.Value + if f1f1elemf11iter.Value != nil { + f1f1elemf11elem.Value = f1f1elemf11iter.Value } - f1f1elemf10 = append(f1f1elemf10, f1f1elemf10elem) + f1f1elemf11 = append(f1f1elemf11, f1f1elemf11elem) } - f1f1elem.EnvironmentFiles = f1f1elemf10 + f1f1elem.EnvironmentFiles = f1f1elemf11 } if f1f1iter.Essential != nil { f1f1elem.Essential = f1f1iter.Essential } if f1f1iter.ExtraHosts != nil { - f1f1elemf12 := []*svcapitypes.HostEntry{} - for _, f1f1elemf12iter := range f1f1iter.ExtraHosts { - f1f1elemf12elem := &svcapitypes.HostEntry{} - if f1f1elemf12iter.Hostname != nil { - f1f1elemf12elem.Hostname = f1f1elemf12iter.Hostname + f1f1elemf13 := []*svcapitypes.HostEntry{} + for _, f1f1elemf13iter := range f1f1iter.ExtraHosts { + f1f1elemf13elem := &svcapitypes.HostEntry{} + if f1f1elemf13iter.Hostname != nil { + f1f1elemf13elem.Hostname = f1f1elemf13iter.Hostname } - if f1f1elemf12iter.IpAddress != nil { - f1f1elemf12elem.IPAddress = f1f1elemf12iter.IpAddress + if f1f1elemf13iter.IpAddress != nil { + f1f1elemf13elem.IPAddress = f1f1elemf13iter.IpAddress } - f1f1elemf12 = append(f1f1elemf12, f1f1elemf12elem) + f1f1elemf13 = append(f1f1elemf13, f1f1elemf13elem) } - f1f1elem.ExtraHosts = f1f1elemf12 + f1f1elem.ExtraHosts = f1f1elemf13 } if f1f1iter.FirelensConfiguration != nil { - f1f1elemf13 := &svcapitypes.FirelensConfiguration{} + f1f1elemf14 := &svcapitypes.FirelensConfiguration{} if f1f1iter.FirelensConfiguration.Options != nil { - f1f1elemf13f0 := map[string]*string{} - for f1f1elemf13f0key, f1f1elemf13f0valiter := range f1f1iter.FirelensConfiguration.Options { - var f1f1elemf13f0val string - f1f1elemf13f0val = *f1f1elemf13f0valiter - f1f1elemf13f0[f1f1elemf13f0key] = &f1f1elemf13f0val + f1f1elemf14f0 := map[string]*string{} + for f1f1elemf14f0key, f1f1elemf14f0valiter := range f1f1iter.FirelensConfiguration.Options { + var f1f1elemf14f0val string + f1f1elemf14f0val = *f1f1elemf14f0valiter + 
f1f1elemf14f0[f1f1elemf14f0key] = &f1f1elemf14f0val } - f1f1elemf13.Options = f1f1elemf13f0 + f1f1elemf14.Options = f1f1elemf14f0 } if f1f1iter.FirelensConfiguration.Type != nil { - f1f1elemf13.Type = f1f1iter.FirelensConfiguration.Type + f1f1elemf14.Type = f1f1iter.FirelensConfiguration.Type } - f1f1elem.FirelensConfiguration = f1f1elemf13 + f1f1elem.FirelensConfiguration = f1f1elemf14 } if f1f1iter.HealthCheck != nil { - f1f1elemf14 := &svcapitypes.HealthCheck{} + f1f1elemf15 := &svcapitypes.HealthCheck{} if f1f1iter.HealthCheck.Command != nil { - f1f1elemf14f0 := []*string{} - for _, f1f1elemf14f0iter := range f1f1iter.HealthCheck.Command { - var f1f1elemf14f0elem string - f1f1elemf14f0elem = *f1f1elemf14f0iter - f1f1elemf14f0 = append(f1f1elemf14f0, &f1f1elemf14f0elem) + f1f1elemf15f0 := []*string{} + for _, f1f1elemf15f0iter := range f1f1iter.HealthCheck.Command { + var f1f1elemf15f0elem string + f1f1elemf15f0elem = *f1f1elemf15f0iter + f1f1elemf15f0 = append(f1f1elemf15f0, &f1f1elemf15f0elem) } - f1f1elemf14.Command = f1f1elemf14f0 + f1f1elemf15.Command = f1f1elemf15f0 } if f1f1iter.HealthCheck.Interval != nil { - f1f1elemf14.Interval = f1f1iter.HealthCheck.Interval + f1f1elemf15.Interval = f1f1iter.HealthCheck.Interval } if f1f1iter.HealthCheck.Retries != nil { - f1f1elemf14.Retries = f1f1iter.HealthCheck.Retries + f1f1elemf15.Retries = f1f1iter.HealthCheck.Retries } if f1f1iter.HealthCheck.StartPeriod != nil { - f1f1elemf14.StartPeriod = f1f1iter.HealthCheck.StartPeriod + f1f1elemf15.StartPeriod = f1f1iter.HealthCheck.StartPeriod } if f1f1iter.HealthCheck.Timeout != nil { - f1f1elemf14.Timeout = f1f1iter.HealthCheck.Timeout + f1f1elemf15.Timeout = f1f1iter.HealthCheck.Timeout } - f1f1elem.HealthCheck = f1f1elemf14 + f1f1elem.HealthCheck = f1f1elemf15 } if f1f1iter.Hostname != nil { f1f1elem.Hostname = f1f1iter.Hostname @@ -242,127 +251,127 @@ func GenerateTaskDefinition(resp *svcsdk.DescribeTaskDefinitionOutput) *svcapity f1f1elem.Interactive = f1f1iter.Interactive } if f1f1iter.Links != nil { - f1f1elemf18 := []*string{} - for _, f1f1elemf18iter := range f1f1iter.Links { - var f1f1elemf18elem string - f1f1elemf18elem = *f1f1elemf18iter - f1f1elemf18 = append(f1f1elemf18, &f1f1elemf18elem) + f1f1elemf19 := []*string{} + for _, f1f1elemf19iter := range f1f1iter.Links { + var f1f1elemf19elem string + f1f1elemf19elem = *f1f1elemf19iter + f1f1elemf19 = append(f1f1elemf19, &f1f1elemf19elem) } - f1f1elem.Links = f1f1elemf18 + f1f1elem.Links = f1f1elemf19 } if f1f1iter.LinuxParameters != nil { - f1f1elemf19 := &svcapitypes.LinuxParameters{} + f1f1elemf20 := &svcapitypes.LinuxParameters{} if f1f1iter.LinuxParameters.Capabilities != nil { - f1f1elemf19f0 := &svcapitypes.KernelCapabilities{} + f1f1elemf20f0 := &svcapitypes.KernelCapabilities{} if f1f1iter.LinuxParameters.Capabilities.Add != nil { - f1f1elemf19f0f0 := []*string{} - for _, f1f1elemf19f0f0iter := range f1f1iter.LinuxParameters.Capabilities.Add { - var f1f1elemf19f0f0elem string - f1f1elemf19f0f0elem = *f1f1elemf19f0f0iter - f1f1elemf19f0f0 = append(f1f1elemf19f0f0, &f1f1elemf19f0f0elem) + f1f1elemf20f0f0 := []*string{} + for _, f1f1elemf20f0f0iter := range f1f1iter.LinuxParameters.Capabilities.Add { + var f1f1elemf20f0f0elem string + f1f1elemf20f0f0elem = *f1f1elemf20f0f0iter + f1f1elemf20f0f0 = append(f1f1elemf20f0f0, &f1f1elemf20f0f0elem) } - f1f1elemf19f0.Add = f1f1elemf19f0f0 + f1f1elemf20f0.Add = f1f1elemf20f0f0 } if f1f1iter.LinuxParameters.Capabilities.Drop != nil { - f1f1elemf19f0f1 := []*string{} - for _, 
f1f1elemf19f0f1iter := range f1f1iter.LinuxParameters.Capabilities.Drop { - var f1f1elemf19f0f1elem string - f1f1elemf19f0f1elem = *f1f1elemf19f0f1iter - f1f1elemf19f0f1 = append(f1f1elemf19f0f1, &f1f1elemf19f0f1elem) + f1f1elemf20f0f1 := []*string{} + for _, f1f1elemf20f0f1iter := range f1f1iter.LinuxParameters.Capabilities.Drop { + var f1f1elemf20f0f1elem string + f1f1elemf20f0f1elem = *f1f1elemf20f0f1iter + f1f1elemf20f0f1 = append(f1f1elemf20f0f1, &f1f1elemf20f0f1elem) } - f1f1elemf19f0.Drop = f1f1elemf19f0f1 + f1f1elemf20f0.Drop = f1f1elemf20f0f1 } - f1f1elemf19.Capabilities = f1f1elemf19f0 + f1f1elemf20.Capabilities = f1f1elemf20f0 } if f1f1iter.LinuxParameters.Devices != nil { - f1f1elemf19f1 := []*svcapitypes.Device{} - for _, f1f1elemf19f1iter := range f1f1iter.LinuxParameters.Devices { - f1f1elemf19f1elem := &svcapitypes.Device{} - if f1f1elemf19f1iter.ContainerPath != nil { - f1f1elemf19f1elem.ContainerPath = f1f1elemf19f1iter.ContainerPath + f1f1elemf20f1 := []*svcapitypes.Device{} + for _, f1f1elemf20f1iter := range f1f1iter.LinuxParameters.Devices { + f1f1elemf20f1elem := &svcapitypes.Device{} + if f1f1elemf20f1iter.ContainerPath != nil { + f1f1elemf20f1elem.ContainerPath = f1f1elemf20f1iter.ContainerPath } - if f1f1elemf19f1iter.HostPath != nil { - f1f1elemf19f1elem.HostPath = f1f1elemf19f1iter.HostPath + if f1f1elemf20f1iter.HostPath != nil { + f1f1elemf20f1elem.HostPath = f1f1elemf20f1iter.HostPath } - if f1f1elemf19f1iter.Permissions != nil { - f1f1elemf19f1elemf2 := []*string{} - for _, f1f1elemf19f1elemf2iter := range f1f1elemf19f1iter.Permissions { - var f1f1elemf19f1elemf2elem string - f1f1elemf19f1elemf2elem = *f1f1elemf19f1elemf2iter - f1f1elemf19f1elemf2 = append(f1f1elemf19f1elemf2, &f1f1elemf19f1elemf2elem) + if f1f1elemf20f1iter.Permissions != nil { + f1f1elemf20f1elemf2 := []*string{} + for _, f1f1elemf20f1elemf2iter := range f1f1elemf20f1iter.Permissions { + var f1f1elemf20f1elemf2elem string + f1f1elemf20f1elemf2elem = *f1f1elemf20f1elemf2iter + f1f1elemf20f1elemf2 = append(f1f1elemf20f1elemf2, &f1f1elemf20f1elemf2elem) } - f1f1elemf19f1elem.Permissions = f1f1elemf19f1elemf2 + f1f1elemf20f1elem.Permissions = f1f1elemf20f1elemf2 } - f1f1elemf19f1 = append(f1f1elemf19f1, f1f1elemf19f1elem) + f1f1elemf20f1 = append(f1f1elemf20f1, f1f1elemf20f1elem) } - f1f1elemf19.Devices = f1f1elemf19f1 + f1f1elemf20.Devices = f1f1elemf20f1 } if f1f1iter.LinuxParameters.InitProcessEnabled != nil { - f1f1elemf19.InitProcessEnabled = f1f1iter.LinuxParameters.InitProcessEnabled + f1f1elemf20.InitProcessEnabled = f1f1iter.LinuxParameters.InitProcessEnabled } if f1f1iter.LinuxParameters.MaxSwap != nil { - f1f1elemf19.MaxSwap = f1f1iter.LinuxParameters.MaxSwap + f1f1elemf20.MaxSwap = f1f1iter.LinuxParameters.MaxSwap } if f1f1iter.LinuxParameters.SharedMemorySize != nil { - f1f1elemf19.SharedMemorySize = f1f1iter.LinuxParameters.SharedMemorySize + f1f1elemf20.SharedMemorySize = f1f1iter.LinuxParameters.SharedMemorySize } if f1f1iter.LinuxParameters.Swappiness != nil { - f1f1elemf19.Swappiness = f1f1iter.LinuxParameters.Swappiness + f1f1elemf20.Swappiness = f1f1iter.LinuxParameters.Swappiness } if f1f1iter.LinuxParameters.Tmpfs != nil { - f1f1elemf19f6 := []*svcapitypes.Tmpfs{} - for _, f1f1elemf19f6iter := range f1f1iter.LinuxParameters.Tmpfs { - f1f1elemf19f6elem := &svcapitypes.Tmpfs{} - if f1f1elemf19f6iter.ContainerPath != nil { - f1f1elemf19f6elem.ContainerPath = f1f1elemf19f6iter.ContainerPath + f1f1elemf20f6 := []*svcapitypes.Tmpfs{} + for _, f1f1elemf20f6iter := range 
f1f1iter.LinuxParameters.Tmpfs { + f1f1elemf20f6elem := &svcapitypes.Tmpfs{} + if f1f1elemf20f6iter.ContainerPath != nil { + f1f1elemf20f6elem.ContainerPath = f1f1elemf20f6iter.ContainerPath } - if f1f1elemf19f6iter.MountOptions != nil { - f1f1elemf19f6elemf1 := []*string{} - for _, f1f1elemf19f6elemf1iter := range f1f1elemf19f6iter.MountOptions { - var f1f1elemf19f6elemf1elem string - f1f1elemf19f6elemf1elem = *f1f1elemf19f6elemf1iter - f1f1elemf19f6elemf1 = append(f1f1elemf19f6elemf1, &f1f1elemf19f6elemf1elem) + if f1f1elemf20f6iter.MountOptions != nil { + f1f1elemf20f6elemf1 := []*string{} + for _, f1f1elemf20f6elemf1iter := range f1f1elemf20f6iter.MountOptions { + var f1f1elemf20f6elemf1elem string + f1f1elemf20f6elemf1elem = *f1f1elemf20f6elemf1iter + f1f1elemf20f6elemf1 = append(f1f1elemf20f6elemf1, &f1f1elemf20f6elemf1elem) } - f1f1elemf19f6elem.MountOptions = f1f1elemf19f6elemf1 + f1f1elemf20f6elem.MountOptions = f1f1elemf20f6elemf1 } - if f1f1elemf19f6iter.Size != nil { - f1f1elemf19f6elem.Size = f1f1elemf19f6iter.Size + if f1f1elemf20f6iter.Size != nil { + f1f1elemf20f6elem.Size = f1f1elemf20f6iter.Size } - f1f1elemf19f6 = append(f1f1elemf19f6, f1f1elemf19f6elem) + f1f1elemf20f6 = append(f1f1elemf20f6, f1f1elemf20f6elem) } - f1f1elemf19.Tmpfs = f1f1elemf19f6 + f1f1elemf20.Tmpfs = f1f1elemf20f6 } - f1f1elem.LinuxParameters = f1f1elemf19 + f1f1elem.LinuxParameters = f1f1elemf20 } if f1f1iter.LogConfiguration != nil { - f1f1elemf20 := &svcapitypes.LogConfiguration{} + f1f1elemf21 := &svcapitypes.LogConfiguration{} if f1f1iter.LogConfiguration.LogDriver != nil { - f1f1elemf20.LogDriver = f1f1iter.LogConfiguration.LogDriver + f1f1elemf21.LogDriver = f1f1iter.LogConfiguration.LogDriver } if f1f1iter.LogConfiguration.Options != nil { - f1f1elemf20f1 := map[string]*string{} - for f1f1elemf20f1key, f1f1elemf20f1valiter := range f1f1iter.LogConfiguration.Options { - var f1f1elemf20f1val string - f1f1elemf20f1val = *f1f1elemf20f1valiter - f1f1elemf20f1[f1f1elemf20f1key] = &f1f1elemf20f1val + f1f1elemf21f1 := map[string]*string{} + for f1f1elemf21f1key, f1f1elemf21f1valiter := range f1f1iter.LogConfiguration.Options { + var f1f1elemf21f1val string + f1f1elemf21f1val = *f1f1elemf21f1valiter + f1f1elemf21f1[f1f1elemf21f1key] = &f1f1elemf21f1val } - f1f1elemf20.Options = f1f1elemf20f1 + f1f1elemf21.Options = f1f1elemf21f1 } if f1f1iter.LogConfiguration.SecretOptions != nil { - f1f1elemf20f2 := []*svcapitypes.Secret{} - for _, f1f1elemf20f2iter := range f1f1iter.LogConfiguration.SecretOptions { - f1f1elemf20f2elem := &svcapitypes.Secret{} - if f1f1elemf20f2iter.Name != nil { - f1f1elemf20f2elem.Name = f1f1elemf20f2iter.Name + f1f1elemf21f2 := []*svcapitypes.Secret{} + for _, f1f1elemf21f2iter := range f1f1iter.LogConfiguration.SecretOptions { + f1f1elemf21f2elem := &svcapitypes.Secret{} + if f1f1elemf21f2iter.Name != nil { + f1f1elemf21f2elem.Name = f1f1elemf21f2iter.Name } - if f1f1elemf20f2iter.ValueFrom != nil { - f1f1elemf20f2elem.ValueFrom = f1f1elemf20f2iter.ValueFrom + if f1f1elemf21f2iter.ValueFrom != nil { + f1f1elemf21f2elem.ValueFrom = f1f1elemf21f2iter.ValueFrom } - f1f1elemf20f2 = append(f1f1elemf20f2, f1f1elemf20f2elem) + f1f1elemf21f2 = append(f1f1elemf21f2, f1f1elemf21f2elem) } - f1f1elemf20.SecretOptions = f1f1elemf20f2 + f1f1elemf21.SecretOptions = f1f1elemf21f2 } - f1f1elem.LogConfiguration = f1f1elemf20 + f1f1elem.LogConfiguration = f1f1elemf21 } if f1f1iter.Memory != nil { f1f1elem.Memory = f1f1iter.Memory @@ -371,50 +380,50 @@ func GenerateTaskDefinition(resp 
*svcsdk.DescribeTaskDefinitionOutput) *svcapity f1f1elem.MemoryReservation = f1f1iter.MemoryReservation } if f1f1iter.MountPoints != nil { - f1f1elemf23 := []*svcapitypes.MountPoint{} - for _, f1f1elemf23iter := range f1f1iter.MountPoints { - f1f1elemf23elem := &svcapitypes.MountPoint{} - if f1f1elemf23iter.ContainerPath != nil { - f1f1elemf23elem.ContainerPath = f1f1elemf23iter.ContainerPath + f1f1elemf24 := []*svcapitypes.MountPoint{} + for _, f1f1elemf24iter := range f1f1iter.MountPoints { + f1f1elemf24elem := &svcapitypes.MountPoint{} + if f1f1elemf24iter.ContainerPath != nil { + f1f1elemf24elem.ContainerPath = f1f1elemf24iter.ContainerPath } - if f1f1elemf23iter.ReadOnly != nil { - f1f1elemf23elem.ReadOnly = f1f1elemf23iter.ReadOnly + if f1f1elemf24iter.ReadOnly != nil { + f1f1elemf24elem.ReadOnly = f1f1elemf24iter.ReadOnly } - if f1f1elemf23iter.SourceVolume != nil { - f1f1elemf23elem.SourceVolume = f1f1elemf23iter.SourceVolume + if f1f1elemf24iter.SourceVolume != nil { + f1f1elemf24elem.SourceVolume = f1f1elemf24iter.SourceVolume } - f1f1elemf23 = append(f1f1elemf23, f1f1elemf23elem) + f1f1elemf24 = append(f1f1elemf24, f1f1elemf24elem) } - f1f1elem.MountPoints = f1f1elemf23 + f1f1elem.MountPoints = f1f1elemf24 } if f1f1iter.Name != nil { f1f1elem.Name = f1f1iter.Name } if f1f1iter.PortMappings != nil { - f1f1elemf25 := []*svcapitypes.PortMapping{} - for _, f1f1elemf25iter := range f1f1iter.PortMappings { - f1f1elemf25elem := &svcapitypes.PortMapping{} - if f1f1elemf25iter.AppProtocol != nil { - f1f1elemf25elem.AppProtocol = f1f1elemf25iter.AppProtocol + f1f1elemf26 := []*svcapitypes.PortMapping{} + for _, f1f1elemf26iter := range f1f1iter.PortMappings { + f1f1elemf26elem := &svcapitypes.PortMapping{} + if f1f1elemf26iter.AppProtocol != nil { + f1f1elemf26elem.AppProtocol = f1f1elemf26iter.AppProtocol } - if f1f1elemf25iter.ContainerPort != nil { - f1f1elemf25elem.ContainerPort = f1f1elemf25iter.ContainerPort + if f1f1elemf26iter.ContainerPort != nil { + f1f1elemf26elem.ContainerPort = f1f1elemf26iter.ContainerPort } - if f1f1elemf25iter.ContainerPortRange != nil { - f1f1elemf25elem.ContainerPortRange = f1f1elemf25iter.ContainerPortRange + if f1f1elemf26iter.ContainerPortRange != nil { + f1f1elemf26elem.ContainerPortRange = f1f1elemf26iter.ContainerPortRange } - if f1f1elemf25iter.HostPort != nil { - f1f1elemf25elem.HostPort = f1f1elemf25iter.HostPort + if f1f1elemf26iter.HostPort != nil { + f1f1elemf26elem.HostPort = f1f1elemf26iter.HostPort } - if f1f1elemf25iter.Name != nil { - f1f1elemf25elem.Name = f1f1elemf25iter.Name + if f1f1elemf26iter.Name != nil { + f1f1elemf26elem.Name = f1f1elemf26iter.Name } - if f1f1elemf25iter.Protocol != nil { - f1f1elemf25elem.Protocol = f1f1elemf25iter.Protocol + if f1f1elemf26iter.Protocol != nil { + f1f1elemf26elem.Protocol = f1f1elemf26iter.Protocol } - f1f1elemf25 = append(f1f1elemf25, f1f1elemf25elem) + f1f1elemf26 = append(f1f1elemf26, f1f1elemf26elem) } - f1f1elem.PortMappings = f1f1elemf25 + f1f1elem.PortMappings = f1f1elemf26 } if f1f1iter.Privileged != nil { f1f1elem.Privileged = f1f1iter.Privileged @@ -426,39 +435,39 @@ func GenerateTaskDefinition(resp *svcsdk.DescribeTaskDefinitionOutput) *svcapity f1f1elem.ReadonlyRootFilesystem = f1f1iter.ReadonlyRootFilesystem } if f1f1iter.RepositoryCredentials != nil { - f1f1elemf29 := &svcapitypes.RepositoryCredentials{} + f1f1elemf30 := &svcapitypes.RepositoryCredentials{} if f1f1iter.RepositoryCredentials.CredentialsParameter != nil { - f1f1elemf29.CredentialsParameter = 
f1f1iter.RepositoryCredentials.CredentialsParameter + f1f1elemf30.CredentialsParameter = f1f1iter.RepositoryCredentials.CredentialsParameter } - f1f1elem.RepositoryCredentials = f1f1elemf29 + f1f1elem.RepositoryCredentials = f1f1elemf30 } if f1f1iter.ResourceRequirements != nil { - f1f1elemf30 := []*svcapitypes.ResourceRequirement{} - for _, f1f1elemf30iter := range f1f1iter.ResourceRequirements { - f1f1elemf30elem := &svcapitypes.ResourceRequirement{} - if f1f1elemf30iter.Type != nil { - f1f1elemf30elem.Type = f1f1elemf30iter.Type + f1f1elemf31 := []*svcapitypes.ResourceRequirement{} + for _, f1f1elemf31iter := range f1f1iter.ResourceRequirements { + f1f1elemf31elem := &svcapitypes.ResourceRequirement{} + if f1f1elemf31iter.Type != nil { + f1f1elemf31elem.Type = f1f1elemf31iter.Type } - if f1f1elemf30iter.Value != nil { - f1f1elemf30elem.Value = f1f1elemf30iter.Value + if f1f1elemf31iter.Value != nil { + f1f1elemf31elem.Value = f1f1elemf31iter.Value } - f1f1elemf30 = append(f1f1elemf30, f1f1elemf30elem) + f1f1elemf31 = append(f1f1elemf31, f1f1elemf31elem) } - f1f1elem.ResourceRequirements = f1f1elemf30 + f1f1elem.ResourceRequirements = f1f1elemf31 } if f1f1iter.Secrets != nil { - f1f1elemf31 := []*svcapitypes.Secret{} - for _, f1f1elemf31iter := range f1f1iter.Secrets { - f1f1elemf31elem := &svcapitypes.Secret{} - if f1f1elemf31iter.Name != nil { - f1f1elemf31elem.Name = f1f1elemf31iter.Name + f1f1elemf32 := []*svcapitypes.Secret{} + for _, f1f1elemf32iter := range f1f1iter.Secrets { + f1f1elemf32elem := &svcapitypes.Secret{} + if f1f1elemf32iter.Name != nil { + f1f1elemf32elem.Name = f1f1elemf32iter.Name } - if f1f1elemf31iter.ValueFrom != nil { - f1f1elemf31elem.ValueFrom = f1f1elemf31iter.ValueFrom + if f1f1elemf32iter.ValueFrom != nil { + f1f1elemf32elem.ValueFrom = f1f1elemf32iter.ValueFrom } - f1f1elemf31 = append(f1f1elemf31, f1f1elemf31elem) + f1f1elemf32 = append(f1f1elemf32, f1f1elemf32elem) } - f1f1elem.Secrets = f1f1elemf31 + f1f1elem.Secrets = f1f1elemf32 } if f1f1iter.StartTimeout != nil { f1f1elem.StartTimeout = f1f1iter.StartTimeout @@ -467,52 +476,52 @@ func GenerateTaskDefinition(resp *svcsdk.DescribeTaskDefinitionOutput) *svcapity f1f1elem.StopTimeout = f1f1iter.StopTimeout } if f1f1iter.SystemControls != nil { - f1f1elemf34 := []*svcapitypes.SystemControl{} - for _, f1f1elemf34iter := range f1f1iter.SystemControls { - f1f1elemf34elem := &svcapitypes.SystemControl{} - if f1f1elemf34iter.Namespace != nil { - f1f1elemf34elem.Namespace = f1f1elemf34iter.Namespace + f1f1elemf35 := []*svcapitypes.SystemControl{} + for _, f1f1elemf35iter := range f1f1iter.SystemControls { + f1f1elemf35elem := &svcapitypes.SystemControl{} + if f1f1elemf35iter.Namespace != nil { + f1f1elemf35elem.Namespace = f1f1elemf35iter.Namespace } - if f1f1elemf34iter.Value != nil { - f1f1elemf34elem.Value = f1f1elemf34iter.Value + if f1f1elemf35iter.Value != nil { + f1f1elemf35elem.Value = f1f1elemf35iter.Value } - f1f1elemf34 = append(f1f1elemf34, f1f1elemf34elem) + f1f1elemf35 = append(f1f1elemf35, f1f1elemf35elem) } - f1f1elem.SystemControls = f1f1elemf34 + f1f1elem.SystemControls = f1f1elemf35 } if f1f1iter.Ulimits != nil { - f1f1elemf35 := []*svcapitypes.Ulimit{} - for _, f1f1elemf35iter := range f1f1iter.Ulimits { - f1f1elemf35elem := &svcapitypes.Ulimit{} - if f1f1elemf35iter.HardLimit != nil { - f1f1elemf35elem.HardLimit = f1f1elemf35iter.HardLimit + f1f1elemf36 := []*svcapitypes.Ulimit{} + for _, f1f1elemf36iter := range f1f1iter.Ulimits { + f1f1elemf36elem := &svcapitypes.Ulimit{} + if 
f1f1elemf36iter.HardLimit != nil { + f1f1elemf36elem.HardLimit = f1f1elemf36iter.HardLimit } - if f1f1elemf35iter.Name != nil { - f1f1elemf35elem.Name = f1f1elemf35iter.Name + if f1f1elemf36iter.Name != nil { + f1f1elemf36elem.Name = f1f1elemf36iter.Name } - if f1f1elemf35iter.SoftLimit != nil { - f1f1elemf35elem.SoftLimit = f1f1elemf35iter.SoftLimit + if f1f1elemf36iter.SoftLimit != nil { + f1f1elemf36elem.SoftLimit = f1f1elemf36iter.SoftLimit } - f1f1elemf35 = append(f1f1elemf35, f1f1elemf35elem) + f1f1elemf36 = append(f1f1elemf36, f1f1elemf36elem) } - f1f1elem.Ulimits = f1f1elemf35 + f1f1elem.Ulimits = f1f1elemf36 } if f1f1iter.User != nil { f1f1elem.User = f1f1iter.User } if f1f1iter.VolumesFrom != nil { - f1f1elemf37 := []*svcapitypes.VolumeFrom{} - for _, f1f1elemf37iter := range f1f1iter.VolumesFrom { - f1f1elemf37elem := &svcapitypes.VolumeFrom{} - if f1f1elemf37iter.ReadOnly != nil { - f1f1elemf37elem.ReadOnly = f1f1elemf37iter.ReadOnly + f1f1elemf38 := []*svcapitypes.VolumeFrom{} + for _, f1f1elemf38iter := range f1f1iter.VolumesFrom { + f1f1elemf38elem := &svcapitypes.VolumeFrom{} + if f1f1elemf38iter.ReadOnly != nil { + f1f1elemf38elem.ReadOnly = f1f1elemf38iter.ReadOnly } - if f1f1elemf37iter.SourceContainer != nil { - f1f1elemf37elem.SourceContainer = f1f1elemf37iter.SourceContainer + if f1f1elemf38iter.SourceContainer != nil { + f1f1elemf38elem.SourceContainer = f1f1elemf38iter.SourceContainer } - f1f1elemf37 = append(f1f1elemf37, f1f1elemf37elem) + f1f1elemf38 = append(f1f1elemf38, f1f1elemf38elem) } - f1f1elem.VolumesFrom = f1f1elemf37 + f1f1elem.VolumesFrom = f1f1elemf38 } if f1f1iter.WorkingDirectory != nil { f1f1elem.WorkingDirectory = f1f1iter.WorkingDirectory @@ -784,153 +793,162 @@ func GenerateRegisterTaskDefinitionInput(cr *svcapitypes.TaskDefinition) *svcsdk if f0iter.CPU != nil { f0elem.SetCpu(*f0iter.CPU) } + if f0iter.CredentialSpecs != nil { + f0elemf2 := []*string{} + for _, f0elemf2iter := range f0iter.CredentialSpecs { + var f0elemf2elem string + f0elemf2elem = *f0elemf2iter + f0elemf2 = append(f0elemf2, &f0elemf2elem) + } + f0elem.SetCredentialSpecs(f0elemf2) + } if f0iter.DependsOn != nil { - f0elemf2 := []*svcsdk.ContainerDependency{} - for _, f0elemf2iter := range f0iter.DependsOn { - f0elemf2elem := &svcsdk.ContainerDependency{} - if f0elemf2iter.Condition != nil { - f0elemf2elem.SetCondition(*f0elemf2iter.Condition) + f0elemf3 := []*svcsdk.ContainerDependency{} + for _, f0elemf3iter := range f0iter.DependsOn { + f0elemf3elem := &svcsdk.ContainerDependency{} + if f0elemf3iter.Condition != nil { + f0elemf3elem.SetCondition(*f0elemf3iter.Condition) } - if f0elemf2iter.ContainerName != nil { - f0elemf2elem.SetContainerName(*f0elemf2iter.ContainerName) + if f0elemf3iter.ContainerName != nil { + f0elemf3elem.SetContainerName(*f0elemf3iter.ContainerName) } - f0elemf2 = append(f0elemf2, f0elemf2elem) + f0elemf3 = append(f0elemf3, f0elemf3elem) } - f0elem.SetDependsOn(f0elemf2) + f0elem.SetDependsOn(f0elemf3) } if f0iter.DisableNetworking != nil { f0elem.SetDisableNetworking(*f0iter.DisableNetworking) } if f0iter.DNSSearchDomains != nil { - f0elemf4 := []*string{} - for _, f0elemf4iter := range f0iter.DNSSearchDomains { - var f0elemf4elem string - f0elemf4elem = *f0elemf4iter - f0elemf4 = append(f0elemf4, &f0elemf4elem) - } - f0elem.SetDnsSearchDomains(f0elemf4) - } - if f0iter.DNSServers != nil { f0elemf5 := []*string{} - for _, f0elemf5iter := range f0iter.DNSServers { + for _, f0elemf5iter := range f0iter.DNSSearchDomains { var f0elemf5elem string 
f0elemf5elem = *f0elemf5iter f0elemf5 = append(f0elemf5, &f0elemf5elem) } - f0elem.SetDnsServers(f0elemf5) + f0elem.SetDnsSearchDomains(f0elemf5) } - if f0iter.DockerLabels != nil { - f0elemf6 := map[string]*string{} - for f0elemf6key, f0elemf6valiter := range f0iter.DockerLabels { - var f0elemf6val string - f0elemf6val = *f0elemf6valiter - f0elemf6[f0elemf6key] = &f0elemf6val + if f0iter.DNSServers != nil { + f0elemf6 := []*string{} + for _, f0elemf6iter := range f0iter.DNSServers { + var f0elemf6elem string + f0elemf6elem = *f0elemf6iter + f0elemf6 = append(f0elemf6, &f0elemf6elem) } - f0elem.SetDockerLabels(f0elemf6) + f0elem.SetDnsServers(f0elemf6) } - if f0iter.DockerSecurityOptions != nil { - f0elemf7 := []*string{} - for _, f0elemf7iter := range f0iter.DockerSecurityOptions { - var f0elemf7elem string - f0elemf7elem = *f0elemf7iter - f0elemf7 = append(f0elemf7, &f0elemf7elem) + if f0iter.DockerLabels != nil { + f0elemf7 := map[string]*string{} + for f0elemf7key, f0elemf7valiter := range f0iter.DockerLabels { + var f0elemf7val string + f0elemf7val = *f0elemf7valiter + f0elemf7[f0elemf7key] = &f0elemf7val } - f0elem.SetDockerSecurityOptions(f0elemf7) + f0elem.SetDockerLabels(f0elemf7) } - if f0iter.EntryPoint != nil { + if f0iter.DockerSecurityOptions != nil { f0elemf8 := []*string{} - for _, f0elemf8iter := range f0iter.EntryPoint { + for _, f0elemf8iter := range f0iter.DockerSecurityOptions { var f0elemf8elem string f0elemf8elem = *f0elemf8iter f0elemf8 = append(f0elemf8, &f0elemf8elem) } - f0elem.SetEntryPoint(f0elemf8) + f0elem.SetDockerSecurityOptions(f0elemf8) + } + if f0iter.EntryPoint != nil { + f0elemf9 := []*string{} + for _, f0elemf9iter := range f0iter.EntryPoint { + var f0elemf9elem string + f0elemf9elem = *f0elemf9iter + f0elemf9 = append(f0elemf9, &f0elemf9elem) + } + f0elem.SetEntryPoint(f0elemf9) } if f0iter.Environment != nil { - f0elemf9 := []*svcsdk.KeyValuePair{} - for _, f0elemf9iter := range f0iter.Environment { - f0elemf9elem := &svcsdk.KeyValuePair{} - if f0elemf9iter.Name != nil { - f0elemf9elem.SetName(*f0elemf9iter.Name) + f0elemf10 := []*svcsdk.KeyValuePair{} + for _, f0elemf10iter := range f0iter.Environment { + f0elemf10elem := &svcsdk.KeyValuePair{} + if f0elemf10iter.Name != nil { + f0elemf10elem.SetName(*f0elemf10iter.Name) } - if f0elemf9iter.Value != nil { - f0elemf9elem.SetValue(*f0elemf9iter.Value) + if f0elemf10iter.Value != nil { + f0elemf10elem.SetValue(*f0elemf10iter.Value) } - f0elemf9 = append(f0elemf9, f0elemf9elem) + f0elemf10 = append(f0elemf10, f0elemf10elem) } - f0elem.SetEnvironment(f0elemf9) + f0elem.SetEnvironment(f0elemf10) } if f0iter.EnvironmentFiles != nil { - f0elemf10 := []*svcsdk.EnvironmentFile{} - for _, f0elemf10iter := range f0iter.EnvironmentFiles { - f0elemf10elem := &svcsdk.EnvironmentFile{} - if f0elemf10iter.Type != nil { - f0elemf10elem.SetType(*f0elemf10iter.Type) + f0elemf11 := []*svcsdk.EnvironmentFile{} + for _, f0elemf11iter := range f0iter.EnvironmentFiles { + f0elemf11elem := &svcsdk.EnvironmentFile{} + if f0elemf11iter.Type != nil { + f0elemf11elem.SetType(*f0elemf11iter.Type) } - if f0elemf10iter.Value != nil { - f0elemf10elem.SetValue(*f0elemf10iter.Value) + if f0elemf11iter.Value != nil { + f0elemf11elem.SetValue(*f0elemf11iter.Value) } - f0elemf10 = append(f0elemf10, f0elemf10elem) + f0elemf11 = append(f0elemf11, f0elemf11elem) } - f0elem.SetEnvironmentFiles(f0elemf10) + f0elem.SetEnvironmentFiles(f0elemf11) } if f0iter.Essential != nil { f0elem.SetEssential(*f0iter.Essential) } if f0iter.ExtraHosts != 
nil { - f0elemf12 := []*svcsdk.HostEntry{} - for _, f0elemf12iter := range f0iter.ExtraHosts { - f0elemf12elem := &svcsdk.HostEntry{} - if f0elemf12iter.Hostname != nil { - f0elemf12elem.SetHostname(*f0elemf12iter.Hostname) + f0elemf13 := []*svcsdk.HostEntry{} + for _, f0elemf13iter := range f0iter.ExtraHosts { + f0elemf13elem := &svcsdk.HostEntry{} + if f0elemf13iter.Hostname != nil { + f0elemf13elem.SetHostname(*f0elemf13iter.Hostname) } - if f0elemf12iter.IPAddress != nil { - f0elemf12elem.SetIpAddress(*f0elemf12iter.IPAddress) + if f0elemf13iter.IPAddress != nil { + f0elemf13elem.SetIpAddress(*f0elemf13iter.IPAddress) } - f0elemf12 = append(f0elemf12, f0elemf12elem) + f0elemf13 = append(f0elemf13, f0elemf13elem) } - f0elem.SetExtraHosts(f0elemf12) + f0elem.SetExtraHosts(f0elemf13) } if f0iter.FirelensConfiguration != nil { - f0elemf13 := &svcsdk.FirelensConfiguration{} + f0elemf14 := &svcsdk.FirelensConfiguration{} if f0iter.FirelensConfiguration.Options != nil { - f0elemf13f0 := map[string]*string{} - for f0elemf13f0key, f0elemf13f0valiter := range f0iter.FirelensConfiguration.Options { - var f0elemf13f0val string - f0elemf13f0val = *f0elemf13f0valiter - f0elemf13f0[f0elemf13f0key] = &f0elemf13f0val + f0elemf14f0 := map[string]*string{} + for f0elemf14f0key, f0elemf14f0valiter := range f0iter.FirelensConfiguration.Options { + var f0elemf14f0val string + f0elemf14f0val = *f0elemf14f0valiter + f0elemf14f0[f0elemf14f0key] = &f0elemf14f0val } - f0elemf13.SetOptions(f0elemf13f0) + f0elemf14.SetOptions(f0elemf14f0) } if f0iter.FirelensConfiguration.Type != nil { - f0elemf13.SetType(*f0iter.FirelensConfiguration.Type) + f0elemf14.SetType(*f0iter.FirelensConfiguration.Type) } - f0elem.SetFirelensConfiguration(f0elemf13) + f0elem.SetFirelensConfiguration(f0elemf14) } if f0iter.HealthCheck != nil { - f0elemf14 := &svcsdk.HealthCheck{} + f0elemf15 := &svcsdk.HealthCheck{} if f0iter.HealthCheck.Command != nil { - f0elemf14f0 := []*string{} - for _, f0elemf14f0iter := range f0iter.HealthCheck.Command { - var f0elemf14f0elem string - f0elemf14f0elem = *f0elemf14f0iter - f0elemf14f0 = append(f0elemf14f0, &f0elemf14f0elem) + f0elemf15f0 := []*string{} + for _, f0elemf15f0iter := range f0iter.HealthCheck.Command { + var f0elemf15f0elem string + f0elemf15f0elem = *f0elemf15f0iter + f0elemf15f0 = append(f0elemf15f0, &f0elemf15f0elem) } - f0elemf14.SetCommand(f0elemf14f0) + f0elemf15.SetCommand(f0elemf15f0) } if f0iter.HealthCheck.Interval != nil { - f0elemf14.SetInterval(*f0iter.HealthCheck.Interval) + f0elemf15.SetInterval(*f0iter.HealthCheck.Interval) } if f0iter.HealthCheck.Retries != nil { - f0elemf14.SetRetries(*f0iter.HealthCheck.Retries) + f0elemf15.SetRetries(*f0iter.HealthCheck.Retries) } if f0iter.HealthCheck.StartPeriod != nil { - f0elemf14.SetStartPeriod(*f0iter.HealthCheck.StartPeriod) + f0elemf15.SetStartPeriod(*f0iter.HealthCheck.StartPeriod) } if f0iter.HealthCheck.Timeout != nil { - f0elemf14.SetTimeout(*f0iter.HealthCheck.Timeout) + f0elemf15.SetTimeout(*f0iter.HealthCheck.Timeout) } - f0elem.SetHealthCheck(f0elemf14) + f0elem.SetHealthCheck(f0elemf15) } if f0iter.Hostname != nil { f0elem.SetHostname(*f0iter.Hostname) @@ -942,127 +960,127 @@ func GenerateRegisterTaskDefinitionInput(cr *svcapitypes.TaskDefinition) *svcsdk f0elem.SetInteractive(*f0iter.Interactive) } if f0iter.Links != nil { - f0elemf18 := []*string{} - for _, f0elemf18iter := range f0iter.Links { - var f0elemf18elem string - f0elemf18elem = *f0elemf18iter - f0elemf18 = append(f0elemf18, &f0elemf18elem) + f0elemf19 := 
[]*string{} + for _, f0elemf19iter := range f0iter.Links { + var f0elemf19elem string + f0elemf19elem = *f0elemf19iter + f0elemf19 = append(f0elemf19, &f0elemf19elem) } - f0elem.SetLinks(f0elemf18) + f0elem.SetLinks(f0elemf19) } if f0iter.LinuxParameters != nil { - f0elemf19 := &svcsdk.LinuxParameters{} + f0elemf20 := &svcsdk.LinuxParameters{} if f0iter.LinuxParameters.Capabilities != nil { - f0elemf19f0 := &svcsdk.KernelCapabilities{} + f0elemf20f0 := &svcsdk.KernelCapabilities{} if f0iter.LinuxParameters.Capabilities.Add != nil { - f0elemf19f0f0 := []*string{} - for _, f0elemf19f0f0iter := range f0iter.LinuxParameters.Capabilities.Add { - var f0elemf19f0f0elem string - f0elemf19f0f0elem = *f0elemf19f0f0iter - f0elemf19f0f0 = append(f0elemf19f0f0, &f0elemf19f0f0elem) + f0elemf20f0f0 := []*string{} + for _, f0elemf20f0f0iter := range f0iter.LinuxParameters.Capabilities.Add { + var f0elemf20f0f0elem string + f0elemf20f0f0elem = *f0elemf20f0f0iter + f0elemf20f0f0 = append(f0elemf20f0f0, &f0elemf20f0f0elem) } - f0elemf19f0.SetAdd(f0elemf19f0f0) + f0elemf20f0.SetAdd(f0elemf20f0f0) } if f0iter.LinuxParameters.Capabilities.Drop != nil { - f0elemf19f0f1 := []*string{} - for _, f0elemf19f0f1iter := range f0iter.LinuxParameters.Capabilities.Drop { - var f0elemf19f0f1elem string - f0elemf19f0f1elem = *f0elemf19f0f1iter - f0elemf19f0f1 = append(f0elemf19f0f1, &f0elemf19f0f1elem) + f0elemf20f0f1 := []*string{} + for _, f0elemf20f0f1iter := range f0iter.LinuxParameters.Capabilities.Drop { + var f0elemf20f0f1elem string + f0elemf20f0f1elem = *f0elemf20f0f1iter + f0elemf20f0f1 = append(f0elemf20f0f1, &f0elemf20f0f1elem) } - f0elemf19f0.SetDrop(f0elemf19f0f1) + f0elemf20f0.SetDrop(f0elemf20f0f1) } - f0elemf19.SetCapabilities(f0elemf19f0) + f0elemf20.SetCapabilities(f0elemf20f0) } if f0iter.LinuxParameters.Devices != nil { - f0elemf19f1 := []*svcsdk.Device{} - for _, f0elemf19f1iter := range f0iter.LinuxParameters.Devices { - f0elemf19f1elem := &svcsdk.Device{} - if f0elemf19f1iter.ContainerPath != nil { - f0elemf19f1elem.SetContainerPath(*f0elemf19f1iter.ContainerPath) + f0elemf20f1 := []*svcsdk.Device{} + for _, f0elemf20f1iter := range f0iter.LinuxParameters.Devices { + f0elemf20f1elem := &svcsdk.Device{} + if f0elemf20f1iter.ContainerPath != nil { + f0elemf20f1elem.SetContainerPath(*f0elemf20f1iter.ContainerPath) } - if f0elemf19f1iter.HostPath != nil { - f0elemf19f1elem.SetHostPath(*f0elemf19f1iter.HostPath) + if f0elemf20f1iter.HostPath != nil { + f0elemf20f1elem.SetHostPath(*f0elemf20f1iter.HostPath) } - if f0elemf19f1iter.Permissions != nil { - f0elemf19f1elemf2 := []*string{} - for _, f0elemf19f1elemf2iter := range f0elemf19f1iter.Permissions { - var f0elemf19f1elemf2elem string - f0elemf19f1elemf2elem = *f0elemf19f1elemf2iter - f0elemf19f1elemf2 = append(f0elemf19f1elemf2, &f0elemf19f1elemf2elem) + if f0elemf20f1iter.Permissions != nil { + f0elemf20f1elemf2 := []*string{} + for _, f0elemf20f1elemf2iter := range f0elemf20f1iter.Permissions { + var f0elemf20f1elemf2elem string + f0elemf20f1elemf2elem = *f0elemf20f1elemf2iter + f0elemf20f1elemf2 = append(f0elemf20f1elemf2, &f0elemf20f1elemf2elem) } - f0elemf19f1elem.SetPermissions(f0elemf19f1elemf2) + f0elemf20f1elem.SetPermissions(f0elemf20f1elemf2) } - f0elemf19f1 = append(f0elemf19f1, f0elemf19f1elem) + f0elemf20f1 = append(f0elemf20f1, f0elemf20f1elem) } - f0elemf19.SetDevices(f0elemf19f1) + f0elemf20.SetDevices(f0elemf20f1) } if f0iter.LinuxParameters.InitProcessEnabled != nil { - 
f0elemf19.SetInitProcessEnabled(*f0iter.LinuxParameters.InitProcessEnabled) + f0elemf20.SetInitProcessEnabled(*f0iter.LinuxParameters.InitProcessEnabled) } if f0iter.LinuxParameters.MaxSwap != nil { - f0elemf19.SetMaxSwap(*f0iter.LinuxParameters.MaxSwap) + f0elemf20.SetMaxSwap(*f0iter.LinuxParameters.MaxSwap) } if f0iter.LinuxParameters.SharedMemorySize != nil { - f0elemf19.SetSharedMemorySize(*f0iter.LinuxParameters.SharedMemorySize) + f0elemf20.SetSharedMemorySize(*f0iter.LinuxParameters.SharedMemorySize) } if f0iter.LinuxParameters.Swappiness != nil { - f0elemf19.SetSwappiness(*f0iter.LinuxParameters.Swappiness) + f0elemf20.SetSwappiness(*f0iter.LinuxParameters.Swappiness) } if f0iter.LinuxParameters.Tmpfs != nil { - f0elemf19f6 := []*svcsdk.Tmpfs{} - for _, f0elemf19f6iter := range f0iter.LinuxParameters.Tmpfs { - f0elemf19f6elem := &svcsdk.Tmpfs{} - if f0elemf19f6iter.ContainerPath != nil { - f0elemf19f6elem.SetContainerPath(*f0elemf19f6iter.ContainerPath) + f0elemf20f6 := []*svcsdk.Tmpfs{} + for _, f0elemf20f6iter := range f0iter.LinuxParameters.Tmpfs { + f0elemf20f6elem := &svcsdk.Tmpfs{} + if f0elemf20f6iter.ContainerPath != nil { + f0elemf20f6elem.SetContainerPath(*f0elemf20f6iter.ContainerPath) } - if f0elemf19f6iter.MountOptions != nil { - f0elemf19f6elemf1 := []*string{} - for _, f0elemf19f6elemf1iter := range f0elemf19f6iter.MountOptions { - var f0elemf19f6elemf1elem string - f0elemf19f6elemf1elem = *f0elemf19f6elemf1iter - f0elemf19f6elemf1 = append(f0elemf19f6elemf1, &f0elemf19f6elemf1elem) + if f0elemf20f6iter.MountOptions != nil { + f0elemf20f6elemf1 := []*string{} + for _, f0elemf20f6elemf1iter := range f0elemf20f6iter.MountOptions { + var f0elemf20f6elemf1elem string + f0elemf20f6elemf1elem = *f0elemf20f6elemf1iter + f0elemf20f6elemf1 = append(f0elemf20f6elemf1, &f0elemf20f6elemf1elem) } - f0elemf19f6elem.SetMountOptions(f0elemf19f6elemf1) + f0elemf20f6elem.SetMountOptions(f0elemf20f6elemf1) } - if f0elemf19f6iter.Size != nil { - f0elemf19f6elem.SetSize(*f0elemf19f6iter.Size) + if f0elemf20f6iter.Size != nil { + f0elemf20f6elem.SetSize(*f0elemf20f6iter.Size) } - f0elemf19f6 = append(f0elemf19f6, f0elemf19f6elem) + f0elemf20f6 = append(f0elemf20f6, f0elemf20f6elem) } - f0elemf19.SetTmpfs(f0elemf19f6) + f0elemf20.SetTmpfs(f0elemf20f6) } - f0elem.SetLinuxParameters(f0elemf19) + f0elem.SetLinuxParameters(f0elemf20) } if f0iter.LogConfiguration != nil { - f0elemf20 := &svcsdk.LogConfiguration{} + f0elemf21 := &svcsdk.LogConfiguration{} if f0iter.LogConfiguration.LogDriver != nil { - f0elemf20.SetLogDriver(*f0iter.LogConfiguration.LogDriver) + f0elemf21.SetLogDriver(*f0iter.LogConfiguration.LogDriver) } if f0iter.LogConfiguration.Options != nil { - f0elemf20f1 := map[string]*string{} - for f0elemf20f1key, f0elemf20f1valiter := range f0iter.LogConfiguration.Options { - var f0elemf20f1val string - f0elemf20f1val = *f0elemf20f1valiter - f0elemf20f1[f0elemf20f1key] = &f0elemf20f1val + f0elemf21f1 := map[string]*string{} + for f0elemf21f1key, f0elemf21f1valiter := range f0iter.LogConfiguration.Options { + var f0elemf21f1val string + f0elemf21f1val = *f0elemf21f1valiter + f0elemf21f1[f0elemf21f1key] = &f0elemf21f1val } - f0elemf20.SetOptions(f0elemf20f1) + f0elemf21.SetOptions(f0elemf21f1) } if f0iter.LogConfiguration.SecretOptions != nil { - f0elemf20f2 := []*svcsdk.Secret{} - for _, f0elemf20f2iter := range f0iter.LogConfiguration.SecretOptions { - f0elemf20f2elem := &svcsdk.Secret{} - if f0elemf20f2iter.Name != nil { - f0elemf20f2elem.SetName(*f0elemf20f2iter.Name) + f0elemf21f2 
:= []*svcsdk.Secret{} + for _, f0elemf21f2iter := range f0iter.LogConfiguration.SecretOptions { + f0elemf21f2elem := &svcsdk.Secret{} + if f0elemf21f2iter.Name != nil { + f0elemf21f2elem.SetName(*f0elemf21f2iter.Name) } - if f0elemf20f2iter.ValueFrom != nil { - f0elemf20f2elem.SetValueFrom(*f0elemf20f2iter.ValueFrom) + if f0elemf21f2iter.ValueFrom != nil { + f0elemf21f2elem.SetValueFrom(*f0elemf21f2iter.ValueFrom) } - f0elemf20f2 = append(f0elemf20f2, f0elemf20f2elem) + f0elemf21f2 = append(f0elemf21f2, f0elemf21f2elem) } - f0elemf20.SetSecretOptions(f0elemf20f2) + f0elemf21.SetSecretOptions(f0elemf21f2) } - f0elem.SetLogConfiguration(f0elemf20) + f0elem.SetLogConfiguration(f0elemf21) } if f0iter.Memory != nil { f0elem.SetMemory(*f0iter.Memory) @@ -1071,50 +1089,50 @@ func GenerateRegisterTaskDefinitionInput(cr *svcapitypes.TaskDefinition) *svcsdk f0elem.SetMemoryReservation(*f0iter.MemoryReservation) } if f0iter.MountPoints != nil { - f0elemf23 := []*svcsdk.MountPoint{} - for _, f0elemf23iter := range f0iter.MountPoints { - f0elemf23elem := &svcsdk.MountPoint{} - if f0elemf23iter.ContainerPath != nil { - f0elemf23elem.SetContainerPath(*f0elemf23iter.ContainerPath) + f0elemf24 := []*svcsdk.MountPoint{} + for _, f0elemf24iter := range f0iter.MountPoints { + f0elemf24elem := &svcsdk.MountPoint{} + if f0elemf24iter.ContainerPath != nil { + f0elemf24elem.SetContainerPath(*f0elemf24iter.ContainerPath) } - if f0elemf23iter.ReadOnly != nil { - f0elemf23elem.SetReadOnly(*f0elemf23iter.ReadOnly) + if f0elemf24iter.ReadOnly != nil { + f0elemf24elem.SetReadOnly(*f0elemf24iter.ReadOnly) } - if f0elemf23iter.SourceVolume != nil { - f0elemf23elem.SetSourceVolume(*f0elemf23iter.SourceVolume) + if f0elemf24iter.SourceVolume != nil { + f0elemf24elem.SetSourceVolume(*f0elemf24iter.SourceVolume) } - f0elemf23 = append(f0elemf23, f0elemf23elem) + f0elemf24 = append(f0elemf24, f0elemf24elem) } - f0elem.SetMountPoints(f0elemf23) + f0elem.SetMountPoints(f0elemf24) } if f0iter.Name != nil { f0elem.SetName(*f0iter.Name) } if f0iter.PortMappings != nil { - f0elemf25 := []*svcsdk.PortMapping{} - for _, f0elemf25iter := range f0iter.PortMappings { - f0elemf25elem := &svcsdk.PortMapping{} - if f0elemf25iter.AppProtocol != nil { - f0elemf25elem.SetAppProtocol(*f0elemf25iter.AppProtocol) + f0elemf26 := []*svcsdk.PortMapping{} + for _, f0elemf26iter := range f0iter.PortMappings { + f0elemf26elem := &svcsdk.PortMapping{} + if f0elemf26iter.AppProtocol != nil { + f0elemf26elem.SetAppProtocol(*f0elemf26iter.AppProtocol) } - if f0elemf25iter.ContainerPort != nil { - f0elemf25elem.SetContainerPort(*f0elemf25iter.ContainerPort) + if f0elemf26iter.ContainerPort != nil { + f0elemf26elem.SetContainerPort(*f0elemf26iter.ContainerPort) } - if f0elemf25iter.ContainerPortRange != nil { - f0elemf25elem.SetContainerPortRange(*f0elemf25iter.ContainerPortRange) + if f0elemf26iter.ContainerPortRange != nil { + f0elemf26elem.SetContainerPortRange(*f0elemf26iter.ContainerPortRange) } - if f0elemf25iter.HostPort != nil { - f0elemf25elem.SetHostPort(*f0elemf25iter.HostPort) + if f0elemf26iter.HostPort != nil { + f0elemf26elem.SetHostPort(*f0elemf26iter.HostPort) } - if f0elemf25iter.Name != nil { - f0elemf25elem.SetName(*f0elemf25iter.Name) + if f0elemf26iter.Name != nil { + f0elemf26elem.SetName(*f0elemf26iter.Name) } - if f0elemf25iter.Protocol != nil { - f0elemf25elem.SetProtocol(*f0elemf25iter.Protocol) + if f0elemf26iter.Protocol != nil { + f0elemf26elem.SetProtocol(*f0elemf26iter.Protocol) } - f0elemf25 = append(f0elemf25, 
f0elemf25elem) + f0elemf26 = append(f0elemf26, f0elemf26elem) } - f0elem.SetPortMappings(f0elemf25) + f0elem.SetPortMappings(f0elemf26) } if f0iter.Privileged != nil { f0elem.SetPrivileged(*f0iter.Privileged) @@ -1126,39 +1144,39 @@ func GenerateRegisterTaskDefinitionInput(cr *svcapitypes.TaskDefinition) *svcsdk f0elem.SetReadonlyRootFilesystem(*f0iter.ReadonlyRootFilesystem) } if f0iter.RepositoryCredentials != nil { - f0elemf29 := &svcsdk.RepositoryCredentials{} + f0elemf30 := &svcsdk.RepositoryCredentials{} if f0iter.RepositoryCredentials.CredentialsParameter != nil { - f0elemf29.SetCredentialsParameter(*f0iter.RepositoryCredentials.CredentialsParameter) + f0elemf30.SetCredentialsParameter(*f0iter.RepositoryCredentials.CredentialsParameter) } - f0elem.SetRepositoryCredentials(f0elemf29) + f0elem.SetRepositoryCredentials(f0elemf30) } if f0iter.ResourceRequirements != nil { - f0elemf30 := []*svcsdk.ResourceRequirement{} - for _, f0elemf30iter := range f0iter.ResourceRequirements { - f0elemf30elem := &svcsdk.ResourceRequirement{} - if f0elemf30iter.Type != nil { - f0elemf30elem.SetType(*f0elemf30iter.Type) + f0elemf31 := []*svcsdk.ResourceRequirement{} + for _, f0elemf31iter := range f0iter.ResourceRequirements { + f0elemf31elem := &svcsdk.ResourceRequirement{} + if f0elemf31iter.Type != nil { + f0elemf31elem.SetType(*f0elemf31iter.Type) } - if f0elemf30iter.Value != nil { - f0elemf30elem.SetValue(*f0elemf30iter.Value) + if f0elemf31iter.Value != nil { + f0elemf31elem.SetValue(*f0elemf31iter.Value) } - f0elemf30 = append(f0elemf30, f0elemf30elem) + f0elemf31 = append(f0elemf31, f0elemf31elem) } - f0elem.SetResourceRequirements(f0elemf30) + f0elem.SetResourceRequirements(f0elemf31) } if f0iter.Secrets != nil { - f0elemf31 := []*svcsdk.Secret{} - for _, f0elemf31iter := range f0iter.Secrets { - f0elemf31elem := &svcsdk.Secret{} - if f0elemf31iter.Name != nil { - f0elemf31elem.SetName(*f0elemf31iter.Name) + f0elemf32 := []*svcsdk.Secret{} + for _, f0elemf32iter := range f0iter.Secrets { + f0elemf32elem := &svcsdk.Secret{} + if f0elemf32iter.Name != nil { + f0elemf32elem.SetName(*f0elemf32iter.Name) } - if f0elemf31iter.ValueFrom != nil { - f0elemf31elem.SetValueFrom(*f0elemf31iter.ValueFrom) + if f0elemf32iter.ValueFrom != nil { + f0elemf32elem.SetValueFrom(*f0elemf32iter.ValueFrom) } - f0elemf31 = append(f0elemf31, f0elemf31elem) + f0elemf32 = append(f0elemf32, f0elemf32elem) } - f0elem.SetSecrets(f0elemf31) + f0elem.SetSecrets(f0elemf32) } if f0iter.StartTimeout != nil { f0elem.SetStartTimeout(*f0iter.StartTimeout) @@ -1167,52 +1185,52 @@ func GenerateRegisterTaskDefinitionInput(cr *svcapitypes.TaskDefinition) *svcsdk f0elem.SetStopTimeout(*f0iter.StopTimeout) } if f0iter.SystemControls != nil { - f0elemf34 := []*svcsdk.SystemControl{} - for _, f0elemf34iter := range f0iter.SystemControls { - f0elemf34elem := &svcsdk.SystemControl{} - if f0elemf34iter.Namespace != nil { - f0elemf34elem.SetNamespace(*f0elemf34iter.Namespace) + f0elemf35 := []*svcsdk.SystemControl{} + for _, f0elemf35iter := range f0iter.SystemControls { + f0elemf35elem := &svcsdk.SystemControl{} + if f0elemf35iter.Namespace != nil { + f0elemf35elem.SetNamespace(*f0elemf35iter.Namespace) } - if f0elemf34iter.Value != nil { - f0elemf34elem.SetValue(*f0elemf34iter.Value) + if f0elemf35iter.Value != nil { + f0elemf35elem.SetValue(*f0elemf35iter.Value) } - f0elemf34 = append(f0elemf34, f0elemf34elem) + f0elemf35 = append(f0elemf35, f0elemf35elem) } - f0elem.SetSystemControls(f0elemf34) + f0elem.SetSystemControls(f0elemf35) } if 
f0iter.Ulimits != nil { - f0elemf35 := []*svcsdk.Ulimit{} - for _, f0elemf35iter := range f0iter.Ulimits { - f0elemf35elem := &svcsdk.Ulimit{} - if f0elemf35iter.HardLimit != nil { - f0elemf35elem.SetHardLimit(*f0elemf35iter.HardLimit) + f0elemf36 := []*svcsdk.Ulimit{} + for _, f0elemf36iter := range f0iter.Ulimits { + f0elemf36elem := &svcsdk.Ulimit{} + if f0elemf36iter.HardLimit != nil { + f0elemf36elem.SetHardLimit(*f0elemf36iter.HardLimit) } - if f0elemf35iter.Name != nil { - f0elemf35elem.SetName(*f0elemf35iter.Name) + if f0elemf36iter.Name != nil { + f0elemf36elem.SetName(*f0elemf36iter.Name) } - if f0elemf35iter.SoftLimit != nil { - f0elemf35elem.SetSoftLimit(*f0elemf35iter.SoftLimit) + if f0elemf36iter.SoftLimit != nil { + f0elemf36elem.SetSoftLimit(*f0elemf36iter.SoftLimit) } - f0elemf35 = append(f0elemf35, f0elemf35elem) + f0elemf36 = append(f0elemf36, f0elemf36elem) } - f0elem.SetUlimits(f0elemf35) + f0elem.SetUlimits(f0elemf36) } if f0iter.User != nil { f0elem.SetUser(*f0iter.User) } if f0iter.VolumesFrom != nil { - f0elemf37 := []*svcsdk.VolumeFrom{} - for _, f0elemf37iter := range f0iter.VolumesFrom { - f0elemf37elem := &svcsdk.VolumeFrom{} - if f0elemf37iter.ReadOnly != nil { - f0elemf37elem.SetReadOnly(*f0elemf37iter.ReadOnly) + f0elemf38 := []*svcsdk.VolumeFrom{} + for _, f0elemf38iter := range f0iter.VolumesFrom { + f0elemf38elem := &svcsdk.VolumeFrom{} + if f0elemf38iter.ReadOnly != nil { + f0elemf38elem.SetReadOnly(*f0elemf38iter.ReadOnly) } - if f0elemf37iter.SourceContainer != nil { - f0elemf37elem.SetSourceContainer(*f0elemf37iter.SourceContainer) + if f0elemf38iter.SourceContainer != nil { + f0elemf38elem.SetSourceContainer(*f0elemf38iter.SourceContainer) } - f0elemf37 = append(f0elemf37, f0elemf37elem) + f0elemf38 = append(f0elemf38, f0elemf38elem) } - f0elem.SetVolumesFrom(f0elemf37) + f0elem.SetVolumesFrom(f0elemf38) } if f0iter.WorkingDirectory != nil { f0elem.SetWorkingDirectory(*f0iter.WorkingDirectory) diff --git a/pkg/controller/elbv2/loadbalancer/zz_controller.go b/pkg/controller/elbv2/loadbalancer/zz_controller.go index 648c0095df..83c3d3fcdc 100644 --- a/pkg/controller/elbv2/loadbalancer/zz_controller.go +++ b/pkg/controller/elbv2/loadbalancer/zz_controller.go @@ -181,6 +181,11 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E } else { cr.Status.AtProvider.DNSName = nil } + if elem.EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic != nil { + cr.Status.AtProvider.EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic = elem.EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic + } else { + cr.Status.AtProvider.EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic = nil + } if elem.IpAddressType != nil { cr.Spec.ForProvider.IPAddressType = elem.IpAddressType } else { @@ -202,25 +207,25 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E cr.Spec.ForProvider.Scheme = nil } if elem.SecurityGroups != nil { - f9 := []*string{} - for _, f9iter := range elem.SecurityGroups { - var f9elem string - f9elem = *f9iter - f9 = append(f9, &f9elem) + f10 := []*string{} + for _, f10iter := range elem.SecurityGroups { + var f10elem string + f10elem = *f10iter + f10 = append(f10, &f10elem) } - cr.Spec.ForProvider.SecurityGroups = f9 + cr.Spec.ForProvider.SecurityGroups = f10 } else { cr.Spec.ForProvider.SecurityGroups = nil } if elem.State != nil { - f10 := &svcapitypes.LoadBalancerState{} + f11 := &svcapitypes.LoadBalancerState{} if elem.State.Code != nil { - f10.Code = elem.State.Code + f11.Code = 
elem.State.Code } if elem.State.Reason != nil { - f10.Reason = elem.State.Reason + f11.Reason = elem.State.Reason } - cr.Status.AtProvider.State = f10 + cr.Status.AtProvider.State = f11 } else { cr.Status.AtProvider.State = nil } diff --git a/pkg/controller/elbv2/loadbalancer/zz_conversions.go b/pkg/controller/elbv2/loadbalancer/zz_conversions.go index 0631e3f2d0..b8de8c5d78 100644 --- a/pkg/controller/elbv2/loadbalancer/zz_conversions.go +++ b/pkg/controller/elbv2/loadbalancer/zz_conversions.go @@ -108,6 +108,11 @@ func GenerateLoadBalancer(resp *svcsdk.DescribeLoadBalancersOutput) *svcapitypes } else { cr.Status.AtProvider.DNSName = nil } + if elem.EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic != nil { + cr.Status.AtProvider.EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic = elem.EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic + } else { + cr.Status.AtProvider.EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic = nil + } if elem.IpAddressType != nil { cr.Spec.ForProvider.IPAddressType = elem.IpAddressType } else { @@ -129,25 +134,25 @@ func GenerateLoadBalancer(resp *svcsdk.DescribeLoadBalancersOutput) *svcapitypes cr.Spec.ForProvider.Scheme = nil } if elem.SecurityGroups != nil { - f9 := []*string{} - for _, f9iter := range elem.SecurityGroups { - var f9elem string - f9elem = *f9iter - f9 = append(f9, &f9elem) + f10 := []*string{} + for _, f10iter := range elem.SecurityGroups { + var f10elem string + f10elem = *f10iter + f10 = append(f10, &f10elem) } - cr.Spec.ForProvider.SecurityGroups = f9 + cr.Spec.ForProvider.SecurityGroups = f10 } else { cr.Spec.ForProvider.SecurityGroups = nil } if elem.State != nil { - f10 := &svcapitypes.LoadBalancerState{} + f11 := &svcapitypes.LoadBalancerState{} if elem.State.Code != nil { - f10.Code = elem.State.Code + f11.Code = elem.State.Code } if elem.State.Reason != nil { - f10.Reason = elem.State.Reason + f11.Reason = elem.State.Reason } - cr.Status.AtProvider.State = f10 + cr.Status.AtProvider.State = f11 } else { cr.Status.AtProvider.State = nil } diff --git a/pkg/controller/emrcontainers/jobrun/zz_conversions.go b/pkg/controller/emrcontainers/jobrun/zz_conversions.go index 0bf09d331b..296f59a349 100644 --- a/pkg/controller/emrcontainers/jobrun/zz_conversions.go +++ b/pkg/controller/emrcontainers/jobrun/zz_conversions.go @@ -112,6 +112,15 @@ func GenerateJobRun(resp *svcsdk.DescribeJobRunOutput) *svcapitypes.JobRun { } else { cr.Spec.ForProvider.ReleaseLabel = nil } + if resp.JobRun.RetryPolicyConfiguration != nil { + f11 := &svcapitypes.RetryPolicyConfiguration{} + if resp.JobRun.RetryPolicyConfiguration.MaxAttempts != nil { + f11.MaxAttempts = resp.JobRun.RetryPolicyConfiguration.MaxAttempts + } + cr.Spec.ForProvider.RetryPolicyConfiguration = f11 + } else { + cr.Spec.ForProvider.RetryPolicyConfiguration = nil + } if resp.JobRun.State != nil { cr.Status.AtProvider.State = resp.JobRun.State } else { @@ -123,13 +132,13 @@ func GenerateJobRun(resp *svcsdk.DescribeJobRunOutput) *svcapitypes.JobRun { cr.Status.AtProvider.StateDetails = nil } if resp.JobRun.Tags != nil { - f13 := map[string]*string{} - for f13key, f13valiter := range resp.JobRun.Tags { - var f13val string - f13val = *f13valiter - f13[f13key] = &f13val + f15 := map[string]*string{} + for f15key, f15valiter := range resp.JobRun.Tags { + var f15val string + f15val = *f15valiter + f15[f15key] = &f15val } - cr.Spec.ForProvider.Tags = f13 + cr.Spec.ForProvider.Tags = f15 } else { cr.Spec.ForProvider.Tags = nil } @@ -197,14 +206,21 @@ func GenerateStartJobRunInput(cr 
*svcapitypes.JobRun) *svcsdk.StartJobRunInput { if cr.Spec.ForProvider.ReleaseLabel != nil { res.SetReleaseLabel(*cr.Spec.ForProvider.ReleaseLabel) } + if cr.Spec.ForProvider.RetryPolicyConfiguration != nil { + f5 := &svcsdk.RetryPolicyConfiguration{} + if cr.Spec.ForProvider.RetryPolicyConfiguration.MaxAttempts != nil { + f5.SetMaxAttempts(*cr.Spec.ForProvider.RetryPolicyConfiguration.MaxAttempts) + } + res.SetRetryPolicyConfiguration(f5) + } if cr.Spec.ForProvider.Tags != nil { - f5 := map[string]*string{} - for f5key, f5valiter := range cr.Spec.ForProvider.Tags { - var f5val string - f5val = *f5valiter - f5[f5key] = &f5val + f6 := map[string]*string{} + for f6key, f6valiter := range cr.Spec.ForProvider.Tags { + var f6val string + f6val = *f6valiter + f6[f6key] = &f6val } - res.SetTags(f5) + res.SetTags(f6) } return res diff --git a/pkg/controller/globalaccelerator/accelerator/setup.go b/pkg/controller/globalaccelerator/accelerator/setup.go index ac1f6ab04a..c36b117334 100644 --- a/pkg/controller/globalaccelerator/accelerator/setup.go +++ b/pkg/controller/globalaccelerator/accelerator/setup.go @@ -13,7 +13,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/meta" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" svcapitypes "github.com/crossplane-contrib/provider-aws/apis/globalaccelerator/v1alpha1" @@ -90,7 +90,7 @@ func (d gaClient) preDelete(ctx context.Context, cr *svcapitypes.Accelerator, ob return false, err } - if pointer.BoolDeref(descResp.Accelerator.Enabled, true) && pointer.StringDeref(descResp.Accelerator.Status, "") != svcsdk.AcceleratorStatusInProgress { + if ptr.Deref(descResp.Accelerator.Enabled, true) && ptr.Deref(descResp.Accelerator.Status, "") != svcsdk.AcceleratorStatusInProgress { enabled := false updReq := &svcsdk.UpdateAcceleratorInput{ Enabled: &enabled, @@ -135,11 +135,11 @@ func postCreate(_ context.Context, cr *svcapitypes.Accelerator, resp *svcsdk.Cre } func isUpToDate(_ context.Context, cr *svcapitypes.Accelerator, resp *svcsdk.DescribeAcceleratorOutput) (bool, string, error) { - if pointer.BoolDeref(cr.Spec.ForProvider.Enabled, false) != pointer.BoolDeref(resp.Accelerator.Enabled, false) { + if ptr.Deref(cr.Spec.ForProvider.Enabled, false) != ptr.Deref(resp.Accelerator.Enabled, false) { return false, "", nil } - if pointer.StringDeref(cr.Spec.ForProvider.Name, "") != pointer.StringDeref(resp.Accelerator.Name, "") { + if ptr.Deref(cr.Spec.ForProvider.Name, "") != ptr.Deref(resp.Accelerator.Name, "") { return false, "", nil } diff --git a/pkg/controller/globalaccelerator/endpointgroup/setup.go b/pkg/controller/globalaccelerator/endpointgroup/setup.go index d87f71a62b..995b2dc817 100644 --- a/pkg/controller/globalaccelerator/endpointgroup/setup.go +++ b/pkg/controller/globalaccelerator/endpointgroup/setup.go @@ -13,7 +13,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/meta" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" svcapitypes "github.com/crossplane-contrib/provider-aws/apis/globalaccelerator/v1alpha1" @@ -63,7 +63,7 @@ func preObserve(ctx context.Context, cr *svcapitypes.EndpointGroup, obj *svcsdk. 
} func preCreate(_ context.Context, cr *svcapitypes.EndpointGroup, obj *svcsdk.CreateEndpointGroupInput) error { - obj.ListenerArn = aws.String(pointer.StringDeref(cr.Spec.ForProvider.CustomEndpointGroupParameters.ListenerARN, "")) + obj.ListenerArn = aws.String(ptr.Deref(cr.Spec.ForProvider.CustomEndpointGroupParameters.ListenerARN, "")) obj.IdempotencyToken = aws.String(string(cr.UID)) return nil } diff --git a/pkg/controller/globalaccelerator/listener/setup.go b/pkg/controller/globalaccelerator/listener/setup.go index e8a575569f..6389c8c1b3 100644 --- a/pkg/controller/globalaccelerator/listener/setup.go +++ b/pkg/controller/globalaccelerator/listener/setup.go @@ -6,6 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" svcsdk "github.com/aws/aws-sdk-go/service/globalaccelerator" + "k8s.io/utils/ptr" xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" "github.com/crossplane/crossplane-runtime/pkg/connection" @@ -14,7 +15,6 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/meta" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - "k8s.io/utils/pointer" ctrl "sigs.k8s.io/controller-runtime" svcapitypes "github.com/crossplane-contrib/provider-aws/apis/globalaccelerator/v1alpha1" @@ -62,7 +62,7 @@ func preObserve(ctx context.Context, cr *svcapitypes.Listener, obj *svcsdk.Descr } func preCreate(_ context.Context, cr *svcapitypes.Listener, obj *svcsdk.CreateListenerInput) error { - obj.AcceleratorArn = aws.String(pointer.StringDeref(cr.Spec.ForProvider.CustomListenerParameters.AcceleratorArn, "")) + obj.AcceleratorArn = aws.String(ptr.Deref(cr.Spec.ForProvider.CustomListenerParameters.AcceleratorArn, "")) obj.IdempotencyToken = aws.String(string(cr.UID)) return nil } @@ -119,11 +119,11 @@ func isUpToDate(_ context.Context, cr *svcapitypes.Listener, resp *svcsdk.Descri } } - if pointer.StringDeref(cr.Spec.ForProvider.ClientAffinity, "") != *resp.Listener.ClientAffinity { + if ptr.Deref(cr.Spec.ForProvider.ClientAffinity, "") != *resp.Listener.ClientAffinity { return false, "", nil } - if pointer.StringDeref(cr.Spec.ForProvider.Protocol, "") != *resp.Listener.Protocol { + if ptr.Deref(cr.Spec.ForProvider.Protocol, "") != *resp.Listener.Protocol { return false, "", nil } diff --git a/pkg/controller/glue/job/zz_conversions.go b/pkg/controller/glue/job/zz_conversions.go index 610b6de5ec..c86564505a 100644 --- a/pkg/controller/glue/job/zz_conversions.go +++ b/pkg/controller/glue/job/zz_conversions.go @@ -99,2105 +99,3485 @@ func GenerateJob(resp *svcsdk.GetJobOutput) *svcapitypes.Job { } f1val.Aggregate = f1valf0 } + if f1valiter.AmazonRedshiftSource != nil { + f1valf1 := &svcapitypes.AmazonRedshiftSource{} + if f1valiter.AmazonRedshiftSource.Data != nil { + f1valf1f0 := &svcapitypes.AmazonRedshiftNodeData{} + if f1valiter.AmazonRedshiftSource.Data.AccessType != nil { + f1valf1f0.AccessType = f1valiter.AmazonRedshiftSource.Data.AccessType + } + if f1valiter.AmazonRedshiftSource.Data.Action != nil { + f1valf1f0.Action = f1valiter.AmazonRedshiftSource.Data.Action + } + if f1valiter.AmazonRedshiftSource.Data.AdvancedOptions != nil { + f1valf1f0f2 := []*svcapitypes.AmazonRedshiftAdvancedOption{} + for _, f1valf1f0f2iter := range f1valiter.AmazonRedshiftSource.Data.AdvancedOptions { + f1valf1f0f2elem := &svcapitypes.AmazonRedshiftAdvancedOption{} + if f1valf1f0f2iter.Key != nil { + f1valf1f0f2elem.Key = f1valf1f0f2iter.Key + } + if f1valf1f0f2iter.Value != nil { + f1valf1f0f2elem.Value = f1valf1f0f2iter.Value + } + 
f1valf1f0f2 = append(f1valf1f0f2, f1valf1f0f2elem) + } + f1valf1f0.AdvancedOptions = f1valf1f0f2 + } + if f1valiter.AmazonRedshiftSource.Data.CatalogDatabase != nil { + f1valf1f0f3 := &svcapitypes.Option{} + if f1valiter.AmazonRedshiftSource.Data.CatalogDatabase.Description != nil { + f1valf1f0f3.Description = f1valiter.AmazonRedshiftSource.Data.CatalogDatabase.Description + } + if f1valiter.AmazonRedshiftSource.Data.CatalogDatabase.Label != nil { + f1valf1f0f3.Label = f1valiter.AmazonRedshiftSource.Data.CatalogDatabase.Label + } + if f1valiter.AmazonRedshiftSource.Data.CatalogDatabase.Value != nil { + f1valf1f0f3.Value = f1valiter.AmazonRedshiftSource.Data.CatalogDatabase.Value + } + f1valf1f0.CatalogDatabase = f1valf1f0f3 + } + if f1valiter.AmazonRedshiftSource.Data.CatalogRedshiftSchema != nil { + f1valf1f0.CatalogRedshiftSchema = f1valiter.AmazonRedshiftSource.Data.CatalogRedshiftSchema + } + if f1valiter.AmazonRedshiftSource.Data.CatalogRedshiftTable != nil { + f1valf1f0.CatalogRedshiftTable = f1valiter.AmazonRedshiftSource.Data.CatalogRedshiftTable + } + if f1valiter.AmazonRedshiftSource.Data.CatalogTable != nil { + f1valf1f0f6 := &svcapitypes.Option{} + if f1valiter.AmazonRedshiftSource.Data.CatalogTable.Description != nil { + f1valf1f0f6.Description = f1valiter.AmazonRedshiftSource.Data.CatalogTable.Description + } + if f1valiter.AmazonRedshiftSource.Data.CatalogTable.Label != nil { + f1valf1f0f6.Label = f1valiter.AmazonRedshiftSource.Data.CatalogTable.Label + } + if f1valiter.AmazonRedshiftSource.Data.CatalogTable.Value != nil { + f1valf1f0f6.Value = f1valiter.AmazonRedshiftSource.Data.CatalogTable.Value + } + f1valf1f0.CatalogTable = f1valf1f0f6 + } + if f1valiter.AmazonRedshiftSource.Data.Connection != nil { + f1valf1f0f7 := &svcapitypes.Option{} + if f1valiter.AmazonRedshiftSource.Data.Connection.Description != nil { + f1valf1f0f7.Description = f1valiter.AmazonRedshiftSource.Data.Connection.Description + } + if f1valiter.AmazonRedshiftSource.Data.Connection.Label != nil { + f1valf1f0f7.Label = f1valiter.AmazonRedshiftSource.Data.Connection.Label + } + if f1valiter.AmazonRedshiftSource.Data.Connection.Value != nil { + f1valf1f0f7.Value = f1valiter.AmazonRedshiftSource.Data.Connection.Value + } + f1valf1f0.Connection = f1valf1f0f7 + } + if f1valiter.AmazonRedshiftSource.Data.CrawlerConnection != nil { + f1valf1f0.CrawlerConnection = f1valiter.AmazonRedshiftSource.Data.CrawlerConnection + } + if f1valiter.AmazonRedshiftSource.Data.IamRole != nil { + f1valf1f0f9 := &svcapitypes.Option{} + if f1valiter.AmazonRedshiftSource.Data.IamRole.Description != nil { + f1valf1f0f9.Description = f1valiter.AmazonRedshiftSource.Data.IamRole.Description + } + if f1valiter.AmazonRedshiftSource.Data.IamRole.Label != nil { + f1valf1f0f9.Label = f1valiter.AmazonRedshiftSource.Data.IamRole.Label + } + if f1valiter.AmazonRedshiftSource.Data.IamRole.Value != nil { + f1valf1f0f9.Value = f1valiter.AmazonRedshiftSource.Data.IamRole.Value + } + f1valf1f0.IAMRole = f1valf1f0f9 + } + if f1valiter.AmazonRedshiftSource.Data.MergeAction != nil { + f1valf1f0.MergeAction = f1valiter.AmazonRedshiftSource.Data.MergeAction + } + if f1valiter.AmazonRedshiftSource.Data.MergeClause != nil { + f1valf1f0.MergeClause = f1valiter.AmazonRedshiftSource.Data.MergeClause + } + if f1valiter.AmazonRedshiftSource.Data.MergeWhenMatched != nil { + f1valf1f0.MergeWhenMatched = f1valiter.AmazonRedshiftSource.Data.MergeWhenMatched + } + if f1valiter.AmazonRedshiftSource.Data.MergeWhenNotMatched != nil { + f1valf1f0.MergeWhenNotMatched = 
f1valiter.AmazonRedshiftSource.Data.MergeWhenNotMatched + } + if f1valiter.AmazonRedshiftSource.Data.PostAction != nil { + f1valf1f0.PostAction = f1valiter.AmazonRedshiftSource.Data.PostAction + } + if f1valiter.AmazonRedshiftSource.Data.PreAction != nil { + f1valf1f0.PreAction = f1valiter.AmazonRedshiftSource.Data.PreAction + } + if f1valiter.AmazonRedshiftSource.Data.SampleQuery != nil { + f1valf1f0.SampleQuery = f1valiter.AmazonRedshiftSource.Data.SampleQuery + } + if f1valiter.AmazonRedshiftSource.Data.Schema != nil { + f1valf1f0f17 := &svcapitypes.Option{} + if f1valiter.AmazonRedshiftSource.Data.Schema.Description != nil { + f1valf1f0f17.Description = f1valiter.AmazonRedshiftSource.Data.Schema.Description + } + if f1valiter.AmazonRedshiftSource.Data.Schema.Label != nil { + f1valf1f0f17.Label = f1valiter.AmazonRedshiftSource.Data.Schema.Label + } + if f1valiter.AmazonRedshiftSource.Data.Schema.Value != nil { + f1valf1f0f17.Value = f1valiter.AmazonRedshiftSource.Data.Schema.Value + } + f1valf1f0.Schema = f1valf1f0f17 + } + if f1valiter.AmazonRedshiftSource.Data.SelectedColumns != nil { + f1valf1f0f18 := []*svcapitypes.Option{} + for _, f1valf1f0f18iter := range f1valiter.AmazonRedshiftSource.Data.SelectedColumns { + f1valf1f0f18elem := &svcapitypes.Option{} + if f1valf1f0f18iter.Description != nil { + f1valf1f0f18elem.Description = f1valf1f0f18iter.Description + } + if f1valf1f0f18iter.Label != nil { + f1valf1f0f18elem.Label = f1valf1f0f18iter.Label + } + if f1valf1f0f18iter.Value != nil { + f1valf1f0f18elem.Value = f1valf1f0f18iter.Value + } + f1valf1f0f18 = append(f1valf1f0f18, f1valf1f0f18elem) + } + f1valf1f0.SelectedColumns = f1valf1f0f18 + } + if f1valiter.AmazonRedshiftSource.Data.SourceType != nil { + f1valf1f0.SourceType = f1valiter.AmazonRedshiftSource.Data.SourceType + } + if f1valiter.AmazonRedshiftSource.Data.StagingTable != nil { + f1valf1f0.StagingTable = f1valiter.AmazonRedshiftSource.Data.StagingTable + } + if f1valiter.AmazonRedshiftSource.Data.Table != nil { + f1valf1f0f21 := &svcapitypes.Option{} + if f1valiter.AmazonRedshiftSource.Data.Table.Description != nil { + f1valf1f0f21.Description = f1valiter.AmazonRedshiftSource.Data.Table.Description + } + if f1valiter.AmazonRedshiftSource.Data.Table.Label != nil { + f1valf1f0f21.Label = f1valiter.AmazonRedshiftSource.Data.Table.Label + } + if f1valiter.AmazonRedshiftSource.Data.Table.Value != nil { + f1valf1f0f21.Value = f1valiter.AmazonRedshiftSource.Data.Table.Value + } + f1valf1f0.Table = f1valf1f0f21 + } + if f1valiter.AmazonRedshiftSource.Data.TablePrefix != nil { + f1valf1f0.TablePrefix = f1valiter.AmazonRedshiftSource.Data.TablePrefix + } + if f1valiter.AmazonRedshiftSource.Data.TableSchema != nil { + f1valf1f0f23 := []*svcapitypes.Option{} + for _, f1valf1f0f23iter := range f1valiter.AmazonRedshiftSource.Data.TableSchema { + f1valf1f0f23elem := &svcapitypes.Option{} + if f1valf1f0f23iter.Description != nil { + f1valf1f0f23elem.Description = f1valf1f0f23iter.Description + } + if f1valf1f0f23iter.Label != nil { + f1valf1f0f23elem.Label = f1valf1f0f23iter.Label + } + if f1valf1f0f23iter.Value != nil { + f1valf1f0f23elem.Value = f1valf1f0f23iter.Value + } + f1valf1f0f23 = append(f1valf1f0f23, f1valf1f0f23elem) + } + f1valf1f0.TableSchema = f1valf1f0f23 + } + if f1valiter.AmazonRedshiftSource.Data.TempDir != nil { + f1valf1f0.TempDir = f1valiter.AmazonRedshiftSource.Data.TempDir + } + if f1valiter.AmazonRedshiftSource.Data.Upsert != nil { + f1valf1f0.Upsert = f1valiter.AmazonRedshiftSource.Data.Upsert + } + 
f1valf1.Data = f1valf1f0 + } + if f1valiter.AmazonRedshiftSource.Name != nil { + f1valf1.Name = f1valiter.AmazonRedshiftSource.Name + } + f1val.AmazonRedshiftSource = f1valf1 + } + if f1valiter.AmazonRedshiftTarget != nil { + f1valf2 := &svcapitypes.AmazonRedshiftTarget{} + if f1valiter.AmazonRedshiftTarget.Data != nil { + f1valf2f0 := &svcapitypes.AmazonRedshiftNodeData{} + if f1valiter.AmazonRedshiftTarget.Data.AccessType != nil { + f1valf2f0.AccessType = f1valiter.AmazonRedshiftTarget.Data.AccessType + } + if f1valiter.AmazonRedshiftTarget.Data.Action != nil { + f1valf2f0.Action = f1valiter.AmazonRedshiftTarget.Data.Action + } + if f1valiter.AmazonRedshiftTarget.Data.AdvancedOptions != nil { + f1valf2f0f2 := []*svcapitypes.AmazonRedshiftAdvancedOption{} + for _, f1valf2f0f2iter := range f1valiter.AmazonRedshiftTarget.Data.AdvancedOptions { + f1valf2f0f2elem := &svcapitypes.AmazonRedshiftAdvancedOption{} + if f1valf2f0f2iter.Key != nil { + f1valf2f0f2elem.Key = f1valf2f0f2iter.Key + } + if f1valf2f0f2iter.Value != nil { + f1valf2f0f2elem.Value = f1valf2f0f2iter.Value + } + f1valf2f0f2 = append(f1valf2f0f2, f1valf2f0f2elem) + } + f1valf2f0.AdvancedOptions = f1valf2f0f2 + } + if f1valiter.AmazonRedshiftTarget.Data.CatalogDatabase != nil { + f1valf2f0f3 := &svcapitypes.Option{} + if f1valiter.AmazonRedshiftTarget.Data.CatalogDatabase.Description != nil { + f1valf2f0f3.Description = f1valiter.AmazonRedshiftTarget.Data.CatalogDatabase.Description + } + if f1valiter.AmazonRedshiftTarget.Data.CatalogDatabase.Label != nil { + f1valf2f0f3.Label = f1valiter.AmazonRedshiftTarget.Data.CatalogDatabase.Label + } + if f1valiter.AmazonRedshiftTarget.Data.CatalogDatabase.Value != nil { + f1valf2f0f3.Value = f1valiter.AmazonRedshiftTarget.Data.CatalogDatabase.Value + } + f1valf2f0.CatalogDatabase = f1valf2f0f3 + } + if f1valiter.AmazonRedshiftTarget.Data.CatalogRedshiftSchema != nil { + f1valf2f0.CatalogRedshiftSchema = f1valiter.AmazonRedshiftTarget.Data.CatalogRedshiftSchema + } + if f1valiter.AmazonRedshiftTarget.Data.CatalogRedshiftTable != nil { + f1valf2f0.CatalogRedshiftTable = f1valiter.AmazonRedshiftTarget.Data.CatalogRedshiftTable + } + if f1valiter.AmazonRedshiftTarget.Data.CatalogTable != nil { + f1valf2f0f6 := &svcapitypes.Option{} + if f1valiter.AmazonRedshiftTarget.Data.CatalogTable.Description != nil { + f1valf2f0f6.Description = f1valiter.AmazonRedshiftTarget.Data.CatalogTable.Description + } + if f1valiter.AmazonRedshiftTarget.Data.CatalogTable.Label != nil { + f1valf2f0f6.Label = f1valiter.AmazonRedshiftTarget.Data.CatalogTable.Label + } + if f1valiter.AmazonRedshiftTarget.Data.CatalogTable.Value != nil { + f1valf2f0f6.Value = f1valiter.AmazonRedshiftTarget.Data.CatalogTable.Value + } + f1valf2f0.CatalogTable = f1valf2f0f6 + } + if f1valiter.AmazonRedshiftTarget.Data.Connection != nil { + f1valf2f0f7 := &svcapitypes.Option{} + if f1valiter.AmazonRedshiftTarget.Data.Connection.Description != nil { + f1valf2f0f7.Description = f1valiter.AmazonRedshiftTarget.Data.Connection.Description + } + if f1valiter.AmazonRedshiftTarget.Data.Connection.Label != nil { + f1valf2f0f7.Label = f1valiter.AmazonRedshiftTarget.Data.Connection.Label + } + if f1valiter.AmazonRedshiftTarget.Data.Connection.Value != nil { + f1valf2f0f7.Value = f1valiter.AmazonRedshiftTarget.Data.Connection.Value + } + f1valf2f0.Connection = f1valf2f0f7 + } + if f1valiter.AmazonRedshiftTarget.Data.CrawlerConnection != nil { + f1valf2f0.CrawlerConnection = f1valiter.AmazonRedshiftTarget.Data.CrawlerConnection + } + if 
f1valiter.AmazonRedshiftTarget.Data.IamRole != nil { + f1valf2f0f9 := &svcapitypes.Option{} + if f1valiter.AmazonRedshiftTarget.Data.IamRole.Description != nil { + f1valf2f0f9.Description = f1valiter.AmazonRedshiftTarget.Data.IamRole.Description + } + if f1valiter.AmazonRedshiftTarget.Data.IamRole.Label != nil { + f1valf2f0f9.Label = f1valiter.AmazonRedshiftTarget.Data.IamRole.Label + } + if f1valiter.AmazonRedshiftTarget.Data.IamRole.Value != nil { + f1valf2f0f9.Value = f1valiter.AmazonRedshiftTarget.Data.IamRole.Value + } + f1valf2f0.IAMRole = f1valf2f0f9 + } + if f1valiter.AmazonRedshiftTarget.Data.MergeAction != nil { + f1valf2f0.MergeAction = f1valiter.AmazonRedshiftTarget.Data.MergeAction + } + if f1valiter.AmazonRedshiftTarget.Data.MergeClause != nil { + f1valf2f0.MergeClause = f1valiter.AmazonRedshiftTarget.Data.MergeClause + } + if f1valiter.AmazonRedshiftTarget.Data.MergeWhenMatched != nil { + f1valf2f0.MergeWhenMatched = f1valiter.AmazonRedshiftTarget.Data.MergeWhenMatched + } + if f1valiter.AmazonRedshiftTarget.Data.MergeWhenNotMatched != nil { + f1valf2f0.MergeWhenNotMatched = f1valiter.AmazonRedshiftTarget.Data.MergeWhenNotMatched + } + if f1valiter.AmazonRedshiftTarget.Data.PostAction != nil { + f1valf2f0.PostAction = f1valiter.AmazonRedshiftTarget.Data.PostAction + } + if f1valiter.AmazonRedshiftTarget.Data.PreAction != nil { + f1valf2f0.PreAction = f1valiter.AmazonRedshiftTarget.Data.PreAction + } + if f1valiter.AmazonRedshiftTarget.Data.SampleQuery != nil { + f1valf2f0.SampleQuery = f1valiter.AmazonRedshiftTarget.Data.SampleQuery + } + if f1valiter.AmazonRedshiftTarget.Data.Schema != nil { + f1valf2f0f17 := &svcapitypes.Option{} + if f1valiter.AmazonRedshiftTarget.Data.Schema.Description != nil { + f1valf2f0f17.Description = f1valiter.AmazonRedshiftTarget.Data.Schema.Description + } + if f1valiter.AmazonRedshiftTarget.Data.Schema.Label != nil { + f1valf2f0f17.Label = f1valiter.AmazonRedshiftTarget.Data.Schema.Label + } + if f1valiter.AmazonRedshiftTarget.Data.Schema.Value != nil { + f1valf2f0f17.Value = f1valiter.AmazonRedshiftTarget.Data.Schema.Value + } + f1valf2f0.Schema = f1valf2f0f17 + } + if f1valiter.AmazonRedshiftTarget.Data.SelectedColumns != nil { + f1valf2f0f18 := []*svcapitypes.Option{} + for _, f1valf2f0f18iter := range f1valiter.AmazonRedshiftTarget.Data.SelectedColumns { + f1valf2f0f18elem := &svcapitypes.Option{} + if f1valf2f0f18iter.Description != nil { + f1valf2f0f18elem.Description = f1valf2f0f18iter.Description + } + if f1valf2f0f18iter.Label != nil { + f1valf2f0f18elem.Label = f1valf2f0f18iter.Label + } + if f1valf2f0f18iter.Value != nil { + f1valf2f0f18elem.Value = f1valf2f0f18iter.Value + } + f1valf2f0f18 = append(f1valf2f0f18, f1valf2f0f18elem) + } + f1valf2f0.SelectedColumns = f1valf2f0f18 + } + if f1valiter.AmazonRedshiftTarget.Data.SourceType != nil { + f1valf2f0.SourceType = f1valiter.AmazonRedshiftTarget.Data.SourceType + } + if f1valiter.AmazonRedshiftTarget.Data.StagingTable != nil { + f1valf2f0.StagingTable = f1valiter.AmazonRedshiftTarget.Data.StagingTable + } + if f1valiter.AmazonRedshiftTarget.Data.Table != nil { + f1valf2f0f21 := &svcapitypes.Option{} + if f1valiter.AmazonRedshiftTarget.Data.Table.Description != nil { + f1valf2f0f21.Description = f1valiter.AmazonRedshiftTarget.Data.Table.Description + } + if f1valiter.AmazonRedshiftTarget.Data.Table.Label != nil { + f1valf2f0f21.Label = f1valiter.AmazonRedshiftTarget.Data.Table.Label + } + if f1valiter.AmazonRedshiftTarget.Data.Table.Value != nil { + f1valf2f0f21.Value = 
+						}
+						f1valf2f0.Table = f1valf2f0f21
+					}
+					if f1valiter.AmazonRedshiftTarget.Data.TablePrefix != nil {
+						f1valf2f0.TablePrefix = f1valiter.AmazonRedshiftTarget.Data.TablePrefix
+					}
+					if f1valiter.AmazonRedshiftTarget.Data.TableSchema != nil {
+						f1valf2f0f23 := []*svcapitypes.Option{}
+						for _, f1valf2f0f23iter := range f1valiter.AmazonRedshiftTarget.Data.TableSchema {
+							f1valf2f0f23elem := &svcapitypes.Option{}
+							if f1valf2f0f23iter.Description != nil {
+								f1valf2f0f23elem.Description = f1valf2f0f23iter.Description
+							}
+							if f1valf2f0f23iter.Label != nil {
+								f1valf2f0f23elem.Label = f1valf2f0f23iter.Label
+							}
+							if f1valf2f0f23iter.Value != nil {
+								f1valf2f0f23elem.Value = f1valf2f0f23iter.Value
+							}
+							f1valf2f0f23 = append(f1valf2f0f23, f1valf2f0f23elem)
+						}
+						f1valf2f0.TableSchema = f1valf2f0f23
+					}
+					if f1valiter.AmazonRedshiftTarget.Data.TempDir != nil {
+						f1valf2f0.TempDir = f1valiter.AmazonRedshiftTarget.Data.TempDir
+					}
+					if f1valiter.AmazonRedshiftTarget.Data.Upsert != nil {
+						f1valf2f0.Upsert = f1valiter.AmazonRedshiftTarget.Data.Upsert
+					}
+					f1valf2.Data = f1valf2f0
+				}
+				if f1valiter.AmazonRedshiftTarget.Inputs != nil {
+					f1valf2f1 := []*string{}
+					for _, f1valf2f1iter := range f1valiter.AmazonRedshiftTarget.Inputs {
+						var f1valf2f1elem string
+						f1valf2f1elem = *f1valf2f1iter
+						f1valf2f1 = append(f1valf2f1, &f1valf2f1elem)
+					}
+					f1valf2.Inputs = f1valf2f1
+				}
+				if f1valiter.AmazonRedshiftTarget.Name != nil {
+					f1valf2.Name = f1valiter.AmazonRedshiftTarget.Name
+				}
+				f1val.AmazonRedshiftTarget = f1valf2
+			}
 			if f1valiter.ApplyMapping != nil {
-				f1valf1 := &svcapitypes.ApplyMapping{}
+				f1valf3 := &svcapitypes.ApplyMapping{}
 				if f1valiter.ApplyMapping.Inputs != nil {
-					f1valf1f0 := []*string{}
-					for _, f1valf1f0iter := range f1valiter.ApplyMapping.Inputs {
-						var f1valf1f0elem string
-						f1valf1f0elem = *f1valf1f0iter
-						f1valf1f0 = append(f1valf1f0, &f1valf1f0elem)
+					f1valf3f0 := []*string{}
+					for _, f1valf3f0iter := range f1valiter.ApplyMapping.Inputs {
+						var f1valf3f0elem string
+						f1valf3f0elem = *f1valf3f0iter
+						f1valf3f0 = append(f1valf3f0, &f1valf3f0elem)
 					}
-					f1valf1.Inputs = f1valf1f0
+					f1valf3.Inputs = f1valf3f0
 				}
 				if f1valiter.ApplyMapping.Mapping != nil {
-					f1valf1f1 := []*svcapitypes.Mapping{}
-					for _, f1valf1f1iter := range f1valiter.ApplyMapping.Mapping {
-						f1valf1f1elem := &svcapitypes.Mapping{}
-						if f1valf1f1iter.Dropped != nil {
-							f1valf1f1elem.Dropped = f1valf1f1iter.Dropped
-						}
-						if f1valf1f1iter.FromPath != nil {
-							f1valf1f1elemf1 := []*string{}
-							for _, f1valf1f1elemf1iter := range f1valf1f1iter.FromPath {
-								var f1valf1f1elemf1elem string
-								f1valf1f1elemf1elem = *f1valf1f1elemf1iter
-								f1valf1f1elemf1 = append(f1valf1f1elemf1, &f1valf1f1elemf1elem)
+					f1valf3f1 := []*svcapitypes.Mapping{}
+					for _, f1valf3f1iter := range f1valiter.ApplyMapping.Mapping {
+						f1valf3f1elem := &svcapitypes.Mapping{}
+						if f1valf3f1iter.Dropped != nil {
+							f1valf3f1elem.Dropped = f1valf3f1iter.Dropped
+						}
+						if f1valf3f1iter.FromPath != nil {
+							f1valf3f1elemf1 := []*string{}
+							for _, f1valf3f1elemf1iter := range f1valf3f1iter.FromPath {
+								var f1valf3f1elemf1elem string
+								f1valf3f1elemf1elem = *f1valf3f1elemf1iter
+								f1valf3f1elemf1 = append(f1valf3f1elemf1, &f1valf3f1elemf1elem)
 							}
-							f1valf1f1elem.FromPath = f1valf1f1elemf1
+							f1valf3f1elem.FromPath = f1valf3f1elemf1
 						}
-						if f1valf1f1iter.FromType != nil {
-							f1valf1f1elem.FromType = f1valf1f1iter.FromType
+						if f1valf3f1iter.FromType != nil {
+							f1valf3f1elem.FromType = f1valf3f1iter.FromType
 						}
-						if
f1valf1f1iter.ToKey != nil { - f1valf1f1elem.ToKey = f1valf1f1iter.ToKey + if f1valf3f1iter.ToKey != nil { + f1valf3f1elem.ToKey = f1valf3f1iter.ToKey } - if f1valf1f1iter.ToType != nil { - f1valf1f1elem.ToType = f1valf1f1iter.ToType + if f1valf3f1iter.ToType != nil { + f1valf3f1elem.ToType = f1valf3f1iter.ToType } - f1valf1f1 = append(f1valf1f1, f1valf1f1elem) + f1valf3f1 = append(f1valf3f1, f1valf3f1elem) } - f1valf1.Mapping = f1valf1f1 + f1valf3.Mapping = f1valf3f1 } if f1valiter.ApplyMapping.Name != nil { - f1valf1.Name = f1valiter.ApplyMapping.Name + f1valf3.Name = f1valiter.ApplyMapping.Name } - f1val.ApplyMapping = f1valf1 + f1val.ApplyMapping = f1valf3 } if f1valiter.AthenaConnectorSource != nil { - f1valf2 := &svcapitypes.AthenaConnectorSource{} + f1valf4 := &svcapitypes.AthenaConnectorSource{} if f1valiter.AthenaConnectorSource.ConnectionName != nil { - f1valf2.ConnectionName = f1valiter.AthenaConnectorSource.ConnectionName + f1valf4.ConnectionName = f1valiter.AthenaConnectorSource.ConnectionName } if f1valiter.AthenaConnectorSource.ConnectionTable != nil { - f1valf2.ConnectionTable = f1valiter.AthenaConnectorSource.ConnectionTable + f1valf4.ConnectionTable = f1valiter.AthenaConnectorSource.ConnectionTable } if f1valiter.AthenaConnectorSource.ConnectionType != nil { - f1valf2.ConnectionType = f1valiter.AthenaConnectorSource.ConnectionType + f1valf4.ConnectionType = f1valiter.AthenaConnectorSource.ConnectionType } if f1valiter.AthenaConnectorSource.ConnectorName != nil { - f1valf2.ConnectorName = f1valiter.AthenaConnectorSource.ConnectorName + f1valf4.ConnectorName = f1valiter.AthenaConnectorSource.ConnectorName } if f1valiter.AthenaConnectorSource.Name != nil { - f1valf2.Name = f1valiter.AthenaConnectorSource.Name + f1valf4.Name = f1valiter.AthenaConnectorSource.Name } if f1valiter.AthenaConnectorSource.OutputSchemas != nil { - f1valf2f5 := []*svcapitypes.GlueSchema{} - for _, f1valf2f5iter := range f1valiter.AthenaConnectorSource.OutputSchemas { - f1valf2f5elem := &svcapitypes.GlueSchema{} - if f1valf2f5iter.Columns != nil { - f1valf2f5elemf0 := []*svcapitypes.GlueStudioSchemaColumn{} - for _, f1valf2f5elemf0iter := range f1valf2f5iter.Columns { - f1valf2f5elemf0elem := &svcapitypes.GlueStudioSchemaColumn{} - if f1valf2f5elemf0iter.Name != nil { - f1valf2f5elemf0elem.Name = f1valf2f5elemf0iter.Name + f1valf4f5 := []*svcapitypes.GlueSchema{} + for _, f1valf4f5iter := range f1valiter.AthenaConnectorSource.OutputSchemas { + f1valf4f5elem := &svcapitypes.GlueSchema{} + if f1valf4f5iter.Columns != nil { + f1valf4f5elemf0 := []*svcapitypes.GlueStudioSchemaColumn{} + for _, f1valf4f5elemf0iter := range f1valf4f5iter.Columns { + f1valf4f5elemf0elem := &svcapitypes.GlueStudioSchemaColumn{} + if f1valf4f5elemf0iter.Name != nil { + f1valf4f5elemf0elem.Name = f1valf4f5elemf0iter.Name } - if f1valf2f5elemf0iter.Type != nil { - f1valf2f5elemf0elem.Type = f1valf2f5elemf0iter.Type + if f1valf4f5elemf0iter.Type != nil { + f1valf4f5elemf0elem.Type = f1valf4f5elemf0iter.Type } - f1valf2f5elemf0 = append(f1valf2f5elemf0, f1valf2f5elemf0elem) + f1valf4f5elemf0 = append(f1valf4f5elemf0, f1valf4f5elemf0elem) } - f1valf2f5elem.Columns = f1valf2f5elemf0 + f1valf4f5elem.Columns = f1valf4f5elemf0 } - f1valf2f5 = append(f1valf2f5, f1valf2f5elem) + f1valf4f5 = append(f1valf4f5, f1valf4f5elem) } - f1valf2.OutputSchemas = f1valf2f5 + f1valf4.OutputSchemas = f1valf4f5 } if f1valiter.AthenaConnectorSource.SchemaName != nil { - f1valf2.SchemaName = f1valiter.AthenaConnectorSource.SchemaName + f1valf4.SchemaName 
= f1valiter.AthenaConnectorSource.SchemaName + } + f1val.AthenaConnectorSource = f1valf4 + } + if f1valiter.CatalogDeltaSource != nil { + f1valf5 := &svcapitypes.CatalogDeltaSource{} + if f1valiter.CatalogDeltaSource.AdditionalDeltaOptions != nil { + f1valf5f0 := map[string]*string{} + for f1valf5f0key, f1valf5f0valiter := range f1valiter.CatalogDeltaSource.AdditionalDeltaOptions { + var f1valf5f0val string + f1valf5f0val = *f1valf5f0valiter + f1valf5f0[f1valf5f0key] = &f1valf5f0val + } + f1valf5.AdditionalDeltaOptions = f1valf5f0 + } + if f1valiter.CatalogDeltaSource.Database != nil { + f1valf5.Database = f1valiter.CatalogDeltaSource.Database + } + if f1valiter.CatalogDeltaSource.Name != nil { + f1valf5.Name = f1valiter.CatalogDeltaSource.Name + } + if f1valiter.CatalogDeltaSource.OutputSchemas != nil { + f1valf5f3 := []*svcapitypes.GlueSchema{} + for _, f1valf5f3iter := range f1valiter.CatalogDeltaSource.OutputSchemas { + f1valf5f3elem := &svcapitypes.GlueSchema{} + if f1valf5f3iter.Columns != nil { + f1valf5f3elemf0 := []*svcapitypes.GlueStudioSchemaColumn{} + for _, f1valf5f3elemf0iter := range f1valf5f3iter.Columns { + f1valf5f3elemf0elem := &svcapitypes.GlueStudioSchemaColumn{} + if f1valf5f3elemf0iter.Name != nil { + f1valf5f3elemf0elem.Name = f1valf5f3elemf0iter.Name + } + if f1valf5f3elemf0iter.Type != nil { + f1valf5f3elemf0elem.Type = f1valf5f3elemf0iter.Type + } + f1valf5f3elemf0 = append(f1valf5f3elemf0, f1valf5f3elemf0elem) + } + f1valf5f3elem.Columns = f1valf5f3elemf0 + } + f1valf5f3 = append(f1valf5f3, f1valf5f3elem) + } + f1valf5.OutputSchemas = f1valf5f3 + } + if f1valiter.CatalogDeltaSource.Table != nil { + f1valf5.Table = f1valiter.CatalogDeltaSource.Table + } + f1val.CatalogDeltaSource = f1valf5 + } + if f1valiter.CatalogHudiSource != nil { + f1valf6 := &svcapitypes.CatalogHudiSource{} + if f1valiter.CatalogHudiSource.AdditionalHudiOptions != nil { + f1valf6f0 := map[string]*string{} + for f1valf6f0key, f1valf6f0valiter := range f1valiter.CatalogHudiSource.AdditionalHudiOptions { + var f1valf6f0val string + f1valf6f0val = *f1valf6f0valiter + f1valf6f0[f1valf6f0key] = &f1valf6f0val + } + f1valf6.AdditionalHudiOptions = f1valf6f0 + } + if f1valiter.CatalogHudiSource.Database != nil { + f1valf6.Database = f1valiter.CatalogHudiSource.Database + } + if f1valiter.CatalogHudiSource.Name != nil { + f1valf6.Name = f1valiter.CatalogHudiSource.Name + } + if f1valiter.CatalogHudiSource.OutputSchemas != nil { + f1valf6f3 := []*svcapitypes.GlueSchema{} + for _, f1valf6f3iter := range f1valiter.CatalogHudiSource.OutputSchemas { + f1valf6f3elem := &svcapitypes.GlueSchema{} + if f1valf6f3iter.Columns != nil { + f1valf6f3elemf0 := []*svcapitypes.GlueStudioSchemaColumn{} + for _, f1valf6f3elemf0iter := range f1valf6f3iter.Columns { + f1valf6f3elemf0elem := &svcapitypes.GlueStudioSchemaColumn{} + if f1valf6f3elemf0iter.Name != nil { + f1valf6f3elemf0elem.Name = f1valf6f3elemf0iter.Name + } + if f1valf6f3elemf0iter.Type != nil { + f1valf6f3elemf0elem.Type = f1valf6f3elemf0iter.Type + } + f1valf6f3elemf0 = append(f1valf6f3elemf0, f1valf6f3elemf0elem) + } + f1valf6f3elem.Columns = f1valf6f3elemf0 + } + f1valf6f3 = append(f1valf6f3, f1valf6f3elem) + } + f1valf6.OutputSchemas = f1valf6f3 } - f1val.AthenaConnectorSource = f1valf2 + if f1valiter.CatalogHudiSource.Table != nil { + f1valf6.Table = f1valiter.CatalogHudiSource.Table + } + f1val.CatalogHudiSource = f1valf6 } if f1valiter.CatalogKafkaSource != nil { - f1valf3 := &svcapitypes.CatalogKafkaSource{} + f1valf7 := 
&svcapitypes.CatalogKafkaSource{} if f1valiter.CatalogKafkaSource.DataPreviewOptions != nil { - f1valf3f0 := &svcapitypes.StreamingDataPreviewOptions{} + f1valf7f0 := &svcapitypes.StreamingDataPreviewOptions{} if f1valiter.CatalogKafkaSource.DataPreviewOptions.PollingTime != nil { - f1valf3f0.PollingTime = f1valiter.CatalogKafkaSource.DataPreviewOptions.PollingTime + f1valf7f0.PollingTime = f1valiter.CatalogKafkaSource.DataPreviewOptions.PollingTime } if f1valiter.CatalogKafkaSource.DataPreviewOptions.RecordPollingLimit != nil { - f1valf3f0.RecordPollingLimit = f1valiter.CatalogKafkaSource.DataPreviewOptions.RecordPollingLimit + f1valf7f0.RecordPollingLimit = f1valiter.CatalogKafkaSource.DataPreviewOptions.RecordPollingLimit } - f1valf3.DataPreviewOptions = f1valf3f0 + f1valf7.DataPreviewOptions = f1valf7f0 } if f1valiter.CatalogKafkaSource.Database != nil { - f1valf3.Database = f1valiter.CatalogKafkaSource.Database + f1valf7.Database = f1valiter.CatalogKafkaSource.Database } if f1valiter.CatalogKafkaSource.DetectSchema != nil { - f1valf3.DetectSchema = f1valiter.CatalogKafkaSource.DetectSchema + f1valf7.DetectSchema = f1valiter.CatalogKafkaSource.DetectSchema } if f1valiter.CatalogKafkaSource.Name != nil { - f1valf3.Name = f1valiter.CatalogKafkaSource.Name + f1valf7.Name = f1valiter.CatalogKafkaSource.Name } if f1valiter.CatalogKafkaSource.StreamingOptions != nil { - f1valf3f4 := &svcapitypes.KafkaStreamingSourceOptions{} + f1valf7f4 := &svcapitypes.KafkaStreamingSourceOptions{} + if f1valiter.CatalogKafkaSource.StreamingOptions.AddRecordTimestamp != nil { + f1valf7f4.AddRecordTimestamp = f1valiter.CatalogKafkaSource.StreamingOptions.AddRecordTimestamp + } if f1valiter.CatalogKafkaSource.StreamingOptions.Assign != nil { - f1valf3f4.Assign = f1valiter.CatalogKafkaSource.StreamingOptions.Assign + f1valf7f4.Assign = f1valiter.CatalogKafkaSource.StreamingOptions.Assign } if f1valiter.CatalogKafkaSource.StreamingOptions.BootstrapServers != nil { - f1valf3f4.BootstrapServers = f1valiter.CatalogKafkaSource.StreamingOptions.BootstrapServers + f1valf7f4.BootstrapServers = f1valiter.CatalogKafkaSource.StreamingOptions.BootstrapServers } if f1valiter.CatalogKafkaSource.StreamingOptions.Classification != nil { - f1valf3f4.Classification = f1valiter.CatalogKafkaSource.StreamingOptions.Classification + f1valf7f4.Classification = f1valiter.CatalogKafkaSource.StreamingOptions.Classification } if f1valiter.CatalogKafkaSource.StreamingOptions.ConnectionName != nil { - f1valf3f4.ConnectionName = f1valiter.CatalogKafkaSource.StreamingOptions.ConnectionName + f1valf7f4.ConnectionName = f1valiter.CatalogKafkaSource.StreamingOptions.ConnectionName } if f1valiter.CatalogKafkaSource.StreamingOptions.Delimiter != nil { - f1valf3f4.Delimiter = f1valiter.CatalogKafkaSource.StreamingOptions.Delimiter + f1valf7f4.Delimiter = f1valiter.CatalogKafkaSource.StreamingOptions.Delimiter + } + if f1valiter.CatalogKafkaSource.StreamingOptions.EmitConsumerLagMetrics != nil { + f1valf7f4.EmitConsumerLagMetrics = f1valiter.CatalogKafkaSource.StreamingOptions.EmitConsumerLagMetrics } if f1valiter.CatalogKafkaSource.StreamingOptions.EndingOffsets != nil { - f1valf3f4.EndingOffsets = f1valiter.CatalogKafkaSource.StreamingOptions.EndingOffsets + f1valf7f4.EndingOffsets = f1valiter.CatalogKafkaSource.StreamingOptions.EndingOffsets + } + if f1valiter.CatalogKafkaSource.StreamingOptions.IncludeHeaders != nil { + f1valf7f4.IncludeHeaders = f1valiter.CatalogKafkaSource.StreamingOptions.IncludeHeaders } if 
f1valiter.CatalogKafkaSource.StreamingOptions.MaxOffsetsPerTrigger != nil { - f1valf3f4.MaxOffsetsPerTrigger = f1valiter.CatalogKafkaSource.StreamingOptions.MaxOffsetsPerTrigger + f1valf7f4.MaxOffsetsPerTrigger = f1valiter.CatalogKafkaSource.StreamingOptions.MaxOffsetsPerTrigger } if f1valiter.CatalogKafkaSource.StreamingOptions.MinPartitions != nil { - f1valf3f4.MinPartitions = f1valiter.CatalogKafkaSource.StreamingOptions.MinPartitions + f1valf7f4.MinPartitions = f1valiter.CatalogKafkaSource.StreamingOptions.MinPartitions } if f1valiter.CatalogKafkaSource.StreamingOptions.NumRetries != nil { - f1valf3f4.NumRetries = f1valiter.CatalogKafkaSource.StreamingOptions.NumRetries + f1valf7f4.NumRetries = f1valiter.CatalogKafkaSource.StreamingOptions.NumRetries } if f1valiter.CatalogKafkaSource.StreamingOptions.PollTimeoutMs != nil { - f1valf3f4.PollTimeoutMs = f1valiter.CatalogKafkaSource.StreamingOptions.PollTimeoutMs + f1valf7f4.PollTimeoutMs = f1valiter.CatalogKafkaSource.StreamingOptions.PollTimeoutMs } if f1valiter.CatalogKafkaSource.StreamingOptions.RetryIntervalMs != nil { - f1valf3f4.RetryIntervalMs = f1valiter.CatalogKafkaSource.StreamingOptions.RetryIntervalMs + f1valf7f4.RetryIntervalMs = f1valiter.CatalogKafkaSource.StreamingOptions.RetryIntervalMs } if f1valiter.CatalogKafkaSource.StreamingOptions.SecurityProtocol != nil { - f1valf3f4.SecurityProtocol = f1valiter.CatalogKafkaSource.StreamingOptions.SecurityProtocol + f1valf7f4.SecurityProtocol = f1valiter.CatalogKafkaSource.StreamingOptions.SecurityProtocol } if f1valiter.CatalogKafkaSource.StreamingOptions.StartingOffsets != nil { - f1valf3f4.StartingOffsets = f1valiter.CatalogKafkaSource.StreamingOptions.StartingOffsets + f1valf7f4.StartingOffsets = f1valiter.CatalogKafkaSource.StreamingOptions.StartingOffsets + } + if f1valiter.CatalogKafkaSource.StreamingOptions.StartingTimestamp != nil { + f1valf7f4.StartingTimestamp = &metav1.Time{*f1valiter.CatalogKafkaSource.StreamingOptions.StartingTimestamp} } if f1valiter.CatalogKafkaSource.StreamingOptions.SubscribePattern != nil { - f1valf3f4.SubscribePattern = f1valiter.CatalogKafkaSource.StreamingOptions.SubscribePattern + f1valf7f4.SubscribePattern = f1valiter.CatalogKafkaSource.StreamingOptions.SubscribePattern } if f1valiter.CatalogKafkaSource.StreamingOptions.TopicName != nil { - f1valf3f4.TopicName = f1valiter.CatalogKafkaSource.StreamingOptions.TopicName + f1valf7f4.TopicName = f1valiter.CatalogKafkaSource.StreamingOptions.TopicName } - f1valf3.StreamingOptions = f1valf3f4 + f1valf7.StreamingOptions = f1valf7f4 } if f1valiter.CatalogKafkaSource.Table != nil { - f1valf3.Table = f1valiter.CatalogKafkaSource.Table + f1valf7.Table = f1valiter.CatalogKafkaSource.Table } if f1valiter.CatalogKafkaSource.WindowSize != nil { - f1valf3.WindowSize = f1valiter.CatalogKafkaSource.WindowSize + f1valf7.WindowSize = f1valiter.CatalogKafkaSource.WindowSize } - f1val.CatalogKafkaSource = f1valf3 + f1val.CatalogKafkaSource = f1valf7 } if f1valiter.CatalogKinesisSource != nil { - f1valf4 := &svcapitypes.CatalogKinesisSource{} + f1valf8 := &svcapitypes.CatalogKinesisSource{} if f1valiter.CatalogKinesisSource.DataPreviewOptions != nil { - f1valf4f0 := &svcapitypes.StreamingDataPreviewOptions{} + f1valf8f0 := &svcapitypes.StreamingDataPreviewOptions{} if f1valiter.CatalogKinesisSource.DataPreviewOptions.PollingTime != nil { - f1valf4f0.PollingTime = f1valiter.CatalogKinesisSource.DataPreviewOptions.PollingTime + f1valf8f0.PollingTime = f1valiter.CatalogKinesisSource.DataPreviewOptions.PollingTime } if 
f1valiter.CatalogKinesisSource.DataPreviewOptions.RecordPollingLimit != nil { - f1valf4f0.RecordPollingLimit = f1valiter.CatalogKinesisSource.DataPreviewOptions.RecordPollingLimit + f1valf8f0.RecordPollingLimit = f1valiter.CatalogKinesisSource.DataPreviewOptions.RecordPollingLimit } - f1valf4.DataPreviewOptions = f1valf4f0 + f1valf8.DataPreviewOptions = f1valf8f0 } if f1valiter.CatalogKinesisSource.Database != nil { - f1valf4.Database = f1valiter.CatalogKinesisSource.Database + f1valf8.Database = f1valiter.CatalogKinesisSource.Database } if f1valiter.CatalogKinesisSource.DetectSchema != nil { - f1valf4.DetectSchema = f1valiter.CatalogKinesisSource.DetectSchema + f1valf8.DetectSchema = f1valiter.CatalogKinesisSource.DetectSchema } if f1valiter.CatalogKinesisSource.Name != nil { - f1valf4.Name = f1valiter.CatalogKinesisSource.Name + f1valf8.Name = f1valiter.CatalogKinesisSource.Name } if f1valiter.CatalogKinesisSource.StreamingOptions != nil { - f1valf4f4 := &svcapitypes.KinesisStreamingSourceOptions{} + f1valf8f4 := &svcapitypes.KinesisStreamingSourceOptions{} if f1valiter.CatalogKinesisSource.StreamingOptions.AddIdleTimeBetweenReads != nil { - f1valf4f4.AddIdleTimeBetweenReads = f1valiter.CatalogKinesisSource.StreamingOptions.AddIdleTimeBetweenReads + f1valf8f4.AddIdleTimeBetweenReads = f1valiter.CatalogKinesisSource.StreamingOptions.AddIdleTimeBetweenReads + } + if f1valiter.CatalogKinesisSource.StreamingOptions.AddRecordTimestamp != nil { + f1valf8f4.AddRecordTimestamp = f1valiter.CatalogKinesisSource.StreamingOptions.AddRecordTimestamp } if f1valiter.CatalogKinesisSource.StreamingOptions.AvoidEmptyBatches != nil { - f1valf4f4.AvoidEmptyBatches = f1valiter.CatalogKinesisSource.StreamingOptions.AvoidEmptyBatches + f1valf8f4.AvoidEmptyBatches = f1valiter.CatalogKinesisSource.StreamingOptions.AvoidEmptyBatches } if f1valiter.CatalogKinesisSource.StreamingOptions.Classification != nil { - f1valf4f4.Classification = f1valiter.CatalogKinesisSource.StreamingOptions.Classification + f1valf8f4.Classification = f1valiter.CatalogKinesisSource.StreamingOptions.Classification } if f1valiter.CatalogKinesisSource.StreamingOptions.Delimiter != nil { - f1valf4f4.Delimiter = f1valiter.CatalogKinesisSource.StreamingOptions.Delimiter + f1valf8f4.Delimiter = f1valiter.CatalogKinesisSource.StreamingOptions.Delimiter } if f1valiter.CatalogKinesisSource.StreamingOptions.DescribeShardInterval != nil { - f1valf4f4.DescribeShardInterval = f1valiter.CatalogKinesisSource.StreamingOptions.DescribeShardInterval + f1valf8f4.DescribeShardInterval = f1valiter.CatalogKinesisSource.StreamingOptions.DescribeShardInterval + } + if f1valiter.CatalogKinesisSource.StreamingOptions.EmitConsumerLagMetrics != nil { + f1valf8f4.EmitConsumerLagMetrics = f1valiter.CatalogKinesisSource.StreamingOptions.EmitConsumerLagMetrics } if f1valiter.CatalogKinesisSource.StreamingOptions.EndpointUrl != nil { - f1valf4f4.EndpointURL = f1valiter.CatalogKinesisSource.StreamingOptions.EndpointUrl + f1valf8f4.EndpointURL = f1valiter.CatalogKinesisSource.StreamingOptions.EndpointUrl } if f1valiter.CatalogKinesisSource.StreamingOptions.IdleTimeBetweenReadsInMs != nil { - f1valf4f4.IdleTimeBetweenReadsInMs = f1valiter.CatalogKinesisSource.StreamingOptions.IdleTimeBetweenReadsInMs + f1valf8f4.IdleTimeBetweenReadsInMs = f1valiter.CatalogKinesisSource.StreamingOptions.IdleTimeBetweenReadsInMs } if f1valiter.CatalogKinesisSource.StreamingOptions.MaxFetchRecordsPerShard != nil { - f1valf4f4.MaxFetchRecordsPerShard = 
f1valiter.CatalogKinesisSource.StreamingOptions.MaxFetchRecordsPerShard + f1valf8f4.MaxFetchRecordsPerShard = f1valiter.CatalogKinesisSource.StreamingOptions.MaxFetchRecordsPerShard } if f1valiter.CatalogKinesisSource.StreamingOptions.MaxFetchTimeInMs != nil { - f1valf4f4.MaxFetchTimeInMs = f1valiter.CatalogKinesisSource.StreamingOptions.MaxFetchTimeInMs + f1valf8f4.MaxFetchTimeInMs = f1valiter.CatalogKinesisSource.StreamingOptions.MaxFetchTimeInMs } if f1valiter.CatalogKinesisSource.StreamingOptions.MaxRecordPerRead != nil { - f1valf4f4.MaxRecordPerRead = f1valiter.CatalogKinesisSource.StreamingOptions.MaxRecordPerRead + f1valf8f4.MaxRecordPerRead = f1valiter.CatalogKinesisSource.StreamingOptions.MaxRecordPerRead } if f1valiter.CatalogKinesisSource.StreamingOptions.MaxRetryIntervalMs != nil { - f1valf4f4.MaxRetryIntervalMs = f1valiter.CatalogKinesisSource.StreamingOptions.MaxRetryIntervalMs + f1valf8f4.MaxRetryIntervalMs = f1valiter.CatalogKinesisSource.StreamingOptions.MaxRetryIntervalMs } if f1valiter.CatalogKinesisSource.StreamingOptions.NumRetries != nil { - f1valf4f4.NumRetries = f1valiter.CatalogKinesisSource.StreamingOptions.NumRetries + f1valf8f4.NumRetries = f1valiter.CatalogKinesisSource.StreamingOptions.NumRetries } if f1valiter.CatalogKinesisSource.StreamingOptions.RetryIntervalMs != nil { - f1valf4f4.RetryIntervalMs = f1valiter.CatalogKinesisSource.StreamingOptions.RetryIntervalMs + f1valf8f4.RetryIntervalMs = f1valiter.CatalogKinesisSource.StreamingOptions.RetryIntervalMs } if f1valiter.CatalogKinesisSource.StreamingOptions.RoleArn != nil { - f1valf4f4.RoleARN = f1valiter.CatalogKinesisSource.StreamingOptions.RoleArn + f1valf8f4.RoleARN = f1valiter.CatalogKinesisSource.StreamingOptions.RoleArn } if f1valiter.CatalogKinesisSource.StreamingOptions.RoleSessionName != nil { - f1valf4f4.RoleSessionName = f1valiter.CatalogKinesisSource.StreamingOptions.RoleSessionName + f1valf8f4.RoleSessionName = f1valiter.CatalogKinesisSource.StreamingOptions.RoleSessionName } if f1valiter.CatalogKinesisSource.StreamingOptions.StartingPosition != nil { - f1valf4f4.StartingPosition = f1valiter.CatalogKinesisSource.StreamingOptions.StartingPosition + f1valf8f4.StartingPosition = f1valiter.CatalogKinesisSource.StreamingOptions.StartingPosition + } + if f1valiter.CatalogKinesisSource.StreamingOptions.StartingTimestamp != nil { + f1valf8f4.StartingTimestamp = &metav1.Time{*f1valiter.CatalogKinesisSource.StreamingOptions.StartingTimestamp} } if f1valiter.CatalogKinesisSource.StreamingOptions.StreamArn != nil { - f1valf4f4.StreamARN = f1valiter.CatalogKinesisSource.StreamingOptions.StreamArn + f1valf8f4.StreamARN = f1valiter.CatalogKinesisSource.StreamingOptions.StreamArn } if f1valiter.CatalogKinesisSource.StreamingOptions.StreamName != nil { - f1valf4f4.StreamName = f1valiter.CatalogKinesisSource.StreamingOptions.StreamName + f1valf8f4.StreamName = f1valiter.CatalogKinesisSource.StreamingOptions.StreamName } - f1valf4.StreamingOptions = f1valf4f4 + f1valf8.StreamingOptions = f1valf8f4 } if f1valiter.CatalogKinesisSource.Table != nil { - f1valf4.Table = f1valiter.CatalogKinesisSource.Table + f1valf8.Table = f1valiter.CatalogKinesisSource.Table } if f1valiter.CatalogKinesisSource.WindowSize != nil { - f1valf4.WindowSize = f1valiter.CatalogKinesisSource.WindowSize + f1valf8.WindowSize = f1valiter.CatalogKinesisSource.WindowSize } - f1val.CatalogKinesisSource = f1valf4 + f1val.CatalogKinesisSource = f1valf8 } if f1valiter.CatalogSource != nil { - f1valf5 := &svcapitypes.CatalogSource{} + f1valf9 := 
&svcapitypes.CatalogSource{} if f1valiter.CatalogSource.Database != nil { - f1valf5.Database = f1valiter.CatalogSource.Database + f1valf9.Database = f1valiter.CatalogSource.Database } if f1valiter.CatalogSource.Name != nil { - f1valf5.Name = f1valiter.CatalogSource.Name + f1valf9.Name = f1valiter.CatalogSource.Name } if f1valiter.CatalogSource.Table != nil { - f1valf5.Table = f1valiter.CatalogSource.Table + f1valf9.Table = f1valiter.CatalogSource.Table } - f1val.CatalogSource = f1valf5 + f1val.CatalogSource = f1valf9 } if f1valiter.CatalogTarget != nil { - f1valf6 := &svcapitypes.BasicCatalogTarget{} + f1valf10 := &svcapitypes.BasicCatalogTarget{} if f1valiter.CatalogTarget.Database != nil { - f1valf6.Database = f1valiter.CatalogTarget.Database + f1valf10.Database = f1valiter.CatalogTarget.Database } if f1valiter.CatalogTarget.Inputs != nil { - f1valf6f1 := []*string{} - for _, f1valf6f1iter := range f1valiter.CatalogTarget.Inputs { - var f1valf6f1elem string - f1valf6f1elem = *f1valf6f1iter - f1valf6f1 = append(f1valf6f1, &f1valf6f1elem) + f1valf10f1 := []*string{} + for _, f1valf10f1iter := range f1valiter.CatalogTarget.Inputs { + var f1valf10f1elem string + f1valf10f1elem = *f1valf10f1iter + f1valf10f1 = append(f1valf10f1, &f1valf10f1elem) } - f1valf6.Inputs = f1valf6f1 + f1valf10.Inputs = f1valf10f1 } if f1valiter.CatalogTarget.Name != nil { - f1valf6.Name = f1valiter.CatalogTarget.Name + f1valf10.Name = f1valiter.CatalogTarget.Name } if f1valiter.CatalogTarget.Table != nil { - f1valf6.Table = f1valiter.CatalogTarget.Table + f1valf10.Table = f1valiter.CatalogTarget.Table } - f1val.CatalogTarget = f1valf6 + f1val.CatalogTarget = f1valf10 } if f1valiter.CustomCode != nil { - f1valf7 := &svcapitypes.CustomCode{} + f1valf11 := &svcapitypes.CustomCode{} if f1valiter.CustomCode.ClassName != nil { - f1valf7.ClassName = f1valiter.CustomCode.ClassName + f1valf11.ClassName = f1valiter.CustomCode.ClassName } if f1valiter.CustomCode.Code != nil { - f1valf7.Code = f1valiter.CustomCode.Code + f1valf11.Code = f1valiter.CustomCode.Code } if f1valiter.CustomCode.Inputs != nil { - f1valf7f2 := []*string{} - for _, f1valf7f2iter := range f1valiter.CustomCode.Inputs { - var f1valf7f2elem string - f1valf7f2elem = *f1valf7f2iter - f1valf7f2 = append(f1valf7f2, &f1valf7f2elem) + f1valf11f2 := []*string{} + for _, f1valf11f2iter := range f1valiter.CustomCode.Inputs { + var f1valf11f2elem string + f1valf11f2elem = *f1valf11f2iter + f1valf11f2 = append(f1valf11f2, &f1valf11f2elem) } - f1valf7.Inputs = f1valf7f2 + f1valf11.Inputs = f1valf11f2 } if f1valiter.CustomCode.Name != nil { - f1valf7.Name = f1valiter.CustomCode.Name + f1valf11.Name = f1valiter.CustomCode.Name } if f1valiter.CustomCode.OutputSchemas != nil { - f1valf7f4 := []*svcapitypes.GlueSchema{} - for _, f1valf7f4iter := range f1valiter.CustomCode.OutputSchemas { - f1valf7f4elem := &svcapitypes.GlueSchema{} - if f1valf7f4iter.Columns != nil { - f1valf7f4elemf0 := []*svcapitypes.GlueStudioSchemaColumn{} - for _, f1valf7f4elemf0iter := range f1valf7f4iter.Columns { - f1valf7f4elemf0elem := &svcapitypes.GlueStudioSchemaColumn{} - if f1valf7f4elemf0iter.Name != nil { - f1valf7f4elemf0elem.Name = f1valf7f4elemf0iter.Name + f1valf11f4 := []*svcapitypes.GlueSchema{} + for _, f1valf11f4iter := range f1valiter.CustomCode.OutputSchemas { + f1valf11f4elem := &svcapitypes.GlueSchema{} + if f1valf11f4iter.Columns != nil { + f1valf11f4elemf0 := []*svcapitypes.GlueStudioSchemaColumn{} + for _, f1valf11f4elemf0iter := range f1valf11f4iter.Columns { + 
f1valf11f4elemf0elem := &svcapitypes.GlueStudioSchemaColumn{} + if f1valf11f4elemf0iter.Name != nil { + f1valf11f4elemf0elem.Name = f1valf11f4elemf0iter.Name } - if f1valf7f4elemf0iter.Type != nil { - f1valf7f4elemf0elem.Type = f1valf7f4elemf0iter.Type + if f1valf11f4elemf0iter.Type != nil { + f1valf11f4elemf0elem.Type = f1valf11f4elemf0iter.Type } - f1valf7f4elemf0 = append(f1valf7f4elemf0, f1valf7f4elemf0elem) + f1valf11f4elemf0 = append(f1valf11f4elemf0, f1valf11f4elemf0elem) } - f1valf7f4elem.Columns = f1valf7f4elemf0 + f1valf11f4elem.Columns = f1valf11f4elemf0 } - f1valf7f4 = append(f1valf7f4, f1valf7f4elem) + f1valf11f4 = append(f1valf11f4, f1valf11f4elem) } - f1valf7.OutputSchemas = f1valf7f4 + f1valf11.OutputSchemas = f1valf11f4 + } + f1val.CustomCode = f1valf11 + } + if f1valiter.DirectJDBCSource != nil { + f1valf12 := &svcapitypes.DirectJDBCSource{} + if f1valiter.DirectJDBCSource.ConnectionName != nil { + f1valf12.ConnectionName = f1valiter.DirectJDBCSource.ConnectionName + } + if f1valiter.DirectJDBCSource.ConnectionType != nil { + f1valf12.ConnectionType = f1valiter.DirectJDBCSource.ConnectionType + } + if f1valiter.DirectJDBCSource.Database != nil { + f1valf12.Database = f1valiter.DirectJDBCSource.Database + } + if f1valiter.DirectJDBCSource.Name != nil { + f1valf12.Name = f1valiter.DirectJDBCSource.Name } - f1val.CustomCode = f1valf7 + if f1valiter.DirectJDBCSource.RedshiftTmpDir != nil { + f1valf12.RedshiftTmpDir = f1valiter.DirectJDBCSource.RedshiftTmpDir + } + if f1valiter.DirectJDBCSource.Table != nil { + f1valf12.Table = f1valiter.DirectJDBCSource.Table + } + f1val.DirectJDBCSource = f1valf12 } if f1valiter.DirectKafkaSource != nil { - f1valf8 := &svcapitypes.DirectKafkaSource{} + f1valf13 := &svcapitypes.DirectKafkaSource{} if f1valiter.DirectKafkaSource.DataPreviewOptions != nil { - f1valf8f0 := &svcapitypes.StreamingDataPreviewOptions{} + f1valf13f0 := &svcapitypes.StreamingDataPreviewOptions{} if f1valiter.DirectKafkaSource.DataPreviewOptions.PollingTime != nil { - f1valf8f0.PollingTime = f1valiter.DirectKafkaSource.DataPreviewOptions.PollingTime + f1valf13f0.PollingTime = f1valiter.DirectKafkaSource.DataPreviewOptions.PollingTime } if f1valiter.DirectKafkaSource.DataPreviewOptions.RecordPollingLimit != nil { - f1valf8f0.RecordPollingLimit = f1valiter.DirectKafkaSource.DataPreviewOptions.RecordPollingLimit + f1valf13f0.RecordPollingLimit = f1valiter.DirectKafkaSource.DataPreviewOptions.RecordPollingLimit } - f1valf8.DataPreviewOptions = f1valf8f0 + f1valf13.DataPreviewOptions = f1valf13f0 } if f1valiter.DirectKafkaSource.DetectSchema != nil { - f1valf8.DetectSchema = f1valiter.DirectKafkaSource.DetectSchema + f1valf13.DetectSchema = f1valiter.DirectKafkaSource.DetectSchema } if f1valiter.DirectKafkaSource.Name != nil { - f1valf8.Name = f1valiter.DirectKafkaSource.Name + f1valf13.Name = f1valiter.DirectKafkaSource.Name } if f1valiter.DirectKafkaSource.StreamingOptions != nil { - f1valf8f3 := &svcapitypes.KafkaStreamingSourceOptions{} + f1valf13f3 := &svcapitypes.KafkaStreamingSourceOptions{} + if f1valiter.DirectKafkaSource.StreamingOptions.AddRecordTimestamp != nil { + f1valf13f3.AddRecordTimestamp = f1valiter.DirectKafkaSource.StreamingOptions.AddRecordTimestamp + } if f1valiter.DirectKafkaSource.StreamingOptions.Assign != nil { - f1valf8f3.Assign = f1valiter.DirectKafkaSource.StreamingOptions.Assign + f1valf13f3.Assign = f1valiter.DirectKafkaSource.StreamingOptions.Assign } if f1valiter.DirectKafkaSource.StreamingOptions.BootstrapServers != nil { - 
f1valf8f3.BootstrapServers = f1valiter.DirectKafkaSource.StreamingOptions.BootstrapServers + f1valf13f3.BootstrapServers = f1valiter.DirectKafkaSource.StreamingOptions.BootstrapServers } if f1valiter.DirectKafkaSource.StreamingOptions.Classification != nil { - f1valf8f3.Classification = f1valiter.DirectKafkaSource.StreamingOptions.Classification + f1valf13f3.Classification = f1valiter.DirectKafkaSource.StreamingOptions.Classification } if f1valiter.DirectKafkaSource.StreamingOptions.ConnectionName != nil { - f1valf8f3.ConnectionName = f1valiter.DirectKafkaSource.StreamingOptions.ConnectionName + f1valf13f3.ConnectionName = f1valiter.DirectKafkaSource.StreamingOptions.ConnectionName } if f1valiter.DirectKafkaSource.StreamingOptions.Delimiter != nil { - f1valf8f3.Delimiter = f1valiter.DirectKafkaSource.StreamingOptions.Delimiter + f1valf13f3.Delimiter = f1valiter.DirectKafkaSource.StreamingOptions.Delimiter + } + if f1valiter.DirectKafkaSource.StreamingOptions.EmitConsumerLagMetrics != nil { + f1valf13f3.EmitConsumerLagMetrics = f1valiter.DirectKafkaSource.StreamingOptions.EmitConsumerLagMetrics } if f1valiter.DirectKafkaSource.StreamingOptions.EndingOffsets != nil { - f1valf8f3.EndingOffsets = f1valiter.DirectKafkaSource.StreamingOptions.EndingOffsets + f1valf13f3.EndingOffsets = f1valiter.DirectKafkaSource.StreamingOptions.EndingOffsets + } + if f1valiter.DirectKafkaSource.StreamingOptions.IncludeHeaders != nil { + f1valf13f3.IncludeHeaders = f1valiter.DirectKafkaSource.StreamingOptions.IncludeHeaders } if f1valiter.DirectKafkaSource.StreamingOptions.MaxOffsetsPerTrigger != nil { - f1valf8f3.MaxOffsetsPerTrigger = f1valiter.DirectKafkaSource.StreamingOptions.MaxOffsetsPerTrigger + f1valf13f3.MaxOffsetsPerTrigger = f1valiter.DirectKafkaSource.StreamingOptions.MaxOffsetsPerTrigger } if f1valiter.DirectKafkaSource.StreamingOptions.MinPartitions != nil { - f1valf8f3.MinPartitions = f1valiter.DirectKafkaSource.StreamingOptions.MinPartitions + f1valf13f3.MinPartitions = f1valiter.DirectKafkaSource.StreamingOptions.MinPartitions } if f1valiter.DirectKafkaSource.StreamingOptions.NumRetries != nil { - f1valf8f3.NumRetries = f1valiter.DirectKafkaSource.StreamingOptions.NumRetries + f1valf13f3.NumRetries = f1valiter.DirectKafkaSource.StreamingOptions.NumRetries } if f1valiter.DirectKafkaSource.StreamingOptions.PollTimeoutMs != nil { - f1valf8f3.PollTimeoutMs = f1valiter.DirectKafkaSource.StreamingOptions.PollTimeoutMs + f1valf13f3.PollTimeoutMs = f1valiter.DirectKafkaSource.StreamingOptions.PollTimeoutMs } if f1valiter.DirectKafkaSource.StreamingOptions.RetryIntervalMs != nil { - f1valf8f3.RetryIntervalMs = f1valiter.DirectKafkaSource.StreamingOptions.RetryIntervalMs + f1valf13f3.RetryIntervalMs = f1valiter.DirectKafkaSource.StreamingOptions.RetryIntervalMs } if f1valiter.DirectKafkaSource.StreamingOptions.SecurityProtocol != nil { - f1valf8f3.SecurityProtocol = f1valiter.DirectKafkaSource.StreamingOptions.SecurityProtocol + f1valf13f3.SecurityProtocol = f1valiter.DirectKafkaSource.StreamingOptions.SecurityProtocol } if f1valiter.DirectKafkaSource.StreamingOptions.StartingOffsets != nil { - f1valf8f3.StartingOffsets = f1valiter.DirectKafkaSource.StreamingOptions.StartingOffsets + f1valf13f3.StartingOffsets = f1valiter.DirectKafkaSource.StreamingOptions.StartingOffsets + } + if f1valiter.DirectKafkaSource.StreamingOptions.StartingTimestamp != nil { + f1valf13f3.StartingTimestamp = &metav1.Time{*f1valiter.DirectKafkaSource.StreamingOptions.StartingTimestamp} } if 
f1valiter.DirectKafkaSource.StreamingOptions.SubscribePattern != nil { - f1valf8f3.SubscribePattern = f1valiter.DirectKafkaSource.StreamingOptions.SubscribePattern + f1valf13f3.SubscribePattern = f1valiter.DirectKafkaSource.StreamingOptions.SubscribePattern } if f1valiter.DirectKafkaSource.StreamingOptions.TopicName != nil { - f1valf8f3.TopicName = f1valiter.DirectKafkaSource.StreamingOptions.TopicName + f1valf13f3.TopicName = f1valiter.DirectKafkaSource.StreamingOptions.TopicName } - f1valf8.StreamingOptions = f1valf8f3 + f1valf13.StreamingOptions = f1valf13f3 } if f1valiter.DirectKafkaSource.WindowSize != nil { - f1valf8.WindowSize = f1valiter.DirectKafkaSource.WindowSize + f1valf13.WindowSize = f1valiter.DirectKafkaSource.WindowSize } - f1val.DirectKafkaSource = f1valf8 + f1val.DirectKafkaSource = f1valf13 } if f1valiter.DirectKinesisSource != nil { - f1valf9 := &svcapitypes.DirectKinesisSource{} + f1valf14 := &svcapitypes.DirectKinesisSource{} if f1valiter.DirectKinesisSource.DataPreviewOptions != nil { - f1valf9f0 := &svcapitypes.StreamingDataPreviewOptions{} + f1valf14f0 := &svcapitypes.StreamingDataPreviewOptions{} if f1valiter.DirectKinesisSource.DataPreviewOptions.PollingTime != nil { - f1valf9f0.PollingTime = f1valiter.DirectKinesisSource.DataPreviewOptions.PollingTime + f1valf14f0.PollingTime = f1valiter.DirectKinesisSource.DataPreviewOptions.PollingTime } if f1valiter.DirectKinesisSource.DataPreviewOptions.RecordPollingLimit != nil { - f1valf9f0.RecordPollingLimit = f1valiter.DirectKinesisSource.DataPreviewOptions.RecordPollingLimit + f1valf14f0.RecordPollingLimit = f1valiter.DirectKinesisSource.DataPreviewOptions.RecordPollingLimit } - f1valf9.DataPreviewOptions = f1valf9f0 + f1valf14.DataPreviewOptions = f1valf14f0 } if f1valiter.DirectKinesisSource.DetectSchema != nil { - f1valf9.DetectSchema = f1valiter.DirectKinesisSource.DetectSchema + f1valf14.DetectSchema = f1valiter.DirectKinesisSource.DetectSchema } if f1valiter.DirectKinesisSource.Name != nil { - f1valf9.Name = f1valiter.DirectKinesisSource.Name + f1valf14.Name = f1valiter.DirectKinesisSource.Name } if f1valiter.DirectKinesisSource.StreamingOptions != nil { - f1valf9f3 := &svcapitypes.KinesisStreamingSourceOptions{} + f1valf14f3 := &svcapitypes.KinesisStreamingSourceOptions{} if f1valiter.DirectKinesisSource.StreamingOptions.AddIdleTimeBetweenReads != nil { - f1valf9f3.AddIdleTimeBetweenReads = f1valiter.DirectKinesisSource.StreamingOptions.AddIdleTimeBetweenReads + f1valf14f3.AddIdleTimeBetweenReads = f1valiter.DirectKinesisSource.StreamingOptions.AddIdleTimeBetweenReads + } + if f1valiter.DirectKinesisSource.StreamingOptions.AddRecordTimestamp != nil { + f1valf14f3.AddRecordTimestamp = f1valiter.DirectKinesisSource.StreamingOptions.AddRecordTimestamp } if f1valiter.DirectKinesisSource.StreamingOptions.AvoidEmptyBatches != nil { - f1valf9f3.AvoidEmptyBatches = f1valiter.DirectKinesisSource.StreamingOptions.AvoidEmptyBatches + f1valf14f3.AvoidEmptyBatches = f1valiter.DirectKinesisSource.StreamingOptions.AvoidEmptyBatches } if f1valiter.DirectKinesisSource.StreamingOptions.Classification != nil { - f1valf9f3.Classification = f1valiter.DirectKinesisSource.StreamingOptions.Classification + f1valf14f3.Classification = f1valiter.DirectKinesisSource.StreamingOptions.Classification } if f1valiter.DirectKinesisSource.StreamingOptions.Delimiter != nil { - f1valf9f3.Delimiter = f1valiter.DirectKinesisSource.StreamingOptions.Delimiter + f1valf14f3.Delimiter = f1valiter.DirectKinesisSource.StreamingOptions.Delimiter } if 
f1valiter.DirectKinesisSource.StreamingOptions.DescribeShardInterval != nil { - f1valf9f3.DescribeShardInterval = f1valiter.DirectKinesisSource.StreamingOptions.DescribeShardInterval + f1valf14f3.DescribeShardInterval = f1valiter.DirectKinesisSource.StreamingOptions.DescribeShardInterval + } + if f1valiter.DirectKinesisSource.StreamingOptions.EmitConsumerLagMetrics != nil { + f1valf14f3.EmitConsumerLagMetrics = f1valiter.DirectKinesisSource.StreamingOptions.EmitConsumerLagMetrics } if f1valiter.DirectKinesisSource.StreamingOptions.EndpointUrl != nil { - f1valf9f3.EndpointURL = f1valiter.DirectKinesisSource.StreamingOptions.EndpointUrl + f1valf14f3.EndpointURL = f1valiter.DirectKinesisSource.StreamingOptions.EndpointUrl } if f1valiter.DirectKinesisSource.StreamingOptions.IdleTimeBetweenReadsInMs != nil { - f1valf9f3.IdleTimeBetweenReadsInMs = f1valiter.DirectKinesisSource.StreamingOptions.IdleTimeBetweenReadsInMs + f1valf14f3.IdleTimeBetweenReadsInMs = f1valiter.DirectKinesisSource.StreamingOptions.IdleTimeBetweenReadsInMs } if f1valiter.DirectKinesisSource.StreamingOptions.MaxFetchRecordsPerShard != nil { - f1valf9f3.MaxFetchRecordsPerShard = f1valiter.DirectKinesisSource.StreamingOptions.MaxFetchRecordsPerShard + f1valf14f3.MaxFetchRecordsPerShard = f1valiter.DirectKinesisSource.StreamingOptions.MaxFetchRecordsPerShard } if f1valiter.DirectKinesisSource.StreamingOptions.MaxFetchTimeInMs != nil { - f1valf9f3.MaxFetchTimeInMs = f1valiter.DirectKinesisSource.StreamingOptions.MaxFetchTimeInMs + f1valf14f3.MaxFetchTimeInMs = f1valiter.DirectKinesisSource.StreamingOptions.MaxFetchTimeInMs } if f1valiter.DirectKinesisSource.StreamingOptions.MaxRecordPerRead != nil { - f1valf9f3.MaxRecordPerRead = f1valiter.DirectKinesisSource.StreamingOptions.MaxRecordPerRead + f1valf14f3.MaxRecordPerRead = f1valiter.DirectKinesisSource.StreamingOptions.MaxRecordPerRead } if f1valiter.DirectKinesisSource.StreamingOptions.MaxRetryIntervalMs != nil { - f1valf9f3.MaxRetryIntervalMs = f1valiter.DirectKinesisSource.StreamingOptions.MaxRetryIntervalMs + f1valf14f3.MaxRetryIntervalMs = f1valiter.DirectKinesisSource.StreamingOptions.MaxRetryIntervalMs } if f1valiter.DirectKinesisSource.StreamingOptions.NumRetries != nil { - f1valf9f3.NumRetries = f1valiter.DirectKinesisSource.StreamingOptions.NumRetries + f1valf14f3.NumRetries = f1valiter.DirectKinesisSource.StreamingOptions.NumRetries } if f1valiter.DirectKinesisSource.StreamingOptions.RetryIntervalMs != nil { - f1valf9f3.RetryIntervalMs = f1valiter.DirectKinesisSource.StreamingOptions.RetryIntervalMs + f1valf14f3.RetryIntervalMs = f1valiter.DirectKinesisSource.StreamingOptions.RetryIntervalMs } if f1valiter.DirectKinesisSource.StreamingOptions.RoleArn != nil { - f1valf9f3.RoleARN = f1valiter.DirectKinesisSource.StreamingOptions.RoleArn + f1valf14f3.RoleARN = f1valiter.DirectKinesisSource.StreamingOptions.RoleArn } if f1valiter.DirectKinesisSource.StreamingOptions.RoleSessionName != nil { - f1valf9f3.RoleSessionName = f1valiter.DirectKinesisSource.StreamingOptions.RoleSessionName + f1valf14f3.RoleSessionName = f1valiter.DirectKinesisSource.StreamingOptions.RoleSessionName } if f1valiter.DirectKinesisSource.StreamingOptions.StartingPosition != nil { - f1valf9f3.StartingPosition = f1valiter.DirectKinesisSource.StreamingOptions.StartingPosition + f1valf14f3.StartingPosition = f1valiter.DirectKinesisSource.StreamingOptions.StartingPosition + } + if f1valiter.DirectKinesisSource.StreamingOptions.StartingTimestamp != nil { + f1valf14f3.StartingTimestamp = 
&metav1.Time{*f1valiter.DirectKinesisSource.StreamingOptions.StartingTimestamp} } if f1valiter.DirectKinesisSource.StreamingOptions.StreamArn != nil { - f1valf9f3.StreamARN = f1valiter.DirectKinesisSource.StreamingOptions.StreamArn + f1valf14f3.StreamARN = f1valiter.DirectKinesisSource.StreamingOptions.StreamArn } if f1valiter.DirectKinesisSource.StreamingOptions.StreamName != nil { - f1valf9f3.StreamName = f1valiter.DirectKinesisSource.StreamingOptions.StreamName + f1valf14f3.StreamName = f1valiter.DirectKinesisSource.StreamingOptions.StreamName } - f1valf9.StreamingOptions = f1valf9f3 + f1valf14.StreamingOptions = f1valf14f3 } if f1valiter.DirectKinesisSource.WindowSize != nil { - f1valf9.WindowSize = f1valiter.DirectKinesisSource.WindowSize + f1valf14.WindowSize = f1valiter.DirectKinesisSource.WindowSize } - f1val.DirectKinesisSource = f1valf9 + f1val.DirectKinesisSource = f1valf14 } if f1valiter.DropDuplicates != nil { - f1valf10 := &svcapitypes.DropDuplicates{} + f1valf15 := &svcapitypes.DropDuplicates{} if f1valiter.DropDuplicates.Columns != nil { - f1valf10f0 := [][]*string{} - for _, f1valf10f0iter := range f1valiter.DropDuplicates.Columns { - f1valf10f0elem := []*string{} - for _, f1valf10f0elemiter := range f1valf10f0iter { - var f1valf10f0elemelem string - f1valf10f0elemelem = *f1valf10f0elemiter - f1valf10f0elem = append(f1valf10f0elem, &f1valf10f0elemelem) + f1valf15f0 := [][]*string{} + for _, f1valf15f0iter := range f1valiter.DropDuplicates.Columns { + f1valf15f0elem := []*string{} + for _, f1valf15f0elemiter := range f1valf15f0iter { + var f1valf15f0elemelem string + f1valf15f0elemelem = *f1valf15f0elemiter + f1valf15f0elem = append(f1valf15f0elem, &f1valf15f0elemelem) } - f1valf10f0 = append(f1valf10f0, f1valf10f0elem) + f1valf15f0 = append(f1valf15f0, f1valf15f0elem) } - f1valf10.Columns = f1valf10f0 + f1valf15.Columns = f1valf15f0 } if f1valiter.DropDuplicates.Inputs != nil { - f1valf10f1 := []*string{} - for _, f1valf10f1iter := range f1valiter.DropDuplicates.Inputs { - var f1valf10f1elem string - f1valf10f1elem = *f1valf10f1iter - f1valf10f1 = append(f1valf10f1, &f1valf10f1elem) + f1valf15f1 := []*string{} + for _, f1valf15f1iter := range f1valiter.DropDuplicates.Inputs { + var f1valf15f1elem string + f1valf15f1elem = *f1valf15f1iter + f1valf15f1 = append(f1valf15f1, &f1valf15f1elem) } - f1valf10.Inputs = f1valf10f1 + f1valf15.Inputs = f1valf15f1 } if f1valiter.DropDuplicates.Name != nil { - f1valf10.Name = f1valiter.DropDuplicates.Name + f1valf15.Name = f1valiter.DropDuplicates.Name } - f1val.DropDuplicates = f1valf10 + f1val.DropDuplicates = f1valf15 } if f1valiter.DropFields != nil { - f1valf11 := &svcapitypes.DropFields{} + f1valf16 := &svcapitypes.DropFields{} if f1valiter.DropFields.Inputs != nil { - f1valf11f0 := []*string{} - for _, f1valf11f0iter := range f1valiter.DropFields.Inputs { - var f1valf11f0elem string - f1valf11f0elem = *f1valf11f0iter - f1valf11f0 = append(f1valf11f0, &f1valf11f0elem) + f1valf16f0 := []*string{} + for _, f1valf16f0iter := range f1valiter.DropFields.Inputs { + var f1valf16f0elem string + f1valf16f0elem = *f1valf16f0iter + f1valf16f0 = append(f1valf16f0, &f1valf16f0elem) } - f1valf11.Inputs = f1valf11f0 + f1valf16.Inputs = f1valf16f0 } if f1valiter.DropFields.Name != nil { - f1valf11.Name = f1valiter.DropFields.Name + f1valf16.Name = f1valiter.DropFields.Name } if f1valiter.DropFields.Paths != nil { - f1valf11f2 := [][]*string{} - for _, f1valf11f2iter := range f1valiter.DropFields.Paths { - f1valf11f2elem := []*string{} - for _, 
f1valf11f2elemiter := range f1valf11f2iter { - var f1valf11f2elemelem string - f1valf11f2elemelem = *f1valf11f2elemiter - f1valf11f2elem = append(f1valf11f2elem, &f1valf11f2elemelem) + f1valf16f2 := [][]*string{} + for _, f1valf16f2iter := range f1valiter.DropFields.Paths { + f1valf16f2elem := []*string{} + for _, f1valf16f2elemiter := range f1valf16f2iter { + var f1valf16f2elemelem string + f1valf16f2elemelem = *f1valf16f2elemiter + f1valf16f2elem = append(f1valf16f2elem, &f1valf16f2elemelem) } - f1valf11f2 = append(f1valf11f2, f1valf11f2elem) + f1valf16f2 = append(f1valf16f2, f1valf16f2elem) } - f1valf11.Paths = f1valf11f2 + f1valf16.Paths = f1valf16f2 } - f1val.DropFields = f1valf11 + f1val.DropFields = f1valf16 } if f1valiter.DropNullFields != nil { - f1valf12 := &svcapitypes.DropNullFields{} + f1valf17 := &svcapitypes.DropNullFields{} if f1valiter.DropNullFields.Inputs != nil { - f1valf12f0 := []*string{} - for _, f1valf12f0iter := range f1valiter.DropNullFields.Inputs { - var f1valf12f0elem string - f1valf12f0elem = *f1valf12f0iter - f1valf12f0 = append(f1valf12f0, &f1valf12f0elem) + f1valf17f0 := []*string{} + for _, f1valf17f0iter := range f1valiter.DropNullFields.Inputs { + var f1valf17f0elem string + f1valf17f0elem = *f1valf17f0iter + f1valf17f0 = append(f1valf17f0, &f1valf17f0elem) } - f1valf12.Inputs = f1valf12f0 + f1valf17.Inputs = f1valf17f0 } if f1valiter.DropNullFields.Name != nil { - f1valf12.Name = f1valiter.DropNullFields.Name + f1valf17.Name = f1valiter.DropNullFields.Name } if f1valiter.DropNullFields.NullCheckBoxList != nil { - f1valf12f2 := &svcapitypes.NullCheckBoxList{} + f1valf17f2 := &svcapitypes.NullCheckBoxList{} if f1valiter.DropNullFields.NullCheckBoxList.IsEmpty != nil { - f1valf12f2.IsEmpty = f1valiter.DropNullFields.NullCheckBoxList.IsEmpty + f1valf17f2.IsEmpty = f1valiter.DropNullFields.NullCheckBoxList.IsEmpty } if f1valiter.DropNullFields.NullCheckBoxList.IsNegOne != nil { - f1valf12f2.IsNegOne = f1valiter.DropNullFields.NullCheckBoxList.IsNegOne + f1valf17f2.IsNegOne = f1valiter.DropNullFields.NullCheckBoxList.IsNegOne } if f1valiter.DropNullFields.NullCheckBoxList.IsNullString != nil { - f1valf12f2.IsNullString = f1valiter.DropNullFields.NullCheckBoxList.IsNullString + f1valf17f2.IsNullString = f1valiter.DropNullFields.NullCheckBoxList.IsNullString } - f1valf12.NullCheckBoxList = f1valf12f2 + f1valf17.NullCheckBoxList = f1valf17f2 } if f1valiter.DropNullFields.NullTextList != nil { - f1valf12f3 := []*svcapitypes.NullValueField{} - for _, f1valf12f3iter := range f1valiter.DropNullFields.NullTextList { - f1valf12f3elem := &svcapitypes.NullValueField{} - if f1valf12f3iter.Datatype != nil { - f1valf12f3elemf0 := &svcapitypes.Datatype{} - if f1valf12f3iter.Datatype.Id != nil { - f1valf12f3elemf0.ID = f1valf12f3iter.Datatype.Id + f1valf17f3 := []*svcapitypes.NullValueField{} + for _, f1valf17f3iter := range f1valiter.DropNullFields.NullTextList { + f1valf17f3elem := &svcapitypes.NullValueField{} + if f1valf17f3iter.Datatype != nil { + f1valf17f3elemf0 := &svcapitypes.Datatype{} + if f1valf17f3iter.Datatype.Id != nil { + f1valf17f3elemf0.ID = f1valf17f3iter.Datatype.Id } - if f1valf12f3iter.Datatype.Label != nil { - f1valf12f3elemf0.Label = f1valf12f3iter.Datatype.Label + if f1valf17f3iter.Datatype.Label != nil { + f1valf17f3elemf0.Label = f1valf17f3iter.Datatype.Label } - f1valf12f3elem.Datatype = f1valf12f3elemf0 + f1valf17f3elem.Datatype = f1valf17f3elemf0 } - if f1valf12f3iter.Value != nil { - f1valf12f3elem.Value = f1valf12f3iter.Value + if 
f1valf17f3iter.Value != nil { + f1valf17f3elem.Value = f1valf17f3iter.Value } - f1valf12f3 = append(f1valf12f3, f1valf12f3elem) + f1valf17f3 = append(f1valf17f3, f1valf17f3elem) } - f1valf12.NullTextList = f1valf12f3 + f1valf17.NullTextList = f1valf17f3 } - f1val.DropNullFields = f1valf12 + f1val.DropNullFields = f1valf17 } if f1valiter.DynamicTransform != nil { - f1valf13 := &svcapitypes.DynamicTransform{} + f1valf18 := &svcapitypes.DynamicTransform{} if f1valiter.DynamicTransform.FunctionName != nil { - f1valf13.FunctionName = f1valiter.DynamicTransform.FunctionName + f1valf18.FunctionName = f1valiter.DynamicTransform.FunctionName } if f1valiter.DynamicTransform.Inputs != nil { - f1valf13f1 := []*string{} - for _, f1valf13f1iter := range f1valiter.DynamicTransform.Inputs { - var f1valf13f1elem string - f1valf13f1elem = *f1valf13f1iter - f1valf13f1 = append(f1valf13f1, &f1valf13f1elem) + f1valf18f1 := []*string{} + for _, f1valf18f1iter := range f1valiter.DynamicTransform.Inputs { + var f1valf18f1elem string + f1valf18f1elem = *f1valf18f1iter + f1valf18f1 = append(f1valf18f1, &f1valf18f1elem) } - f1valf13.Inputs = f1valf13f1 + f1valf18.Inputs = f1valf18f1 } if f1valiter.DynamicTransform.Name != nil { - f1valf13.Name = f1valiter.DynamicTransform.Name + f1valf18.Name = f1valiter.DynamicTransform.Name + } + if f1valiter.DynamicTransform.OutputSchemas != nil { + f1valf18f3 := []*svcapitypes.GlueSchema{} + for _, f1valf18f3iter := range f1valiter.DynamicTransform.OutputSchemas { + f1valf18f3elem := &svcapitypes.GlueSchema{} + if f1valf18f3iter.Columns != nil { + f1valf18f3elemf0 := []*svcapitypes.GlueStudioSchemaColumn{} + for _, f1valf18f3elemf0iter := range f1valf18f3iter.Columns { + f1valf18f3elemf0elem := &svcapitypes.GlueStudioSchemaColumn{} + if f1valf18f3elemf0iter.Name != nil { + f1valf18f3elemf0elem.Name = f1valf18f3elemf0iter.Name + } + if f1valf18f3elemf0iter.Type != nil { + f1valf18f3elemf0elem.Type = f1valf18f3elemf0iter.Type + } + f1valf18f3elemf0 = append(f1valf18f3elemf0, f1valf18f3elemf0elem) + } + f1valf18f3elem.Columns = f1valf18f3elemf0 + } + f1valf18f3 = append(f1valf18f3, f1valf18f3elem) + } + f1valf18.OutputSchemas = f1valf18f3 } if f1valiter.DynamicTransform.Parameters != nil { - f1valf13f3 := []*svcapitypes.TransformConfigParameter{} - for _, f1valf13f3iter := range f1valiter.DynamicTransform.Parameters { - f1valf13f3elem := &svcapitypes.TransformConfigParameter{} - if f1valf13f3iter.IsOptional != nil { - f1valf13f3elem.IsOptional = f1valf13f3iter.IsOptional + f1valf18f4 := []*svcapitypes.TransformConfigParameter{} + for _, f1valf18f4iter := range f1valiter.DynamicTransform.Parameters { + f1valf18f4elem := &svcapitypes.TransformConfigParameter{} + if f1valf18f4iter.IsOptional != nil { + f1valf18f4elem.IsOptional = f1valf18f4iter.IsOptional } - if f1valf13f3iter.ListType != nil { - f1valf13f3elem.ListType = f1valf13f3iter.ListType + if f1valf18f4iter.ListType != nil { + f1valf18f4elem.ListType = f1valf18f4iter.ListType } - if f1valf13f3iter.Name != nil { - f1valf13f3elem.Name = f1valf13f3iter.Name + if f1valf18f4iter.Name != nil { + f1valf18f4elem.Name = f1valf18f4iter.Name } - if f1valf13f3iter.Type != nil { - f1valf13f3elem.Type = f1valf13f3iter.Type + if f1valf18f4iter.Type != nil { + f1valf18f4elem.Type = f1valf18f4iter.Type } - if f1valf13f3iter.ValidationMessage != nil { - f1valf13f3elem.ValidationMessage = f1valf13f3iter.ValidationMessage + if f1valf18f4iter.ValidationMessage != nil { + f1valf18f4elem.ValidationMessage = f1valf18f4iter.ValidationMessage } - if 
f1valf13f3iter.ValidationRule != nil {
-f1valf13f3elem.ValidationRule = f1valf13f3iter.ValidationRule
+if f1valf18f4iter.ValidationRule != nil {
+f1valf18f4elem.ValidationRule = f1valf18f4iter.ValidationRule
 }
-if f1valf13f3iter.Value != nil {
-f1valf13f3elemf6 := []*string{}
-for _, f1valf13f3elemf6iter := range f1valf13f3iter.Value {
-var f1valf13f3elemf6elem string
-f1valf13f3elemf6elem = *f1valf13f3elemf6iter
-f1valf13f3elemf6 = append(f1valf13f3elemf6, &f1valf13f3elemf6elem)
+if f1valf18f4iter.Value != nil {
+f1valf18f4elemf6 := []*string{}
+for _, f1valf18f4elemf6iter := range f1valf18f4iter.Value {
+var f1valf18f4elemf6elem string
+f1valf18f4elemf6elem = *f1valf18f4elemf6iter
+f1valf18f4elemf6 = append(f1valf18f4elemf6, &f1valf18f4elemf6elem)
 }
-f1valf13f3elem.Value = f1valf13f3elemf6
+f1valf18f4elem.Value = f1valf18f4elemf6
 }
-f1valf13f3 = append(f1valf13f3, f1valf13f3elem)
+f1valf18f4 = append(f1valf18f4, f1valf18f4elem)
 }
-f1valf13.Parameters = f1valf13f3
+f1valf18.Parameters = f1valf18f4
 }
 if f1valiter.DynamicTransform.Path != nil {
-f1valf13.Path = f1valiter.DynamicTransform.Path
+f1valf18.Path = f1valiter.DynamicTransform.Path
 }
 if f1valiter.DynamicTransform.TransformName != nil {
-f1valf13.TransformName = f1valiter.DynamicTransform.TransformName
+f1valf18.TransformName = f1valiter.DynamicTransform.TransformName
 }
 if f1valiter.DynamicTransform.Version != nil {
-f1valf13.Version = f1valiter.DynamicTransform.Version
+f1valf18.Version = f1valiter.DynamicTransform.Version
 }
-f1val.DynamicTransform = f1valf13
+f1val.DynamicTransform = f1valf18
 }
 if f1valiter.DynamoDBCatalogSource != nil {
-f1valf14 := &svcapitypes.DynamoDBCatalogSource{}
+f1valf19 := &svcapitypes.DynamoDBCatalogSource{}
 if f1valiter.DynamoDBCatalogSource.Database != nil {
-f1valf14.Database = f1valiter.DynamoDBCatalogSource.Database
+f1valf19.Database = f1valiter.DynamoDBCatalogSource.Database
 }
 if f1valiter.DynamoDBCatalogSource.Name != nil {
-f1valf14.Name = f1valiter.DynamoDBCatalogSource.Name
+f1valf19.Name = f1valiter.DynamoDBCatalogSource.Name
 }
 if f1valiter.DynamoDBCatalogSource.Table != nil {
-f1valf14.Table = f1valiter.DynamoDBCatalogSource.Table
+f1valf19.Table = f1valiter.DynamoDBCatalogSource.Table
 }
-f1val.DynamoDBCatalogSource = f1valf14
+f1val.DynamoDBCatalogSource = f1valf19
 }
 if f1valiter.EvaluateDataQuality != nil {
-f1valf15 := &svcapitypes.EvaluateDataQuality{}
+f1valf20 := &svcapitypes.EvaluateDataQuality{}
 if f1valiter.EvaluateDataQuality.Inputs != nil {
-f1valf15f0 := []*string{}
-for _, f1valf15f0iter := range f1valiter.EvaluateDataQuality.Inputs {
-var f1valf15f0elem string
-f1valf15f0elem = *f1valf15f0iter
-f1valf15f0 = append(f1valf15f0, &f1valf15f0elem)
+f1valf20f0 := []*string{}
+for _, f1valf20f0iter := range f1valiter.EvaluateDataQuality.Inputs {
+var f1valf20f0elem string
+f1valf20f0elem = *f1valf20f0iter
+f1valf20f0 = append(f1valf20f0, &f1valf20f0elem)
 }
-f1valf15.Inputs = f1valf15f0
+f1valf20.Inputs = f1valf20f0
 }
 if f1valiter.EvaluateDataQuality.Name != nil {
-f1valf15.Name = f1valiter.EvaluateDataQuality.Name
+f1valf20.Name = f1valiter.EvaluateDataQuality.Name
 }
 if f1valiter.EvaluateDataQuality.Output != nil {
-f1valf15.Output = f1valiter.EvaluateDataQuality.Output
+f1valf20.Output = f1valiter.EvaluateDataQuality.Output
 }
 if f1valiter.EvaluateDataQuality.PublishingOptions != nil {
-f1valf15f3 := &svcapitypes.DQResultsPublishingOptions{}
+f1valf20f3 := &svcapitypes.DQResultsPublishingOptions{}
 if f1valiter.EvaluateDataQuality.PublishingOptions.CloudWatchMetricsEnabled != nil {
-f1valf15f3.CloudWatchMetricsEnabled = f1valiter.EvaluateDataQuality.PublishingOptions.CloudWatchMetricsEnabled
+f1valf20f3.CloudWatchMetricsEnabled = f1valiter.EvaluateDataQuality.PublishingOptions.CloudWatchMetricsEnabled
 }
 if f1valiter.EvaluateDataQuality.PublishingOptions.EvaluationContext != nil {
-f1valf15f3.EvaluationContext = f1valiter.EvaluateDataQuality.PublishingOptions.EvaluationContext
+f1valf20f3.EvaluationContext = f1valiter.EvaluateDataQuality.PublishingOptions.EvaluationContext
 }
 if f1valiter.EvaluateDataQuality.PublishingOptions.ResultsPublishingEnabled != nil {
-f1valf15f3.ResultsPublishingEnabled = f1valiter.EvaluateDataQuality.PublishingOptions.ResultsPublishingEnabled
+f1valf20f3.ResultsPublishingEnabled = f1valiter.EvaluateDataQuality.PublishingOptions.ResultsPublishingEnabled
 }
 if f1valiter.EvaluateDataQuality.PublishingOptions.ResultsS3Prefix != nil {
-f1valf15f3.ResultsS3Prefix = f1valiter.EvaluateDataQuality.PublishingOptions.ResultsS3Prefix
+f1valf20f3.ResultsS3Prefix = f1valiter.EvaluateDataQuality.PublishingOptions.ResultsS3Prefix
 }
-f1valf15.PublishingOptions = f1valf15f3
+f1valf20.PublishingOptions = f1valf20f3
 }
 if f1valiter.EvaluateDataQuality.Ruleset != nil {
-f1valf15.Ruleset = f1valiter.EvaluateDataQuality.Ruleset
+f1valf20.Ruleset = f1valiter.EvaluateDataQuality.Ruleset
 }
 if f1valiter.EvaluateDataQuality.StopJobOnFailureOptions != nil {
-f1valf15f5 := &svcapitypes.DQStopJobOnFailureOptions{}
+f1valf20f5 := &svcapitypes.DQStopJobOnFailureOptions{}
 if f1valiter.EvaluateDataQuality.StopJobOnFailureOptions.StopJobOnFailureTiming != nil {
-f1valf15f5.StopJobOnFailureTiming = f1valiter.EvaluateDataQuality.StopJobOnFailureOptions.StopJobOnFailureTiming
+f1valf20f5.StopJobOnFailureTiming = f1valiter.EvaluateDataQuality.StopJobOnFailureOptions.StopJobOnFailureTiming
+}
+f1valf20.StopJobOnFailureOptions = f1valf20f5
+}
+f1val.EvaluateDataQuality = f1valf20
+}
+if f1valiter.EvaluateDataQualityMultiFrame != nil {
+f1valf21 := &svcapitypes.EvaluateDataQualityMultiFrame{}
+if f1valiter.EvaluateDataQualityMultiFrame.AdditionalDataSources != nil {
+f1valf21f0 := map[string]*string{}
+for f1valf21f0key, f1valf21f0valiter := range f1valiter.EvaluateDataQualityMultiFrame.AdditionalDataSources {
+var f1valf21f0val string
+f1valf21f0val = *f1valf21f0valiter
+f1valf21f0[f1valf21f0key] = &f1valf21f0val
 }
-f1valf15.StopJobOnFailureOptions = f1valf15f5
+f1valf21.AdditionalDataSources = f1valf21f0
 }
-f1val.EvaluateDataQuality = f1valf15
+if f1valiter.EvaluateDataQualityMultiFrame.AdditionalOptions != nil {
+f1valf21f1 := map[string]*string{}
+for f1valf21f1key, f1valf21f1valiter := range f1valiter.EvaluateDataQualityMultiFrame.AdditionalOptions {
+var f1valf21f1val string
+f1valf21f1val = *f1valf21f1valiter
+f1valf21f1[f1valf21f1key] = &f1valf21f1val
+}
+f1valf21.AdditionalOptions = f1valf21f1
+}
+if f1valiter.EvaluateDataQualityMultiFrame.Inputs != nil {
+f1valf21f2 := []*string{}
+for _, f1valf21f2iter := range f1valiter.EvaluateDataQualityMultiFrame.Inputs {
+var f1valf21f2elem string
+f1valf21f2elem = *f1valf21f2iter
+f1valf21f2 = append(f1valf21f2, &f1valf21f2elem)
+}
+f1valf21.Inputs = f1valf21f2
+}
+if f1valiter.EvaluateDataQualityMultiFrame.Name != nil {
+f1valf21.Name = f1valiter.EvaluateDataQualityMultiFrame.Name
+}
+if f1valiter.EvaluateDataQualityMultiFrame.PublishingOptions != nil {
+f1valf21f4 := &svcapitypes.DQResultsPublishingOptions{}
+if f1valiter.EvaluateDataQualityMultiFrame.PublishingOptions.CloudWatchMetricsEnabled != nil {
+f1valf21f4.CloudWatchMetricsEnabled = f1valiter.EvaluateDataQualityMultiFrame.PublishingOptions.CloudWatchMetricsEnabled
+}
+if f1valiter.EvaluateDataQualityMultiFrame.PublishingOptions.EvaluationContext != nil {
+f1valf21f4.EvaluationContext = f1valiter.EvaluateDataQualityMultiFrame.PublishingOptions.EvaluationContext
+}
+if f1valiter.EvaluateDataQualityMultiFrame.PublishingOptions.ResultsPublishingEnabled != nil {
+f1valf21f4.ResultsPublishingEnabled = f1valiter.EvaluateDataQualityMultiFrame.PublishingOptions.ResultsPublishingEnabled
+}
+if f1valiter.EvaluateDataQualityMultiFrame.PublishingOptions.ResultsS3Prefix != nil {
+f1valf21f4.ResultsS3Prefix = f1valiter.EvaluateDataQualityMultiFrame.PublishingOptions.ResultsS3Prefix
+}
+f1valf21.PublishingOptions = f1valf21f4
+}
+if f1valiter.EvaluateDataQualityMultiFrame.Ruleset != nil {
+f1valf21.Ruleset = f1valiter.EvaluateDataQualityMultiFrame.Ruleset
+}
+if f1valiter.EvaluateDataQualityMultiFrame.StopJobOnFailureOptions != nil {
+f1valf21f6 := &svcapitypes.DQStopJobOnFailureOptions{}
+if f1valiter.EvaluateDataQualityMultiFrame.StopJobOnFailureOptions.StopJobOnFailureTiming != nil {
+f1valf21f6.StopJobOnFailureTiming = f1valiter.EvaluateDataQualityMultiFrame.StopJobOnFailureOptions.StopJobOnFailureTiming
+}
+f1valf21.StopJobOnFailureOptions = f1valf21f6
+}
+f1val.EvaluateDataQualityMultiFrame = f1valf21
 }
 if f1valiter.FillMissingValues != nil {
-f1valf16 := &svcapitypes.FillMissingValues{}
+f1valf22 := &svcapitypes.FillMissingValues{}
 if f1valiter.FillMissingValues.FilledPath != nil {
-f1valf16.FilledPath = f1valiter.FillMissingValues.FilledPath
+f1valf22.FilledPath = f1valiter.FillMissingValues.FilledPath
 }
 if f1valiter.FillMissingValues.ImputedPath != nil {
-f1valf16.ImputedPath = f1valiter.FillMissingValues.ImputedPath
+f1valf22.ImputedPath = f1valiter.FillMissingValues.ImputedPath
 }
 if f1valiter.FillMissingValues.Inputs != nil {
-f1valf16f2 := []*string{}
-for _, f1valf16f2iter := range f1valiter.FillMissingValues.Inputs {
-var f1valf16f2elem string
-f1valf16f2elem = *f1valf16f2iter
-f1valf16f2 = append(f1valf16f2, &f1valf16f2elem)
+f1valf22f2 := []*string{}
+for _, f1valf22f2iter := range f1valiter.FillMissingValues.Inputs {
+var f1valf22f2elem string
+f1valf22f2elem = *f1valf22f2iter
+f1valf22f2 = append(f1valf22f2, &f1valf22f2elem)
 }
-f1valf16.Inputs = f1valf16f2
+f1valf22.Inputs = f1valf22f2
 }
 if f1valiter.FillMissingValues.Name != nil {
-f1valf16.Name = f1valiter.FillMissingValues.Name
+f1valf22.Name = f1valiter.FillMissingValues.Name
 }
-f1val.FillMissingValues = f1valf16
+f1val.FillMissingValues = f1valf22
 }
 if f1valiter.Filter != nil {
-f1valf17 := &svcapitypes.Filter{}
+f1valf23 := &svcapitypes.Filter{}
 if f1valiter.Filter.Filters != nil {
-f1valf17f0 := []*svcapitypes.FilterExpression{}
-for _, f1valf17f0iter := range f1valiter.Filter.Filters {
-f1valf17f0elem := &svcapitypes.FilterExpression{}
-if f1valf17f0iter.Negated != nil {
-f1valf17f0elem.Negated = f1valf17f0iter.Negated
-}
-if f1valf17f0iter.Operation != nil {
-f1valf17f0elem.Operation = f1valf17f0iter.Operation
-}
-if f1valf17f0iter.Values != nil {
-f1valf17f0elemf2 := []*svcapitypes.FilterValue{}
-for _, f1valf17f0elemf2iter := range f1valf17f0iter.Values {
-f1valf17f0elemf2elem := &svcapitypes.FilterValue{}
-if f1valf17f0elemf2iter.Type != nil {
-f1valf17f0elemf2elem.Type = f1valf17f0elemf2iter.Type
+f1valf23f0 := []*svcapitypes.FilterExpression{}
+for _, f1valf23f0iter := range f1valiter.Filter.Filters {
+f1valf23f0elem := &svcapitypes.FilterExpression{}
+if f1valf23f0iter.Negated != nil {
+f1valf23f0elem.Negated = f1valf23f0iter.Negated
+}
+if f1valf23f0iter.Operation != nil {
+f1valf23f0elem.Operation = f1valf23f0iter.Operation
+}
+if f1valf23f0iter.Values != nil {
+f1valf23f0elemf2 := []*svcapitypes.FilterValue{}
+for _, f1valf23f0elemf2iter := range f1valf23f0iter.Values {
+f1valf23f0elemf2elem := &svcapitypes.FilterValue{}
+if f1valf23f0elemf2iter.Type != nil {
+f1valf23f0elemf2elem.Type = f1valf23f0elemf2iter.Type
 }
-if f1valf17f0elemf2iter.Value != nil {
-f1valf17f0elemf2elemf1 := []*string{}
-for _, f1valf17f0elemf2elemf1iter := range f1valf17f0elemf2iter.Value {
-var f1valf17f0elemf2elemf1elem string
-f1valf17f0elemf2elemf1elem = *f1valf17f0elemf2elemf1iter
-f1valf17f0elemf2elemf1 = append(f1valf17f0elemf2elemf1, &f1valf17f0elemf2elemf1elem)
+if f1valf23f0elemf2iter.Value != nil {
+f1valf23f0elemf2elemf1 := []*string{}
+for _, f1valf23f0elemf2elemf1iter := range f1valf23f0elemf2iter.Value {
+var f1valf23f0elemf2elemf1elem string
+f1valf23f0elemf2elemf1elem = *f1valf23f0elemf2elemf1iter
+f1valf23f0elemf2elemf1 = append(f1valf23f0elemf2elemf1, &f1valf23f0elemf2elemf1elem)
 }
-f1valf17f0elemf2elem.Value = f1valf17f0elemf2elemf1
+f1valf23f0elemf2elem.Value = f1valf23f0elemf2elemf1
 }
-f1valf17f0elemf2 = append(f1valf17f0elemf2, f1valf17f0elemf2elem)
+f1valf23f0elemf2 = append(f1valf23f0elemf2, f1valf23f0elemf2elem)
 }
-f1valf17f0elem.Values = f1valf17f0elemf2
+f1valf23f0elem.Values = f1valf23f0elemf2
 }
-f1valf17f0 = append(f1valf17f0, f1valf17f0elem)
+f1valf23f0 = append(f1valf23f0, f1valf23f0elem)
 }
-f1valf17.Filters = f1valf17f0
+f1valf23.Filters = f1valf23f0
 }
 if f1valiter.Filter.Inputs != nil {
-f1valf17f1 := []*string{}
-for _, f1valf17f1iter := range f1valiter.Filter.Inputs {
-var f1valf17f1elem string
-f1valf17f1elem = *f1valf17f1iter
-f1valf17f1 = append(f1valf17f1, &f1valf17f1elem)
+f1valf23f1 := []*string{}
+for _, f1valf23f1iter := range f1valiter.Filter.Inputs {
+var f1valf23f1elem string
+f1valf23f1elem = *f1valf23f1iter
+f1valf23f1 = append(f1valf23f1, &f1valf23f1elem)
 }
-f1valf17.Inputs = f1valf17f1
+f1valf23.Inputs = f1valf23f1
 }
 if f1valiter.Filter.LogicalOperator != nil {
-f1valf17.LogicalOperator = f1valiter.Filter.LogicalOperator
+f1valf23.LogicalOperator = f1valiter.Filter.LogicalOperator
 }
 if f1valiter.Filter.Name != nil {
-f1valf17.Name = f1valiter.Filter.Name
+f1valf23.Name = f1valiter.Filter.Name
 }
-f1val.Filter = f1valf17
+f1val.Filter = f1valf23
 }
 if f1valiter.GovernedCatalogSource != nil {
-f1valf18 := &svcapitypes.GovernedCatalogSource{}
+f1valf24 := &svcapitypes.GovernedCatalogSource{}
 if f1valiter.GovernedCatalogSource.AdditionalOptions != nil {
-f1valf18f0 := &svcapitypes.S3SourceAdditionalOptions{}
+f1valf24f0 := &svcapitypes.S3SourceAdditionalOptions{}
 if f1valiter.GovernedCatalogSource.AdditionalOptions.BoundedFiles != nil {
-f1valf18f0.BoundedFiles = f1valiter.GovernedCatalogSource.AdditionalOptions.BoundedFiles
+f1valf24f0.BoundedFiles = f1valiter.GovernedCatalogSource.AdditionalOptions.BoundedFiles
 }
 if f1valiter.GovernedCatalogSource.AdditionalOptions.BoundedSize != nil {
-f1valf18f0.BoundedSize = f1valiter.GovernedCatalogSource.AdditionalOptions.BoundedSize
+f1valf24f0.BoundedSize = f1valiter.GovernedCatalogSource.AdditionalOptions.BoundedSize
 }
-f1valf18.AdditionalOptions = f1valf18f0
+f1valf24.AdditionalOptions = f1valf24f0
 }
 if f1valiter.GovernedCatalogSource.Database != nil {
-f1valf18.Database = f1valiter.GovernedCatalogSource.Database
+f1valf24.Database = f1valiter.GovernedCatalogSource.Database
 }
 if f1valiter.GovernedCatalogSource.Name != nil {
-f1valf18.Name = f1valiter.GovernedCatalogSource.Name
+f1valf24.Name = f1valiter.GovernedCatalogSource.Name
 }
 if f1valiter.GovernedCatalogSource.PartitionPredicate != nil {
-f1valf18.PartitionPredicate = f1valiter.GovernedCatalogSource.PartitionPredicate
+f1valf24.PartitionPredicate = f1valiter.GovernedCatalogSource.PartitionPredicate
 }
 if f1valiter.GovernedCatalogSource.Table != nil {
-f1valf18.Table = f1valiter.GovernedCatalogSource.Table
+f1valf24.Table = f1valiter.GovernedCatalogSource.Table
 }
-f1val.GovernedCatalogSource = f1valf18
+f1val.GovernedCatalogSource = f1valf24
 }
 if f1valiter.GovernedCatalogTarget != nil {
-f1valf19 := &svcapitypes.GovernedCatalogTarget{}
+f1valf25 := &svcapitypes.GovernedCatalogTarget{}
 if f1valiter.GovernedCatalogTarget.Database != nil {
-f1valf19.Database = f1valiter.GovernedCatalogTarget.Database
+f1valf25.Database = f1valiter.GovernedCatalogTarget.Database
 }
 if f1valiter.GovernedCatalogTarget.Inputs != nil {
-f1valf19f1 := []*string{}
-for _, f1valf19f1iter := range f1valiter.GovernedCatalogTarget.Inputs {
-var f1valf19f1elem string
-f1valf19f1elem = *f1valf19f1iter
-f1valf19f1 = append(f1valf19f1, &f1valf19f1elem)
+f1valf25f1 := []*string{}
+for _, f1valf25f1iter := range f1valiter.GovernedCatalogTarget.Inputs {
+var f1valf25f1elem string
+f1valf25f1elem = *f1valf25f1iter
+f1valf25f1 = append(f1valf25f1, &f1valf25f1elem)
 }
-f1valf19.Inputs = f1valf19f1
+f1valf25.Inputs = f1valf25f1
 }
 if f1valiter.GovernedCatalogTarget.Name != nil {
-f1valf19.Name = f1valiter.GovernedCatalogTarget.Name
+f1valf25.Name = f1valiter.GovernedCatalogTarget.Name
 }
 if f1valiter.GovernedCatalogTarget.PartitionKeys != nil {
-f1valf19f3 := [][]*string{}
-for _, f1valf19f3iter := range f1valiter.GovernedCatalogTarget.PartitionKeys {
-f1valf19f3elem := []*string{}
-for _, f1valf19f3elemiter := range f1valf19f3iter {
-var f1valf19f3elemelem string
-f1valf19f3elemelem = *f1valf19f3elemiter
-f1valf19f3elem = append(f1valf19f3elem, &f1valf19f3elemelem)
+f1valf25f3 := [][]*string{}
+for _, f1valf25f3iter := range f1valiter.GovernedCatalogTarget.PartitionKeys {
+f1valf25f3elem := []*string{}
+for _, f1valf25f3elemiter := range f1valf25f3iter {
+var f1valf25f3elemelem string
+f1valf25f3elemelem = *f1valf25f3elemiter
+f1valf25f3elem = append(f1valf25f3elem, &f1valf25f3elemelem)
 }
-f1valf19f3 = append(f1valf19f3, f1valf19f3elem)
+f1valf25f3 = append(f1valf25f3, f1valf25f3elem)
 }
-f1valf19.PartitionKeys = f1valf19f3
+f1valf25.PartitionKeys = f1valf25f3
 }
 if f1valiter.GovernedCatalogTarget.SchemaChangePolicy != nil {
-f1valf19f4 := &svcapitypes.CatalogSchemaChangePolicy{}
+f1valf25f4 := &svcapitypes.CatalogSchemaChangePolicy{}
 if f1valiter.GovernedCatalogTarget.SchemaChangePolicy.EnableUpdateCatalog != nil {
-f1valf19f4.EnableUpdateCatalog = f1valiter.GovernedCatalogTarget.SchemaChangePolicy.EnableUpdateCatalog
+f1valf25f4.EnableUpdateCatalog = f1valiter.GovernedCatalogTarget.SchemaChangePolicy.EnableUpdateCatalog
 }
 if f1valiter.GovernedCatalogTarget.SchemaChangePolicy.UpdateBehavior != nil {
-f1valf19f4.UpdateBehavior = f1valiter.GovernedCatalogTarget.SchemaChangePolicy.UpdateBehavior
+f1valf25f4.UpdateBehavior = f1valiter.GovernedCatalogTarget.SchemaChangePolicy.UpdateBehavior
 }
-f1valf19.SchemaChangePolicy = f1valf19f4
+f1valf25.SchemaChangePolicy = f1valf25f4
 }
 if f1valiter.GovernedCatalogTarget.Table != nil {
-f1valf19.Table = f1valiter.GovernedCatalogTarget.Table
+f1valf25.Table = f1valiter.GovernedCatalogTarget.Table
 }
-f1val.GovernedCatalogTarget = f1valf19
+f1val.GovernedCatalogTarget = f1valf25
 }
 if f1valiter.JDBCConnectorSource != nil {
-f1valf20 := &svcapitypes.JDBCConnectorSource{}
+f1valf26 := &svcapitypes.JDBCConnectorSource{}
 if f1valiter.JDBCConnectorSource.AdditionalOptions != nil {
-f1valf20f0 := &svcapitypes.JDBCConnectorOptions{}
+f1valf26f0 := &svcapitypes.JDBCConnectorOptions{}
 if f1valiter.JDBCConnectorSource.AdditionalOptions.DataTypeMapping != nil {
-f1valf20f0f0 := map[string]*string{}
-for f1valf20f0f0key, f1valf20f0f0valiter := range f1valiter.JDBCConnectorSource.AdditionalOptions.DataTypeMapping {
-var f1valf20f0f0val string
-f1valf20f0f0val = *f1valf20f0f0valiter
-f1valf20f0f0[f1valf20f0f0key] = &f1valf20f0f0val
+f1valf26f0f0 := map[string]*string{}
+for f1valf26f0f0key, f1valf26f0f0valiter := range f1valiter.JDBCConnectorSource.AdditionalOptions.DataTypeMapping {
+var f1valf26f0f0val string
+f1valf26f0f0val = *f1valf26f0f0valiter
+f1valf26f0f0[f1valf26f0f0key] = &f1valf26f0f0val
 }
-f1valf20f0.DataTypeMapping = f1valf20f0f0
+f1valf26f0.DataTypeMapping = f1valf26f0f0
 }
 if f1valiter.JDBCConnectorSource.AdditionalOptions.FilterPredicate != nil {
-f1valf20f0.FilterPredicate = f1valiter.JDBCConnectorSource.AdditionalOptions.FilterPredicate
+f1valf26f0.FilterPredicate = f1valiter.JDBCConnectorSource.AdditionalOptions.FilterPredicate
 }
 if f1valiter.JDBCConnectorSource.AdditionalOptions.JobBookmarkKeys != nil {
-f1valf20f0f2 := []*string{}
-for _, f1valf20f0f2iter := range f1valiter.JDBCConnectorSource.AdditionalOptions.JobBookmarkKeys {
-var f1valf20f0f2elem string
-f1valf20f0f2elem = *f1valf20f0f2iter
-f1valf20f0f2 = append(f1valf20f0f2, &f1valf20f0f2elem)
+f1valf26f0f2 := []*string{}
+for _, f1valf26f0f2iter := range f1valiter.JDBCConnectorSource.AdditionalOptions.JobBookmarkKeys {
+var f1valf26f0f2elem string
+f1valf26f0f2elem = *f1valf26f0f2iter
+f1valf26f0f2 = append(f1valf26f0f2, &f1valf26f0f2elem)
 }
-f1valf20f0.JobBookmarkKeys = f1valf20f0f2
+f1valf26f0.JobBookmarkKeys = f1valf26f0f2
 }
 if f1valiter.JDBCConnectorSource.AdditionalOptions.JobBookmarkKeysSortOrder != nil {
-f1valf20f0.JobBookmarkKeysSortOrder = f1valiter.JDBCConnectorSource.AdditionalOptions.JobBookmarkKeysSortOrder
+f1valf26f0.JobBookmarkKeysSortOrder = f1valiter.JDBCConnectorSource.AdditionalOptions.JobBookmarkKeysSortOrder
 }
 if f1valiter.JDBCConnectorSource.AdditionalOptions.LowerBound != nil {
-f1valf20f0.LowerBound = f1valiter.JDBCConnectorSource.AdditionalOptions.LowerBound
+f1valf26f0.LowerBound = f1valiter.JDBCConnectorSource.AdditionalOptions.LowerBound
 }
 if f1valiter.JDBCConnectorSource.AdditionalOptions.NumPartitions != nil {
-f1valf20f0.NumPartitions = f1valiter.JDBCConnectorSource.AdditionalOptions.NumPartitions
+f1valf26f0.NumPartitions = f1valiter.JDBCConnectorSource.AdditionalOptions.NumPartitions
 }
 if f1valiter.JDBCConnectorSource.AdditionalOptions.PartitionColumn != nil {
-f1valf20f0.PartitionColumn = f1valiter.JDBCConnectorSource.AdditionalOptions.PartitionColumn
+f1valf26f0.PartitionColumn = f1valiter.JDBCConnectorSource.AdditionalOptions.PartitionColumn
 }
 if f1valiter.JDBCConnectorSource.AdditionalOptions.UpperBound != nil {
-f1valf20f0.UpperBound = f1valiter.JDBCConnectorSource.AdditionalOptions.UpperBound
+f1valf26f0.UpperBound = f1valiter.JDBCConnectorSource.AdditionalOptions.UpperBound
 }
-f1valf20.AdditionalOptions = f1valf20f0
+f1valf26.AdditionalOptions = f1valf26f0
 }
 if f1valiter.JDBCConnectorSource.ConnectionName != nil {
-f1valf20.ConnectionName = f1valiter.JDBCConnectorSource.ConnectionName
+f1valf26.ConnectionName = f1valiter.JDBCConnectorSource.ConnectionName
 }
 if f1valiter.JDBCConnectorSource.ConnectionTable != nil {
-f1valf20.ConnectionTable = f1valiter.JDBCConnectorSource.ConnectionTable
+f1valf26.ConnectionTable = f1valiter.JDBCConnectorSource.ConnectionTable
 }
 if f1valiter.JDBCConnectorSource.ConnectionType != nil {
-f1valf20.ConnectionType = f1valiter.JDBCConnectorSource.ConnectionType
+f1valf26.ConnectionType = f1valiter.JDBCConnectorSource.ConnectionType
 }
 if f1valiter.JDBCConnectorSource.ConnectorName != nil {
-f1valf20.ConnectorName = f1valiter.JDBCConnectorSource.ConnectorName
+f1valf26.ConnectorName = f1valiter.JDBCConnectorSource.ConnectorName
 }
 if f1valiter.JDBCConnectorSource.Name != nil {
-f1valf20.Name = f1valiter.JDBCConnectorSource.Name
+f1valf26.Name = f1valiter.JDBCConnectorSource.Name
 }
 if f1valiter.JDBCConnectorSource.OutputSchemas != nil {
-f1valf20f6 := []*svcapitypes.GlueSchema{}
-for _, f1valf20f6iter := range f1valiter.JDBCConnectorSource.OutputSchemas {
-f1valf20f6elem := &svcapitypes.GlueSchema{}
-if f1valf20f6iter.Columns != nil {
-f1valf20f6elemf0 := []*svcapitypes.GlueStudioSchemaColumn{}
-for _, f1valf20f6elemf0iter := range f1valf20f6iter.Columns {
-f1valf20f6elemf0elem := &svcapitypes.GlueStudioSchemaColumn{}
-if f1valf20f6elemf0iter.Name != nil {
-f1valf20f6elemf0elem.Name = f1valf20f6elemf0iter.Name
+f1valf26f6 := []*svcapitypes.GlueSchema{}
+for _, f1valf26f6iter := range f1valiter.JDBCConnectorSource.OutputSchemas {
+f1valf26f6elem := &svcapitypes.GlueSchema{}
+if f1valf26f6iter.Columns != nil {
+f1valf26f6elemf0 := []*svcapitypes.GlueStudioSchemaColumn{}
+for _, f1valf26f6elemf0iter := range f1valf26f6iter.Columns {
+f1valf26f6elemf0elem := &svcapitypes.GlueStudioSchemaColumn{}
+if f1valf26f6elemf0iter.Name != nil {
+f1valf26f6elemf0elem.Name = f1valf26f6elemf0iter.Name
 }
-if f1valf20f6elemf0iter.Type != nil {
-f1valf20f6elemf0elem.Type = f1valf20f6elemf0iter.Type
+if f1valf26f6elemf0iter.Type != nil {
+f1valf26f6elemf0elem.Type = f1valf26f6elemf0iter.Type
 }
-f1valf20f6elemf0 = append(f1valf20f6elemf0, f1valf20f6elemf0elem)
+f1valf26f6elemf0 = append(f1valf26f6elemf0, f1valf26f6elemf0elem)
 }
-f1valf20f6elem.Columns = f1valf20f6elemf0
+f1valf26f6elem.Columns = f1valf26f6elemf0
 }
-f1valf20f6 = append(f1valf20f6, f1valf20f6elem)
+f1valf26f6 = append(f1valf26f6, f1valf26f6elem)
 }
-f1valf20.OutputSchemas = f1valf20f6
+f1valf26.OutputSchemas = f1valf26f6
 }
 if f1valiter.JDBCConnectorSource.Query != nil {
-f1valf20.Query = f1valiter.JDBCConnectorSource.Query
+f1valf26.Query = f1valiter.JDBCConnectorSource.Query
 }
-f1val.JDBCConnectorSource = f1valf20
+f1val.JDBCConnectorSource = f1valf26
 }
 if f1valiter.JDBCConnectorTarget != nil {
-f1valf21 := &svcapitypes.JDBCConnectorTarget{}
+f1valf27 := &svcapitypes.JDBCConnectorTarget{}
 if f1valiter.JDBCConnectorTarget.AdditionalOptions != nil {
-f1valf21f0 := map[string]*string{}
-for f1valf21f0key, f1valf21f0valiter := range f1valiter.JDBCConnectorTarget.AdditionalOptions {
-var f1valf21f0val string
-f1valf21f0val = *f1valf21f0valiter
-f1valf21f0[f1valf21f0key] = &f1valf21f0val
+f1valf27f0 := map[string]*string{}
+for f1valf27f0key, f1valf27f0valiter := range f1valiter.JDBCConnectorTarget.AdditionalOptions {
+var f1valf27f0val string
+f1valf27f0val = *f1valf27f0valiter
+f1valf27f0[f1valf27f0key] = &f1valf27f0val
 }
-f1valf21.AdditionalOptions = f1valf21f0
+f1valf27.AdditionalOptions = f1valf27f0
 }
 if f1valiter.JDBCConnectorTarget.ConnectionName != nil {
-f1valf21.ConnectionName = f1valiter.JDBCConnectorTarget.ConnectionName
+f1valf27.ConnectionName = f1valiter.JDBCConnectorTarget.ConnectionName
 }
 if f1valiter.JDBCConnectorTarget.ConnectionTable != nil {
-f1valf21.ConnectionTable = f1valiter.JDBCConnectorTarget.ConnectionTable
+f1valf27.ConnectionTable = f1valiter.JDBCConnectorTarget.ConnectionTable
 }
 if f1valiter.JDBCConnectorTarget.ConnectionType != nil {
-f1valf21.ConnectionType = f1valiter.JDBCConnectorTarget.ConnectionType
+f1valf27.ConnectionType = f1valiter.JDBCConnectorTarget.ConnectionType
 }
 if f1valiter.JDBCConnectorTarget.ConnectorName != nil {
-f1valf21.ConnectorName = f1valiter.JDBCConnectorTarget.ConnectorName
+f1valf27.ConnectorName = f1valiter.JDBCConnectorTarget.ConnectorName
 }
 if f1valiter.JDBCConnectorTarget.Inputs != nil {
-f1valf21f5 := []*string{}
-for _, f1valf21f5iter := range f1valiter.JDBCConnectorTarget.Inputs {
-var f1valf21f5elem string
-f1valf21f5elem = *f1valf21f5iter
-f1valf21f5 = append(f1valf21f5, &f1valf21f5elem)
+f1valf27f5 := []*string{}
+for _, f1valf27f5iter := range f1valiter.JDBCConnectorTarget.Inputs {
+var f1valf27f5elem string
+f1valf27f5elem = *f1valf27f5iter
+f1valf27f5 = append(f1valf27f5, &f1valf27f5elem)
 }
-f1valf21.Inputs = f1valf21f5
+f1valf27.Inputs = f1valf27f5
 }
 if f1valiter.JDBCConnectorTarget.Name != nil {
-f1valf21.Name = f1valiter.JDBCConnectorTarget.Name
+f1valf27.Name = f1valiter.JDBCConnectorTarget.Name
 }
 if f1valiter.JDBCConnectorTarget.OutputSchemas != nil {
-f1valf21f7 := []*svcapitypes.GlueSchema{}
-for _, f1valf21f7iter := range f1valiter.JDBCConnectorTarget.OutputSchemas {
-f1valf21f7elem := &svcapitypes.GlueSchema{}
-if f1valf21f7iter.Columns != nil {
-f1valf21f7elemf0 := []*svcapitypes.GlueStudioSchemaColumn{}
-for _, f1valf21f7elemf0iter := range f1valf21f7iter.Columns {
-f1valf21f7elemf0elem := &svcapitypes.GlueStudioSchemaColumn{}
-if f1valf21f7elemf0iter.Name != nil {
-f1valf21f7elemf0elem.Name = f1valf21f7elemf0iter.Name
+f1valf27f7 := []*svcapitypes.GlueSchema{}
+for _, f1valf27f7iter := range f1valiter.JDBCConnectorTarget.OutputSchemas {
+f1valf27f7elem := &svcapitypes.GlueSchema{}
+if f1valf27f7iter.Columns != nil {
+f1valf27f7elemf0 := []*svcapitypes.GlueStudioSchemaColumn{}
+for _, f1valf27f7elemf0iter := range f1valf27f7iter.Columns {
+f1valf27f7elemf0elem := &svcapitypes.GlueStudioSchemaColumn{}
+if f1valf27f7elemf0iter.Name != nil {
+f1valf27f7elemf0elem.Name = f1valf27f7elemf0iter.Name
 }
-if f1valf21f7elemf0iter.Type != nil {
-f1valf21f7elemf0elem.Type = f1valf21f7elemf0iter.Type
+if f1valf27f7elemf0iter.Type != nil {
+f1valf27f7elemf0elem.Type = f1valf27f7elemf0iter.Type
 }
-f1valf21f7elemf0 = append(f1valf21f7elemf0, f1valf21f7elemf0elem)
+f1valf27f7elemf0 = append(f1valf27f7elemf0, f1valf27f7elemf0elem)
 }
-f1valf21f7elem.Columns = f1valf21f7elemf0
+f1valf27f7elem.Columns = f1valf27f7elemf0
 }
-f1valf21f7 = append(f1valf21f7, f1valf21f7elem)
+f1valf27f7 = append(f1valf27f7, f1valf27f7elem)
 }
-f1valf21.OutputSchemas = f1valf21f7
+f1valf27.OutputSchemas = f1valf27f7
 }
-f1val.JDBCConnectorTarget = f1valf21
+f1val.JDBCConnectorTarget = f1valf27
 }
 if f1valiter.Join != nil {
-f1valf22 := &svcapitypes.Join{}
+f1valf28 := &svcapitypes.Join{}
 if f1valiter.Join.Columns != nil {
-f1valf22f0 := []*svcapitypes.JoinColumn{}
-for _, f1valf22f0iter := range f1valiter.Join.Columns {
-f1valf22f0elem := &svcapitypes.JoinColumn{}
-if f1valf22f0iter.From != nil {
-f1valf22f0elem.From = f1valf22f0iter.From
-}
-if f1valf22f0iter.Keys != nil {
-f1valf22f0elemf1 := [][]*string{}
-for _, f1valf22f0elemf1iter := range f1valf22f0iter.Keys {
-f1valf22f0elemf1elem := []*string{}
-for _, f1valf22f0elemf1elemiter := range f1valf22f0elemf1iter {
-var f1valf22f0elemf1elemelem string
-f1valf22f0elemf1elemelem = *f1valf22f0elemf1elemiter
-f1valf22f0elemf1elem = append(f1valf22f0elemf1elem, &f1valf22f0elemf1elemelem)
+f1valf28f0 := []*svcapitypes.JoinColumn{}
+for _, f1valf28f0iter := range f1valiter.Join.Columns {
+f1valf28f0elem := &svcapitypes.JoinColumn{}
+if f1valf28f0iter.From != nil {
+f1valf28f0elem.From = f1valf28f0iter.From
+}
+if f1valf28f0iter.Keys != nil {
+f1valf28f0elemf1 := [][]*string{}
+for _, f1valf28f0elemf1iter := range f1valf28f0iter.Keys {
+f1valf28f0elemf1elem := []*string{}
+for _, f1valf28f0elemf1elemiter := range f1valf28f0elemf1iter {
+var f1valf28f0elemf1elemelem string
+f1valf28f0elemf1elemelem = *f1valf28f0elemf1elemiter
+f1valf28f0elemf1elem = append(f1valf28f0elemf1elem, &f1valf28f0elemf1elemelem)
 }
-f1valf22f0elemf1 = append(f1valf22f0elemf1, f1valf22f0elemf1elem)
+f1valf28f0elemf1 = append(f1valf28f0elemf1, f1valf28f0elemf1elem)
 }
-f1valf22f0elem.Keys = f1valf22f0elemf1
+f1valf28f0elem.Keys = f1valf28f0elemf1
 }
-f1valf22f0 = append(f1valf22f0, f1valf22f0elem)
+f1valf28f0 = append(f1valf28f0, f1valf28f0elem)
 }
-f1valf22.Columns = f1valf22f0
+f1valf28.Columns = f1valf28f0
 }
 if f1valiter.Join.Inputs != nil {
-f1valf22f1 := []*string{}
-for _, f1valf22f1iter := range f1valiter.Join.Inputs {
-var f1valf22f1elem string
-f1valf22f1elem = *f1valf22f1iter
-f1valf22f1 = append(f1valf22f1, &f1valf22f1elem)
+f1valf28f1 := []*string{}
+for _, f1valf28f1iter := range f1valiter.Join.Inputs {
+var f1valf28f1elem string
+f1valf28f1elem = *f1valf28f1iter
+f1valf28f1 = append(f1valf28f1, &f1valf28f1elem)
 }
-f1valf22.Inputs = f1valf22f1
+f1valf28.Inputs = f1valf28f1
 }
 if f1valiter.Join.JoinType != nil {
-f1valf22.JoinType = f1valiter.Join.JoinType
+f1valf28.JoinType = f1valiter.Join.JoinType
 }
 if f1valiter.Join.Name != nil {
-f1valf22.Name = f1valiter.Join.Name
+f1valf28.Name = f1valiter.Join.Name
 }
-f1val.Join = f1valf22
+f1val.Join = f1valf28
 }
 if f1valiter.Merge != nil {
-f1valf23 := &svcapitypes.Merge{}
+f1valf29 := &svcapitypes.Merge{}
 if f1valiter.Merge.Inputs != nil {
-f1valf23f0 := []*string{}
-for _, f1valf23f0iter := range f1valiter.Merge.Inputs {
-var f1valf23f0elem string
-f1valf23f0elem = *f1valf23f0iter
-f1valf23f0 = append(f1valf23f0, &f1valf23f0elem)
+f1valf29f0 := []*string{}
+for _, f1valf29f0iter := range f1valiter.Merge.Inputs {
+var f1valf29f0elem string
+f1valf29f0elem = *f1valf29f0iter
+f1valf29f0 = append(f1valf29f0, &f1valf29f0elem)
 }
-f1valf23.Inputs = f1valf23f0
+f1valf29.Inputs = f1valf29f0
 }
 if f1valiter.Merge.Name != nil {
-f1valf23.Name = f1valiter.Merge.Name
+f1valf29.Name = f1valiter.Merge.Name
 }
 if f1valiter.Merge.PrimaryKeys != nil {
-f1valf23f2 := [][]*string{}
-for _, f1valf23f2iter := range f1valiter.Merge.PrimaryKeys {
-f1valf23f2elem := []*string{}
-for _, f1valf23f2elemiter := range f1valf23f2iter {
-var f1valf23f2elemelem string
-f1valf23f2elemelem = *f1valf23f2elemiter
-f1valf23f2elem = append(f1valf23f2elem, &f1valf23f2elemelem)
+f1valf29f2 := [][]*string{}
+for _, f1valf29f2iter := range f1valiter.Merge.PrimaryKeys {
+f1valf29f2elem := []*string{}
+for _, f1valf29f2elemiter := range f1valf29f2iter {
+var f1valf29f2elemelem string
+f1valf29f2elemelem = *f1valf29f2elemiter
+f1valf29f2elem = append(f1valf29f2elem, &f1valf29f2elemelem)
 }
-f1valf23f2 = append(f1valf23f2, f1valf23f2elem)
+f1valf29f2 = append(f1valf29f2, f1valf29f2elem)
 }
-f1valf23.PrimaryKeys = f1valf23f2
+f1valf29.PrimaryKeys = f1valf29f2
 }
 if f1valiter.Merge.Source != nil {
-f1valf23.Source = f1valiter.Merge.Source
+f1valf29.Source = f1valiter.Merge.Source
 }
-f1val.Merge = f1valf23
+f1val.Merge = f1valf29
 }
 if f1valiter.MicrosoftSQLServerCatalogSource != nil {
-f1valf24 := &svcapitypes.MicrosoftSQLServerCatalogSource{}
+f1valf30 := &svcapitypes.MicrosoftSQLServerCatalogSource{}
 if f1valiter.MicrosoftSQLServerCatalogSource.Database != nil {
-f1valf24.Database = f1valiter.MicrosoftSQLServerCatalogSource.Database
+f1valf30.Database = f1valiter.MicrosoftSQLServerCatalogSource.Database
 }
 if f1valiter.MicrosoftSQLServerCatalogSource.Name != nil {
-f1valf24.Name = f1valiter.MicrosoftSQLServerCatalogSource.Name
+f1valf30.Name = f1valiter.MicrosoftSQLServerCatalogSource.Name
 }
 if f1valiter.MicrosoftSQLServerCatalogSource.Table != nil {
-f1valf24.Table = f1valiter.MicrosoftSQLServerCatalogSource.Table
+f1valf30.Table = f1valiter.MicrosoftSQLServerCatalogSource.Table
 }
-f1val.MicrosoftSQLServerCatalogSource = f1valf24
+f1val.MicrosoftSQLServerCatalogSource = f1valf30
 }
 if f1valiter.MicrosoftSQLServerCatalogTarget != nil {
-f1valf25 := &svcapitypes.MicrosoftSQLServerCatalogTarget{}
+f1valf31 := &svcapitypes.MicrosoftSQLServerCatalogTarget{}
 if f1valiter.MicrosoftSQLServerCatalogTarget.Database != nil {
-f1valf25.Database = f1valiter.MicrosoftSQLServerCatalogTarget.Database
+f1valf31.Database = f1valiter.MicrosoftSQLServerCatalogTarget.Database
 }
 if f1valiter.MicrosoftSQLServerCatalogTarget.Inputs != nil {
-f1valf25f1 := []*string{}
-for _, f1valf25f1iter := range f1valiter.MicrosoftSQLServerCatalogTarget.Inputs {
-var f1valf25f1elem string
-f1valf25f1elem = *f1valf25f1iter
-f1valf25f1 = append(f1valf25f1, &f1valf25f1elem)
+f1valf31f1 := []*string{}
+for _, f1valf31f1iter := range f1valiter.MicrosoftSQLServerCatalogTarget.Inputs {
+var f1valf31f1elem string
+f1valf31f1elem = *f1valf31f1iter
+f1valf31f1 = append(f1valf31f1, &f1valf31f1elem)
 }
-f1valf25.Inputs = f1valf25f1
+f1valf31.Inputs = f1valf31f1
 }
 if f1valiter.MicrosoftSQLServerCatalogTarget.Name != nil {
-f1valf25.Name = f1valiter.MicrosoftSQLServerCatalogTarget.Name
+f1valf31.Name = f1valiter.MicrosoftSQLServerCatalogTarget.Name
 }
 if f1valiter.MicrosoftSQLServerCatalogTarget.Table != nil {
-f1valf25.Table = f1valiter.MicrosoftSQLServerCatalogTarget.Table
+f1valf31.Table = f1valiter.MicrosoftSQLServerCatalogTarget.Table
 }
-f1val.MicrosoftSQLServerCatalogTarget = f1valf25
+f1val.MicrosoftSQLServerCatalogTarget = f1valf31
 }
 if f1valiter.MySQLCatalogSource != nil {
-f1valf26 := &svcapitypes.MySQLCatalogSource{}
+f1valf32 := &svcapitypes.MySQLCatalogSource{}
 if f1valiter.MySQLCatalogSource.Database != nil {
-f1valf26.Database = f1valiter.MySQLCatalogSource.Database
+f1valf32.Database = f1valiter.MySQLCatalogSource.Database
 }
 if f1valiter.MySQLCatalogSource.Name != nil {
-f1valf26.Name = f1valiter.MySQLCatalogSource.Name
+f1valf32.Name = f1valiter.MySQLCatalogSource.Name
 }
 if f1valiter.MySQLCatalogSource.Table != nil {
-f1valf26.Table = f1valiter.MySQLCatalogSource.Table
+f1valf32.Table = f1valiter.MySQLCatalogSource.Table
 }
-f1val.MySQLCatalogSource = f1valf26
+f1val.MySQLCatalogSource = f1valf32
 }
 if f1valiter.MySQLCatalogTarget != nil {
-f1valf27 := &svcapitypes.MySQLCatalogTarget{}
+f1valf33 := &svcapitypes.MySQLCatalogTarget{}
 if f1valiter.MySQLCatalogTarget.Database != nil {
-f1valf27.Database = f1valiter.MySQLCatalogTarget.Database
+f1valf33.Database = f1valiter.MySQLCatalogTarget.Database
 }
 if f1valiter.MySQLCatalogTarget.Inputs != nil {
-f1valf27f1 := []*string{}
-for _, f1valf27f1iter := range f1valiter.MySQLCatalogTarget.Inputs {
-var f1valf27f1elem string
-f1valf27f1elem = *f1valf27f1iter
-f1valf27f1 = append(f1valf27f1, &f1valf27f1elem)
+f1valf33f1 := []*string{}
+for _, f1valf33f1iter := range f1valiter.MySQLCatalogTarget.Inputs {
+var f1valf33f1elem string
+f1valf33f1elem = *f1valf33f1iter
+f1valf33f1 = append(f1valf33f1, &f1valf33f1elem)
 }
-f1valf27.Inputs = f1valf27f1
+f1valf33.Inputs = f1valf33f1
 }
 if f1valiter.MySQLCatalogTarget.Name != nil {
-f1valf27.Name = f1valiter.MySQLCatalogTarget.Name
+f1valf33.Name = f1valiter.MySQLCatalogTarget.Name
 }
 if f1valiter.MySQLCatalogTarget.Table != nil {
-f1valf27.Table = f1valiter.MySQLCatalogTarget.Table
+f1valf33.Table = f1valiter.MySQLCatalogTarget.Table
 }
-f1val.MySQLCatalogTarget = f1valf27
+f1val.MySQLCatalogTarget = f1valf33
 }
 if f1valiter.OracleSQLCatalogSource != nil {
-f1valf28 := &svcapitypes.OracleSQLCatalogSource{}
+f1valf34 := &svcapitypes.OracleSQLCatalogSource{}
 if f1valiter.OracleSQLCatalogSource.Database != nil {
-f1valf28.Database = f1valiter.OracleSQLCatalogSource.Database
+f1valf34.Database = f1valiter.OracleSQLCatalogSource.Database
 }
 if f1valiter.OracleSQLCatalogSource.Name != nil {
-f1valf28.Name = f1valiter.OracleSQLCatalogSource.Name
+f1valf34.Name = f1valiter.OracleSQLCatalogSource.Name
 }
 if f1valiter.OracleSQLCatalogSource.Table != nil {
-f1valf28.Table = f1valiter.OracleSQLCatalogSource.Table
+f1valf34.Table = f1valiter.OracleSQLCatalogSource.Table
 }
-f1val.OracleSQLCatalogSource = f1valf28
+f1val.OracleSQLCatalogSource = f1valf34
 }
 if f1valiter.OracleSQLCatalogTarget != nil {
-f1valf29 := &svcapitypes.OracleSQLCatalogTarget{}
+f1valf35 := &svcapitypes.OracleSQLCatalogTarget{}
 if f1valiter.OracleSQLCatalogTarget.Database != nil {
-f1valf29.Database = f1valiter.OracleSQLCatalogTarget.Database
+f1valf35.Database = f1valiter.OracleSQLCatalogTarget.Database
 }
 if f1valiter.OracleSQLCatalogTarget.Inputs != nil {
-f1valf29f1 := []*string{}
-for _, f1valf29f1iter := range f1valiter.OracleSQLCatalogTarget.Inputs {
-var f1valf29f1elem string
-f1valf29f1elem = *f1valf29f1iter
-f1valf29f1 = append(f1valf29f1, &f1valf29f1elem)
+f1valf35f1 := []*string{}
+for _, f1valf35f1iter := range f1valiter.OracleSQLCatalogTarget.Inputs {
+var f1valf35f1elem string
+f1valf35f1elem = *f1valf35f1iter
+f1valf35f1 = append(f1valf35f1, &f1valf35f1elem)
 }
-f1valf29.Inputs = f1valf29f1
+f1valf35.Inputs = f1valf35f1
 }
 if f1valiter.OracleSQLCatalogTarget.Name != nil {
-f1valf29.Name = f1valiter.OracleSQLCatalogTarget.Name
+f1valf35.Name = f1valiter.OracleSQLCatalogTarget.Name
 }
 if f1valiter.OracleSQLCatalogTarget.Table != nil {
-f1valf29.Table = f1valiter.OracleSQLCatalogTarget.Table
+f1valf35.Table = f1valiter.OracleSQLCatalogTarget.Table
 }
-f1val.OracleSQLCatalogTarget = f1valf29
+f1val.OracleSQLCatalogTarget = f1valf35
 }
 if f1valiter.PIIDetection != nil {
-f1valf30 := &svcapitypes.PIIDetection{}
+f1valf36 := &svcapitypes.PIIDetection{}
 if f1valiter.PIIDetection.EntityTypesToDetect != nil {
-f1valf30f0 := []*string{}
-for _, f1valf30f0iter := range f1valiter.PIIDetection.EntityTypesToDetect {
-var f1valf30f0elem string
-f1valf30f0elem = *f1valf30f0iter
-f1valf30f0 = append(f1valf30f0, &f1valf30f0elem)
+f1valf36f0 := []*string{}
+for _, f1valf36f0iter := range f1valiter.PIIDetection.EntityTypesToDetect {
+var f1valf36f0elem string
+f1valf36f0elem = *f1valf36f0iter
+f1valf36f0 = append(f1valf36f0, &f1valf36f0elem)
 }
-f1valf30.EntityTypesToDetect = f1valf30f0
+f1valf36.EntityTypesToDetect = f1valf36f0
 }
 if f1valiter.PIIDetection.Inputs != nil {
-f1valf30f1 := []*string{}
-for _, f1valf30f1iter := range f1valiter.PIIDetection.Inputs {
-var f1valf30f1elem string
-f1valf30f1elem = *f1valf30f1iter
-f1valf30f1 = append(f1valf30f1, &f1valf30f1elem)
+f1valf36f1 := []*string{}
+for _, f1valf36f1iter := range f1valiter.PIIDetection.Inputs {
+var f1valf36f1elem string
+f1valf36f1elem = *f1valf36f1iter
+f1valf36f1 = append(f1valf36f1, &f1valf36f1elem)
 }
-f1valf30.Inputs = f1valf30f1
+f1valf36.Inputs = f1valf36f1
 }
 if f1valiter.PIIDetection.MaskValue != nil {
-f1valf30.MaskValue = f1valiter.PIIDetection.MaskValue
+f1valf36.MaskValue = f1valiter.PIIDetection.MaskValue
 }
 if f1valiter.PIIDetection.Name != nil {
-f1valf30.Name = f1valiter.PIIDetection.Name
+f1valf36.Name = f1valiter.PIIDetection.Name
 }
 if f1valiter.PIIDetection.OutputColumnName != nil {
-f1valf30.OutputColumnName = f1valiter.PIIDetection.OutputColumnName
+f1valf36.OutputColumnName = f1valiter.PIIDetection.OutputColumnName
 }
 if f1valiter.PIIDetection.PiiType != nil {
-f1valf30.PiiType = f1valiter.PIIDetection.PiiType
+f1valf36.PiiType = f1valiter.PIIDetection.PiiType
 }
 if f1valiter.PIIDetection.SampleFraction != nil {
-f1valf30.SampleFraction = f1valiter.PIIDetection.SampleFraction
+f1valf36.SampleFraction = f1valiter.PIIDetection.SampleFraction
 }
 if f1valiter.PIIDetection.ThresholdFraction != nil {
-f1valf30.ThresholdFraction = f1valiter.PIIDetection.ThresholdFraction
+f1valf36.ThresholdFraction = f1valiter.PIIDetection.ThresholdFraction
 }
-f1val.PIIDetection = f1valf30
+f1val.PIIDetection = f1valf36
 }
 if f1valiter.PostgreSQLCatalogSource != nil {
-f1valf31 := &svcapitypes.PostgreSQLCatalogSource{}
+f1valf37 := &svcapitypes.PostgreSQLCatalogSource{}
 if f1valiter.PostgreSQLCatalogSource.Database != nil {
-f1valf31.Database = f1valiter.PostgreSQLCatalogSource.Database
+f1valf37.Database = f1valiter.PostgreSQLCatalogSource.Database
 }
 if f1valiter.PostgreSQLCatalogSource.Name != nil {
-f1valf31.Name = f1valiter.PostgreSQLCatalogSource.Name
+f1valf37.Name = f1valiter.PostgreSQLCatalogSource.Name
 }
 if f1valiter.PostgreSQLCatalogSource.Table != nil {
-f1valf31.Table = f1valiter.PostgreSQLCatalogSource.Table
+f1valf37.Table = f1valiter.PostgreSQLCatalogSource.Table
 }
-f1val.PostgreSQLCatalogSource = f1valf31
+f1val.PostgreSQLCatalogSource = f1valf37
 }
 if f1valiter.PostgreSQLCatalogTarget != nil {
-f1valf32 := &svcapitypes.PostgreSQLCatalogTarget{}
+f1valf38 := &svcapitypes.PostgreSQLCatalogTarget{}
 if f1valiter.PostgreSQLCatalogTarget.Database != nil {
-f1valf32.Database = f1valiter.PostgreSQLCatalogTarget.Database
+f1valf38.Database = f1valiter.PostgreSQLCatalogTarget.Database
 }
 if f1valiter.PostgreSQLCatalogTarget.Inputs != nil {
-f1valf32f1 := []*string{}
-for _, f1valf32f1iter := range f1valiter.PostgreSQLCatalogTarget.Inputs {
-var f1valf32f1elem string
-f1valf32f1elem = *f1valf32f1iter
-f1valf32f1 = append(f1valf32f1, &f1valf32f1elem)
+f1valf38f1 := []*string{}
+for _, f1valf38f1iter := range f1valiter.PostgreSQLCatalogTarget.Inputs {
+var f1valf38f1elem string
+f1valf38f1elem = *f1valf38f1iter
+f1valf38f1 = append(f1valf38f1, &f1valf38f1elem)
 }
-f1valf32.Inputs = f1valf32f1
+f1valf38.Inputs = f1valf38f1
 }
 if f1valiter.PostgreSQLCatalogTarget.Name != nil {
-f1valf32.Name = f1valiter.PostgreSQLCatalogTarget.Name
+f1valf38.Name = f1valiter.PostgreSQLCatalogTarget.Name
 }
 if f1valiter.PostgreSQLCatalogTarget.Table != nil {
-f1valf32.Table = f1valiter.PostgreSQLCatalogTarget.Table
+f1valf38.Table = f1valiter.PostgreSQLCatalogTarget.Table
+}
+f1val.PostgreSQLCatalogTarget = f1valf38
+}
+if f1valiter.Recipe != nil {
+f1valf39 := &svcapitypes.Recipe{}
+if f1valiter.Recipe.Inputs != nil {
+f1valf39f0 := []*string{}
+for _, f1valf39f0iter := range f1valiter.Recipe.Inputs {
+var f1valf39f0elem string
+f1valf39f0elem = *f1valf39f0iter
+f1valf39f0 = append(f1valf39f0, &f1valf39f0elem)
+}
+f1valf39.Inputs = f1valf39f0
+}
+if f1valiter.Recipe.Name != nil {
+f1valf39.Name = f1valiter.Recipe.Name
+}
+if f1valiter.Recipe.RecipeReference != nil {
+f1valf39f2 := &svcapitypes.RecipeReference{}
+if f1valiter.Recipe.RecipeReference.RecipeArn != nil {
+f1valf39f2.RecipeARN = f1valiter.Recipe.RecipeReference.RecipeArn
+}
+if f1valiter.Recipe.RecipeReference.RecipeVersion != nil {
+f1valf39f2.RecipeVersion = f1valiter.Recipe.RecipeReference.RecipeVersion
+}
+f1valf39.RecipeReference = f1valf39f2
 }
-f1val.PostgreSQLCatalogTarget = f1valf32
+f1val.Recipe = f1valf39
 }
 if f1valiter.RedshiftSource != nil {
-f1valf33 := &svcapitypes.RedshiftSource{}
+f1valf40 := &svcapitypes.RedshiftSource{}
 if f1valiter.RedshiftSource.Database != nil {
-f1valf33.Database = f1valiter.RedshiftSource.Database
+f1valf40.Database = f1valiter.RedshiftSource.Database
 }
 if f1valiter.RedshiftSource.Name != nil {
-f1valf33.Name = f1valiter.RedshiftSource.Name
+f1valf40.Name = f1valiter.RedshiftSource.Name
 }
 if f1valiter.RedshiftSource.RedshiftTmpDir != nil {
-f1valf33.RedshiftTmpDir = f1valiter.RedshiftSource.RedshiftTmpDir
+f1valf40.RedshiftTmpDir = f1valiter.RedshiftSource.RedshiftTmpDir
 }
 if f1valiter.RedshiftSource.Table != nil {
-f1valf33.Table = f1valiter.RedshiftSource.Table
+f1valf40.Table = f1valiter.RedshiftSource.Table
 }
 if f1valiter.RedshiftSource.TmpDirIAMRole != nil {
-f1valf33.TmpDirIAMRole = f1valiter.RedshiftSource.TmpDirIAMRole
+f1valf40.TmpDirIAMRole = f1valiter.RedshiftSource.TmpDirIAMRole
 }
-f1val.RedshiftSource = f1valf33
+f1val.RedshiftSource = f1valf40
 }
 if f1valiter.RedshiftTarget != nil {
-f1valf34 := &svcapitypes.RedshiftTarget{}
+f1valf41 := &svcapitypes.RedshiftTarget{}
 if f1valiter.RedshiftTarget.Database != nil {
-f1valf34.Database = f1valiter.RedshiftTarget.Database
+f1valf41.Database = f1valiter.RedshiftTarget.Database
 }
 if f1valiter.RedshiftTarget.Inputs != nil {
-f1valf34f1 := []*string{}
-for _, f1valf34f1iter := range f1valiter.RedshiftTarget.Inputs {
-var f1valf34f1elem string
-f1valf34f1elem = *f1valf34f1iter
-f1valf34f1 = append(f1valf34f1, &f1valf34f1elem)
+f1valf41f1 := []*string{}
+for _, f1valf41f1iter := range f1valiter.RedshiftTarget.Inputs {
+var f1valf41f1elem string
+f1valf41f1elem = *f1valf41f1iter
+f1valf41f1 = append(f1valf41f1, &f1valf41f1elem)
 }
-f1valf34.Inputs = f1valf34f1
+f1valf41.Inputs = f1valf41f1
 }
 if f1valiter.RedshiftTarget.Name != nil {
-f1valf34.Name = f1valiter.RedshiftTarget.Name
+f1valf41.Name = f1valiter.RedshiftTarget.Name
 }
 if f1valiter.RedshiftTarget.RedshiftTmpDir != nil {
-f1valf34.RedshiftTmpDir = f1valiter.RedshiftTarget.RedshiftTmpDir
+f1valf41.RedshiftTmpDir = f1valiter.RedshiftTarget.RedshiftTmpDir
 }
 if f1valiter.RedshiftTarget.Table != nil {
-f1valf34.Table = f1valiter.RedshiftTarget.Table
+f1valf41.Table = f1valiter.RedshiftTarget.Table
 }
 if f1valiter.RedshiftTarget.TmpDirIAMRole != nil {
-f1valf34.TmpDirIAMRole = f1valiter.RedshiftTarget.TmpDirIAMRole
+f1valf41.TmpDirIAMRole = f1valiter.RedshiftTarget.TmpDirIAMRole
 }
 if f1valiter.RedshiftTarget.UpsertRedshiftOptions != nil {
-f1valf34f6 := &svcapitypes.UpsertRedshiftTargetOptions{}
+f1valf41f6 := &svcapitypes.UpsertRedshiftTargetOptions{}
 if f1valiter.RedshiftTarget.UpsertRedshiftOptions.ConnectionName != nil {
-f1valf34f6.ConnectionName = f1valiter.RedshiftTarget.UpsertRedshiftOptions.ConnectionName
+f1valf41f6.ConnectionName = f1valiter.RedshiftTarget.UpsertRedshiftOptions.ConnectionName
 }
 if f1valiter.RedshiftTarget.UpsertRedshiftOptions.TableLocation != nil {
-f1valf34f6.TableLocation = f1valiter.RedshiftTarget.UpsertRedshiftOptions.TableLocation
+f1valf41f6.TableLocation = f1valiter.RedshiftTarget.UpsertRedshiftOptions.TableLocation
 }
 if f1valiter.RedshiftTarget.UpsertRedshiftOptions.UpsertKeys != nil {
-f1valf34f6f2 := []*string{}
-for _, f1valf34f6f2iter := range f1valiter.RedshiftTarget.UpsertRedshiftOptions.UpsertKeys {
-var f1valf34f6f2elem string
-f1valf34f6f2elem = *f1valf34f6f2iter
-f1valf34f6f2 = append(f1valf34f6f2, &f1valf34f6f2elem)
+f1valf41f6f2 := []*string{}
+for _, f1valf41f6f2iter := range f1valiter.RedshiftTarget.UpsertRedshiftOptions.UpsertKeys {
+var f1valf41f6f2elem string
+f1valf41f6f2elem = *f1valf41f6f2iter
+f1valf41f6f2 = append(f1valf41f6f2, &f1valf41f6f2elem)
 }
-f1valf34f6.UpsertKeys = f1valf34f6f2
+f1valf41f6.UpsertKeys = f1valf41f6f2
 }
-f1valf34.UpsertRedshiftOptions = f1valf34f6
+f1valf41.UpsertRedshiftOptions = f1valf41f6
 }
-f1val.RedshiftTarget = f1valf34
+f1val.RedshiftTarget = f1valf41
 }
 if f1valiter.RelationalCatalogSource != nil {
-f1valf35 := &svcapitypes.RelationalCatalogSource{}
+f1valf42 := &svcapitypes.RelationalCatalogSource{}
 if f1valiter.RelationalCatalogSource.Database != nil {
-f1valf35.Database = f1valiter.RelationalCatalogSource.Database
+f1valf42.Database = f1valiter.RelationalCatalogSource.Database
 }
 if f1valiter.RelationalCatalogSource.Name != nil {
-f1valf35.Name = f1valiter.RelationalCatalogSource.Name
+f1valf42.Name = f1valiter.RelationalCatalogSource.Name
 }
 if f1valiter.RelationalCatalogSource.Table != nil {
-f1valf35.Table = f1valiter.RelationalCatalogSource.Table
+f1valf42.Table = f1valiter.RelationalCatalogSource.Table
 }
-f1val.RelationalCatalogSource = f1valf35
+f1val.RelationalCatalogSource = f1valf42
 }
 if f1valiter.RenameField != nil {
-f1valf36 := &svcapitypes.RenameField{}
+f1valf43 := &svcapitypes.RenameField{}
 if f1valiter.RenameField.Inputs != nil {
-f1valf36f0 := []*string{}
-for _, f1valf36f0iter := range f1valiter.RenameField.Inputs {
-var f1valf36f0elem string
-f1valf36f0elem = *f1valf36f0iter
-f1valf36f0 = append(f1valf36f0, &f1valf36f0elem)
+f1valf43f0 := []*string{}
+for _, f1valf43f0iter := range f1valiter.RenameField.Inputs {
+var f1valf43f0elem string
+f1valf43f0elem = *f1valf43f0iter
+f1valf43f0 = append(f1valf43f0, &f1valf43f0elem)
 }
-f1valf36.Inputs = f1valf36f0
+f1valf43.Inputs = f1valf43f0
 }
 if f1valiter.RenameField.Name != nil {
-f1valf36.Name = f1valiter.RenameField.Name
+f1valf43.Name = f1valiter.RenameField.Name
 }
 if f1valiter.RenameField.SourcePath != nil {
-f1valf36f2 := []*string{}
-for _, f1valf36f2iter := range f1valiter.RenameField.SourcePath {
-var f1valf36f2elem string
-f1valf36f2elem = *f1valf36f2iter
-f1valf36f2 = append(f1valf36f2, &f1valf36f2elem)
+f1valf43f2 := []*string{}
+for _, f1valf43f2iter := range f1valiter.RenameField.SourcePath {
+var f1valf43f2elem string
+f1valf43f2elem = *f1valf43f2iter
+f1valf43f2 = append(f1valf43f2, &f1valf43f2elem)
 }
-f1valf36.SourcePath = f1valf36f2
+f1valf43.SourcePath = f1valf43f2
 }
 if f1valiter.RenameField.TargetPath != nil {
-f1valf36f3 := []*string{}
-for _, f1valf36f3iter := range f1valiter.RenameField.TargetPath {
-var f1valf36f3elem string
-f1valf36f3elem = *f1valf36f3iter
-f1valf36f3 = append(f1valf36f3, &f1valf36f3elem)
+f1valf43f3 := []*string{}
+for _, f1valf43f3iter := range f1valiter.RenameField.TargetPath {
+var f1valf43f3elem string
+f1valf43f3elem = *f1valf43f3iter
+f1valf43f3 = append(f1valf43f3, &f1valf43f3elem)
+}
+f1valf43.TargetPath = f1valf43f3
+}
+f1val.RenameField = f1valf43
+}
+if f1valiter.S3CatalogDeltaSource != nil {
+f1valf44 := &svcapitypes.S3CatalogDeltaSource{}
+if f1valiter.S3CatalogDeltaSource.AdditionalDeltaOptions != nil {
+f1valf44f0 := map[string]*string{}
+for f1valf44f0key, f1valf44f0valiter := range f1valiter.S3CatalogDeltaSource.AdditionalDeltaOptions {
+var f1valf44f0val string
+f1valf44f0val = *f1valf44f0valiter
+f1valf44f0[f1valf44f0key] = &f1valf44f0val
+}
+f1valf44.AdditionalDeltaOptions = f1valf44f0
+}
+if f1valiter.S3CatalogDeltaSource.Database != nil {
+f1valf44.Database = f1valiter.S3CatalogDeltaSource.Database
+}
+if f1valiter.S3CatalogDeltaSource.Name != nil {
+f1valf44.Name = f1valiter.S3CatalogDeltaSource.Name
+}
+if f1valiter.S3CatalogDeltaSource.OutputSchemas != nil {
+f1valf44f3 := []*svcapitypes.GlueSchema{}
+for _, f1valf44f3iter := range f1valiter.S3CatalogDeltaSource.OutputSchemas {
+f1valf44f3elem := &svcapitypes.GlueSchema{}
+if f1valf44f3iter.Columns != nil {
+f1valf44f3elemf0 := []*svcapitypes.GlueStudioSchemaColumn{}
+for _, f1valf44f3elemf0iter := range f1valf44f3iter.Columns {
+f1valf44f3elemf0elem := &svcapitypes.GlueStudioSchemaColumn{}
+if f1valf44f3elemf0iter.Name != nil {
+f1valf44f3elemf0elem.Name = f1valf44f3elemf0iter.Name
+}
+if f1valf44f3elemf0iter.Type != nil {
+f1valf44f3elemf0elem.Type = f1valf44f3elemf0iter.Type
+}
+f1valf44f3elemf0 = append(f1valf44f3elemf0, f1valf44f3elemf0elem)
+}
+f1valf44f3elem.Columns = f1valf44f3elemf0
+}
+f1valf44f3 = append(f1valf44f3, f1valf44f3elem)
+}
+f1valf44.OutputSchemas = f1valf44f3
+}
+if f1valiter.S3CatalogDeltaSource.Table != nil {
+f1valf44.Table = f1valiter.S3CatalogDeltaSource.Table
+}
+f1val.S3CatalogDeltaSource = f1valf44
+}
+if f1valiter.S3CatalogHudiSource != nil {
+f1valf45 := &svcapitypes.S3CatalogHudiSource{}
+if f1valiter.S3CatalogHudiSource.AdditionalHudiOptions != nil {
+f1valf45f0 := map[string]*string{}
+for f1valf45f0key, f1valf45f0valiter := range f1valiter.S3CatalogHudiSource.AdditionalHudiOptions {
+var f1valf45f0val string
+f1valf45f0val = *f1valf45f0valiter
+f1valf45f0[f1valf45f0key] = &f1valf45f0val
+}
+f1valf45.AdditionalHudiOptions = f1valf45f0
+}
+if f1valiter.S3CatalogHudiSource.Database != nil {
+f1valf45.Database = f1valiter.S3CatalogHudiSource.Database
+}
+if f1valiter.S3CatalogHudiSource.Name != nil {
+f1valf45.Name = f1valiter.S3CatalogHudiSource.Name
+}
+if f1valiter.S3CatalogHudiSource.OutputSchemas != nil {
+f1valf45f3 := []*svcapitypes.GlueSchema{}
+for _, f1valf45f3iter := range f1valiter.S3CatalogHudiSource.OutputSchemas {
+f1valf45f3elem := &svcapitypes.GlueSchema{}
+if f1valf45f3iter.Columns != nil {
+f1valf45f3elemf0 := []*svcapitypes.GlueStudioSchemaColumn{}
+for _, f1valf45f3elemf0iter := range f1valf45f3iter.Columns {
+f1valf45f3elemf0elem := &svcapitypes.GlueStudioSchemaColumn{}
+if f1valf45f3elemf0iter.Name != nil {
+f1valf45f3elemf0elem.Name = f1valf45f3elemf0iter.Name
+}
+if f1valf45f3elemf0iter.Type != nil {
+f1valf45f3elemf0elem.Type = f1valf45f3elemf0iter.Type
+}
+f1valf45f3elemf0 = append(f1valf45f3elemf0, f1valf45f3elemf0elem)
+}
+f1valf45f3elem.Columns = f1valf45f3elemf0
+}
+f1valf45f3 = append(f1valf45f3, f1valf45f3elem)
 }
-f1valf36.TargetPath = f1valf36f3
+f1valf45.OutputSchemas = f1valf45f3
 }
-f1val.RenameField = f1valf36
+if f1valiter.S3CatalogHudiSource.Table != nil {
+f1valf45.Table = f1valiter.S3CatalogHudiSource.Table
+}
+f1val.S3CatalogHudiSource = f1valf45
 }
 if f1valiter.S3CatalogSource != nil {
-f1valf37 := &svcapitypes.S3CatalogSource{}
+f1valf46 := &svcapitypes.S3CatalogSource{}
 if f1valiter.S3CatalogSource.AdditionalOptions != nil {
-f1valf37f0 := &svcapitypes.S3SourceAdditionalOptions{}
+f1valf46f0 := &svcapitypes.S3SourceAdditionalOptions{}
 if f1valiter.S3CatalogSource.AdditionalOptions.BoundedFiles != nil {
-f1valf37f0.BoundedFiles = f1valiter.S3CatalogSource.AdditionalOptions.BoundedFiles
+f1valf46f0.BoundedFiles = f1valiter.S3CatalogSource.AdditionalOptions.BoundedFiles
 }
 if f1valiter.S3CatalogSource.AdditionalOptions.BoundedSize != nil {
-f1valf37f0.BoundedSize = f1valiter.S3CatalogSource.AdditionalOptions.BoundedSize
+f1valf46f0.BoundedSize = f1valiter.S3CatalogSource.AdditionalOptions.BoundedSize
 }
-f1valf37.AdditionalOptions = f1valf37f0
+f1valf46.AdditionalOptions = f1valf46f0
 }
 if f1valiter.S3CatalogSource.Database != nil {
-f1valf37.Database = f1valiter.S3CatalogSource.Database
+f1valf46.Database = f1valiter.S3CatalogSource.Database
 }
 if f1valiter.S3CatalogSource.Name != nil {
-f1valf37.Name = f1valiter.S3CatalogSource.Name
+f1valf46.Name = f1valiter.S3CatalogSource.Name
 }
 if f1valiter.S3CatalogSource.PartitionPredicate != nil {
-f1valf37.PartitionPredicate = f1valiter.S3CatalogSource.PartitionPredicate
+f1valf46.PartitionPredicate = f1valiter.S3CatalogSource.PartitionPredicate
 }
 if f1valiter.S3CatalogSource.Table != nil {
-f1valf37.Table = f1valiter.S3CatalogSource.Table
+f1valf46.Table = f1valiter.S3CatalogSource.Table
 }
-f1val.S3CatalogSource = f1valf37
+f1val.S3CatalogSource = f1valf46
 }
 if f1valiter.S3CatalogTarget != nil {
-f1valf38 := &svcapitypes.S3CatalogTarget{}
+f1valf47 := &svcapitypes.S3CatalogTarget{}
 if f1valiter.S3CatalogTarget.Database != nil {
-f1valf38.Database = f1valiter.S3CatalogTarget.Database
+f1valf47.Database = f1valiter.S3CatalogTarget.Database
 }
 if f1valiter.S3CatalogTarget.Inputs != nil {
-f1valf38f1 := []*string{}
-for _, f1valf38f1iter := range f1valiter.S3CatalogTarget.Inputs {
-var f1valf38f1elem string
-f1valf38f1elem = *f1valf38f1iter
-f1valf38f1 = append(f1valf38f1, &f1valf38f1elem)
+f1valf47f1 := []*string{}
+for _, f1valf47f1iter := range f1valiter.S3CatalogTarget.Inputs {
+var f1valf47f1elem string
+f1valf47f1elem = *f1valf47f1iter
+f1valf47f1 = append(f1valf47f1, &f1valf47f1elem)
 }
-f1valf38.Inputs = f1valf38f1
+f1valf47.Inputs = f1valf47f1
 }
 if f1valiter.S3CatalogTarget.Name != nil {
-f1valf38.Name = f1valiter.S3CatalogTarget.Name
+f1valf47.Name = f1valiter.S3CatalogTarget.Name
 }
 if f1valiter.S3CatalogTarget.PartitionKeys != nil {
-f1valf38f3 := [][]*string{}
-for _, f1valf38f3iter := range f1valiter.S3CatalogTarget.PartitionKeys {
-f1valf38f3elem := []*string{}
-for _, f1valf38f3elemiter := range f1valf38f3iter {
-var f1valf38f3elemelem string
-f1valf38f3elemelem = *f1valf38f3elemiter
-f1valf38f3elem = append(f1valf38f3elem, &f1valf38f3elemelem)
+f1valf47f3 := [][]*string{}
+for _, f1valf47f3iter := range f1valiter.S3CatalogTarget.PartitionKeys {
+f1valf47f3elem := []*string{}
+for _, f1valf47f3elemiter := range f1valf47f3iter {
+var f1valf47f3elemelem string
+f1valf47f3elemelem = *f1valf47f3elemiter
+f1valf47f3elem = append(f1valf47f3elem, &f1valf47f3elemelem)
 }
-f1valf38f3 = append(f1valf38f3, f1valf38f3elem)
+f1valf47f3 = append(f1valf47f3, f1valf47f3elem)
 }
-f1valf38.PartitionKeys = f1valf38f3
+f1valf47.PartitionKeys = f1valf47f3
 }
 if f1valiter.S3CatalogTarget.SchemaChangePolicy != nil {
-f1valf38f4 := &svcapitypes.CatalogSchemaChangePolicy{}
+f1valf47f4 := &svcapitypes.CatalogSchemaChangePolicy{}
 if f1valiter.S3CatalogTarget.SchemaChangePolicy.EnableUpdateCatalog != nil {
-f1valf38f4.EnableUpdateCatalog = f1valiter.S3CatalogTarget.SchemaChangePolicy.EnableUpdateCatalog
+f1valf47f4.EnableUpdateCatalog = f1valiter.S3CatalogTarget.SchemaChangePolicy.EnableUpdateCatalog
 }
 if f1valiter.S3CatalogTarget.SchemaChangePolicy.UpdateBehavior != nil {
-f1valf38f4.UpdateBehavior = f1valiter.S3CatalogTarget.SchemaChangePolicy.UpdateBehavior
+f1valf47f4.UpdateBehavior = f1valiter.S3CatalogTarget.SchemaChangePolicy.UpdateBehavior
 }
-f1valf38.SchemaChangePolicy = f1valf38f4
+f1valf47.SchemaChangePolicy = f1valf47f4
 }
 if f1valiter.S3CatalogTarget.Table != nil {
-f1valf38.Table = f1valiter.S3CatalogTarget.Table
+f1valf47.Table = f1valiter.S3CatalogTarget.Table
 }
-f1val.S3CatalogTarget = f1valf38
+f1val.S3CatalogTarget = f1valf47
 }
 if f1valiter.S3CsvSource != nil {
-f1valf39 := &svcapitypes.S3CsvSource{}
+f1valf48 := &svcapitypes.S3CsvSource{}
 if f1valiter.S3CsvSource.AdditionalOptions != nil {
-f1valf39f0 := &svcapitypes.S3DirectSourceAdditionalOptions{}
+f1valf48f0 := &svcapitypes.S3DirectSourceAdditionalOptions{}
 if f1valiter.S3CsvSource.AdditionalOptions.BoundedFiles != nil {
-f1valf39f0.BoundedFiles = f1valiter.S3CsvSource.AdditionalOptions.BoundedFiles
+f1valf48f0.BoundedFiles = f1valiter.S3CsvSource.AdditionalOptions.BoundedFiles
 }
 if f1valiter.S3CsvSource.AdditionalOptions.BoundedSize != nil {
-f1valf39f0.BoundedSize = f1valiter.S3CsvSource.AdditionalOptions.BoundedSize
+f1valf48f0.BoundedSize = f1valiter.S3CsvSource.AdditionalOptions.BoundedSize
 }
 if f1valiter.S3CsvSource.AdditionalOptions.EnableSamplePath != nil {
-f1valf39f0.EnableSamplePath = f1valiter.S3CsvSource.AdditionalOptions.EnableSamplePath
+f1valf48f0.EnableSamplePath = f1valiter.S3CsvSource.AdditionalOptions.EnableSamplePath
 }
 if f1valiter.S3CsvSource.AdditionalOptions.SamplePath != nil {
-f1valf39f0.SamplePath = f1valiter.S3CsvSource.AdditionalOptions.SamplePath
+f1valf48f0.SamplePath = f1valiter.S3CsvSource.AdditionalOptions.SamplePath
 }
-f1valf39.AdditionalOptions = f1valf39f0
+f1valf48.AdditionalOptions = f1valf48f0
 }
 if f1valiter.S3CsvSource.CompressionType != nil {
-f1valf39.CompressionType = f1valiter.S3CsvSource.CompressionType
+f1valf48.CompressionType = f1valiter.S3CsvSource.CompressionType
 }
 if f1valiter.S3CsvSource.Escaper != nil {
-f1valf39.Escaper = f1valiter.S3CsvSource.Escaper
+f1valf48.Escaper = f1valiter.S3CsvSource.Escaper
 }
 if f1valiter.S3CsvSource.Exclusions != nil {
-f1valf39f3 := []*string{}
-for _, f1valf39f3iter := range f1valiter.S3CsvSource.Exclusions {
-var f1valf39f3elem string
-f1valf39f3elem = *f1valf39f3iter
-f1valf39f3 = append(f1valf39f3, &f1valf39f3elem)
+f1valf48f3 := []*string{}
+for _, f1valf48f3iter := range f1valiter.S3CsvSource.Exclusions {
+var f1valf48f3elem string
+f1valf48f3elem = *f1valf48f3iter
+f1valf48f3 = append(f1valf48f3, &f1valf48f3elem)
 }
-f1valf39.Exclusions = f1valf39f3
+f1valf48.Exclusions = f1valf48f3
 }
 if f1valiter.S3CsvSource.GroupFiles != nil {
-f1valf39.GroupFiles = f1valiter.S3CsvSource.GroupFiles
+f1valf48.GroupFiles = f1valiter.S3CsvSource.GroupFiles
 }
 if f1valiter.S3CsvSource.GroupSize != nil {
-f1valf39.GroupSize = f1valiter.S3CsvSource.GroupSize
+f1valf48.GroupSize = f1valiter.S3CsvSource.GroupSize
 }
 if f1valiter.S3CsvSource.MaxBand != nil {
-f1valf39.MaxBand = f1valiter.S3CsvSource.MaxBand
+f1valf48.MaxBand = f1valiter.S3CsvSource.MaxBand
 }
 if f1valiter.S3CsvSource.MaxFilesInBand != nil {
-f1valf39.MaxFilesInBand = f1valiter.S3CsvSource.MaxFilesInBand
+f1valf48.MaxFilesInBand = f1valiter.S3CsvSource.MaxFilesInBand
 }
 if f1valiter.S3CsvSource.Multiline != nil {
-f1valf39.Multiline = f1valiter.S3CsvSource.Multiline
+f1valf48.Multiline = f1valiter.S3CsvSource.Multiline
 }
 if f1valiter.S3CsvSource.Name != nil {
-f1valf39.Name = f1valiter.S3CsvSource.Name
+f1valf48.Name = f1valiter.S3CsvSource.Name
 }
 if f1valiter.S3CsvSource.OptimizePerformance != nil {
-f1valf39.OptimizePerformance = f1valiter.S3CsvSource.OptimizePerformance
+f1valf48.OptimizePerformance = f1valiter.S3CsvSource.OptimizePerformance
 }
 if f1valiter.S3CsvSource.OutputSchemas != nil {
-f1valf39f11 := []*svcapitypes.GlueSchema{}
-for _, f1valf39f11iter := range f1valiter.S3CsvSource.OutputSchemas {
-f1valf39f11elem := &svcapitypes.GlueSchema{}
-if f1valf39f11iter.Columns != nil {
-f1valf39f11elemf0 := []*svcapitypes.GlueStudioSchemaColumn{}
-for _, f1valf39f11elemf0iter := range f1valf39f11iter.Columns {
-f1valf39f11elemf0elem := &svcapitypes.GlueStudioSchemaColumn{}
-if f1valf39f11elemf0iter.Name != nil {
-f1valf39f11elemf0elem.Name = f1valf39f11elemf0iter.Name
+f1valf48f11 := []*svcapitypes.GlueSchema{}
+for _, f1valf48f11iter := range f1valiter.S3CsvSource.OutputSchemas {
+f1valf48f11elem := &svcapitypes.GlueSchema{}
+if f1valf48f11iter.Columns != nil {
+f1valf48f11elemf0 := []*svcapitypes.GlueStudioSchemaColumn{}
+for _, f1valf48f11elemf0iter := range f1valf48f11iter.Columns {
+f1valf48f11elemf0elem := &svcapitypes.GlueStudioSchemaColumn{}
+if f1valf48f11elemf0iter.Name != nil {
+f1valf48f11elemf0elem.Name = f1valf48f11elemf0iter.Name
 }
-if f1valf39f11elemf0iter.Type != nil {
-f1valf39f11elemf0elem.Type = f1valf39f11elemf0iter.Type
+if f1valf48f11elemf0iter.Type != nil {
+f1valf48f11elemf0elem.Type = f1valf48f11elemf0iter.Type
 }
-f1valf39f11elemf0 = append(f1valf39f11elemf0, f1valf39f11elemf0elem)
+f1valf48f11elemf0 = append(f1valf48f11elemf0, f1valf48f11elemf0elem)
 }
-f1valf39f11elem.Columns = f1valf39f11elemf0
+f1valf48f11elem.Columns = f1valf48f11elemf0
 }
-f1valf39f11 = append(f1valf39f11, f1valf39f11elem)
+f1valf48f11 = append(f1valf48f11, f1valf48f11elem)
 }
-f1valf39.OutputSchemas = f1valf39f11
+f1valf48.OutputSchemas = f1valf48f11
 }
 if f1valiter.S3CsvSource.Paths != nil {
-f1valf39f12 := []*string{}
-for _, f1valf39f12iter := range f1valiter.S3CsvSource.Paths {
-var f1valf39f12elem string
-f1valf39f12elem = *f1valf39f12iter
-f1valf39f12 = append(f1valf39f12, &f1valf39f12elem)
+f1valf48f12 := []*string{}
+for _, f1valf48f12iter := range f1valiter.S3CsvSource.Paths {
+var f1valf48f12elem string
+f1valf48f12elem = *f1valf48f12iter
+f1valf48f12 = append(f1valf48f12, &f1valf48f12elem)
 }
-f1valf39.Paths = f1valf39f12
+f1valf48.Paths = f1valf48f12
 }
 if f1valiter.S3CsvSource.QuoteChar != nil {
-f1valf39.QuoteChar = f1valiter.S3CsvSource.QuoteChar
+f1valf48.QuoteChar = f1valiter.S3CsvSource.QuoteChar
 }
 if f1valiter.S3CsvSource.Recurse != nil {
-f1valf39.Recurse = f1valiter.S3CsvSource.Recurse
+f1valf48.Recurse = f1valiter.S3CsvSource.Recurse
 }
 if f1valiter.S3CsvSource.Separator != nil {
-f1valf39.Separator = f1valiter.S3CsvSource.Separator
+f1valf48.Separator = f1valiter.S3CsvSource.Separator
 }
 if f1valiter.S3CsvSource.SkipFirst != nil {
-f1valf39.SkipFirst = f1valiter.S3CsvSource.SkipFirst
+f1valf48.SkipFirst = f1valiter.S3CsvSource.SkipFirst
 }
 if f1valiter.S3CsvSource.WithHeader != nil {
-f1valf39.WithHeader = f1valiter.S3CsvSource.WithHeader
+f1valf48.WithHeader = f1valiter.S3CsvSource.WithHeader
 }
 if f1valiter.S3CsvSource.WriteHeader != nil {
-f1valf39.WriteHeader = f1valiter.S3CsvSource.WriteHeader
+f1valf48.WriteHeader = f1valiter.S3CsvSource.WriteHeader
+}
+f1val.S3CsvSource = f1valf48
+}
+if f1valiter.S3DeltaCatalogTarget != nil {
+f1valf49 := &svcapitypes.S3DeltaCatalogTarget{}
+if f1valiter.S3DeltaCatalogTarget.AdditionalOptions != nil {
+f1valf49f0 := map[string]*string{}
+for f1valf49f0key, f1valf49f0valiter := range f1valiter.S3DeltaCatalogTarget.AdditionalOptions {
+var f1valf49f0val string
+f1valf49f0val = *f1valf49f0valiter
+f1valf49f0[f1valf49f0key] = &f1valf49f0val
+}
+f1valf49.AdditionalOptions = f1valf49f0
+}
+if f1valiter.S3DeltaCatalogTarget.Database != nil {
+f1valf49.Database = f1valiter.S3DeltaCatalogTarget.Database
+}
+if f1valiter.S3DeltaCatalogTarget.Inputs != nil {
+f1valf49f2 := []*string{}
+for _, f1valf49f2iter := range f1valiter.S3DeltaCatalogTarget.Inputs {
+var f1valf49f2elem string
+f1valf49f2elem = *f1valf49f2iter
+f1valf49f2 = append(f1valf49f2, &f1valf49f2elem)
+}
+f1valf49.Inputs = f1valf49f2
+}
+if f1valiter.S3DeltaCatalogTarget.Name != nil {
+f1valf49.Name = f1valiter.S3DeltaCatalogTarget.Name
+}
+if f1valiter.S3DeltaCatalogTarget.PartitionKeys != nil {
+f1valf49f4 := [][]*string{}
+for _, f1valf49f4iter := range f1valiter.S3DeltaCatalogTarget.PartitionKeys {
+f1valf49f4elem := []*string{}
+for _, f1valf49f4elemiter := range f1valf49f4iter {
+var f1valf49f4elemelem string
+f1valf49f4elemelem = *f1valf49f4elemiter
+f1valf49f4elem = append(f1valf49f4elem, &f1valf49f4elemelem)
+}
+f1valf49f4 = append(f1valf49f4, f1valf49f4elem)
+}
+f1valf49.PartitionKeys = f1valf49f4
+}
+if f1valiter.S3DeltaCatalogTarget.SchemaChangePolicy != nil {
+f1valf49f5 := &svcapitypes.CatalogSchemaChangePolicy{}
+if f1valiter.S3DeltaCatalogTarget.SchemaChangePolicy.EnableUpdateCatalog != nil {
+f1valf49f5.EnableUpdateCatalog = f1valiter.S3DeltaCatalogTarget.SchemaChangePolicy.EnableUpdateCatalog
+}
+if f1valiter.S3DeltaCatalogTarget.SchemaChangePolicy.UpdateBehavior != nil {
+f1valf49f5.UpdateBehavior = f1valiter.S3DeltaCatalogTarget.SchemaChangePolicy.UpdateBehavior
+}
+f1valf49.SchemaChangePolicy = f1valf49f5
+}
+if f1valiter.S3DeltaCatalogTarget.Table != nil {
+f1valf49.Table = f1valiter.S3DeltaCatalogTarget.Table
+}
+f1val.S3DeltaCatalogTarget = f1valf49
+}
+if f1valiter.S3DeltaDirectTarget != nil {
+f1valf50 := &svcapitypes.S3DeltaDirectTarget{}
+if f1valiter.S3DeltaDirectTarget.AdditionalOptions != nil {
+f1valf50f0 := map[string]*string{}
+for f1valf50f0key, f1valf50f0valiter := range f1valiter.S3DeltaDirectTarget.AdditionalOptions {
+var f1valf50f0val string
+f1valf50f0val = *f1valf50f0valiter
+f1valf50f0[f1valf50f0key] = &f1valf50f0val
+}
+f1valf50.AdditionalOptions = f1valf50f0
+}
+if f1valiter.S3DeltaDirectTarget.Compression != nil {
+f1valf50.Compression = f1valiter.S3DeltaDirectTarget.Compression
+}
+if f1valiter.S3DeltaDirectTarget.Format != nil {
+f1valf50.Format = f1valiter.S3DeltaDirectTarget.Format
+}
+if f1valiter.S3DeltaDirectTarget.Inputs != nil {
+f1valf50f3 := []*string{}
+for _, f1valf50f3iter := range f1valiter.S3DeltaDirectTarget.Inputs {
+var f1valf50f3elem string
+f1valf50f3elem = *f1valf50f3iter
+f1valf50f3 = append(f1valf50f3, &f1valf50f3elem)
+}
+f1valf50.Inputs = f1valf50f3
+}
+if f1valiter.S3DeltaDirectTarget.Name != nil {
+f1valf50.Name = f1valiter.S3DeltaDirectTarget.Name
+}
+if f1valiter.S3DeltaDirectTarget.PartitionKeys != nil {
+f1valf50f5 := [][]*string{}
+for _, f1valf50f5iter := range f1valiter.S3DeltaDirectTarget.PartitionKeys {
+f1valf50f5elem := []*string{}
+for _, f1valf50f5elemiter := range f1valf50f5iter {
+var f1valf50f5elemelem string
+f1valf50f5elemelem = *f1valf50f5elemiter
+f1valf50f5elem = append(f1valf50f5elem, &f1valf50f5elemelem)
+}
+f1valf50f5 = append(f1valf50f5, f1valf50f5elem)
+}
+f1valf50.PartitionKeys = f1valf50f5
+}
+if f1valiter.S3DeltaDirectTarget.Path != nil {
+f1valf50.Path = f1valiter.S3DeltaDirectTarget.Path
+}
+if f1valiter.S3DeltaDirectTarget.SchemaChangePolicy != nil {
+f1valf50f7 := &svcapitypes.DirectSchemaChangePolicy{}
+if f1valiter.S3DeltaDirectTarget.SchemaChangePolicy.Database != nil {
+f1valf50f7.Database = f1valiter.S3DeltaDirectTarget.SchemaChangePolicy.Database
+}
+if f1valiter.S3DeltaDirectTarget.SchemaChangePolicy.EnableUpdateCatalog != nil {
+f1valf50f7.EnableUpdateCatalog = f1valiter.S3DeltaDirectTarget.SchemaChangePolicy.EnableUpdateCatalog
+}
+if f1valiter.S3DeltaDirectTarget.SchemaChangePolicy.Table != nil {
+f1valf50f7.Table = f1valiter.S3DeltaDirectTarget.SchemaChangePolicy.Table
+}
+if f1valiter.S3DeltaDirectTarget.SchemaChangePolicy.UpdateBehavior != nil {
+f1valf50f7.UpdateBehavior = f1valiter.S3DeltaDirectTarget.SchemaChangePolicy.UpdateBehavior
+}
+f1valf50.SchemaChangePolicy = f1valf50f7
 }
-f1val.S3CsvSource = f1valf39
+f1val.S3DeltaDirectTarget = f1valf50
+}
+if f1valiter.S3DeltaSource != nil {
+f1valf51 := &svcapitypes.S3DeltaSource{}
+if f1valiter.S3DeltaSource.AdditionalDeltaOptions != nil {
+f1valf51f0 := map[string]*string{}
+for f1valf51f0key, f1valf51f0valiter := range f1valiter.S3DeltaSource.AdditionalDeltaOptions {
+var f1valf51f0val string
+f1valf51f0val = *f1valf51f0valiter
+f1valf51f0[f1valf51f0key] = &f1valf51f0val
+}
+f1valf51.AdditionalDeltaOptions = f1valf51f0
+}
+if f1valiter.S3DeltaSource.AdditionalOptions != nil {
+f1valf51f1 := &svcapitypes.S3DirectSourceAdditionalOptions{}
+if f1valiter.S3DeltaSource.AdditionalOptions.BoundedFiles != nil {
+f1valf51f1.BoundedFiles = f1valiter.S3DeltaSource.AdditionalOptions.BoundedFiles
+}
+if f1valiter.S3DeltaSource.AdditionalOptions.BoundedSize != nil {
+f1valf51f1.BoundedSize = f1valiter.S3DeltaSource.AdditionalOptions.BoundedSize
+}
+if f1valiter.S3DeltaSource.AdditionalOptions.EnableSamplePath != nil {
+f1valf51f1.EnableSamplePath = f1valiter.S3DeltaSource.AdditionalOptions.EnableSamplePath
+}
+if f1valiter.S3DeltaSource.AdditionalOptions.SamplePath != nil {
+f1valf51f1.SamplePath = f1valiter.S3DeltaSource.AdditionalOptions.SamplePath
+}
+f1valf51.AdditionalOptions = f1valf51f1
+}
+if f1valiter.S3DeltaSource.Name != nil {
+f1valf51.Name = f1valiter.S3DeltaSource.Name
+}
+if f1valiter.S3DeltaSource.OutputSchemas != nil {
+f1valf51f3 := []*svcapitypes.GlueSchema{}
+for _, f1valf51f3iter := range f1valiter.S3DeltaSource.OutputSchemas {
+f1valf51f3elem := &svcapitypes.GlueSchema{}
+if f1valf51f3iter.Columns != nil {
+f1valf51f3elemf0 := []*svcapitypes.GlueStudioSchemaColumn{}
+for _, f1valf51f3elemf0iter := range f1valf51f3iter.Columns {
+f1valf51f3elemf0elem := &svcapitypes.GlueStudioSchemaColumn{}
+if f1valf51f3elemf0iter.Name != nil {
+f1valf51f3elemf0elem.Name = f1valf51f3elemf0iter.Name
+}
+if f1valf51f3elemf0iter.Type != nil {
+f1valf51f3elemf0elem.Type = f1valf51f3elemf0iter.Type
+}
+f1valf51f3elemf0 = append(f1valf51f3elemf0, f1valf51f3elemf0elem)
+}
+f1valf51f3elem.Columns = f1valf51f3elemf0
+}
+f1valf51f3 = append(f1valf51f3, f1valf51f3elem)
+}
+f1valf51.OutputSchemas = f1valf51f3
+}
+if f1valiter.S3DeltaSource.Paths != nil {
+f1valf51f4 := []*string{}
+for _, f1valf51f4iter := range f1valiter.S3DeltaSource.Paths {
+var f1valf51f4elem string
+f1valf51f4elem = *f1valf51f4iter
+f1valf51f4 = append(f1valf51f4, &f1valf51f4elem)
+}
+f1valf51.Paths = f1valf51f4
+}
+f1val.S3DeltaSource = f1valf51
 }
 if f1valiter.S3DirectTarget != nil {
-f1valf40 := &svcapitypes.S3DirectTarget{}
+f1valf52 := &svcapitypes.S3DirectTarget{}
 if f1valiter.S3DirectTarget.Compression != nil {
-f1valf40.Compression = f1valiter.S3DirectTarget.Compression
+f1valf52.Compression = f1valiter.S3DirectTarget.Compression
 }
 if f1valiter.S3DirectTarget.Format != nil {
-f1valf40.Format = f1valiter.S3DirectTarget.Format
+f1valf52.Format = f1valiter.S3DirectTarget.Format
 }
 if f1valiter.S3DirectTarget.Inputs != nil {
-f1valf40f2 := []*string{}
-for _, f1valf40f2iter := range f1valiter.S3DirectTarget.Inputs {
-var f1valf40f2elem string
-f1valf40f2elem = *f1valf40f2iter
-f1valf40f2 = append(f1valf40f2, &f1valf40f2elem)
+f1valf52f2 := []*string{}
+for _, f1valf52f2iter := range f1valiter.S3DirectTarget.Inputs {
+var f1valf52f2elem string
+f1valf52f2elem = *f1valf52f2iter
+f1valf52f2 = append(f1valf52f2, &f1valf52f2elem)
 }
-f1valf40.Inputs = f1valf40f2
+f1valf52.Inputs = f1valf52f2
 }
 if f1valiter.S3DirectTarget.Name != nil {
-f1valf40.Name = f1valiter.S3DirectTarget.Name
+f1valf52.Name = f1valiter.S3DirectTarget.Name
 }
 if f1valiter.S3DirectTarget.PartitionKeys != nil {
-f1valf40f4 := [][]*string{}
-for _, f1valf40f4iter := range f1valiter.S3DirectTarget.PartitionKeys {
-f1valf40f4elem := []*string{}
-for _, f1valf40f4elemiter := range f1valf40f4iter {
-var f1valf40f4elemelem string
-f1valf40f4elemelem = *f1valf40f4elemiter
-f1valf40f4elem = append(f1valf40f4elem, &f1valf40f4elemelem)
+f1valf52f4 := [][]*string{}
+for _, f1valf52f4iter := range f1valiter.S3DirectTarget.PartitionKeys {
+
f1valf52f4elem := []*string{} + for _, f1valf52f4elemiter := range f1valf52f4iter { + var f1valf52f4elemelem string + f1valf52f4elemelem = *f1valf52f4elemiter + f1valf52f4elem = append(f1valf52f4elem, &f1valf52f4elemelem) } - f1valf40f4 = append(f1valf40f4, f1valf40f4elem) + f1valf52f4 = append(f1valf52f4, f1valf52f4elem) } - f1valf40.PartitionKeys = f1valf40f4 + f1valf52.PartitionKeys = f1valf52f4 } if f1valiter.S3DirectTarget.Path != nil { - f1valf40.Path = f1valiter.S3DirectTarget.Path + f1valf52.Path = f1valiter.S3DirectTarget.Path } if f1valiter.S3DirectTarget.SchemaChangePolicy != nil { - f1valf40f6 := &svcapitypes.DirectSchemaChangePolicy{} + f1valf52f6 := &svcapitypes.DirectSchemaChangePolicy{} if f1valiter.S3DirectTarget.SchemaChangePolicy.Database != nil { - f1valf40f6.Database = f1valiter.S3DirectTarget.SchemaChangePolicy.Database + f1valf52f6.Database = f1valiter.S3DirectTarget.SchemaChangePolicy.Database } if f1valiter.S3DirectTarget.SchemaChangePolicy.EnableUpdateCatalog != nil { - f1valf40f6.EnableUpdateCatalog = f1valiter.S3DirectTarget.SchemaChangePolicy.EnableUpdateCatalog + f1valf52f6.EnableUpdateCatalog = f1valiter.S3DirectTarget.SchemaChangePolicy.EnableUpdateCatalog } if f1valiter.S3DirectTarget.SchemaChangePolicy.Table != nil { - f1valf40f6.Table = f1valiter.S3DirectTarget.SchemaChangePolicy.Table + f1valf52f6.Table = f1valiter.S3DirectTarget.SchemaChangePolicy.Table } if f1valiter.S3DirectTarget.SchemaChangePolicy.UpdateBehavior != nil { - f1valf40f6.UpdateBehavior = f1valiter.S3DirectTarget.SchemaChangePolicy.UpdateBehavior + f1valf52f6.UpdateBehavior = f1valiter.S3DirectTarget.SchemaChangePolicy.UpdateBehavior } - f1valf40.SchemaChangePolicy = f1valf40f6 + f1valf52.SchemaChangePolicy = f1valf52f6 } - f1val.S3DirectTarget = f1valf40 + f1val.S3DirectTarget = f1valf52 } if f1valiter.S3GlueParquetTarget != nil { - f1valf41 := &svcapitypes.S3GlueParquetTarget{} + f1valf53 := &svcapitypes.S3GlueParquetTarget{} if f1valiter.S3GlueParquetTarget.Compression != nil { - f1valf41.Compression = f1valiter.S3GlueParquetTarget.Compression + f1valf53.Compression = f1valiter.S3GlueParquetTarget.Compression } if f1valiter.S3GlueParquetTarget.Inputs != nil { - f1valf41f1 := []*string{} - for _, f1valf41f1iter := range f1valiter.S3GlueParquetTarget.Inputs { - var f1valf41f1elem string - f1valf41f1elem = *f1valf41f1iter - f1valf41f1 = append(f1valf41f1, &f1valf41f1elem) + f1valf53f1 := []*string{} + for _, f1valf53f1iter := range f1valiter.S3GlueParquetTarget.Inputs { + var f1valf53f1elem string + f1valf53f1elem = *f1valf53f1iter + f1valf53f1 = append(f1valf53f1, &f1valf53f1elem) } - f1valf41.Inputs = f1valf41f1 + f1valf53.Inputs = f1valf53f1 } if f1valiter.S3GlueParquetTarget.Name != nil { - f1valf41.Name = f1valiter.S3GlueParquetTarget.Name + f1valf53.Name = f1valiter.S3GlueParquetTarget.Name } if f1valiter.S3GlueParquetTarget.PartitionKeys != nil { - f1valf41f3 := [][]*string{} - for _, f1valf41f3iter := range f1valiter.S3GlueParquetTarget.PartitionKeys { - f1valf41f3elem := []*string{} - for _, f1valf41f3elemiter := range f1valf41f3iter { - var f1valf41f3elemelem string - f1valf41f3elemelem = *f1valf41f3elemiter - f1valf41f3elem = append(f1valf41f3elem, &f1valf41f3elemelem) + f1valf53f3 := [][]*string{} + for _, f1valf53f3iter := range f1valiter.S3GlueParquetTarget.PartitionKeys { + f1valf53f3elem := []*string{} + for _, f1valf53f3elemiter := range f1valf53f3iter { + var f1valf53f3elemelem string + f1valf53f3elemelem = *f1valf53f3elemiter + f1valf53f3elem = append(f1valf53f3elem, 
&f1valf53f3elemelem) } - f1valf41f3 = append(f1valf41f3, f1valf41f3elem) + f1valf53f3 = append(f1valf53f3, f1valf53f3elem) } - f1valf41.PartitionKeys = f1valf41f3 + f1valf53.PartitionKeys = f1valf53f3 } if f1valiter.S3GlueParquetTarget.Path != nil { - f1valf41.Path = f1valiter.S3GlueParquetTarget.Path + f1valf53.Path = f1valiter.S3GlueParquetTarget.Path } if f1valiter.S3GlueParquetTarget.SchemaChangePolicy != nil { - f1valf41f5 := &svcapitypes.DirectSchemaChangePolicy{} + f1valf53f5 := &svcapitypes.DirectSchemaChangePolicy{} if f1valiter.S3GlueParquetTarget.SchemaChangePolicy.Database != nil { - f1valf41f5.Database = f1valiter.S3GlueParquetTarget.SchemaChangePolicy.Database + f1valf53f5.Database = f1valiter.S3GlueParquetTarget.SchemaChangePolicy.Database } if f1valiter.S3GlueParquetTarget.SchemaChangePolicy.EnableUpdateCatalog != nil { - f1valf41f5.EnableUpdateCatalog = f1valiter.S3GlueParquetTarget.SchemaChangePolicy.EnableUpdateCatalog + f1valf53f5.EnableUpdateCatalog = f1valiter.S3GlueParquetTarget.SchemaChangePolicy.EnableUpdateCatalog } if f1valiter.S3GlueParquetTarget.SchemaChangePolicy.Table != nil { - f1valf41f5.Table = f1valiter.S3GlueParquetTarget.SchemaChangePolicy.Table + f1valf53f5.Table = f1valiter.S3GlueParquetTarget.SchemaChangePolicy.Table } if f1valiter.S3GlueParquetTarget.SchemaChangePolicy.UpdateBehavior != nil { - f1valf41f5.UpdateBehavior = f1valiter.S3GlueParquetTarget.SchemaChangePolicy.UpdateBehavior + f1valf53f5.UpdateBehavior = f1valiter.S3GlueParquetTarget.SchemaChangePolicy.UpdateBehavior } - f1valf41.SchemaChangePolicy = f1valf41f5 + f1valf53.SchemaChangePolicy = f1valf53f5 } - f1val.S3GlueParquetTarget = f1valf41 + f1val.S3GlueParquetTarget = f1valf53 + } + if f1valiter.S3HudiCatalogTarget != nil { + f1valf54 := &svcapitypes.S3HudiCatalogTarget{} + if f1valiter.S3HudiCatalogTarget.AdditionalOptions != nil { + f1valf54f0 := map[string]*string{} + for f1valf54f0key, f1valf54f0valiter := range f1valiter.S3HudiCatalogTarget.AdditionalOptions { + var f1valf54f0val string + f1valf54f0val = *f1valf54f0valiter + f1valf54f0[f1valf54f0key] = &f1valf54f0val + } + f1valf54.AdditionalOptions = f1valf54f0 + } + if f1valiter.S3HudiCatalogTarget.Database != nil { + f1valf54.Database = f1valiter.S3HudiCatalogTarget.Database + } + if f1valiter.S3HudiCatalogTarget.Inputs != nil { + f1valf54f2 := []*string{} + for _, f1valf54f2iter := range f1valiter.S3HudiCatalogTarget.Inputs { + var f1valf54f2elem string + f1valf54f2elem = *f1valf54f2iter + f1valf54f2 = append(f1valf54f2, &f1valf54f2elem) + } + f1valf54.Inputs = f1valf54f2 + } + if f1valiter.S3HudiCatalogTarget.Name != nil { + f1valf54.Name = f1valiter.S3HudiCatalogTarget.Name + } + if f1valiter.S3HudiCatalogTarget.PartitionKeys != nil { + f1valf54f4 := [][]*string{} + for _, f1valf54f4iter := range f1valiter.S3HudiCatalogTarget.PartitionKeys { + f1valf54f4elem := []*string{} + for _, f1valf54f4elemiter := range f1valf54f4iter { + var f1valf54f4elemelem string + f1valf54f4elemelem = *f1valf54f4elemiter + f1valf54f4elem = append(f1valf54f4elem, &f1valf54f4elemelem) + } + f1valf54f4 = append(f1valf54f4, f1valf54f4elem) + } + f1valf54.PartitionKeys = f1valf54f4 + } + if f1valiter.S3HudiCatalogTarget.SchemaChangePolicy != nil { + f1valf54f5 := &svcapitypes.CatalogSchemaChangePolicy{} + if f1valiter.S3HudiCatalogTarget.SchemaChangePolicy.EnableUpdateCatalog != nil { + f1valf54f5.EnableUpdateCatalog = f1valiter.S3HudiCatalogTarget.SchemaChangePolicy.EnableUpdateCatalog + } + if 
f1valiter.S3HudiCatalogTarget.SchemaChangePolicy.UpdateBehavior != nil { + f1valf54f5.UpdateBehavior = f1valiter.S3HudiCatalogTarget.SchemaChangePolicy.UpdateBehavior + } + f1valf54.SchemaChangePolicy = f1valf54f5 + } + if f1valiter.S3HudiCatalogTarget.Table != nil { + f1valf54.Table = f1valiter.S3HudiCatalogTarget.Table + } + f1val.S3HudiCatalogTarget = f1valf54 + } + if f1valiter.S3HudiDirectTarget != nil { + f1valf55 := &svcapitypes.S3HudiDirectTarget{} + if f1valiter.S3HudiDirectTarget.AdditionalOptions != nil { + f1valf55f0 := map[string]*string{} + for f1valf55f0key, f1valf55f0valiter := range f1valiter.S3HudiDirectTarget.AdditionalOptions { + var f1valf55f0val string + f1valf55f0val = *f1valf55f0valiter + f1valf55f0[f1valf55f0key] = &f1valf55f0val + } + f1valf55.AdditionalOptions = f1valf55f0 + } + if f1valiter.S3HudiDirectTarget.Compression != nil { + f1valf55.Compression = f1valiter.S3HudiDirectTarget.Compression + } + if f1valiter.S3HudiDirectTarget.Format != nil { + f1valf55.Format = f1valiter.S3HudiDirectTarget.Format + } + if f1valiter.S3HudiDirectTarget.Inputs != nil { + f1valf55f3 := []*string{} + for _, f1valf55f3iter := range f1valiter.S3HudiDirectTarget.Inputs { + var f1valf55f3elem string + f1valf55f3elem = *f1valf55f3iter + f1valf55f3 = append(f1valf55f3, &f1valf55f3elem) + } + f1valf55.Inputs = f1valf55f3 + } + if f1valiter.S3HudiDirectTarget.Name != nil { + f1valf55.Name = f1valiter.S3HudiDirectTarget.Name + } + if f1valiter.S3HudiDirectTarget.PartitionKeys != nil { + f1valf55f5 := [][]*string{} + for _, f1valf55f5iter := range f1valiter.S3HudiDirectTarget.PartitionKeys { + f1valf55f5elem := []*string{} + for _, f1valf55f5elemiter := range f1valf55f5iter { + var f1valf55f5elemelem string + f1valf55f5elemelem = *f1valf55f5elemiter + f1valf55f5elem = append(f1valf55f5elem, &f1valf55f5elemelem) + } + f1valf55f5 = append(f1valf55f5, f1valf55f5elem) + } + f1valf55.PartitionKeys = f1valf55f5 + } + if f1valiter.S3HudiDirectTarget.Path != nil { + f1valf55.Path = f1valiter.S3HudiDirectTarget.Path + } + if f1valiter.S3HudiDirectTarget.SchemaChangePolicy != nil { + f1valf55f7 := &svcapitypes.DirectSchemaChangePolicy{} + if f1valiter.S3HudiDirectTarget.SchemaChangePolicy.Database != nil { + f1valf55f7.Database = f1valiter.S3HudiDirectTarget.SchemaChangePolicy.Database + } + if f1valiter.S3HudiDirectTarget.SchemaChangePolicy.EnableUpdateCatalog != nil { + f1valf55f7.EnableUpdateCatalog = f1valiter.S3HudiDirectTarget.SchemaChangePolicy.EnableUpdateCatalog + } + if f1valiter.S3HudiDirectTarget.SchemaChangePolicy.Table != nil { + f1valf55f7.Table = f1valiter.S3HudiDirectTarget.SchemaChangePolicy.Table + } + if f1valiter.S3HudiDirectTarget.SchemaChangePolicy.UpdateBehavior != nil { + f1valf55f7.UpdateBehavior = f1valiter.S3HudiDirectTarget.SchemaChangePolicy.UpdateBehavior + } + f1valf55.SchemaChangePolicy = f1valf55f7 + } + f1val.S3HudiDirectTarget = f1valf55 + } + if f1valiter.S3HudiSource != nil { + f1valf56 := &svcapitypes.S3HudiSource{} + if f1valiter.S3HudiSource.AdditionalHudiOptions != nil { + f1valf56f0 := map[string]*string{} + for f1valf56f0key, f1valf56f0valiter := range f1valiter.S3HudiSource.AdditionalHudiOptions { + var f1valf56f0val string + f1valf56f0val = *f1valf56f0valiter + f1valf56f0[f1valf56f0key] = &f1valf56f0val + } + f1valf56.AdditionalHudiOptions = f1valf56f0 + } + if f1valiter.S3HudiSource.AdditionalOptions != nil { + f1valf56f1 := &svcapitypes.S3DirectSourceAdditionalOptions{} + if f1valiter.S3HudiSource.AdditionalOptions.BoundedFiles != nil { + 
f1valf56f1.BoundedFiles = f1valiter.S3HudiSource.AdditionalOptions.BoundedFiles + } + if f1valiter.S3HudiSource.AdditionalOptions.BoundedSize != nil { + f1valf56f1.BoundedSize = f1valiter.S3HudiSource.AdditionalOptions.BoundedSize + } + if f1valiter.S3HudiSource.AdditionalOptions.EnableSamplePath != nil { + f1valf56f1.EnableSamplePath = f1valiter.S3HudiSource.AdditionalOptions.EnableSamplePath + } + if f1valiter.S3HudiSource.AdditionalOptions.SamplePath != nil { + f1valf56f1.SamplePath = f1valiter.S3HudiSource.AdditionalOptions.SamplePath + } + f1valf56.AdditionalOptions = f1valf56f1 + } + if f1valiter.S3HudiSource.Name != nil { + f1valf56.Name = f1valiter.S3HudiSource.Name + } + if f1valiter.S3HudiSource.OutputSchemas != nil { + f1valf56f3 := []*svcapitypes.GlueSchema{} + for _, f1valf56f3iter := range f1valiter.S3HudiSource.OutputSchemas { + f1valf56f3elem := &svcapitypes.GlueSchema{} + if f1valf56f3iter.Columns != nil { + f1valf56f3elemf0 := []*svcapitypes.GlueStudioSchemaColumn{} + for _, f1valf56f3elemf0iter := range f1valf56f3iter.Columns { + f1valf56f3elemf0elem := &svcapitypes.GlueStudioSchemaColumn{} + if f1valf56f3elemf0iter.Name != nil { + f1valf56f3elemf0elem.Name = f1valf56f3elemf0iter.Name + } + if f1valf56f3elemf0iter.Type != nil { + f1valf56f3elemf0elem.Type = f1valf56f3elemf0iter.Type + } + f1valf56f3elemf0 = append(f1valf56f3elemf0, f1valf56f3elemf0elem) + } + f1valf56f3elem.Columns = f1valf56f3elemf0 + } + f1valf56f3 = append(f1valf56f3, f1valf56f3elem) + } + f1valf56.OutputSchemas = f1valf56f3 + } + if f1valiter.S3HudiSource.Paths != nil { + f1valf56f4 := []*string{} + for _, f1valf56f4iter := range f1valiter.S3HudiSource.Paths { + var f1valf56f4elem string + f1valf56f4elem = *f1valf56f4iter + f1valf56f4 = append(f1valf56f4, &f1valf56f4elem) + } + f1valf56.Paths = f1valf56f4 + } + f1val.S3HudiSource = f1valf56 } if f1valiter.S3JsonSource != nil { - f1valf42 := &svcapitypes.S3JSONSource{} + f1valf57 := &svcapitypes.S3JSONSource{} if f1valiter.S3JsonSource.AdditionalOptions != nil { - f1valf42f0 := &svcapitypes.S3DirectSourceAdditionalOptions{} + f1valf57f0 := &svcapitypes.S3DirectSourceAdditionalOptions{} if f1valiter.S3JsonSource.AdditionalOptions.BoundedFiles != nil { - f1valf42f0.BoundedFiles = f1valiter.S3JsonSource.AdditionalOptions.BoundedFiles + f1valf57f0.BoundedFiles = f1valiter.S3JsonSource.AdditionalOptions.BoundedFiles } if f1valiter.S3JsonSource.AdditionalOptions.BoundedSize != nil { - f1valf42f0.BoundedSize = f1valiter.S3JsonSource.AdditionalOptions.BoundedSize + f1valf57f0.BoundedSize = f1valiter.S3JsonSource.AdditionalOptions.BoundedSize } if f1valiter.S3JsonSource.AdditionalOptions.EnableSamplePath != nil { - f1valf42f0.EnableSamplePath = f1valiter.S3JsonSource.AdditionalOptions.EnableSamplePath + f1valf57f0.EnableSamplePath = f1valiter.S3JsonSource.AdditionalOptions.EnableSamplePath } if f1valiter.S3JsonSource.AdditionalOptions.SamplePath != nil { - f1valf42f0.SamplePath = f1valiter.S3JsonSource.AdditionalOptions.SamplePath + f1valf57f0.SamplePath = f1valiter.S3JsonSource.AdditionalOptions.SamplePath } - f1valf42.AdditionalOptions = f1valf42f0 + f1valf57.AdditionalOptions = f1valf57f0 } if f1valiter.S3JsonSource.CompressionType != nil { - f1valf42.CompressionType = f1valiter.S3JsonSource.CompressionType + f1valf57.CompressionType = f1valiter.S3JsonSource.CompressionType } if f1valiter.S3JsonSource.Exclusions != nil { - f1valf42f2 := []*string{} - for _, f1valf42f2iter := range f1valiter.S3JsonSource.Exclusions { - var f1valf42f2elem string - 
f1valf42f2elem = *f1valf42f2iter - f1valf42f2 = append(f1valf42f2, &f1valf42f2elem) + f1valf57f2 := []*string{} + for _, f1valf57f2iter := range f1valiter.S3JsonSource.Exclusions { + var f1valf57f2elem string + f1valf57f2elem = *f1valf57f2iter + f1valf57f2 = append(f1valf57f2, &f1valf57f2elem) } - f1valf42.Exclusions = f1valf42f2 + f1valf57.Exclusions = f1valf57f2 } if f1valiter.S3JsonSource.GroupFiles != nil { - f1valf42.GroupFiles = f1valiter.S3JsonSource.GroupFiles + f1valf57.GroupFiles = f1valiter.S3JsonSource.GroupFiles } if f1valiter.S3JsonSource.GroupSize != nil { - f1valf42.GroupSize = f1valiter.S3JsonSource.GroupSize + f1valf57.GroupSize = f1valiter.S3JsonSource.GroupSize } if f1valiter.S3JsonSource.JsonPath != nil { - f1valf42.JSONPath = f1valiter.S3JsonSource.JsonPath + f1valf57.JSONPath = f1valiter.S3JsonSource.JsonPath } if f1valiter.S3JsonSource.MaxBand != nil { - f1valf42.MaxBand = f1valiter.S3JsonSource.MaxBand + f1valf57.MaxBand = f1valiter.S3JsonSource.MaxBand } if f1valiter.S3JsonSource.MaxFilesInBand != nil { - f1valf42.MaxFilesInBand = f1valiter.S3JsonSource.MaxFilesInBand + f1valf57.MaxFilesInBand = f1valiter.S3JsonSource.MaxFilesInBand } if f1valiter.S3JsonSource.Multiline != nil { - f1valf42.Multiline = f1valiter.S3JsonSource.Multiline + f1valf57.Multiline = f1valiter.S3JsonSource.Multiline } if f1valiter.S3JsonSource.Name != nil { - f1valf42.Name = f1valiter.S3JsonSource.Name + f1valf57.Name = f1valiter.S3JsonSource.Name } if f1valiter.S3JsonSource.OutputSchemas != nil { - f1valf42f10 := []*svcapitypes.GlueSchema{} - for _, f1valf42f10iter := range f1valiter.S3JsonSource.OutputSchemas { - f1valf42f10elem := &svcapitypes.GlueSchema{} - if f1valf42f10iter.Columns != nil { - f1valf42f10elemf0 := []*svcapitypes.GlueStudioSchemaColumn{} - for _, f1valf42f10elemf0iter := range f1valf42f10iter.Columns { - f1valf42f10elemf0elem := &svcapitypes.GlueStudioSchemaColumn{} - if f1valf42f10elemf0iter.Name != nil { - f1valf42f10elemf0elem.Name = f1valf42f10elemf0iter.Name + f1valf57f10 := []*svcapitypes.GlueSchema{} + for _, f1valf57f10iter := range f1valiter.S3JsonSource.OutputSchemas { + f1valf57f10elem := &svcapitypes.GlueSchema{} + if f1valf57f10iter.Columns != nil { + f1valf57f10elemf0 := []*svcapitypes.GlueStudioSchemaColumn{} + for _, f1valf57f10elemf0iter := range f1valf57f10iter.Columns { + f1valf57f10elemf0elem := &svcapitypes.GlueStudioSchemaColumn{} + if f1valf57f10elemf0iter.Name != nil { + f1valf57f10elemf0elem.Name = f1valf57f10elemf0iter.Name } - if f1valf42f10elemf0iter.Type != nil { - f1valf42f10elemf0elem.Type = f1valf42f10elemf0iter.Type + if f1valf57f10elemf0iter.Type != nil { + f1valf57f10elemf0elem.Type = f1valf57f10elemf0iter.Type } - f1valf42f10elemf0 = append(f1valf42f10elemf0, f1valf42f10elemf0elem) + f1valf57f10elemf0 = append(f1valf57f10elemf0, f1valf57f10elemf0elem) } - f1valf42f10elem.Columns = f1valf42f10elemf0 + f1valf57f10elem.Columns = f1valf57f10elemf0 } - f1valf42f10 = append(f1valf42f10, f1valf42f10elem) + f1valf57f10 = append(f1valf57f10, f1valf57f10elem) } - f1valf42.OutputSchemas = f1valf42f10 + f1valf57.OutputSchemas = f1valf57f10 } if f1valiter.S3JsonSource.Paths != nil { - f1valf42f11 := []*string{} - for _, f1valf42f11iter := range f1valiter.S3JsonSource.Paths { - var f1valf42f11elem string - f1valf42f11elem = *f1valf42f11iter - f1valf42f11 = append(f1valf42f11, &f1valf42f11elem) + f1valf57f11 := []*string{} + for _, f1valf57f11iter := range f1valiter.S3JsonSource.Paths { + var f1valf57f11elem string + f1valf57f11elem = 
*f1valf57f11iter + f1valf57f11 = append(f1valf57f11, &f1valf57f11elem) } - f1valf42.Paths = f1valf42f11 + f1valf57.Paths = f1valf57f11 } if f1valiter.S3JsonSource.Recurse != nil { - f1valf42.Recurse = f1valiter.S3JsonSource.Recurse + f1valf57.Recurse = f1valiter.S3JsonSource.Recurse } - f1val.S3JSONSource = f1valf42 + f1val.S3JSONSource = f1valf57 } if f1valiter.S3ParquetSource != nil { - f1valf43 := &svcapitypes.S3ParquetSource{} + f1valf58 := &svcapitypes.S3ParquetSource{} if f1valiter.S3ParquetSource.AdditionalOptions != nil { - f1valf43f0 := &svcapitypes.S3DirectSourceAdditionalOptions{} + f1valf58f0 := &svcapitypes.S3DirectSourceAdditionalOptions{} if f1valiter.S3ParquetSource.AdditionalOptions.BoundedFiles != nil { - f1valf43f0.BoundedFiles = f1valiter.S3ParquetSource.AdditionalOptions.BoundedFiles + f1valf58f0.BoundedFiles = f1valiter.S3ParquetSource.AdditionalOptions.BoundedFiles } if f1valiter.S3ParquetSource.AdditionalOptions.BoundedSize != nil { - f1valf43f0.BoundedSize = f1valiter.S3ParquetSource.AdditionalOptions.BoundedSize + f1valf58f0.BoundedSize = f1valiter.S3ParquetSource.AdditionalOptions.BoundedSize } if f1valiter.S3ParquetSource.AdditionalOptions.EnableSamplePath != nil { - f1valf43f0.EnableSamplePath = f1valiter.S3ParquetSource.AdditionalOptions.EnableSamplePath + f1valf58f0.EnableSamplePath = f1valiter.S3ParquetSource.AdditionalOptions.EnableSamplePath } if f1valiter.S3ParquetSource.AdditionalOptions.SamplePath != nil { - f1valf43f0.SamplePath = f1valiter.S3ParquetSource.AdditionalOptions.SamplePath + f1valf58f0.SamplePath = f1valiter.S3ParquetSource.AdditionalOptions.SamplePath } - f1valf43.AdditionalOptions = f1valf43f0 + f1valf58.AdditionalOptions = f1valf58f0 } if f1valiter.S3ParquetSource.CompressionType != nil { - f1valf43.CompressionType = f1valiter.S3ParquetSource.CompressionType + f1valf58.CompressionType = f1valiter.S3ParquetSource.CompressionType } if f1valiter.S3ParquetSource.Exclusions != nil { - f1valf43f2 := []*string{} - for _, f1valf43f2iter := range f1valiter.S3ParquetSource.Exclusions { - var f1valf43f2elem string - f1valf43f2elem = *f1valf43f2iter - f1valf43f2 = append(f1valf43f2, &f1valf43f2elem) + f1valf58f2 := []*string{} + for _, f1valf58f2iter := range f1valiter.S3ParquetSource.Exclusions { + var f1valf58f2elem string + f1valf58f2elem = *f1valf58f2iter + f1valf58f2 = append(f1valf58f2, &f1valf58f2elem) } - f1valf43.Exclusions = f1valf43f2 + f1valf58.Exclusions = f1valf58f2 } if f1valiter.S3ParquetSource.GroupFiles != nil { - f1valf43.GroupFiles = f1valiter.S3ParquetSource.GroupFiles + f1valf58.GroupFiles = f1valiter.S3ParquetSource.GroupFiles } if f1valiter.S3ParquetSource.GroupSize != nil { - f1valf43.GroupSize = f1valiter.S3ParquetSource.GroupSize + f1valf58.GroupSize = f1valiter.S3ParquetSource.GroupSize } if f1valiter.S3ParquetSource.MaxBand != nil { - f1valf43.MaxBand = f1valiter.S3ParquetSource.MaxBand + f1valf58.MaxBand = f1valiter.S3ParquetSource.MaxBand } if f1valiter.S3ParquetSource.MaxFilesInBand != nil { - f1valf43.MaxFilesInBand = f1valiter.S3ParquetSource.MaxFilesInBand + f1valf58.MaxFilesInBand = f1valiter.S3ParquetSource.MaxFilesInBand } if f1valiter.S3ParquetSource.Name != nil { - f1valf43.Name = f1valiter.S3ParquetSource.Name + f1valf58.Name = f1valiter.S3ParquetSource.Name } if f1valiter.S3ParquetSource.OutputSchemas != nil { - f1valf43f8 := []*svcapitypes.GlueSchema{} - for _, f1valf43f8iter := range f1valiter.S3ParquetSource.OutputSchemas { - f1valf43f8elem := &svcapitypes.GlueSchema{} - if f1valf43f8iter.Columns != nil { 
- f1valf43f8elemf0 := []*svcapitypes.GlueStudioSchemaColumn{} - for _, f1valf43f8elemf0iter := range f1valf43f8iter.Columns { - f1valf43f8elemf0elem := &svcapitypes.GlueStudioSchemaColumn{} - if f1valf43f8elemf0iter.Name != nil { - f1valf43f8elemf0elem.Name = f1valf43f8elemf0iter.Name + f1valf58f8 := []*svcapitypes.GlueSchema{} + for _, f1valf58f8iter := range f1valiter.S3ParquetSource.OutputSchemas { + f1valf58f8elem := &svcapitypes.GlueSchema{} + if f1valf58f8iter.Columns != nil { + f1valf58f8elemf0 := []*svcapitypes.GlueStudioSchemaColumn{} + for _, f1valf58f8elemf0iter := range f1valf58f8iter.Columns { + f1valf58f8elemf0elem := &svcapitypes.GlueStudioSchemaColumn{} + if f1valf58f8elemf0iter.Name != nil { + f1valf58f8elemf0elem.Name = f1valf58f8elemf0iter.Name } - if f1valf43f8elemf0iter.Type != nil { - f1valf43f8elemf0elem.Type = f1valf43f8elemf0iter.Type + if f1valf58f8elemf0iter.Type != nil { + f1valf58f8elemf0elem.Type = f1valf58f8elemf0iter.Type } - f1valf43f8elemf0 = append(f1valf43f8elemf0, f1valf43f8elemf0elem) + f1valf58f8elemf0 = append(f1valf58f8elemf0, f1valf58f8elemf0elem) } - f1valf43f8elem.Columns = f1valf43f8elemf0 + f1valf58f8elem.Columns = f1valf58f8elemf0 } - f1valf43f8 = append(f1valf43f8, f1valf43f8elem) + f1valf58f8 = append(f1valf58f8, f1valf58f8elem) } - f1valf43.OutputSchemas = f1valf43f8 + f1valf58.OutputSchemas = f1valf58f8 } if f1valiter.S3ParquetSource.Paths != nil { - f1valf43f9 := []*string{} - for _, f1valf43f9iter := range f1valiter.S3ParquetSource.Paths { - var f1valf43f9elem string - f1valf43f9elem = *f1valf43f9iter - f1valf43f9 = append(f1valf43f9, &f1valf43f9elem) + f1valf58f9 := []*string{} + for _, f1valf58f9iter := range f1valiter.S3ParquetSource.Paths { + var f1valf58f9elem string + f1valf58f9elem = *f1valf58f9iter + f1valf58f9 = append(f1valf58f9, &f1valf58f9elem) } - f1valf43.Paths = f1valf43f9 + f1valf58.Paths = f1valf58f9 } if f1valiter.S3ParquetSource.Recurse != nil { - f1valf43.Recurse = f1valiter.S3ParquetSource.Recurse + f1valf58.Recurse = f1valiter.S3ParquetSource.Recurse } - f1val.S3ParquetSource = f1valf43 + f1val.S3ParquetSource = f1valf58 } if f1valiter.SelectFields != nil { - f1valf44 := &svcapitypes.SelectFields{} + f1valf59 := &svcapitypes.SelectFields{} if f1valiter.SelectFields.Inputs != nil { - f1valf44f0 := []*string{} - for _, f1valf44f0iter := range f1valiter.SelectFields.Inputs { - var f1valf44f0elem string - f1valf44f0elem = *f1valf44f0iter - f1valf44f0 = append(f1valf44f0, &f1valf44f0elem) + f1valf59f0 := []*string{} + for _, f1valf59f0iter := range f1valiter.SelectFields.Inputs { + var f1valf59f0elem string + f1valf59f0elem = *f1valf59f0iter + f1valf59f0 = append(f1valf59f0, &f1valf59f0elem) } - f1valf44.Inputs = f1valf44f0 + f1valf59.Inputs = f1valf59f0 } if f1valiter.SelectFields.Name != nil { - f1valf44.Name = f1valiter.SelectFields.Name + f1valf59.Name = f1valiter.SelectFields.Name } if f1valiter.SelectFields.Paths != nil { - f1valf44f2 := [][]*string{} - for _, f1valf44f2iter := range f1valiter.SelectFields.Paths { - f1valf44f2elem := []*string{} - for _, f1valf44f2elemiter := range f1valf44f2iter { - var f1valf44f2elemelem string - f1valf44f2elemelem = *f1valf44f2elemiter - f1valf44f2elem = append(f1valf44f2elem, &f1valf44f2elemelem) + f1valf59f2 := [][]*string{} + for _, f1valf59f2iter := range f1valiter.SelectFields.Paths { + f1valf59f2elem := []*string{} + for _, f1valf59f2elemiter := range f1valf59f2iter { + var f1valf59f2elemelem string + f1valf59f2elemelem = *f1valf59f2elemiter + f1valf59f2elem = 
append(f1valf59f2elem, &f1valf59f2elemelem) } - f1valf44f2 = append(f1valf44f2, f1valf44f2elem) + f1valf59f2 = append(f1valf59f2, f1valf59f2elem) } - f1valf44.Paths = f1valf44f2 + f1valf59.Paths = f1valf59f2 } - f1val.SelectFields = f1valf44 + f1val.SelectFields = f1valf59 } if f1valiter.SelectFromCollection != nil { - f1valf45 := &svcapitypes.SelectFromCollection{} + f1valf60 := &svcapitypes.SelectFromCollection{} if f1valiter.SelectFromCollection.Index != nil { - f1valf45.Index = f1valiter.SelectFromCollection.Index + f1valf60.Index = f1valiter.SelectFromCollection.Index } if f1valiter.SelectFromCollection.Inputs != nil { - f1valf45f1 := []*string{} - for _, f1valf45f1iter := range f1valiter.SelectFromCollection.Inputs { - var f1valf45f1elem string - f1valf45f1elem = *f1valf45f1iter - f1valf45f1 = append(f1valf45f1, &f1valf45f1elem) + f1valf60f1 := []*string{} + for _, f1valf60f1iter := range f1valiter.SelectFromCollection.Inputs { + var f1valf60f1elem string + f1valf60f1elem = *f1valf60f1iter + f1valf60f1 = append(f1valf60f1, &f1valf60f1elem) } - f1valf45.Inputs = f1valf45f1 + f1valf60.Inputs = f1valf60f1 } if f1valiter.SelectFromCollection.Name != nil { - f1valf45.Name = f1valiter.SelectFromCollection.Name + f1valf60.Name = f1valiter.SelectFromCollection.Name + } + f1val.SelectFromCollection = f1valf60 + } + if f1valiter.SnowflakeSource != nil { + f1valf61 := &svcapitypes.SnowflakeSource{} + if f1valiter.SnowflakeSource.Data != nil { + f1valf61f0 := &svcapitypes.SnowflakeNodeData{} + if f1valiter.SnowflakeSource.Data.Action != nil { + f1valf61f0.Action = f1valiter.SnowflakeSource.Data.Action + } + if f1valiter.SnowflakeSource.Data.AdditionalOptions != nil { + f1valf61f0f1 := map[string]*string{} + for f1valf61f0f1key, f1valf61f0f1valiter := range f1valiter.SnowflakeSource.Data.AdditionalOptions { + var f1valf61f0f1val string + f1valf61f0f1val = *f1valf61f0f1valiter + f1valf61f0f1[f1valf61f0f1key] = &f1valf61f0f1val + } + f1valf61f0.AdditionalOptions = f1valf61f0f1 + } + if f1valiter.SnowflakeSource.Data.AutoPushdown != nil { + f1valf61f0.AutoPushdown = f1valiter.SnowflakeSource.Data.AutoPushdown + } + if f1valiter.SnowflakeSource.Data.Connection != nil { + f1valf61f0f3 := &svcapitypes.Option{} + if f1valiter.SnowflakeSource.Data.Connection.Description != nil { + f1valf61f0f3.Description = f1valiter.SnowflakeSource.Data.Connection.Description + } + if f1valiter.SnowflakeSource.Data.Connection.Label != nil { + f1valf61f0f3.Label = f1valiter.SnowflakeSource.Data.Connection.Label + } + if f1valiter.SnowflakeSource.Data.Connection.Value != nil { + f1valf61f0f3.Value = f1valiter.SnowflakeSource.Data.Connection.Value + } + f1valf61f0.Connection = f1valf61f0f3 + } + if f1valiter.SnowflakeSource.Data.Database != nil { + f1valf61f0.Database = f1valiter.SnowflakeSource.Data.Database + } + if f1valiter.SnowflakeSource.Data.IamRole != nil { + f1valf61f0f5 := &svcapitypes.Option{} + if f1valiter.SnowflakeSource.Data.IamRole.Description != nil { + f1valf61f0f5.Description = f1valiter.SnowflakeSource.Data.IamRole.Description + } + if f1valiter.SnowflakeSource.Data.IamRole.Label != nil { + f1valf61f0f5.Label = f1valiter.SnowflakeSource.Data.IamRole.Label + } + if f1valiter.SnowflakeSource.Data.IamRole.Value != nil { + f1valf61f0f5.Value = f1valiter.SnowflakeSource.Data.IamRole.Value + } + f1valf61f0.IAMRole = f1valf61f0f5 + } + if f1valiter.SnowflakeSource.Data.MergeAction != nil { + f1valf61f0.MergeAction = f1valiter.SnowflakeSource.Data.MergeAction + } + if f1valiter.SnowflakeSource.Data.MergeClause 
!= nil { + f1valf61f0.MergeClause = f1valiter.SnowflakeSource.Data.MergeClause + } + if f1valiter.SnowflakeSource.Data.MergeWhenMatched != nil { + f1valf61f0.MergeWhenMatched = f1valiter.SnowflakeSource.Data.MergeWhenMatched + } + if f1valiter.SnowflakeSource.Data.MergeWhenNotMatched != nil { + f1valf61f0.MergeWhenNotMatched = f1valiter.SnowflakeSource.Data.MergeWhenNotMatched + } + if f1valiter.SnowflakeSource.Data.PostAction != nil { + f1valf61f0.PostAction = f1valiter.SnowflakeSource.Data.PostAction + } + if f1valiter.SnowflakeSource.Data.PreAction != nil { + f1valf61f0.PreAction = f1valiter.SnowflakeSource.Data.PreAction + } + if f1valiter.SnowflakeSource.Data.SampleQuery != nil { + f1valf61f0.SampleQuery = f1valiter.SnowflakeSource.Data.SampleQuery + } + if f1valiter.SnowflakeSource.Data.Schema != nil { + f1valf61f0.Schema = f1valiter.SnowflakeSource.Data.Schema + } + if f1valiter.SnowflakeSource.Data.SelectedColumns != nil { + f1valf61f0f14 := []*svcapitypes.Option{} + for _, f1valf61f0f14iter := range f1valiter.SnowflakeSource.Data.SelectedColumns { + f1valf61f0f14elem := &svcapitypes.Option{} + if f1valf61f0f14iter.Description != nil { + f1valf61f0f14elem.Description = f1valf61f0f14iter.Description + } + if f1valf61f0f14iter.Label != nil { + f1valf61f0f14elem.Label = f1valf61f0f14iter.Label + } + if f1valf61f0f14iter.Value != nil { + f1valf61f0f14elem.Value = f1valf61f0f14iter.Value + } + f1valf61f0f14 = append(f1valf61f0f14, f1valf61f0f14elem) + } + f1valf61f0.SelectedColumns = f1valf61f0f14 + } + if f1valiter.SnowflakeSource.Data.SourceType != nil { + f1valf61f0.SourceType = f1valiter.SnowflakeSource.Data.SourceType + } + if f1valiter.SnowflakeSource.Data.StagingTable != nil { + f1valf61f0.StagingTable = f1valiter.SnowflakeSource.Data.StagingTable + } + if f1valiter.SnowflakeSource.Data.Table != nil { + f1valf61f0.Table = f1valiter.SnowflakeSource.Data.Table + } + if f1valiter.SnowflakeSource.Data.TableSchema != nil { + f1valf61f0f18 := []*svcapitypes.Option{} + for _, f1valf61f0f18iter := range f1valiter.SnowflakeSource.Data.TableSchema { + f1valf61f0f18elem := &svcapitypes.Option{} + if f1valf61f0f18iter.Description != nil { + f1valf61f0f18elem.Description = f1valf61f0f18iter.Description + } + if f1valf61f0f18iter.Label != nil { + f1valf61f0f18elem.Label = f1valf61f0f18iter.Label + } + if f1valf61f0f18iter.Value != nil { + f1valf61f0f18elem.Value = f1valf61f0f18iter.Value + } + f1valf61f0f18 = append(f1valf61f0f18, f1valf61f0f18elem) + } + f1valf61f0.TableSchema = f1valf61f0f18 + } + if f1valiter.SnowflakeSource.Data.TempDir != nil { + f1valf61f0.TempDir = f1valiter.SnowflakeSource.Data.TempDir + } + if f1valiter.SnowflakeSource.Data.Upsert != nil { + f1valf61f0.Upsert = f1valiter.SnowflakeSource.Data.Upsert + } + f1valf61.Data = f1valf61f0 + } + if f1valiter.SnowflakeSource.Name != nil { + f1valf61.Name = f1valiter.SnowflakeSource.Name } - f1val.SelectFromCollection = f1valf45 + if f1valiter.SnowflakeSource.OutputSchemas != nil { + f1valf61f2 := []*svcapitypes.GlueSchema{} + for _, f1valf61f2iter := range f1valiter.SnowflakeSource.OutputSchemas { + f1valf61f2elem := &svcapitypes.GlueSchema{} + if f1valf61f2iter.Columns != nil { + f1valf61f2elemf0 := []*svcapitypes.GlueStudioSchemaColumn{} + for _, f1valf61f2elemf0iter := range f1valf61f2iter.Columns { + f1valf61f2elemf0elem := &svcapitypes.GlueStudioSchemaColumn{} + if f1valf61f2elemf0iter.Name != nil { + f1valf61f2elemf0elem.Name = f1valf61f2elemf0iter.Name + } + if f1valf61f2elemf0iter.Type != nil { + 
f1valf61f2elemf0elem.Type = f1valf61f2elemf0iter.Type + } + f1valf61f2elemf0 = append(f1valf61f2elemf0, f1valf61f2elemf0elem) + } + f1valf61f2elem.Columns = f1valf61f2elemf0 + } + f1valf61f2 = append(f1valf61f2, f1valf61f2elem) + } + f1valf61.OutputSchemas = f1valf61f2 + } + f1val.SnowflakeSource = f1valf61 + } + if f1valiter.SnowflakeTarget != nil { + f1valf62 := &svcapitypes.SnowflakeTarget{} + if f1valiter.SnowflakeTarget.Data != nil { + f1valf62f0 := &svcapitypes.SnowflakeNodeData{} + if f1valiter.SnowflakeTarget.Data.Action != nil { + f1valf62f0.Action = f1valiter.SnowflakeTarget.Data.Action + } + if f1valiter.SnowflakeTarget.Data.AdditionalOptions != nil { + f1valf62f0f1 := map[string]*string{} + for f1valf62f0f1key, f1valf62f0f1valiter := range f1valiter.SnowflakeTarget.Data.AdditionalOptions { + var f1valf62f0f1val string + f1valf62f0f1val = *f1valf62f0f1valiter + f1valf62f0f1[f1valf62f0f1key] = &f1valf62f0f1val + } + f1valf62f0.AdditionalOptions = f1valf62f0f1 + } + if f1valiter.SnowflakeTarget.Data.AutoPushdown != nil { + f1valf62f0.AutoPushdown = f1valiter.SnowflakeTarget.Data.AutoPushdown + } + if f1valiter.SnowflakeTarget.Data.Connection != nil { + f1valf62f0f3 := &svcapitypes.Option{} + if f1valiter.SnowflakeTarget.Data.Connection.Description != nil { + f1valf62f0f3.Description = f1valiter.SnowflakeTarget.Data.Connection.Description + } + if f1valiter.SnowflakeTarget.Data.Connection.Label != nil { + f1valf62f0f3.Label = f1valiter.SnowflakeTarget.Data.Connection.Label + } + if f1valiter.SnowflakeTarget.Data.Connection.Value != nil { + f1valf62f0f3.Value = f1valiter.SnowflakeTarget.Data.Connection.Value + } + f1valf62f0.Connection = f1valf62f0f3 + } + if f1valiter.SnowflakeTarget.Data.Database != nil { + f1valf62f0.Database = f1valiter.SnowflakeTarget.Data.Database + } + if f1valiter.SnowflakeTarget.Data.IamRole != nil { + f1valf62f0f5 := &svcapitypes.Option{} + if f1valiter.SnowflakeTarget.Data.IamRole.Description != nil { + f1valf62f0f5.Description = f1valiter.SnowflakeTarget.Data.IamRole.Description + } + if f1valiter.SnowflakeTarget.Data.IamRole.Label != nil { + f1valf62f0f5.Label = f1valiter.SnowflakeTarget.Data.IamRole.Label + } + if f1valiter.SnowflakeTarget.Data.IamRole.Value != nil { + f1valf62f0f5.Value = f1valiter.SnowflakeTarget.Data.IamRole.Value + } + f1valf62f0.IAMRole = f1valf62f0f5 + } + if f1valiter.SnowflakeTarget.Data.MergeAction != nil { + f1valf62f0.MergeAction = f1valiter.SnowflakeTarget.Data.MergeAction + } + if f1valiter.SnowflakeTarget.Data.MergeClause != nil { + f1valf62f0.MergeClause = f1valiter.SnowflakeTarget.Data.MergeClause + } + if f1valiter.SnowflakeTarget.Data.MergeWhenMatched != nil { + f1valf62f0.MergeWhenMatched = f1valiter.SnowflakeTarget.Data.MergeWhenMatched + } + if f1valiter.SnowflakeTarget.Data.MergeWhenNotMatched != nil { + f1valf62f0.MergeWhenNotMatched = f1valiter.SnowflakeTarget.Data.MergeWhenNotMatched + } + if f1valiter.SnowflakeTarget.Data.PostAction != nil { + f1valf62f0.PostAction = f1valiter.SnowflakeTarget.Data.PostAction + } + if f1valiter.SnowflakeTarget.Data.PreAction != nil { + f1valf62f0.PreAction = f1valiter.SnowflakeTarget.Data.PreAction + } + if f1valiter.SnowflakeTarget.Data.SampleQuery != nil { + f1valf62f0.SampleQuery = f1valiter.SnowflakeTarget.Data.SampleQuery + } + if f1valiter.SnowflakeTarget.Data.Schema != nil { + f1valf62f0.Schema = f1valiter.SnowflakeTarget.Data.Schema + } + if f1valiter.SnowflakeTarget.Data.SelectedColumns != nil { + f1valf62f0f14 := []*svcapitypes.Option{} + for _, f1valf62f0f14iter := 
range f1valiter.SnowflakeTarget.Data.SelectedColumns { + f1valf62f0f14elem := &svcapitypes.Option{} + if f1valf62f0f14iter.Description != nil { + f1valf62f0f14elem.Description = f1valf62f0f14iter.Description + } + if f1valf62f0f14iter.Label != nil { + f1valf62f0f14elem.Label = f1valf62f0f14iter.Label + } + if f1valf62f0f14iter.Value != nil { + f1valf62f0f14elem.Value = f1valf62f0f14iter.Value + } + f1valf62f0f14 = append(f1valf62f0f14, f1valf62f0f14elem) + } + f1valf62f0.SelectedColumns = f1valf62f0f14 + } + if f1valiter.SnowflakeTarget.Data.SourceType != nil { + f1valf62f0.SourceType = f1valiter.SnowflakeTarget.Data.SourceType + } + if f1valiter.SnowflakeTarget.Data.StagingTable != nil { + f1valf62f0.StagingTable = f1valiter.SnowflakeTarget.Data.StagingTable + } + if f1valiter.SnowflakeTarget.Data.Table != nil { + f1valf62f0.Table = f1valiter.SnowflakeTarget.Data.Table + } + if f1valiter.SnowflakeTarget.Data.TableSchema != nil { + f1valf62f0f18 := []*svcapitypes.Option{} + for _, f1valf62f0f18iter := range f1valiter.SnowflakeTarget.Data.TableSchema { + f1valf62f0f18elem := &svcapitypes.Option{} + if f1valf62f0f18iter.Description != nil { + f1valf62f0f18elem.Description = f1valf62f0f18iter.Description + } + if f1valf62f0f18iter.Label != nil { + f1valf62f0f18elem.Label = f1valf62f0f18iter.Label + } + if f1valf62f0f18iter.Value != nil { + f1valf62f0f18elem.Value = f1valf62f0f18iter.Value + } + f1valf62f0f18 = append(f1valf62f0f18, f1valf62f0f18elem) + } + f1valf62f0.TableSchema = f1valf62f0f18 + } + if f1valiter.SnowflakeTarget.Data.TempDir != nil { + f1valf62f0.TempDir = f1valiter.SnowflakeTarget.Data.TempDir + } + if f1valiter.SnowflakeTarget.Data.Upsert != nil { + f1valf62f0.Upsert = f1valiter.SnowflakeTarget.Data.Upsert + } + f1valf62.Data = f1valf62f0 + } + if f1valiter.SnowflakeTarget.Inputs != nil { + f1valf62f1 := []*string{} + for _, f1valf62f1iter := range f1valiter.SnowflakeTarget.Inputs { + var f1valf62f1elem string + f1valf62f1elem = *f1valf62f1iter + f1valf62f1 = append(f1valf62f1, &f1valf62f1elem) + } + f1valf62.Inputs = f1valf62f1 + } + if f1valiter.SnowflakeTarget.Name != nil { + f1valf62.Name = f1valiter.SnowflakeTarget.Name + } + f1val.SnowflakeTarget = f1valf62 } if f1valiter.SparkConnectorSource != nil { - f1valf46 := &svcapitypes.SparkConnectorSource{} + f1valf63 := &svcapitypes.SparkConnectorSource{} if f1valiter.SparkConnectorSource.AdditionalOptions != nil { - f1valf46f0 := map[string]*string{} - for f1valf46f0key, f1valf46f0valiter := range f1valiter.SparkConnectorSource.AdditionalOptions { - var f1valf46f0val string - f1valf46f0val = *f1valf46f0valiter - f1valf46f0[f1valf46f0key] = &f1valf46f0val + f1valf63f0 := map[string]*string{} + for f1valf63f0key, f1valf63f0valiter := range f1valiter.SparkConnectorSource.AdditionalOptions { + var f1valf63f0val string + f1valf63f0val = *f1valf63f0valiter + f1valf63f0[f1valf63f0key] = &f1valf63f0val } - f1valf46.AdditionalOptions = f1valf46f0 + f1valf63.AdditionalOptions = f1valf63f0 } if f1valiter.SparkConnectorSource.ConnectionName != nil { - f1valf46.ConnectionName = f1valiter.SparkConnectorSource.ConnectionName + f1valf63.ConnectionName = f1valiter.SparkConnectorSource.ConnectionName } if f1valiter.SparkConnectorSource.ConnectionType != nil { - f1valf46.ConnectionType = f1valiter.SparkConnectorSource.ConnectionType + f1valf63.ConnectionType = f1valiter.SparkConnectorSource.ConnectionType } if f1valiter.SparkConnectorSource.ConnectorName != nil { - f1valf46.ConnectorName = f1valiter.SparkConnectorSource.ConnectorName + 
f1valf63.ConnectorName = f1valiter.SparkConnectorSource.ConnectorName } if f1valiter.SparkConnectorSource.Name != nil { - f1valf46.Name = f1valiter.SparkConnectorSource.Name + f1valf63.Name = f1valiter.SparkConnectorSource.Name } if f1valiter.SparkConnectorSource.OutputSchemas != nil { - f1valf46f5 := []*svcapitypes.GlueSchema{} - for _, f1valf46f5iter := range f1valiter.SparkConnectorSource.OutputSchemas { - f1valf46f5elem := &svcapitypes.GlueSchema{} - if f1valf46f5iter.Columns != nil { - f1valf46f5elemf0 := []*svcapitypes.GlueStudioSchemaColumn{} - for _, f1valf46f5elemf0iter := range f1valf46f5iter.Columns { - f1valf46f5elemf0elem := &svcapitypes.GlueStudioSchemaColumn{} - if f1valf46f5elemf0iter.Name != nil { - f1valf46f5elemf0elem.Name = f1valf46f5elemf0iter.Name + f1valf63f5 := []*svcapitypes.GlueSchema{} + for _, f1valf63f5iter := range f1valiter.SparkConnectorSource.OutputSchemas { + f1valf63f5elem := &svcapitypes.GlueSchema{} + if f1valf63f5iter.Columns != nil { + f1valf63f5elemf0 := []*svcapitypes.GlueStudioSchemaColumn{} + for _, f1valf63f5elemf0iter := range f1valf63f5iter.Columns { + f1valf63f5elemf0elem := &svcapitypes.GlueStudioSchemaColumn{} + if f1valf63f5elemf0iter.Name != nil { + f1valf63f5elemf0elem.Name = f1valf63f5elemf0iter.Name } - if f1valf46f5elemf0iter.Type != nil { - f1valf46f5elemf0elem.Type = f1valf46f5elemf0iter.Type + if f1valf63f5elemf0iter.Type != nil { + f1valf63f5elemf0elem.Type = f1valf63f5elemf0iter.Type } - f1valf46f5elemf0 = append(f1valf46f5elemf0, f1valf46f5elemf0elem) + f1valf63f5elemf0 = append(f1valf63f5elemf0, f1valf63f5elemf0elem) } - f1valf46f5elem.Columns = f1valf46f5elemf0 + f1valf63f5elem.Columns = f1valf63f5elemf0 } - f1valf46f5 = append(f1valf46f5, f1valf46f5elem) + f1valf63f5 = append(f1valf63f5, f1valf63f5elem) } - f1valf46.OutputSchemas = f1valf46f5 + f1valf63.OutputSchemas = f1valf63f5 } - f1val.SparkConnectorSource = f1valf46 + f1val.SparkConnectorSource = f1valf63 } if f1valiter.SparkConnectorTarget != nil { - f1valf47 := &svcapitypes.SparkConnectorTarget{} + f1valf64 := &svcapitypes.SparkConnectorTarget{} if f1valiter.SparkConnectorTarget.AdditionalOptions != nil { - f1valf47f0 := map[string]*string{} - for f1valf47f0key, f1valf47f0valiter := range f1valiter.SparkConnectorTarget.AdditionalOptions { - var f1valf47f0val string - f1valf47f0val = *f1valf47f0valiter - f1valf47f0[f1valf47f0key] = &f1valf47f0val + f1valf64f0 := map[string]*string{} + for f1valf64f0key, f1valf64f0valiter := range f1valiter.SparkConnectorTarget.AdditionalOptions { + var f1valf64f0val string + f1valf64f0val = *f1valf64f0valiter + f1valf64f0[f1valf64f0key] = &f1valf64f0val } - f1valf47.AdditionalOptions = f1valf47f0 + f1valf64.AdditionalOptions = f1valf64f0 } if f1valiter.SparkConnectorTarget.ConnectionName != nil { - f1valf47.ConnectionName = f1valiter.SparkConnectorTarget.ConnectionName + f1valf64.ConnectionName = f1valiter.SparkConnectorTarget.ConnectionName } if f1valiter.SparkConnectorTarget.ConnectionType != nil { - f1valf47.ConnectionType = f1valiter.SparkConnectorTarget.ConnectionType + f1valf64.ConnectionType = f1valiter.SparkConnectorTarget.ConnectionType } if f1valiter.SparkConnectorTarget.ConnectorName != nil { - f1valf47.ConnectorName = f1valiter.SparkConnectorTarget.ConnectorName + f1valf64.ConnectorName = f1valiter.SparkConnectorTarget.ConnectorName } if f1valiter.SparkConnectorTarget.Inputs != nil { - f1valf47f4 := []*string{} - for _, f1valf47f4iter := range f1valiter.SparkConnectorTarget.Inputs { - var f1valf47f4elem string - 
f1valf47f4elem = *f1valf47f4iter - f1valf47f4 = append(f1valf47f4, &f1valf47f4elem) + f1valf64f4 := []*string{} + for _, f1valf64f4iter := range f1valiter.SparkConnectorTarget.Inputs { + var f1valf64f4elem string + f1valf64f4elem = *f1valf64f4iter + f1valf64f4 = append(f1valf64f4, &f1valf64f4elem) } - f1valf47.Inputs = f1valf47f4 + f1valf64.Inputs = f1valf64f4 } if f1valiter.SparkConnectorTarget.Name != nil { - f1valf47.Name = f1valiter.SparkConnectorTarget.Name + f1valf64.Name = f1valiter.SparkConnectorTarget.Name } if f1valiter.SparkConnectorTarget.OutputSchemas != nil { - f1valf47f6 := []*svcapitypes.GlueSchema{} - for _, f1valf47f6iter := range f1valiter.SparkConnectorTarget.OutputSchemas { - f1valf47f6elem := &svcapitypes.GlueSchema{} - if f1valf47f6iter.Columns != nil { - f1valf47f6elemf0 := []*svcapitypes.GlueStudioSchemaColumn{} - for _, f1valf47f6elemf0iter := range f1valf47f6iter.Columns { - f1valf47f6elemf0elem := &svcapitypes.GlueStudioSchemaColumn{} - if f1valf47f6elemf0iter.Name != nil { - f1valf47f6elemf0elem.Name = f1valf47f6elemf0iter.Name + f1valf64f6 := []*svcapitypes.GlueSchema{} + for _, f1valf64f6iter := range f1valiter.SparkConnectorTarget.OutputSchemas { + f1valf64f6elem := &svcapitypes.GlueSchema{} + if f1valf64f6iter.Columns != nil { + f1valf64f6elemf0 := []*svcapitypes.GlueStudioSchemaColumn{} + for _, f1valf64f6elemf0iter := range f1valf64f6iter.Columns { + f1valf64f6elemf0elem := &svcapitypes.GlueStudioSchemaColumn{} + if f1valf64f6elemf0iter.Name != nil { + f1valf64f6elemf0elem.Name = f1valf64f6elemf0iter.Name } - if f1valf47f6elemf0iter.Type != nil { - f1valf47f6elemf0elem.Type = f1valf47f6elemf0iter.Type + if f1valf64f6elemf0iter.Type != nil { + f1valf64f6elemf0elem.Type = f1valf64f6elemf0iter.Type } - f1valf47f6elemf0 = append(f1valf47f6elemf0, f1valf47f6elemf0elem) + f1valf64f6elemf0 = append(f1valf64f6elemf0, f1valf64f6elemf0elem) } - f1valf47f6elem.Columns = f1valf47f6elemf0 + f1valf64f6elem.Columns = f1valf64f6elemf0 } - f1valf47f6 = append(f1valf47f6, f1valf47f6elem) + f1valf64f6 = append(f1valf64f6, f1valf64f6elem) } - f1valf47.OutputSchemas = f1valf47f6 + f1valf64.OutputSchemas = f1valf64f6 } - f1val.SparkConnectorTarget = f1valf47 + f1val.SparkConnectorTarget = f1valf64 } if f1valiter.SparkSQL != nil { - f1valf48 := &svcapitypes.SparkSQL{} + f1valf65 := &svcapitypes.SparkSQL{} if f1valiter.SparkSQL.Inputs != nil { - f1valf48f0 := []*string{} - for _, f1valf48f0iter := range f1valiter.SparkSQL.Inputs { - var f1valf48f0elem string - f1valf48f0elem = *f1valf48f0iter - f1valf48f0 = append(f1valf48f0, &f1valf48f0elem) + f1valf65f0 := []*string{} + for _, f1valf65f0iter := range f1valiter.SparkSQL.Inputs { + var f1valf65f0elem string + f1valf65f0elem = *f1valf65f0iter + f1valf65f0 = append(f1valf65f0, &f1valf65f0elem) } - f1valf48.Inputs = f1valf48f0 + f1valf65.Inputs = f1valf65f0 } if f1valiter.SparkSQL.Name != nil { - f1valf48.Name = f1valiter.SparkSQL.Name + f1valf65.Name = f1valiter.SparkSQL.Name } if f1valiter.SparkSQL.OutputSchemas != nil { - f1valf48f2 := []*svcapitypes.GlueSchema{} - for _, f1valf48f2iter := range f1valiter.SparkSQL.OutputSchemas { - f1valf48f2elem := &svcapitypes.GlueSchema{} - if f1valf48f2iter.Columns != nil { - f1valf48f2elemf0 := []*svcapitypes.GlueStudioSchemaColumn{} - for _, f1valf48f2elemf0iter := range f1valf48f2iter.Columns { - f1valf48f2elemf0elem := &svcapitypes.GlueStudioSchemaColumn{} - if f1valf48f2elemf0iter.Name != nil { - f1valf48f2elemf0elem.Name = f1valf48f2elemf0iter.Name + f1valf65f2 := 
[]*svcapitypes.GlueSchema{} + for _, f1valf65f2iter := range f1valiter.SparkSQL.OutputSchemas { + f1valf65f2elem := &svcapitypes.GlueSchema{} + if f1valf65f2iter.Columns != nil { + f1valf65f2elemf0 := []*svcapitypes.GlueStudioSchemaColumn{} + for _, f1valf65f2elemf0iter := range f1valf65f2iter.Columns { + f1valf65f2elemf0elem := &svcapitypes.GlueStudioSchemaColumn{} + if f1valf65f2elemf0iter.Name != nil { + f1valf65f2elemf0elem.Name = f1valf65f2elemf0iter.Name } - if f1valf48f2elemf0iter.Type != nil { - f1valf48f2elemf0elem.Type = f1valf48f2elemf0iter.Type + if f1valf65f2elemf0iter.Type != nil { + f1valf65f2elemf0elem.Type = f1valf65f2elemf0iter.Type } - f1valf48f2elemf0 = append(f1valf48f2elemf0, f1valf48f2elemf0elem) + f1valf65f2elemf0 = append(f1valf65f2elemf0, f1valf65f2elemf0elem) } - f1valf48f2elem.Columns = f1valf48f2elemf0 + f1valf65f2elem.Columns = f1valf65f2elemf0 } - f1valf48f2 = append(f1valf48f2, f1valf48f2elem) + f1valf65f2 = append(f1valf65f2, f1valf65f2elem) } - f1valf48.OutputSchemas = f1valf48f2 + f1valf65.OutputSchemas = f1valf65f2 } if f1valiter.SparkSQL.SqlAliases != nil { - f1valf48f3 := []*svcapitypes.SQLAlias{} - for _, f1valf48f3iter := range f1valiter.SparkSQL.SqlAliases { - f1valf48f3elem := &svcapitypes.SQLAlias{} - if f1valf48f3iter.Alias != nil { - f1valf48f3elem.Alias = f1valf48f3iter.Alias + f1valf65f3 := []*svcapitypes.SQLAlias{} + for _, f1valf65f3iter := range f1valiter.SparkSQL.SqlAliases { + f1valf65f3elem := &svcapitypes.SQLAlias{} + if f1valf65f3iter.Alias != nil { + f1valf65f3elem.Alias = f1valf65f3iter.Alias } - if f1valf48f3iter.From != nil { - f1valf48f3elem.From = f1valf48f3iter.From + if f1valf65f3iter.From != nil { + f1valf65f3elem.From = f1valf65f3iter.From } - f1valf48f3 = append(f1valf48f3, f1valf48f3elem) + f1valf65f3 = append(f1valf65f3, f1valf65f3elem) } - f1valf48.SQLAliases = f1valf48f3 + f1valf65.SQLAliases = f1valf65f3 } if f1valiter.SparkSQL.SqlQuery != nil { - f1valf48.SQLQuery = f1valiter.SparkSQL.SqlQuery + f1valf65.SQLQuery = f1valiter.SparkSQL.SqlQuery } - f1val.SparkSQL = f1valf48 + f1val.SparkSQL = f1valf65 } if f1valiter.Spigot != nil { - f1valf49 := &svcapitypes.Spigot{} + f1valf66 := &svcapitypes.Spigot{} if f1valiter.Spigot.Inputs != nil { - f1valf49f0 := []*string{} - for _, f1valf49f0iter := range f1valiter.Spigot.Inputs { - var f1valf49f0elem string - f1valf49f0elem = *f1valf49f0iter - f1valf49f0 = append(f1valf49f0, &f1valf49f0elem) + f1valf66f0 := []*string{} + for _, f1valf66f0iter := range f1valiter.Spigot.Inputs { + var f1valf66f0elem string + f1valf66f0elem = *f1valf66f0iter + f1valf66f0 = append(f1valf66f0, &f1valf66f0elem) } - f1valf49.Inputs = f1valf49f0 + f1valf66.Inputs = f1valf66f0 } if f1valiter.Spigot.Name != nil { - f1valf49.Name = f1valiter.Spigot.Name + f1valf66.Name = f1valiter.Spigot.Name } if f1valiter.Spigot.Path != nil { - f1valf49.Path = f1valiter.Spigot.Path + f1valf66.Path = f1valiter.Spigot.Path } if f1valiter.Spigot.Prob != nil { - f1valf49.Prob = f1valiter.Spigot.Prob + f1valf66.Prob = f1valiter.Spigot.Prob } if f1valiter.Spigot.Topk != nil { - f1valf49.Topk = f1valiter.Spigot.Topk + f1valf66.Topk = f1valiter.Spigot.Topk } - f1val.Spigot = f1valf49 + f1val.Spigot = f1valf66 } if f1valiter.SplitFields != nil { - f1valf50 := &svcapitypes.SplitFields{} + f1valf67 := &svcapitypes.SplitFields{} if f1valiter.SplitFields.Inputs != nil { - f1valf50f0 := []*string{} - for _, f1valf50f0iter := range f1valiter.SplitFields.Inputs { - var f1valf50f0elem string - f1valf50f0elem = *f1valf50f0iter - 
f1valf50f0 = append(f1valf50f0, &f1valf50f0elem) + f1valf67f0 := []*string{} + for _, f1valf67f0iter := range f1valiter.SplitFields.Inputs { + var f1valf67f0elem string + f1valf67f0elem = *f1valf67f0iter + f1valf67f0 = append(f1valf67f0, &f1valf67f0elem) } - f1valf50.Inputs = f1valf50f0 + f1valf67.Inputs = f1valf67f0 } if f1valiter.SplitFields.Name != nil { - f1valf50.Name = f1valiter.SplitFields.Name + f1valf67.Name = f1valiter.SplitFields.Name } if f1valiter.SplitFields.Paths != nil { - f1valf50f2 := [][]*string{} - for _, f1valf50f2iter := range f1valiter.SplitFields.Paths { - f1valf50f2elem := []*string{} - for _, f1valf50f2elemiter := range f1valf50f2iter { - var f1valf50f2elemelem string - f1valf50f2elemelem = *f1valf50f2elemiter - f1valf50f2elem = append(f1valf50f2elem, &f1valf50f2elemelem) + f1valf67f2 := [][]*string{} + for _, f1valf67f2iter := range f1valiter.SplitFields.Paths { + f1valf67f2elem := []*string{} + for _, f1valf67f2elemiter := range f1valf67f2iter { + var f1valf67f2elemelem string + f1valf67f2elemelem = *f1valf67f2elemiter + f1valf67f2elem = append(f1valf67f2elem, &f1valf67f2elemelem) } - f1valf50f2 = append(f1valf50f2, f1valf50f2elem) + f1valf67f2 = append(f1valf67f2, f1valf67f2elem) } - f1valf50.Paths = f1valf50f2 + f1valf67.Paths = f1valf67f2 } - f1val.SplitFields = f1valf50 + f1val.SplitFields = f1valf67 } if f1valiter.Union != nil { - f1valf51 := &svcapitypes.Union{} + f1valf68 := &svcapitypes.Union{} if f1valiter.Union.Inputs != nil { - f1valf51f0 := []*string{} - for _, f1valf51f0iter := range f1valiter.Union.Inputs { - var f1valf51f0elem string - f1valf51f0elem = *f1valf51f0iter - f1valf51f0 = append(f1valf51f0, &f1valf51f0elem) + f1valf68f0 := []*string{} + for _, f1valf68f0iter := range f1valiter.Union.Inputs { + var f1valf68f0elem string + f1valf68f0elem = *f1valf68f0iter + f1valf68f0 = append(f1valf68f0, &f1valf68f0elem) } - f1valf51.Inputs = f1valf51f0 + f1valf68.Inputs = f1valf68f0 } if f1valiter.Union.Name != nil { - f1valf51.Name = f1valiter.Union.Name + f1valf68.Name = f1valiter.Union.Name } if f1valiter.Union.UnionType != nil { - f1valf51.UnionType = f1valiter.Union.UnionType + f1valf68.UnionType = f1valiter.Union.UnionType } - f1val.Union = f1valf51 + f1val.Union = f1valf68 } f1[f1key] = f1val } @@ -2213,6 +3593,9 @@ func GenerateJob(resp *svcsdk.GetJobOutput) *svcapitypes.Job { if resp.Job.Command.PythonVersion != nil { f2.PythonVersion = resp.Job.Command.PythonVersion } + if resp.Job.Command.Runtime != nil { + f2.Runtime = resp.Job.Command.Runtime + } if resp.Job.Command.ScriptLocation != nil { f2.ScriptLocation = resp.Job.Command.ScriptLocation } @@ -2414,2105 +3797,3485 @@ func GenerateCreateJobInput(cr *svcapitypes.Job) *svcsdk.CreateJobInput { } f1val.SetAggregate(f1valf0) } + if f1valiter.AmazonRedshiftSource != nil { + f1valf1 := &svcsdk.AmazonRedshiftSource{} + if f1valiter.AmazonRedshiftSource.Data != nil { + f1valf1f0 := &svcsdk.AmazonRedshiftNodeData{} + if f1valiter.AmazonRedshiftSource.Data.AccessType != nil { + f1valf1f0.SetAccessType(*f1valiter.AmazonRedshiftSource.Data.AccessType) + } + if f1valiter.AmazonRedshiftSource.Data.Action != nil { + f1valf1f0.SetAction(*f1valiter.AmazonRedshiftSource.Data.Action) + } + if f1valiter.AmazonRedshiftSource.Data.AdvancedOptions != nil { + f1valf1f0f2 := []*svcsdk.AmazonRedshiftAdvancedOption{} + for _, f1valf1f0f2iter := range f1valiter.AmazonRedshiftSource.Data.AdvancedOptions { + f1valf1f0f2elem := &svcsdk.AmazonRedshiftAdvancedOption{} + if f1valf1f0f2iter.Key != nil { + 
f1valf1f0f2elem.SetKey(*f1valf1f0f2iter.Key) + } + if f1valf1f0f2iter.Value != nil { + f1valf1f0f2elem.SetValue(*f1valf1f0f2iter.Value) + } + f1valf1f0f2 = append(f1valf1f0f2, f1valf1f0f2elem) + } + f1valf1f0.SetAdvancedOptions(f1valf1f0f2) + } + if f1valiter.AmazonRedshiftSource.Data.CatalogDatabase != nil { + f1valf1f0f3 := &svcsdk.Option{} + if f1valiter.AmazonRedshiftSource.Data.CatalogDatabase.Description != nil { + f1valf1f0f3.SetDescription(*f1valiter.AmazonRedshiftSource.Data.CatalogDatabase.Description) + } + if f1valiter.AmazonRedshiftSource.Data.CatalogDatabase.Label != nil { + f1valf1f0f3.SetLabel(*f1valiter.AmazonRedshiftSource.Data.CatalogDatabase.Label) + } + if f1valiter.AmazonRedshiftSource.Data.CatalogDatabase.Value != nil { + f1valf1f0f3.SetValue(*f1valiter.AmazonRedshiftSource.Data.CatalogDatabase.Value) + } + f1valf1f0.SetCatalogDatabase(f1valf1f0f3) + } + if f1valiter.AmazonRedshiftSource.Data.CatalogRedshiftSchema != nil { + f1valf1f0.SetCatalogRedshiftSchema(*f1valiter.AmazonRedshiftSource.Data.CatalogRedshiftSchema) + } + if f1valiter.AmazonRedshiftSource.Data.CatalogRedshiftTable != nil { + f1valf1f0.SetCatalogRedshiftTable(*f1valiter.AmazonRedshiftSource.Data.CatalogRedshiftTable) + } + if f1valiter.AmazonRedshiftSource.Data.CatalogTable != nil { + f1valf1f0f6 := &svcsdk.Option{} + if f1valiter.AmazonRedshiftSource.Data.CatalogTable.Description != nil { + f1valf1f0f6.SetDescription(*f1valiter.AmazonRedshiftSource.Data.CatalogTable.Description) + } + if f1valiter.AmazonRedshiftSource.Data.CatalogTable.Label != nil { + f1valf1f0f6.SetLabel(*f1valiter.AmazonRedshiftSource.Data.CatalogTable.Label) + } + if f1valiter.AmazonRedshiftSource.Data.CatalogTable.Value != nil { + f1valf1f0f6.SetValue(*f1valiter.AmazonRedshiftSource.Data.CatalogTable.Value) + } + f1valf1f0.SetCatalogTable(f1valf1f0f6) + } + if f1valiter.AmazonRedshiftSource.Data.Connection != nil { + f1valf1f0f7 := &svcsdk.Option{} + if f1valiter.AmazonRedshiftSource.Data.Connection.Description != nil { + f1valf1f0f7.SetDescription(*f1valiter.AmazonRedshiftSource.Data.Connection.Description) + } + if f1valiter.AmazonRedshiftSource.Data.Connection.Label != nil { + f1valf1f0f7.SetLabel(*f1valiter.AmazonRedshiftSource.Data.Connection.Label) + } + if f1valiter.AmazonRedshiftSource.Data.Connection.Value != nil { + f1valf1f0f7.SetValue(*f1valiter.AmazonRedshiftSource.Data.Connection.Value) + } + f1valf1f0.SetConnection(f1valf1f0f7) + } + if f1valiter.AmazonRedshiftSource.Data.CrawlerConnection != nil { + f1valf1f0.SetCrawlerConnection(*f1valiter.AmazonRedshiftSource.Data.CrawlerConnection) + } + if f1valiter.AmazonRedshiftSource.Data.IAMRole != nil { + f1valf1f0f9 := &svcsdk.Option{} + if f1valiter.AmazonRedshiftSource.Data.IAMRole.Description != nil { + f1valf1f0f9.SetDescription(*f1valiter.AmazonRedshiftSource.Data.IAMRole.Description) + } + if f1valiter.AmazonRedshiftSource.Data.IAMRole.Label != nil { + f1valf1f0f9.SetLabel(*f1valiter.AmazonRedshiftSource.Data.IAMRole.Label) + } + if f1valiter.AmazonRedshiftSource.Data.IAMRole.Value != nil { + f1valf1f0f9.SetValue(*f1valiter.AmazonRedshiftSource.Data.IAMRole.Value) + } + f1valf1f0.SetIamRole(f1valf1f0f9) + } + if f1valiter.AmazonRedshiftSource.Data.MergeAction != nil { + f1valf1f0.SetMergeAction(*f1valiter.AmazonRedshiftSource.Data.MergeAction) + } + if f1valiter.AmazonRedshiftSource.Data.MergeClause != nil { + f1valf1f0.SetMergeClause(*f1valiter.AmazonRedshiftSource.Data.MergeClause) + } + if f1valiter.AmazonRedshiftSource.Data.MergeWhenMatched != nil { + 
f1valf1f0.SetMergeWhenMatched(*f1valiter.AmazonRedshiftSource.Data.MergeWhenMatched) + } + if f1valiter.AmazonRedshiftSource.Data.MergeWhenNotMatched != nil { + f1valf1f0.SetMergeWhenNotMatched(*f1valiter.AmazonRedshiftSource.Data.MergeWhenNotMatched) + } + if f1valiter.AmazonRedshiftSource.Data.PostAction != nil { + f1valf1f0.SetPostAction(*f1valiter.AmazonRedshiftSource.Data.PostAction) + } + if f1valiter.AmazonRedshiftSource.Data.PreAction != nil { + f1valf1f0.SetPreAction(*f1valiter.AmazonRedshiftSource.Data.PreAction) + } + if f1valiter.AmazonRedshiftSource.Data.SampleQuery != nil { + f1valf1f0.SetSampleQuery(*f1valiter.AmazonRedshiftSource.Data.SampleQuery) + } + if f1valiter.AmazonRedshiftSource.Data.Schema != nil { + f1valf1f0f17 := &svcsdk.Option{} + if f1valiter.AmazonRedshiftSource.Data.Schema.Description != nil { + f1valf1f0f17.SetDescription(*f1valiter.AmazonRedshiftSource.Data.Schema.Description) + } + if f1valiter.AmazonRedshiftSource.Data.Schema.Label != nil { + f1valf1f0f17.SetLabel(*f1valiter.AmazonRedshiftSource.Data.Schema.Label) + } + if f1valiter.AmazonRedshiftSource.Data.Schema.Value != nil { + f1valf1f0f17.SetValue(*f1valiter.AmazonRedshiftSource.Data.Schema.Value) + } + f1valf1f0.SetSchema(f1valf1f0f17) + } + if f1valiter.AmazonRedshiftSource.Data.SelectedColumns != nil { + f1valf1f0f18 := []*svcsdk.Option{} + for _, f1valf1f0f18iter := range f1valiter.AmazonRedshiftSource.Data.SelectedColumns { + f1valf1f0f18elem := &svcsdk.Option{} + if f1valf1f0f18iter.Description != nil { + f1valf1f0f18elem.SetDescription(*f1valf1f0f18iter.Description) + } + if f1valf1f0f18iter.Label != nil { + f1valf1f0f18elem.SetLabel(*f1valf1f0f18iter.Label) + } + if f1valf1f0f18iter.Value != nil { + f1valf1f0f18elem.SetValue(*f1valf1f0f18iter.Value) + } + f1valf1f0f18 = append(f1valf1f0f18, f1valf1f0f18elem) + } + f1valf1f0.SetSelectedColumns(f1valf1f0f18) + } + if f1valiter.AmazonRedshiftSource.Data.SourceType != nil { + f1valf1f0.SetSourceType(*f1valiter.AmazonRedshiftSource.Data.SourceType) + } + if f1valiter.AmazonRedshiftSource.Data.StagingTable != nil { + f1valf1f0.SetStagingTable(*f1valiter.AmazonRedshiftSource.Data.StagingTable) + } + if f1valiter.AmazonRedshiftSource.Data.Table != nil { + f1valf1f0f21 := &svcsdk.Option{} + if f1valiter.AmazonRedshiftSource.Data.Table.Description != nil { + f1valf1f0f21.SetDescription(*f1valiter.AmazonRedshiftSource.Data.Table.Description) + } + if f1valiter.AmazonRedshiftSource.Data.Table.Label != nil { + f1valf1f0f21.SetLabel(*f1valiter.AmazonRedshiftSource.Data.Table.Label) + } + if f1valiter.AmazonRedshiftSource.Data.Table.Value != nil { + f1valf1f0f21.SetValue(*f1valiter.AmazonRedshiftSource.Data.Table.Value) + } + f1valf1f0.SetTable(f1valf1f0f21) + } + if f1valiter.AmazonRedshiftSource.Data.TablePrefix != nil { + f1valf1f0.SetTablePrefix(*f1valiter.AmazonRedshiftSource.Data.TablePrefix) + } + if f1valiter.AmazonRedshiftSource.Data.TableSchema != nil { + f1valf1f0f23 := []*svcsdk.Option{} + for _, f1valf1f0f23iter := range f1valiter.AmazonRedshiftSource.Data.TableSchema { + f1valf1f0f23elem := &svcsdk.Option{} + if f1valf1f0f23iter.Description != nil { + f1valf1f0f23elem.SetDescription(*f1valf1f0f23iter.Description) + } + if f1valf1f0f23iter.Label != nil { + f1valf1f0f23elem.SetLabel(*f1valf1f0f23iter.Label) + } + if f1valf1f0f23iter.Value != nil { + f1valf1f0f23elem.SetValue(*f1valf1f0f23iter.Value) + } + f1valf1f0f23 = append(f1valf1f0f23, f1valf1f0f23elem) + } + f1valf1f0.SetTableSchema(f1valf1f0f23) + } + if 
f1valiter.AmazonRedshiftSource.Data.TempDir != nil { + f1valf1f0.SetTempDir(*f1valiter.AmazonRedshiftSource.Data.TempDir) + } + if f1valiter.AmazonRedshiftSource.Data.Upsert != nil { + f1valf1f0.SetUpsert(*f1valiter.AmazonRedshiftSource.Data.Upsert) + } + f1valf1.SetData(f1valf1f0) + } + if f1valiter.AmazonRedshiftSource.Name != nil { + f1valf1.SetName(*f1valiter.AmazonRedshiftSource.Name) + } + f1val.SetAmazonRedshiftSource(f1valf1) + } + if f1valiter.AmazonRedshiftTarget != nil { + f1valf2 := &svcsdk.AmazonRedshiftTarget{} + if f1valiter.AmazonRedshiftTarget.Data != nil { + f1valf2f0 := &svcsdk.AmazonRedshiftNodeData{} + if f1valiter.AmazonRedshiftTarget.Data.AccessType != nil { + f1valf2f0.SetAccessType(*f1valiter.AmazonRedshiftTarget.Data.AccessType) + } + if f1valiter.AmazonRedshiftTarget.Data.Action != nil { + f1valf2f0.SetAction(*f1valiter.AmazonRedshiftTarget.Data.Action) + } + if f1valiter.AmazonRedshiftTarget.Data.AdvancedOptions != nil { + f1valf2f0f2 := []*svcsdk.AmazonRedshiftAdvancedOption{} + for _, f1valf2f0f2iter := range f1valiter.AmazonRedshiftTarget.Data.AdvancedOptions { + f1valf2f0f2elem := &svcsdk.AmazonRedshiftAdvancedOption{} + if f1valf2f0f2iter.Key != nil { + f1valf2f0f2elem.SetKey(*f1valf2f0f2iter.Key) + } + if f1valf2f0f2iter.Value != nil { + f1valf2f0f2elem.SetValue(*f1valf2f0f2iter.Value) + } + f1valf2f0f2 = append(f1valf2f0f2, f1valf2f0f2elem) + } + f1valf2f0.SetAdvancedOptions(f1valf2f0f2) + } + if f1valiter.AmazonRedshiftTarget.Data.CatalogDatabase != nil { + f1valf2f0f3 := &svcsdk.Option{} + if f1valiter.AmazonRedshiftTarget.Data.CatalogDatabase.Description != nil { + f1valf2f0f3.SetDescription(*f1valiter.AmazonRedshiftTarget.Data.CatalogDatabase.Description) + } + if f1valiter.AmazonRedshiftTarget.Data.CatalogDatabase.Label != nil { + f1valf2f0f3.SetLabel(*f1valiter.AmazonRedshiftTarget.Data.CatalogDatabase.Label) + } + if f1valiter.AmazonRedshiftTarget.Data.CatalogDatabase.Value != nil { + f1valf2f0f3.SetValue(*f1valiter.AmazonRedshiftTarget.Data.CatalogDatabase.Value) + } + f1valf2f0.SetCatalogDatabase(f1valf2f0f3) + } + if f1valiter.AmazonRedshiftTarget.Data.CatalogRedshiftSchema != nil { + f1valf2f0.SetCatalogRedshiftSchema(*f1valiter.AmazonRedshiftTarget.Data.CatalogRedshiftSchema) + } + if f1valiter.AmazonRedshiftTarget.Data.CatalogRedshiftTable != nil { + f1valf2f0.SetCatalogRedshiftTable(*f1valiter.AmazonRedshiftTarget.Data.CatalogRedshiftTable) + } + if f1valiter.AmazonRedshiftTarget.Data.CatalogTable != nil { + f1valf2f0f6 := &svcsdk.Option{} + if f1valiter.AmazonRedshiftTarget.Data.CatalogTable.Description != nil { + f1valf2f0f6.SetDescription(*f1valiter.AmazonRedshiftTarget.Data.CatalogTable.Description) + } + if f1valiter.AmazonRedshiftTarget.Data.CatalogTable.Label != nil { + f1valf2f0f6.SetLabel(*f1valiter.AmazonRedshiftTarget.Data.CatalogTable.Label) + } + if f1valiter.AmazonRedshiftTarget.Data.CatalogTable.Value != nil { + f1valf2f0f6.SetValue(*f1valiter.AmazonRedshiftTarget.Data.CatalogTable.Value) + } + f1valf2f0.SetCatalogTable(f1valf2f0f6) + } + if f1valiter.AmazonRedshiftTarget.Data.Connection != nil { + f1valf2f0f7 := &svcsdk.Option{} + if f1valiter.AmazonRedshiftTarget.Data.Connection.Description != nil { + f1valf2f0f7.SetDescription(*f1valiter.AmazonRedshiftTarget.Data.Connection.Description) + } + if f1valiter.AmazonRedshiftTarget.Data.Connection.Label != nil { + f1valf2f0f7.SetLabel(*f1valiter.AmazonRedshiftTarget.Data.Connection.Label) + } + if f1valiter.AmazonRedshiftTarget.Data.Connection.Value != nil { + 
f1valf2f0f7.SetValue(*f1valiter.AmazonRedshiftTarget.Data.Connection.Value) + } + f1valf2f0.SetConnection(f1valf2f0f7) + } + if f1valiter.AmazonRedshiftTarget.Data.CrawlerConnection != nil { + f1valf2f0.SetCrawlerConnection(*f1valiter.AmazonRedshiftTarget.Data.CrawlerConnection) + } + if f1valiter.AmazonRedshiftTarget.Data.IAMRole != nil { + f1valf2f0f9 := &svcsdk.Option{} + if f1valiter.AmazonRedshiftTarget.Data.IAMRole.Description != nil { + f1valf2f0f9.SetDescription(*f1valiter.AmazonRedshiftTarget.Data.IAMRole.Description) + } + if f1valiter.AmazonRedshiftTarget.Data.IAMRole.Label != nil { + f1valf2f0f9.SetLabel(*f1valiter.AmazonRedshiftTarget.Data.IAMRole.Label) + } + if f1valiter.AmazonRedshiftTarget.Data.IAMRole.Value != nil { + f1valf2f0f9.SetValue(*f1valiter.AmazonRedshiftTarget.Data.IAMRole.Value) + } + f1valf2f0.SetIamRole(f1valf2f0f9) + } + if f1valiter.AmazonRedshiftTarget.Data.MergeAction != nil { + f1valf2f0.SetMergeAction(*f1valiter.AmazonRedshiftTarget.Data.MergeAction) + } + if f1valiter.AmazonRedshiftTarget.Data.MergeClause != nil { + f1valf2f0.SetMergeClause(*f1valiter.AmazonRedshiftTarget.Data.MergeClause) + } + if f1valiter.AmazonRedshiftTarget.Data.MergeWhenMatched != nil { + f1valf2f0.SetMergeWhenMatched(*f1valiter.AmazonRedshiftTarget.Data.MergeWhenMatched) + } + if f1valiter.AmazonRedshiftTarget.Data.MergeWhenNotMatched != nil { + f1valf2f0.SetMergeWhenNotMatched(*f1valiter.AmazonRedshiftTarget.Data.MergeWhenNotMatched) + } + if f1valiter.AmazonRedshiftTarget.Data.PostAction != nil { + f1valf2f0.SetPostAction(*f1valiter.AmazonRedshiftTarget.Data.PostAction) + } + if f1valiter.AmazonRedshiftTarget.Data.PreAction != nil { + f1valf2f0.SetPreAction(*f1valiter.AmazonRedshiftTarget.Data.PreAction) + } + if f1valiter.AmazonRedshiftTarget.Data.SampleQuery != nil { + f1valf2f0.SetSampleQuery(*f1valiter.AmazonRedshiftTarget.Data.SampleQuery) + } + if f1valiter.AmazonRedshiftTarget.Data.Schema != nil { + f1valf2f0f17 := &svcsdk.Option{} + if f1valiter.AmazonRedshiftTarget.Data.Schema.Description != nil { + f1valf2f0f17.SetDescription(*f1valiter.AmazonRedshiftTarget.Data.Schema.Description) + } + if f1valiter.AmazonRedshiftTarget.Data.Schema.Label != nil { + f1valf2f0f17.SetLabel(*f1valiter.AmazonRedshiftTarget.Data.Schema.Label) + } + if f1valiter.AmazonRedshiftTarget.Data.Schema.Value != nil { + f1valf2f0f17.SetValue(*f1valiter.AmazonRedshiftTarget.Data.Schema.Value) + } + f1valf2f0.SetSchema(f1valf2f0f17) + } + if f1valiter.AmazonRedshiftTarget.Data.SelectedColumns != nil { + f1valf2f0f18 := []*svcsdk.Option{} + for _, f1valf2f0f18iter := range f1valiter.AmazonRedshiftTarget.Data.SelectedColumns { + f1valf2f0f18elem := &svcsdk.Option{} + if f1valf2f0f18iter.Description != nil { + f1valf2f0f18elem.SetDescription(*f1valf2f0f18iter.Description) + } + if f1valf2f0f18iter.Label != nil { + f1valf2f0f18elem.SetLabel(*f1valf2f0f18iter.Label) + } + if f1valf2f0f18iter.Value != nil { + f1valf2f0f18elem.SetValue(*f1valf2f0f18iter.Value) + } + f1valf2f0f18 = append(f1valf2f0f18, f1valf2f0f18elem) + } + f1valf2f0.SetSelectedColumns(f1valf2f0f18) + } + if f1valiter.AmazonRedshiftTarget.Data.SourceType != nil { + f1valf2f0.SetSourceType(*f1valiter.AmazonRedshiftTarget.Data.SourceType) + } + if f1valiter.AmazonRedshiftTarget.Data.StagingTable != nil { + f1valf2f0.SetStagingTable(*f1valiter.AmazonRedshiftTarget.Data.StagingTable) + } + if f1valiter.AmazonRedshiftTarget.Data.Table != nil { + f1valf2f0f21 := &svcsdk.Option{} + if f1valiter.AmazonRedshiftTarget.Data.Table.Description != nil { 
+ f1valf2f0f21.SetDescription(*f1valiter.AmazonRedshiftTarget.Data.Table.Description) + } + if f1valiter.AmazonRedshiftTarget.Data.Table.Label != nil { + f1valf2f0f21.SetLabel(*f1valiter.AmazonRedshiftTarget.Data.Table.Label) + } + if f1valiter.AmazonRedshiftTarget.Data.Table.Value != nil { + f1valf2f0f21.SetValue(*f1valiter.AmazonRedshiftTarget.Data.Table.Value) + } + f1valf2f0.SetTable(f1valf2f0f21) + } + if f1valiter.AmazonRedshiftTarget.Data.TablePrefix != nil { + f1valf2f0.SetTablePrefix(*f1valiter.AmazonRedshiftTarget.Data.TablePrefix) + } + if f1valiter.AmazonRedshiftTarget.Data.TableSchema != nil { + f1valf2f0f23 := []*svcsdk.Option{} + for _, f1valf2f0f23iter := range f1valiter.AmazonRedshiftTarget.Data.TableSchema { + f1valf2f0f23elem := &svcsdk.Option{} + if f1valf2f0f23iter.Description != nil { + f1valf2f0f23elem.SetDescription(*f1valf2f0f23iter.Description) + } + if f1valf2f0f23iter.Label != nil { + f1valf2f0f23elem.SetLabel(*f1valf2f0f23iter.Label) + } + if f1valf2f0f23iter.Value != nil { + f1valf2f0f23elem.SetValue(*f1valf2f0f23iter.Value) + } + f1valf2f0f23 = append(f1valf2f0f23, f1valf2f0f23elem) + } + f1valf2f0.SetTableSchema(f1valf2f0f23) + } + if f1valiter.AmazonRedshiftTarget.Data.TempDir != nil { + f1valf2f0.SetTempDir(*f1valiter.AmazonRedshiftTarget.Data.TempDir) + } + if f1valiter.AmazonRedshiftTarget.Data.Upsert != nil { + f1valf2f0.SetUpsert(*f1valiter.AmazonRedshiftTarget.Data.Upsert) + } + f1valf2.SetData(f1valf2f0) + } + if f1valiter.AmazonRedshiftTarget.Inputs != nil { + f1valf2f1 := []*string{} + for _, f1valf2f1iter := range f1valiter.AmazonRedshiftTarget.Inputs { + var f1valf2f1elem string + f1valf2f1elem = *f1valf2f1iter + f1valf2f1 = append(f1valf2f1, &f1valf2f1elem) + } + f1valf2.SetInputs(f1valf2f1) + } + if f1valiter.AmazonRedshiftTarget.Name != nil { + f1valf2.SetName(*f1valiter.AmazonRedshiftTarget.Name) + } + f1val.SetAmazonRedshiftTarget(f1valf2) + } if f1valiter.ApplyMapping != nil { - f1valf1 := &svcsdk.ApplyMapping{} + f1valf3 := &svcsdk.ApplyMapping{} if f1valiter.ApplyMapping.Inputs != nil { - f1valf1f0 := []*string{} - for _, f1valf1f0iter := range f1valiter.ApplyMapping.Inputs { - var f1valf1f0elem string - f1valf1f0elem = *f1valf1f0iter - f1valf1f0 = append(f1valf1f0, &f1valf1f0elem) + f1valf3f0 := []*string{} + for _, f1valf3f0iter := range f1valiter.ApplyMapping.Inputs { + var f1valf3f0elem string + f1valf3f0elem = *f1valf3f0iter + f1valf3f0 = append(f1valf3f0, &f1valf3f0elem) } - f1valf1.SetInputs(f1valf1f0) + f1valf3.SetInputs(f1valf3f0) } if f1valiter.ApplyMapping.Mapping != nil { - f1valf1f1 := []*svcsdk.Mapping{} - for _, f1valf1f1iter := range f1valiter.ApplyMapping.Mapping { - f1valf1f1elem := &svcsdk.Mapping{} - if f1valf1f1iter.Dropped != nil { - f1valf1f1elem.SetDropped(*f1valf1f1iter.Dropped) - } - if f1valf1f1iter.FromPath != nil { - f1valf1f1elemf1 := []*string{} - for _, f1valf1f1elemf1iter := range f1valf1f1iter.FromPath { - var f1valf1f1elemf1elem string - f1valf1f1elemf1elem = *f1valf1f1elemf1iter - f1valf1f1elemf1 = append(f1valf1f1elemf1, &f1valf1f1elemf1elem) + f1valf3f1 := []*svcsdk.Mapping{} + for _, f1valf3f1iter := range f1valiter.ApplyMapping.Mapping { + f1valf3f1elem := &svcsdk.Mapping{} + if f1valf3f1iter.Dropped != nil { + f1valf3f1elem.SetDropped(*f1valf3f1iter.Dropped) + } + if f1valf3f1iter.FromPath != nil { + f1valf3f1elemf1 := []*string{} + for _, f1valf3f1elemf1iter := range f1valf3f1iter.FromPath { + var f1valf3f1elemf1elem string + f1valf3f1elemf1elem = *f1valf3f1elemf1iter + f1valf3f1elemf1 = 
append(f1valf3f1elemf1, &f1valf3f1elemf1elem) } - f1valf1f1elem.SetFromPath(f1valf1f1elemf1) + f1valf3f1elem.SetFromPath(f1valf3f1elemf1) } - if f1valf1f1iter.FromType != nil { - f1valf1f1elem.SetFromType(*f1valf1f1iter.FromType) + if f1valf3f1iter.FromType != nil { + f1valf3f1elem.SetFromType(*f1valf3f1iter.FromType) } - if f1valf1f1iter.ToKey != nil { - f1valf1f1elem.SetToKey(*f1valf1f1iter.ToKey) + if f1valf3f1iter.ToKey != nil { + f1valf3f1elem.SetToKey(*f1valf3f1iter.ToKey) } - if f1valf1f1iter.ToType != nil { - f1valf1f1elem.SetToType(*f1valf1f1iter.ToType) + if f1valf3f1iter.ToType != nil { + f1valf3f1elem.SetToType(*f1valf3f1iter.ToType) } - f1valf1f1 = append(f1valf1f1, f1valf1f1elem) + f1valf3f1 = append(f1valf3f1, f1valf3f1elem) } - f1valf1.SetMapping(f1valf1f1) + f1valf3.SetMapping(f1valf3f1) } if f1valiter.ApplyMapping.Name != nil { - f1valf1.SetName(*f1valiter.ApplyMapping.Name) + f1valf3.SetName(*f1valiter.ApplyMapping.Name) } - f1val.SetApplyMapping(f1valf1) + f1val.SetApplyMapping(f1valf3) } if f1valiter.AthenaConnectorSource != nil { - f1valf2 := &svcsdk.AthenaConnectorSource{} + f1valf4 := &svcsdk.AthenaConnectorSource{} if f1valiter.AthenaConnectorSource.ConnectionName != nil { - f1valf2.SetConnectionName(*f1valiter.AthenaConnectorSource.ConnectionName) + f1valf4.SetConnectionName(*f1valiter.AthenaConnectorSource.ConnectionName) } if f1valiter.AthenaConnectorSource.ConnectionTable != nil { - f1valf2.SetConnectionTable(*f1valiter.AthenaConnectorSource.ConnectionTable) + f1valf4.SetConnectionTable(*f1valiter.AthenaConnectorSource.ConnectionTable) } if f1valiter.AthenaConnectorSource.ConnectionType != nil { - f1valf2.SetConnectionType(*f1valiter.AthenaConnectorSource.ConnectionType) + f1valf4.SetConnectionType(*f1valiter.AthenaConnectorSource.ConnectionType) } if f1valiter.AthenaConnectorSource.ConnectorName != nil { - f1valf2.SetConnectorName(*f1valiter.AthenaConnectorSource.ConnectorName) + f1valf4.SetConnectorName(*f1valiter.AthenaConnectorSource.ConnectorName) } if f1valiter.AthenaConnectorSource.Name != nil { - f1valf2.SetName(*f1valiter.AthenaConnectorSource.Name) + f1valf4.SetName(*f1valiter.AthenaConnectorSource.Name) } if f1valiter.AthenaConnectorSource.OutputSchemas != nil { - f1valf2f5 := []*svcsdk.GlueSchema{} - for _, f1valf2f5iter := range f1valiter.AthenaConnectorSource.OutputSchemas { - f1valf2f5elem := &svcsdk.GlueSchema{} - if f1valf2f5iter.Columns != nil { - f1valf2f5elemf0 := []*svcsdk.GlueStudioSchemaColumn{} - for _, f1valf2f5elemf0iter := range f1valf2f5iter.Columns { - f1valf2f5elemf0elem := &svcsdk.GlueStudioSchemaColumn{} - if f1valf2f5elemf0iter.Name != nil { - f1valf2f5elemf0elem.SetName(*f1valf2f5elemf0iter.Name) + f1valf4f5 := []*svcsdk.GlueSchema{} + for _, f1valf4f5iter := range f1valiter.AthenaConnectorSource.OutputSchemas { + f1valf4f5elem := &svcsdk.GlueSchema{} + if f1valf4f5iter.Columns != nil { + f1valf4f5elemf0 := []*svcsdk.GlueStudioSchemaColumn{} + for _, f1valf4f5elemf0iter := range f1valf4f5iter.Columns { + f1valf4f5elemf0elem := &svcsdk.GlueStudioSchemaColumn{} + if f1valf4f5elemf0iter.Name != nil { + f1valf4f5elemf0elem.SetName(*f1valf4f5elemf0iter.Name) } - if f1valf2f5elemf0iter.Type != nil { - f1valf2f5elemf0elem.SetType(*f1valf2f5elemf0iter.Type) + if f1valf4f5elemf0iter.Type != nil { + f1valf4f5elemf0elem.SetType(*f1valf4f5elemf0iter.Type) } - f1valf2f5elemf0 = append(f1valf2f5elemf0, f1valf2f5elemf0elem) + f1valf4f5elemf0 = append(f1valf4f5elemf0, f1valf4f5elemf0elem) } - f1valf2f5elem.SetColumns(f1valf2f5elemf0) + 
f1valf4f5elem.SetColumns(f1valf4f5elemf0) } - f1valf2f5 = append(f1valf2f5, f1valf2f5elem) + f1valf4f5 = append(f1valf4f5, f1valf4f5elem) } - f1valf2.SetOutputSchemas(f1valf2f5) + f1valf4.SetOutputSchemas(f1valf4f5) } if f1valiter.AthenaConnectorSource.SchemaName != nil { - f1valf2.SetSchemaName(*f1valiter.AthenaConnectorSource.SchemaName) + f1valf4.SetSchemaName(*f1valiter.AthenaConnectorSource.SchemaName) } - f1val.SetAthenaConnectorSource(f1valf2) + f1val.SetAthenaConnectorSource(f1valf4) + } + if f1valiter.CatalogDeltaSource != nil { + f1valf5 := &svcsdk.CatalogDeltaSource{} + if f1valiter.CatalogDeltaSource.AdditionalDeltaOptions != nil { + f1valf5f0 := map[string]*string{} + for f1valf5f0key, f1valf5f0valiter := range f1valiter.CatalogDeltaSource.AdditionalDeltaOptions { + var f1valf5f0val string + f1valf5f0val = *f1valf5f0valiter + f1valf5f0[f1valf5f0key] = &f1valf5f0val + } + f1valf5.SetAdditionalDeltaOptions(f1valf5f0) + } + if f1valiter.CatalogDeltaSource.Database != nil { + f1valf5.SetDatabase(*f1valiter.CatalogDeltaSource.Database) + } + if f1valiter.CatalogDeltaSource.Name != nil { + f1valf5.SetName(*f1valiter.CatalogDeltaSource.Name) + } + if f1valiter.CatalogDeltaSource.OutputSchemas != nil { + f1valf5f3 := []*svcsdk.GlueSchema{} + for _, f1valf5f3iter := range f1valiter.CatalogDeltaSource.OutputSchemas { + f1valf5f3elem := &svcsdk.GlueSchema{} + if f1valf5f3iter.Columns != nil { + f1valf5f3elemf0 := []*svcsdk.GlueStudioSchemaColumn{} + for _, f1valf5f3elemf0iter := range f1valf5f3iter.Columns { + f1valf5f3elemf0elem := &svcsdk.GlueStudioSchemaColumn{} + if f1valf5f3elemf0iter.Name != nil { + f1valf5f3elemf0elem.SetName(*f1valf5f3elemf0iter.Name) + } + if f1valf5f3elemf0iter.Type != nil { + f1valf5f3elemf0elem.SetType(*f1valf5f3elemf0iter.Type) + } + f1valf5f3elemf0 = append(f1valf5f3elemf0, f1valf5f3elemf0elem) + } + f1valf5f3elem.SetColumns(f1valf5f3elemf0) + } + f1valf5f3 = append(f1valf5f3, f1valf5f3elem) + } + f1valf5.SetOutputSchemas(f1valf5f3) + } + if f1valiter.CatalogDeltaSource.Table != nil { + f1valf5.SetTable(*f1valiter.CatalogDeltaSource.Table) + } + f1val.SetCatalogDeltaSource(f1valf5) + } + if f1valiter.CatalogHudiSource != nil { + f1valf6 := &svcsdk.CatalogHudiSource{} + if f1valiter.CatalogHudiSource.AdditionalHudiOptions != nil { + f1valf6f0 := map[string]*string{} + for f1valf6f0key, f1valf6f0valiter := range f1valiter.CatalogHudiSource.AdditionalHudiOptions { + var f1valf6f0val string + f1valf6f0val = *f1valf6f0valiter + f1valf6f0[f1valf6f0key] = &f1valf6f0val + } + f1valf6.SetAdditionalHudiOptions(f1valf6f0) + } + if f1valiter.CatalogHudiSource.Database != nil { + f1valf6.SetDatabase(*f1valiter.CatalogHudiSource.Database) + } + if f1valiter.CatalogHudiSource.Name != nil { + f1valf6.SetName(*f1valiter.CatalogHudiSource.Name) + } + if f1valiter.CatalogHudiSource.OutputSchemas != nil { + f1valf6f3 := []*svcsdk.GlueSchema{} + for _, f1valf6f3iter := range f1valiter.CatalogHudiSource.OutputSchemas { + f1valf6f3elem := &svcsdk.GlueSchema{} + if f1valf6f3iter.Columns != nil { + f1valf6f3elemf0 := []*svcsdk.GlueStudioSchemaColumn{} + for _, f1valf6f3elemf0iter := range f1valf6f3iter.Columns { + f1valf6f3elemf0elem := &svcsdk.GlueStudioSchemaColumn{} + if f1valf6f3elemf0iter.Name != nil { + f1valf6f3elemf0elem.SetName(*f1valf6f3elemf0iter.Name) + } + if f1valf6f3elemf0iter.Type != nil { + f1valf6f3elemf0elem.SetType(*f1valf6f3elemf0iter.Type) + } + f1valf6f3elemf0 = append(f1valf6f3elemf0, f1valf6f3elemf0elem) + } + f1valf6f3elem.SetColumns(f1valf6f3elemf0) + } + 
f1valf6f3 = append(f1valf6f3, f1valf6f3elem) + } + f1valf6.SetOutputSchemas(f1valf6f3) + } + if f1valiter.CatalogHudiSource.Table != nil { + f1valf6.SetTable(*f1valiter.CatalogHudiSource.Table) + } + f1val.SetCatalogHudiSource(f1valf6) } if f1valiter.CatalogKafkaSource != nil { - f1valf3 := &svcsdk.CatalogKafkaSource{} + f1valf7 := &svcsdk.CatalogKafkaSource{} if f1valiter.CatalogKafkaSource.DataPreviewOptions != nil { - f1valf3f0 := &svcsdk.StreamingDataPreviewOptions{} + f1valf7f0 := &svcsdk.StreamingDataPreviewOptions{} if f1valiter.CatalogKafkaSource.DataPreviewOptions.PollingTime != nil { - f1valf3f0.SetPollingTime(*f1valiter.CatalogKafkaSource.DataPreviewOptions.PollingTime) + f1valf7f0.SetPollingTime(*f1valiter.CatalogKafkaSource.DataPreviewOptions.PollingTime) } if f1valiter.CatalogKafkaSource.DataPreviewOptions.RecordPollingLimit != nil { - f1valf3f0.SetRecordPollingLimit(*f1valiter.CatalogKafkaSource.DataPreviewOptions.RecordPollingLimit) + f1valf7f0.SetRecordPollingLimit(*f1valiter.CatalogKafkaSource.DataPreviewOptions.RecordPollingLimit) } - f1valf3.SetDataPreviewOptions(f1valf3f0) + f1valf7.SetDataPreviewOptions(f1valf7f0) } if f1valiter.CatalogKafkaSource.Database != nil { - f1valf3.SetDatabase(*f1valiter.CatalogKafkaSource.Database) + f1valf7.SetDatabase(*f1valiter.CatalogKafkaSource.Database) } if f1valiter.CatalogKafkaSource.DetectSchema != nil { - f1valf3.SetDetectSchema(*f1valiter.CatalogKafkaSource.DetectSchema) + f1valf7.SetDetectSchema(*f1valiter.CatalogKafkaSource.DetectSchema) } if f1valiter.CatalogKafkaSource.Name != nil { - f1valf3.SetName(*f1valiter.CatalogKafkaSource.Name) + f1valf7.SetName(*f1valiter.CatalogKafkaSource.Name) } if f1valiter.CatalogKafkaSource.StreamingOptions != nil { - f1valf3f4 := &svcsdk.KafkaStreamingSourceOptions{} + f1valf7f4 := &svcsdk.KafkaStreamingSourceOptions{} + if f1valiter.CatalogKafkaSource.StreamingOptions.AddRecordTimestamp != nil { + f1valf7f4.SetAddRecordTimestamp(*f1valiter.CatalogKafkaSource.StreamingOptions.AddRecordTimestamp) + } if f1valiter.CatalogKafkaSource.StreamingOptions.Assign != nil { - f1valf3f4.SetAssign(*f1valiter.CatalogKafkaSource.StreamingOptions.Assign) + f1valf7f4.SetAssign(*f1valiter.CatalogKafkaSource.StreamingOptions.Assign) } if f1valiter.CatalogKafkaSource.StreamingOptions.BootstrapServers != nil { - f1valf3f4.SetBootstrapServers(*f1valiter.CatalogKafkaSource.StreamingOptions.BootstrapServers) + f1valf7f4.SetBootstrapServers(*f1valiter.CatalogKafkaSource.StreamingOptions.BootstrapServers) } if f1valiter.CatalogKafkaSource.StreamingOptions.Classification != nil { - f1valf3f4.SetClassification(*f1valiter.CatalogKafkaSource.StreamingOptions.Classification) + f1valf7f4.SetClassification(*f1valiter.CatalogKafkaSource.StreamingOptions.Classification) } if f1valiter.CatalogKafkaSource.StreamingOptions.ConnectionName != nil { - f1valf3f4.SetConnectionName(*f1valiter.CatalogKafkaSource.StreamingOptions.ConnectionName) + f1valf7f4.SetConnectionName(*f1valiter.CatalogKafkaSource.StreamingOptions.ConnectionName) } if f1valiter.CatalogKafkaSource.StreamingOptions.Delimiter != nil { - f1valf3f4.SetDelimiter(*f1valiter.CatalogKafkaSource.StreamingOptions.Delimiter) + f1valf7f4.SetDelimiter(*f1valiter.CatalogKafkaSource.StreamingOptions.Delimiter) + } + if f1valiter.CatalogKafkaSource.StreamingOptions.EmitConsumerLagMetrics != nil { + f1valf7f4.SetEmitConsumerLagMetrics(*f1valiter.CatalogKafkaSource.StreamingOptions.EmitConsumerLagMetrics) } if f1valiter.CatalogKafkaSource.StreamingOptions.EndingOffsets != nil { - 
f1valf3f4.SetEndingOffsets(*f1valiter.CatalogKafkaSource.StreamingOptions.EndingOffsets) + f1valf7f4.SetEndingOffsets(*f1valiter.CatalogKafkaSource.StreamingOptions.EndingOffsets) + } + if f1valiter.CatalogKafkaSource.StreamingOptions.IncludeHeaders != nil { + f1valf7f4.SetIncludeHeaders(*f1valiter.CatalogKafkaSource.StreamingOptions.IncludeHeaders) } if f1valiter.CatalogKafkaSource.StreamingOptions.MaxOffsetsPerTrigger != nil { - f1valf3f4.SetMaxOffsetsPerTrigger(*f1valiter.CatalogKafkaSource.StreamingOptions.MaxOffsetsPerTrigger) + f1valf7f4.SetMaxOffsetsPerTrigger(*f1valiter.CatalogKafkaSource.StreamingOptions.MaxOffsetsPerTrigger) } if f1valiter.CatalogKafkaSource.StreamingOptions.MinPartitions != nil { - f1valf3f4.SetMinPartitions(*f1valiter.CatalogKafkaSource.StreamingOptions.MinPartitions) + f1valf7f4.SetMinPartitions(*f1valiter.CatalogKafkaSource.StreamingOptions.MinPartitions) } if f1valiter.CatalogKafkaSource.StreamingOptions.NumRetries != nil { - f1valf3f4.SetNumRetries(*f1valiter.CatalogKafkaSource.StreamingOptions.NumRetries) + f1valf7f4.SetNumRetries(*f1valiter.CatalogKafkaSource.StreamingOptions.NumRetries) } if f1valiter.CatalogKafkaSource.StreamingOptions.PollTimeoutMs != nil { - f1valf3f4.SetPollTimeoutMs(*f1valiter.CatalogKafkaSource.StreamingOptions.PollTimeoutMs) + f1valf7f4.SetPollTimeoutMs(*f1valiter.CatalogKafkaSource.StreamingOptions.PollTimeoutMs) } if f1valiter.CatalogKafkaSource.StreamingOptions.RetryIntervalMs != nil { - f1valf3f4.SetRetryIntervalMs(*f1valiter.CatalogKafkaSource.StreamingOptions.RetryIntervalMs) + f1valf7f4.SetRetryIntervalMs(*f1valiter.CatalogKafkaSource.StreamingOptions.RetryIntervalMs) } if f1valiter.CatalogKafkaSource.StreamingOptions.SecurityProtocol != nil { - f1valf3f4.SetSecurityProtocol(*f1valiter.CatalogKafkaSource.StreamingOptions.SecurityProtocol) + f1valf7f4.SetSecurityProtocol(*f1valiter.CatalogKafkaSource.StreamingOptions.SecurityProtocol) } if f1valiter.CatalogKafkaSource.StreamingOptions.StartingOffsets != nil { - f1valf3f4.SetStartingOffsets(*f1valiter.CatalogKafkaSource.StreamingOptions.StartingOffsets) + f1valf7f4.SetStartingOffsets(*f1valiter.CatalogKafkaSource.StreamingOptions.StartingOffsets) + } + if f1valiter.CatalogKafkaSource.StreamingOptions.StartingTimestamp != nil { + f1valf7f4.SetStartingTimestamp(f1valiter.CatalogKafkaSource.StreamingOptions.StartingTimestamp.Time) } if f1valiter.CatalogKafkaSource.StreamingOptions.SubscribePattern != nil { - f1valf3f4.SetSubscribePattern(*f1valiter.CatalogKafkaSource.StreamingOptions.SubscribePattern) + f1valf7f4.SetSubscribePattern(*f1valiter.CatalogKafkaSource.StreamingOptions.SubscribePattern) } if f1valiter.CatalogKafkaSource.StreamingOptions.TopicName != nil { - f1valf3f4.SetTopicName(*f1valiter.CatalogKafkaSource.StreamingOptions.TopicName) + f1valf7f4.SetTopicName(*f1valiter.CatalogKafkaSource.StreamingOptions.TopicName) } - f1valf3.SetStreamingOptions(f1valf3f4) + f1valf7.SetStreamingOptions(f1valf7f4) } if f1valiter.CatalogKafkaSource.Table != nil { - f1valf3.SetTable(*f1valiter.CatalogKafkaSource.Table) + f1valf7.SetTable(*f1valiter.CatalogKafkaSource.Table) } if f1valiter.CatalogKafkaSource.WindowSize != nil { - f1valf3.SetWindowSize(*f1valiter.CatalogKafkaSource.WindowSize) + f1valf7.SetWindowSize(*f1valiter.CatalogKafkaSource.WindowSize) } - f1val.SetCatalogKafkaSource(f1valf3) + f1val.SetCatalogKafkaSource(f1valf7) } if f1valiter.CatalogKinesisSource != nil { - f1valf4 := &svcsdk.CatalogKinesisSource{} + f1valf8 := &svcsdk.CatalogKinesisSource{} if 
f1valiter.CatalogKinesisSource.DataPreviewOptions != nil { - f1valf4f0 := &svcsdk.StreamingDataPreviewOptions{} + f1valf8f0 := &svcsdk.StreamingDataPreviewOptions{} if f1valiter.CatalogKinesisSource.DataPreviewOptions.PollingTime != nil { - f1valf4f0.SetPollingTime(*f1valiter.CatalogKinesisSource.DataPreviewOptions.PollingTime) + f1valf8f0.SetPollingTime(*f1valiter.CatalogKinesisSource.DataPreviewOptions.PollingTime) } if f1valiter.CatalogKinesisSource.DataPreviewOptions.RecordPollingLimit != nil { - f1valf4f0.SetRecordPollingLimit(*f1valiter.CatalogKinesisSource.DataPreviewOptions.RecordPollingLimit) + f1valf8f0.SetRecordPollingLimit(*f1valiter.CatalogKinesisSource.DataPreviewOptions.RecordPollingLimit) } - f1valf4.SetDataPreviewOptions(f1valf4f0) + f1valf8.SetDataPreviewOptions(f1valf8f0) } if f1valiter.CatalogKinesisSource.Database != nil { - f1valf4.SetDatabase(*f1valiter.CatalogKinesisSource.Database) + f1valf8.SetDatabase(*f1valiter.CatalogKinesisSource.Database) } if f1valiter.CatalogKinesisSource.DetectSchema != nil { - f1valf4.SetDetectSchema(*f1valiter.CatalogKinesisSource.DetectSchema) + f1valf8.SetDetectSchema(*f1valiter.CatalogKinesisSource.DetectSchema) } if f1valiter.CatalogKinesisSource.Name != nil { - f1valf4.SetName(*f1valiter.CatalogKinesisSource.Name) + f1valf8.SetName(*f1valiter.CatalogKinesisSource.Name) } if f1valiter.CatalogKinesisSource.StreamingOptions != nil { - f1valf4f4 := &svcsdk.KinesisStreamingSourceOptions{} + f1valf8f4 := &svcsdk.KinesisStreamingSourceOptions{} if f1valiter.CatalogKinesisSource.StreamingOptions.AddIdleTimeBetweenReads != nil { - f1valf4f4.SetAddIdleTimeBetweenReads(*f1valiter.CatalogKinesisSource.StreamingOptions.AddIdleTimeBetweenReads) + f1valf8f4.SetAddIdleTimeBetweenReads(*f1valiter.CatalogKinesisSource.StreamingOptions.AddIdleTimeBetweenReads) + } + if f1valiter.CatalogKinesisSource.StreamingOptions.AddRecordTimestamp != nil { + f1valf8f4.SetAddRecordTimestamp(*f1valiter.CatalogKinesisSource.StreamingOptions.AddRecordTimestamp) } if f1valiter.CatalogKinesisSource.StreamingOptions.AvoidEmptyBatches != nil { - f1valf4f4.SetAvoidEmptyBatches(*f1valiter.CatalogKinesisSource.StreamingOptions.AvoidEmptyBatches) + f1valf8f4.SetAvoidEmptyBatches(*f1valiter.CatalogKinesisSource.StreamingOptions.AvoidEmptyBatches) } if f1valiter.CatalogKinesisSource.StreamingOptions.Classification != nil { - f1valf4f4.SetClassification(*f1valiter.CatalogKinesisSource.StreamingOptions.Classification) + f1valf8f4.SetClassification(*f1valiter.CatalogKinesisSource.StreamingOptions.Classification) } if f1valiter.CatalogKinesisSource.StreamingOptions.Delimiter != nil { - f1valf4f4.SetDelimiter(*f1valiter.CatalogKinesisSource.StreamingOptions.Delimiter) + f1valf8f4.SetDelimiter(*f1valiter.CatalogKinesisSource.StreamingOptions.Delimiter) } if f1valiter.CatalogKinesisSource.StreamingOptions.DescribeShardInterval != nil { - f1valf4f4.SetDescribeShardInterval(*f1valiter.CatalogKinesisSource.StreamingOptions.DescribeShardInterval) + f1valf8f4.SetDescribeShardInterval(*f1valiter.CatalogKinesisSource.StreamingOptions.DescribeShardInterval) + } + if f1valiter.CatalogKinesisSource.StreamingOptions.EmitConsumerLagMetrics != nil { + f1valf8f4.SetEmitConsumerLagMetrics(*f1valiter.CatalogKinesisSource.StreamingOptions.EmitConsumerLagMetrics) } if f1valiter.CatalogKinesisSource.StreamingOptions.EndpointURL != nil { - f1valf4f4.SetEndpointUrl(*f1valiter.CatalogKinesisSource.StreamingOptions.EndpointURL) + 
f1valf8f4.SetEndpointUrl(*f1valiter.CatalogKinesisSource.StreamingOptions.EndpointURL) } if f1valiter.CatalogKinesisSource.StreamingOptions.IdleTimeBetweenReadsInMs != nil { - f1valf4f4.SetIdleTimeBetweenReadsInMs(*f1valiter.CatalogKinesisSource.StreamingOptions.IdleTimeBetweenReadsInMs) + f1valf8f4.SetIdleTimeBetweenReadsInMs(*f1valiter.CatalogKinesisSource.StreamingOptions.IdleTimeBetweenReadsInMs) } if f1valiter.CatalogKinesisSource.StreamingOptions.MaxFetchRecordsPerShard != nil { - f1valf4f4.SetMaxFetchRecordsPerShard(*f1valiter.CatalogKinesisSource.StreamingOptions.MaxFetchRecordsPerShard) + f1valf8f4.SetMaxFetchRecordsPerShard(*f1valiter.CatalogKinesisSource.StreamingOptions.MaxFetchRecordsPerShard) } if f1valiter.CatalogKinesisSource.StreamingOptions.MaxFetchTimeInMs != nil { - f1valf4f4.SetMaxFetchTimeInMs(*f1valiter.CatalogKinesisSource.StreamingOptions.MaxFetchTimeInMs) + f1valf8f4.SetMaxFetchTimeInMs(*f1valiter.CatalogKinesisSource.StreamingOptions.MaxFetchTimeInMs) } if f1valiter.CatalogKinesisSource.StreamingOptions.MaxRecordPerRead != nil { - f1valf4f4.SetMaxRecordPerRead(*f1valiter.CatalogKinesisSource.StreamingOptions.MaxRecordPerRead) + f1valf8f4.SetMaxRecordPerRead(*f1valiter.CatalogKinesisSource.StreamingOptions.MaxRecordPerRead) } if f1valiter.CatalogKinesisSource.StreamingOptions.MaxRetryIntervalMs != nil { - f1valf4f4.SetMaxRetryIntervalMs(*f1valiter.CatalogKinesisSource.StreamingOptions.MaxRetryIntervalMs) + f1valf8f4.SetMaxRetryIntervalMs(*f1valiter.CatalogKinesisSource.StreamingOptions.MaxRetryIntervalMs) } if f1valiter.CatalogKinesisSource.StreamingOptions.NumRetries != nil { - f1valf4f4.SetNumRetries(*f1valiter.CatalogKinesisSource.StreamingOptions.NumRetries) + f1valf8f4.SetNumRetries(*f1valiter.CatalogKinesisSource.StreamingOptions.NumRetries) } if f1valiter.CatalogKinesisSource.StreamingOptions.RetryIntervalMs != nil { - f1valf4f4.SetRetryIntervalMs(*f1valiter.CatalogKinesisSource.StreamingOptions.RetryIntervalMs) + f1valf8f4.SetRetryIntervalMs(*f1valiter.CatalogKinesisSource.StreamingOptions.RetryIntervalMs) } if f1valiter.CatalogKinesisSource.StreamingOptions.RoleARN != nil { - f1valf4f4.SetRoleArn(*f1valiter.CatalogKinesisSource.StreamingOptions.RoleARN) + f1valf8f4.SetRoleArn(*f1valiter.CatalogKinesisSource.StreamingOptions.RoleARN) } if f1valiter.CatalogKinesisSource.StreamingOptions.RoleSessionName != nil { - f1valf4f4.SetRoleSessionName(*f1valiter.CatalogKinesisSource.StreamingOptions.RoleSessionName) + f1valf8f4.SetRoleSessionName(*f1valiter.CatalogKinesisSource.StreamingOptions.RoleSessionName) } if f1valiter.CatalogKinesisSource.StreamingOptions.StartingPosition != nil { - f1valf4f4.SetStartingPosition(*f1valiter.CatalogKinesisSource.StreamingOptions.StartingPosition) + f1valf8f4.SetStartingPosition(*f1valiter.CatalogKinesisSource.StreamingOptions.StartingPosition) + } + if f1valiter.CatalogKinesisSource.StreamingOptions.StartingTimestamp != nil { + f1valf8f4.SetStartingTimestamp(f1valiter.CatalogKinesisSource.StreamingOptions.StartingTimestamp.Time) } if f1valiter.CatalogKinesisSource.StreamingOptions.StreamARN != nil { - f1valf4f4.SetStreamArn(*f1valiter.CatalogKinesisSource.StreamingOptions.StreamARN) + f1valf8f4.SetStreamArn(*f1valiter.CatalogKinesisSource.StreamingOptions.StreamARN) } if f1valiter.CatalogKinesisSource.StreamingOptions.StreamName != nil { - f1valf4f4.SetStreamName(*f1valiter.CatalogKinesisSource.StreamingOptions.StreamName) + f1valf8f4.SetStreamName(*f1valiter.CatalogKinesisSource.StreamingOptions.StreamName) } - 
f1valf4.SetStreamingOptions(f1valf4f4) + f1valf8.SetStreamingOptions(f1valf8f4) } if f1valiter.CatalogKinesisSource.Table != nil { - f1valf4.SetTable(*f1valiter.CatalogKinesisSource.Table) + f1valf8.SetTable(*f1valiter.CatalogKinesisSource.Table) } if f1valiter.CatalogKinesisSource.WindowSize != nil { - f1valf4.SetWindowSize(*f1valiter.CatalogKinesisSource.WindowSize) + f1valf8.SetWindowSize(*f1valiter.CatalogKinesisSource.WindowSize) } - f1val.SetCatalogKinesisSource(f1valf4) + f1val.SetCatalogKinesisSource(f1valf8) } if f1valiter.CatalogSource != nil { - f1valf5 := &svcsdk.CatalogSource{} + f1valf9 := &svcsdk.CatalogSource{} if f1valiter.CatalogSource.Database != nil { - f1valf5.SetDatabase(*f1valiter.CatalogSource.Database) + f1valf9.SetDatabase(*f1valiter.CatalogSource.Database) } if f1valiter.CatalogSource.Name != nil { - f1valf5.SetName(*f1valiter.CatalogSource.Name) + f1valf9.SetName(*f1valiter.CatalogSource.Name) } if f1valiter.CatalogSource.Table != nil { - f1valf5.SetTable(*f1valiter.CatalogSource.Table) + f1valf9.SetTable(*f1valiter.CatalogSource.Table) } - f1val.SetCatalogSource(f1valf5) + f1val.SetCatalogSource(f1valf9) } if f1valiter.CatalogTarget != nil { - f1valf6 := &svcsdk.BasicCatalogTarget{} + f1valf10 := &svcsdk.BasicCatalogTarget{} if f1valiter.CatalogTarget.Database != nil { - f1valf6.SetDatabase(*f1valiter.CatalogTarget.Database) + f1valf10.SetDatabase(*f1valiter.CatalogTarget.Database) } if f1valiter.CatalogTarget.Inputs != nil { - f1valf6f1 := []*string{} - for _, f1valf6f1iter := range f1valiter.CatalogTarget.Inputs { - var f1valf6f1elem string - f1valf6f1elem = *f1valf6f1iter - f1valf6f1 = append(f1valf6f1, &f1valf6f1elem) + f1valf10f1 := []*string{} + for _, f1valf10f1iter := range f1valiter.CatalogTarget.Inputs { + var f1valf10f1elem string + f1valf10f1elem = *f1valf10f1iter + f1valf10f1 = append(f1valf10f1, &f1valf10f1elem) } - f1valf6.SetInputs(f1valf6f1) + f1valf10.SetInputs(f1valf10f1) } if f1valiter.CatalogTarget.Name != nil { - f1valf6.SetName(*f1valiter.CatalogTarget.Name) + f1valf10.SetName(*f1valiter.CatalogTarget.Name) } if f1valiter.CatalogTarget.Table != nil { - f1valf6.SetTable(*f1valiter.CatalogTarget.Table) + f1valf10.SetTable(*f1valiter.CatalogTarget.Table) } - f1val.SetCatalogTarget(f1valf6) + f1val.SetCatalogTarget(f1valf10) } if f1valiter.CustomCode != nil { - f1valf7 := &svcsdk.CustomCode{} + f1valf11 := &svcsdk.CustomCode{} if f1valiter.CustomCode.ClassName != nil { - f1valf7.SetClassName(*f1valiter.CustomCode.ClassName) + f1valf11.SetClassName(*f1valiter.CustomCode.ClassName) } if f1valiter.CustomCode.Code != nil { - f1valf7.SetCode(*f1valiter.CustomCode.Code) + f1valf11.SetCode(*f1valiter.CustomCode.Code) } if f1valiter.CustomCode.Inputs != nil { - f1valf7f2 := []*string{} - for _, f1valf7f2iter := range f1valiter.CustomCode.Inputs { - var f1valf7f2elem string - f1valf7f2elem = *f1valf7f2iter - f1valf7f2 = append(f1valf7f2, &f1valf7f2elem) + f1valf11f2 := []*string{} + for _, f1valf11f2iter := range f1valiter.CustomCode.Inputs { + var f1valf11f2elem string + f1valf11f2elem = *f1valf11f2iter + f1valf11f2 = append(f1valf11f2, &f1valf11f2elem) } - f1valf7.SetInputs(f1valf7f2) + f1valf11.SetInputs(f1valf11f2) } if f1valiter.CustomCode.Name != nil { - f1valf7.SetName(*f1valiter.CustomCode.Name) + f1valf11.SetName(*f1valiter.CustomCode.Name) } if f1valiter.CustomCode.OutputSchemas != nil { - f1valf7f4 := []*svcsdk.GlueSchema{} - for _, f1valf7f4iter := range f1valiter.CustomCode.OutputSchemas { - f1valf7f4elem := &svcsdk.GlueSchema{} - if 
f1valf7f4iter.Columns != nil { - f1valf7f4elemf0 := []*svcsdk.GlueStudioSchemaColumn{} - for _, f1valf7f4elemf0iter := range f1valf7f4iter.Columns { - f1valf7f4elemf0elem := &svcsdk.GlueStudioSchemaColumn{} - if f1valf7f4elemf0iter.Name != nil { - f1valf7f4elemf0elem.SetName(*f1valf7f4elemf0iter.Name) + f1valf11f4 := []*svcsdk.GlueSchema{} + for _, f1valf11f4iter := range f1valiter.CustomCode.OutputSchemas { + f1valf11f4elem := &svcsdk.GlueSchema{} + if f1valf11f4iter.Columns != nil { + f1valf11f4elemf0 := []*svcsdk.GlueStudioSchemaColumn{} + for _, f1valf11f4elemf0iter := range f1valf11f4iter.Columns { + f1valf11f4elemf0elem := &svcsdk.GlueStudioSchemaColumn{} + if f1valf11f4elemf0iter.Name != nil { + f1valf11f4elemf0elem.SetName(*f1valf11f4elemf0iter.Name) } - if f1valf7f4elemf0iter.Type != nil { - f1valf7f4elemf0elem.SetType(*f1valf7f4elemf0iter.Type) + if f1valf11f4elemf0iter.Type != nil { + f1valf11f4elemf0elem.SetType(*f1valf11f4elemf0iter.Type) } - f1valf7f4elemf0 = append(f1valf7f4elemf0, f1valf7f4elemf0elem) + f1valf11f4elemf0 = append(f1valf11f4elemf0, f1valf11f4elemf0elem) } - f1valf7f4elem.SetColumns(f1valf7f4elemf0) + f1valf11f4elem.SetColumns(f1valf11f4elemf0) } - f1valf7f4 = append(f1valf7f4, f1valf7f4elem) + f1valf11f4 = append(f1valf11f4, f1valf11f4elem) } - f1valf7.SetOutputSchemas(f1valf7f4) + f1valf11.SetOutputSchemas(f1valf11f4) + } + f1val.SetCustomCode(f1valf11) + } + if f1valiter.DirectJDBCSource != nil { + f1valf12 := &svcsdk.DirectJDBCSource{} + if f1valiter.DirectJDBCSource.ConnectionName != nil { + f1valf12.SetConnectionName(*f1valiter.DirectJDBCSource.ConnectionName) + } + if f1valiter.DirectJDBCSource.ConnectionType != nil { + f1valf12.SetConnectionType(*f1valiter.DirectJDBCSource.ConnectionType) + } + if f1valiter.DirectJDBCSource.Database != nil { + f1valf12.SetDatabase(*f1valiter.DirectJDBCSource.Database) + } + if f1valiter.DirectJDBCSource.Name != nil { + f1valf12.SetName(*f1valiter.DirectJDBCSource.Name) + } + if f1valiter.DirectJDBCSource.RedshiftTmpDir != nil { + f1valf12.SetRedshiftTmpDir(*f1valiter.DirectJDBCSource.RedshiftTmpDir) + } + if f1valiter.DirectJDBCSource.Table != nil { + f1valf12.SetTable(*f1valiter.DirectJDBCSource.Table) } - f1val.SetCustomCode(f1valf7) + f1val.SetDirectJDBCSource(f1valf12) } if f1valiter.DirectKafkaSource != nil { - f1valf8 := &svcsdk.DirectKafkaSource{} + f1valf13 := &svcsdk.DirectKafkaSource{} if f1valiter.DirectKafkaSource.DataPreviewOptions != nil { - f1valf8f0 := &svcsdk.StreamingDataPreviewOptions{} + f1valf13f0 := &svcsdk.StreamingDataPreviewOptions{} if f1valiter.DirectKafkaSource.DataPreviewOptions.PollingTime != nil { - f1valf8f0.SetPollingTime(*f1valiter.DirectKafkaSource.DataPreviewOptions.PollingTime) + f1valf13f0.SetPollingTime(*f1valiter.DirectKafkaSource.DataPreviewOptions.PollingTime) } if f1valiter.DirectKafkaSource.DataPreviewOptions.RecordPollingLimit != nil { - f1valf8f0.SetRecordPollingLimit(*f1valiter.DirectKafkaSource.DataPreviewOptions.RecordPollingLimit) + f1valf13f0.SetRecordPollingLimit(*f1valiter.DirectKafkaSource.DataPreviewOptions.RecordPollingLimit) } - f1valf8.SetDataPreviewOptions(f1valf8f0) + f1valf13.SetDataPreviewOptions(f1valf13f0) } if f1valiter.DirectKafkaSource.DetectSchema != nil { - f1valf8.SetDetectSchema(*f1valiter.DirectKafkaSource.DetectSchema) + f1valf13.SetDetectSchema(*f1valiter.DirectKafkaSource.DetectSchema) } if f1valiter.DirectKafkaSource.Name != nil { - f1valf8.SetName(*f1valiter.DirectKafkaSource.Name) + f1valf13.SetName(*f1valiter.DirectKafkaSource.Name) } if 
f1valiter.DirectKafkaSource.StreamingOptions != nil { - f1valf8f3 := &svcsdk.KafkaStreamingSourceOptions{} + f1valf13f3 := &svcsdk.KafkaStreamingSourceOptions{} + if f1valiter.DirectKafkaSource.StreamingOptions.AddRecordTimestamp != nil { + f1valf13f3.SetAddRecordTimestamp(*f1valiter.DirectKafkaSource.StreamingOptions.AddRecordTimestamp) + } if f1valiter.DirectKafkaSource.StreamingOptions.Assign != nil { - f1valf8f3.SetAssign(*f1valiter.DirectKafkaSource.StreamingOptions.Assign) + f1valf13f3.SetAssign(*f1valiter.DirectKafkaSource.StreamingOptions.Assign) } if f1valiter.DirectKafkaSource.StreamingOptions.BootstrapServers != nil { - f1valf8f3.SetBootstrapServers(*f1valiter.DirectKafkaSource.StreamingOptions.BootstrapServers) + f1valf13f3.SetBootstrapServers(*f1valiter.DirectKafkaSource.StreamingOptions.BootstrapServers) } if f1valiter.DirectKafkaSource.StreamingOptions.Classification != nil { - f1valf8f3.SetClassification(*f1valiter.DirectKafkaSource.StreamingOptions.Classification) + f1valf13f3.SetClassification(*f1valiter.DirectKafkaSource.StreamingOptions.Classification) } if f1valiter.DirectKafkaSource.StreamingOptions.ConnectionName != nil { - f1valf8f3.SetConnectionName(*f1valiter.DirectKafkaSource.StreamingOptions.ConnectionName) + f1valf13f3.SetConnectionName(*f1valiter.DirectKafkaSource.StreamingOptions.ConnectionName) } if f1valiter.DirectKafkaSource.StreamingOptions.Delimiter != nil { - f1valf8f3.SetDelimiter(*f1valiter.DirectKafkaSource.StreamingOptions.Delimiter) + f1valf13f3.SetDelimiter(*f1valiter.DirectKafkaSource.StreamingOptions.Delimiter) + } + if f1valiter.DirectKafkaSource.StreamingOptions.EmitConsumerLagMetrics != nil { + f1valf13f3.SetEmitConsumerLagMetrics(*f1valiter.DirectKafkaSource.StreamingOptions.EmitConsumerLagMetrics) } if f1valiter.DirectKafkaSource.StreamingOptions.EndingOffsets != nil { - f1valf8f3.SetEndingOffsets(*f1valiter.DirectKafkaSource.StreamingOptions.EndingOffsets) + f1valf13f3.SetEndingOffsets(*f1valiter.DirectKafkaSource.StreamingOptions.EndingOffsets) + } + if f1valiter.DirectKafkaSource.StreamingOptions.IncludeHeaders != nil { + f1valf13f3.SetIncludeHeaders(*f1valiter.DirectKafkaSource.StreamingOptions.IncludeHeaders) } if f1valiter.DirectKafkaSource.StreamingOptions.MaxOffsetsPerTrigger != nil { - f1valf8f3.SetMaxOffsetsPerTrigger(*f1valiter.DirectKafkaSource.StreamingOptions.MaxOffsetsPerTrigger) + f1valf13f3.SetMaxOffsetsPerTrigger(*f1valiter.DirectKafkaSource.StreamingOptions.MaxOffsetsPerTrigger) } if f1valiter.DirectKafkaSource.StreamingOptions.MinPartitions != nil { - f1valf8f3.SetMinPartitions(*f1valiter.DirectKafkaSource.StreamingOptions.MinPartitions) + f1valf13f3.SetMinPartitions(*f1valiter.DirectKafkaSource.StreamingOptions.MinPartitions) } if f1valiter.DirectKafkaSource.StreamingOptions.NumRetries != nil { - f1valf8f3.SetNumRetries(*f1valiter.DirectKafkaSource.StreamingOptions.NumRetries) + f1valf13f3.SetNumRetries(*f1valiter.DirectKafkaSource.StreamingOptions.NumRetries) } if f1valiter.DirectKafkaSource.StreamingOptions.PollTimeoutMs != nil { - f1valf8f3.SetPollTimeoutMs(*f1valiter.DirectKafkaSource.StreamingOptions.PollTimeoutMs) + f1valf13f3.SetPollTimeoutMs(*f1valiter.DirectKafkaSource.StreamingOptions.PollTimeoutMs) } if f1valiter.DirectKafkaSource.StreamingOptions.RetryIntervalMs != nil { - f1valf8f3.SetRetryIntervalMs(*f1valiter.DirectKafkaSource.StreamingOptions.RetryIntervalMs) + f1valf13f3.SetRetryIntervalMs(*f1valiter.DirectKafkaSource.StreamingOptions.RetryIntervalMs) } if 
f1valiter.DirectKafkaSource.StreamingOptions.SecurityProtocol != nil { - f1valf8f3.SetSecurityProtocol(*f1valiter.DirectKafkaSource.StreamingOptions.SecurityProtocol) + f1valf13f3.SetSecurityProtocol(*f1valiter.DirectKafkaSource.StreamingOptions.SecurityProtocol) } if f1valiter.DirectKafkaSource.StreamingOptions.StartingOffsets != nil { - f1valf8f3.SetStartingOffsets(*f1valiter.DirectKafkaSource.StreamingOptions.StartingOffsets) + f1valf13f3.SetStartingOffsets(*f1valiter.DirectKafkaSource.StreamingOptions.StartingOffsets) + } + if f1valiter.DirectKafkaSource.StreamingOptions.StartingTimestamp != nil { + f1valf13f3.SetStartingTimestamp(f1valiter.DirectKafkaSource.StreamingOptions.StartingTimestamp.Time) } if f1valiter.DirectKafkaSource.StreamingOptions.SubscribePattern != nil { - f1valf8f3.SetSubscribePattern(*f1valiter.DirectKafkaSource.StreamingOptions.SubscribePattern) + f1valf13f3.SetSubscribePattern(*f1valiter.DirectKafkaSource.StreamingOptions.SubscribePattern) } if f1valiter.DirectKafkaSource.StreamingOptions.TopicName != nil { - f1valf8f3.SetTopicName(*f1valiter.DirectKafkaSource.StreamingOptions.TopicName) + f1valf13f3.SetTopicName(*f1valiter.DirectKafkaSource.StreamingOptions.TopicName) } - f1valf8.SetStreamingOptions(f1valf8f3) + f1valf13.SetStreamingOptions(f1valf13f3) } if f1valiter.DirectKafkaSource.WindowSize != nil { - f1valf8.SetWindowSize(*f1valiter.DirectKafkaSource.WindowSize) + f1valf13.SetWindowSize(*f1valiter.DirectKafkaSource.WindowSize) } - f1val.SetDirectKafkaSource(f1valf8) + f1val.SetDirectKafkaSource(f1valf13) } if f1valiter.DirectKinesisSource != nil { - f1valf9 := &svcsdk.DirectKinesisSource{} + f1valf14 := &svcsdk.DirectKinesisSource{} if f1valiter.DirectKinesisSource.DataPreviewOptions != nil { - f1valf9f0 := &svcsdk.StreamingDataPreviewOptions{} + f1valf14f0 := &svcsdk.StreamingDataPreviewOptions{} if f1valiter.DirectKinesisSource.DataPreviewOptions.PollingTime != nil { - f1valf9f0.SetPollingTime(*f1valiter.DirectKinesisSource.DataPreviewOptions.PollingTime) + f1valf14f0.SetPollingTime(*f1valiter.DirectKinesisSource.DataPreviewOptions.PollingTime) } if f1valiter.DirectKinesisSource.DataPreviewOptions.RecordPollingLimit != nil { - f1valf9f0.SetRecordPollingLimit(*f1valiter.DirectKinesisSource.DataPreviewOptions.RecordPollingLimit) + f1valf14f0.SetRecordPollingLimit(*f1valiter.DirectKinesisSource.DataPreviewOptions.RecordPollingLimit) } - f1valf9.SetDataPreviewOptions(f1valf9f0) + f1valf14.SetDataPreviewOptions(f1valf14f0) } if f1valiter.DirectKinesisSource.DetectSchema != nil { - f1valf9.SetDetectSchema(*f1valiter.DirectKinesisSource.DetectSchema) + f1valf14.SetDetectSchema(*f1valiter.DirectKinesisSource.DetectSchema) } if f1valiter.DirectKinesisSource.Name != nil { - f1valf9.SetName(*f1valiter.DirectKinesisSource.Name) + f1valf14.SetName(*f1valiter.DirectKinesisSource.Name) } if f1valiter.DirectKinesisSource.StreamingOptions != nil { - f1valf9f3 := &svcsdk.KinesisStreamingSourceOptions{} + f1valf14f3 := &svcsdk.KinesisStreamingSourceOptions{} if f1valiter.DirectKinesisSource.StreamingOptions.AddIdleTimeBetweenReads != nil { - f1valf9f3.SetAddIdleTimeBetweenReads(*f1valiter.DirectKinesisSource.StreamingOptions.AddIdleTimeBetweenReads) + f1valf14f3.SetAddIdleTimeBetweenReads(*f1valiter.DirectKinesisSource.StreamingOptions.AddIdleTimeBetweenReads) + } + if f1valiter.DirectKinesisSource.StreamingOptions.AddRecordTimestamp != nil { + f1valf14f3.SetAddRecordTimestamp(*f1valiter.DirectKinesisSource.StreamingOptions.AddRecordTimestamp) } if 
f1valiter.DirectKinesisSource.StreamingOptions.AvoidEmptyBatches != nil { - f1valf9f3.SetAvoidEmptyBatches(*f1valiter.DirectKinesisSource.StreamingOptions.AvoidEmptyBatches) + f1valf14f3.SetAvoidEmptyBatches(*f1valiter.DirectKinesisSource.StreamingOptions.AvoidEmptyBatches) } if f1valiter.DirectKinesisSource.StreamingOptions.Classification != nil { - f1valf9f3.SetClassification(*f1valiter.DirectKinesisSource.StreamingOptions.Classification) + f1valf14f3.SetClassification(*f1valiter.DirectKinesisSource.StreamingOptions.Classification) } if f1valiter.DirectKinesisSource.StreamingOptions.Delimiter != nil { - f1valf9f3.SetDelimiter(*f1valiter.DirectKinesisSource.StreamingOptions.Delimiter) + f1valf14f3.SetDelimiter(*f1valiter.DirectKinesisSource.StreamingOptions.Delimiter) } if f1valiter.DirectKinesisSource.StreamingOptions.DescribeShardInterval != nil { - f1valf9f3.SetDescribeShardInterval(*f1valiter.DirectKinesisSource.StreamingOptions.DescribeShardInterval) + f1valf14f3.SetDescribeShardInterval(*f1valiter.DirectKinesisSource.StreamingOptions.DescribeShardInterval) + } + if f1valiter.DirectKinesisSource.StreamingOptions.EmitConsumerLagMetrics != nil { + f1valf14f3.SetEmitConsumerLagMetrics(*f1valiter.DirectKinesisSource.StreamingOptions.EmitConsumerLagMetrics) } if f1valiter.DirectKinesisSource.StreamingOptions.EndpointURL != nil { - f1valf9f3.SetEndpointUrl(*f1valiter.DirectKinesisSource.StreamingOptions.EndpointURL) + f1valf14f3.SetEndpointUrl(*f1valiter.DirectKinesisSource.StreamingOptions.EndpointURL) } if f1valiter.DirectKinesisSource.StreamingOptions.IdleTimeBetweenReadsInMs != nil { - f1valf9f3.SetIdleTimeBetweenReadsInMs(*f1valiter.DirectKinesisSource.StreamingOptions.IdleTimeBetweenReadsInMs) + f1valf14f3.SetIdleTimeBetweenReadsInMs(*f1valiter.DirectKinesisSource.StreamingOptions.IdleTimeBetweenReadsInMs) } if f1valiter.DirectKinesisSource.StreamingOptions.MaxFetchRecordsPerShard != nil { - f1valf9f3.SetMaxFetchRecordsPerShard(*f1valiter.DirectKinesisSource.StreamingOptions.MaxFetchRecordsPerShard) + f1valf14f3.SetMaxFetchRecordsPerShard(*f1valiter.DirectKinesisSource.StreamingOptions.MaxFetchRecordsPerShard) } if f1valiter.DirectKinesisSource.StreamingOptions.MaxFetchTimeInMs != nil { - f1valf9f3.SetMaxFetchTimeInMs(*f1valiter.DirectKinesisSource.StreamingOptions.MaxFetchTimeInMs) + f1valf14f3.SetMaxFetchTimeInMs(*f1valiter.DirectKinesisSource.StreamingOptions.MaxFetchTimeInMs) } if f1valiter.DirectKinesisSource.StreamingOptions.MaxRecordPerRead != nil { - f1valf9f3.SetMaxRecordPerRead(*f1valiter.DirectKinesisSource.StreamingOptions.MaxRecordPerRead) + f1valf14f3.SetMaxRecordPerRead(*f1valiter.DirectKinesisSource.StreamingOptions.MaxRecordPerRead) } if f1valiter.DirectKinesisSource.StreamingOptions.MaxRetryIntervalMs != nil { - f1valf9f3.SetMaxRetryIntervalMs(*f1valiter.DirectKinesisSource.StreamingOptions.MaxRetryIntervalMs) + f1valf14f3.SetMaxRetryIntervalMs(*f1valiter.DirectKinesisSource.StreamingOptions.MaxRetryIntervalMs) } if f1valiter.DirectKinesisSource.StreamingOptions.NumRetries != nil { - f1valf9f3.SetNumRetries(*f1valiter.DirectKinesisSource.StreamingOptions.NumRetries) + f1valf14f3.SetNumRetries(*f1valiter.DirectKinesisSource.StreamingOptions.NumRetries) } if f1valiter.DirectKinesisSource.StreamingOptions.RetryIntervalMs != nil { - f1valf9f3.SetRetryIntervalMs(*f1valiter.DirectKinesisSource.StreamingOptions.RetryIntervalMs) + f1valf14f3.SetRetryIntervalMs(*f1valiter.DirectKinesisSource.StreamingOptions.RetryIntervalMs) } if 
f1valiter.DirectKinesisSource.StreamingOptions.RoleARN != nil { - f1valf9f3.SetRoleArn(*f1valiter.DirectKinesisSource.StreamingOptions.RoleARN) + f1valf14f3.SetRoleArn(*f1valiter.DirectKinesisSource.StreamingOptions.RoleARN) } if f1valiter.DirectKinesisSource.StreamingOptions.RoleSessionName != nil { - f1valf9f3.SetRoleSessionName(*f1valiter.DirectKinesisSource.StreamingOptions.RoleSessionName) + f1valf14f3.SetRoleSessionName(*f1valiter.DirectKinesisSource.StreamingOptions.RoleSessionName) } if f1valiter.DirectKinesisSource.StreamingOptions.StartingPosition != nil { - f1valf9f3.SetStartingPosition(*f1valiter.DirectKinesisSource.StreamingOptions.StartingPosition) + f1valf14f3.SetStartingPosition(*f1valiter.DirectKinesisSource.StreamingOptions.StartingPosition) + } + if f1valiter.DirectKinesisSource.StreamingOptions.StartingTimestamp != nil { + f1valf14f3.SetStartingTimestamp(f1valiter.DirectKinesisSource.StreamingOptions.StartingTimestamp.Time) } if f1valiter.DirectKinesisSource.StreamingOptions.StreamARN != nil { - f1valf9f3.SetStreamArn(*f1valiter.DirectKinesisSource.StreamingOptions.StreamARN) + f1valf14f3.SetStreamArn(*f1valiter.DirectKinesisSource.StreamingOptions.StreamARN) } if f1valiter.DirectKinesisSource.StreamingOptions.StreamName != nil { - f1valf9f3.SetStreamName(*f1valiter.DirectKinesisSource.StreamingOptions.StreamName) + f1valf14f3.SetStreamName(*f1valiter.DirectKinesisSource.StreamingOptions.StreamName) } - f1valf9.SetStreamingOptions(f1valf9f3) + f1valf14.SetStreamingOptions(f1valf14f3) } if f1valiter.DirectKinesisSource.WindowSize != nil { - f1valf9.SetWindowSize(*f1valiter.DirectKinesisSource.WindowSize) + f1valf14.SetWindowSize(*f1valiter.DirectKinesisSource.WindowSize) } - f1val.SetDirectKinesisSource(f1valf9) + f1val.SetDirectKinesisSource(f1valf14) } if f1valiter.DropDuplicates != nil { - f1valf10 := &svcsdk.DropDuplicates{} + f1valf15 := &svcsdk.DropDuplicates{} if f1valiter.DropDuplicates.Columns != nil { - f1valf10f0 := [][]*string{} - for _, f1valf10f0iter := range f1valiter.DropDuplicates.Columns { - f1valf10f0elem := []*string{} - for _, f1valf10f0elemiter := range f1valf10f0iter { - var f1valf10f0elemelem string - f1valf10f0elemelem = *f1valf10f0elemiter - f1valf10f0elem = append(f1valf10f0elem, &f1valf10f0elemelem) + f1valf15f0 := [][]*string{} + for _, f1valf15f0iter := range f1valiter.DropDuplicates.Columns { + f1valf15f0elem := []*string{} + for _, f1valf15f0elemiter := range f1valf15f0iter { + var f1valf15f0elemelem string + f1valf15f0elemelem = *f1valf15f0elemiter + f1valf15f0elem = append(f1valf15f0elem, &f1valf15f0elemelem) } - f1valf10f0 = append(f1valf10f0, f1valf10f0elem) + f1valf15f0 = append(f1valf15f0, f1valf15f0elem) } - f1valf10.SetColumns(f1valf10f0) + f1valf15.SetColumns(f1valf15f0) } if f1valiter.DropDuplicates.Inputs != nil { - f1valf10f1 := []*string{} - for _, f1valf10f1iter := range f1valiter.DropDuplicates.Inputs { - var f1valf10f1elem string - f1valf10f1elem = *f1valf10f1iter - f1valf10f1 = append(f1valf10f1, &f1valf10f1elem) + f1valf15f1 := []*string{} + for _, f1valf15f1iter := range f1valiter.DropDuplicates.Inputs { + var f1valf15f1elem string + f1valf15f1elem = *f1valf15f1iter + f1valf15f1 = append(f1valf15f1, &f1valf15f1elem) } - f1valf10.SetInputs(f1valf10f1) + f1valf15.SetInputs(f1valf15f1) } if f1valiter.DropDuplicates.Name != nil { - f1valf10.SetName(*f1valiter.DropDuplicates.Name) + f1valf15.SetName(*f1valiter.DropDuplicates.Name) } - f1val.SetDropDuplicates(f1valf10) + f1val.SetDropDuplicates(f1valf15) } if 
 if f1valiter.DropFields != nil {
-f1valf11 := &svcsdk.DropFields{}
+f1valf16 := &svcsdk.DropFields{}
 if f1valiter.DropFields.Inputs != nil {
-f1valf11f0 := []*string{}
-for _, f1valf11f0iter := range f1valiter.DropFields.Inputs {
-var f1valf11f0elem string
-f1valf11f0elem = *f1valf11f0iter
-f1valf11f0 = append(f1valf11f0, &f1valf11f0elem)
+f1valf16f0 := []*string{}
+for _, f1valf16f0iter := range f1valiter.DropFields.Inputs {
+var f1valf16f0elem string
+f1valf16f0elem = *f1valf16f0iter
+f1valf16f0 = append(f1valf16f0, &f1valf16f0elem)
 }
-f1valf11.SetInputs(f1valf11f0)
+f1valf16.SetInputs(f1valf16f0)
 }
 if f1valiter.DropFields.Name != nil {
-f1valf11.SetName(*f1valiter.DropFields.Name)
+f1valf16.SetName(*f1valiter.DropFields.Name)
 }
 if f1valiter.DropFields.Paths != nil {
-f1valf11f2 := [][]*string{}
-for _, f1valf11f2iter := range f1valiter.DropFields.Paths {
-f1valf11f2elem := []*string{}
-for _, f1valf11f2elemiter := range f1valf11f2iter {
-var f1valf11f2elemelem string
-f1valf11f2elemelem = *f1valf11f2elemiter
-f1valf11f2elem = append(f1valf11f2elem, &f1valf11f2elemelem)
+f1valf16f2 := [][]*string{}
+for _, f1valf16f2iter := range f1valiter.DropFields.Paths {
+f1valf16f2elem := []*string{}
+for _, f1valf16f2elemiter := range f1valf16f2iter {
+var f1valf16f2elemelem string
+f1valf16f2elemelem = *f1valf16f2elemiter
+f1valf16f2elem = append(f1valf16f2elem, &f1valf16f2elemelem)
 }
-f1valf11f2 = append(f1valf11f2, f1valf11f2elem)
+f1valf16f2 = append(f1valf16f2, f1valf16f2elem)
 }
-f1valf11.SetPaths(f1valf11f2)
+f1valf16.SetPaths(f1valf16f2)
 }
-f1val.SetDropFields(f1valf11)
+f1val.SetDropFields(f1valf16)
 }
 if f1valiter.DropNullFields != nil {
-f1valf12 := &svcsdk.DropNullFields{}
+f1valf17 := &svcsdk.DropNullFields{}
 if f1valiter.DropNullFields.Inputs != nil {
-f1valf12f0 := []*string{}
-for _, f1valf12f0iter := range f1valiter.DropNullFields.Inputs {
-var f1valf12f0elem string
-f1valf12f0elem = *f1valf12f0iter
-f1valf12f0 = append(f1valf12f0, &f1valf12f0elem)
+f1valf17f0 := []*string{}
+for _, f1valf17f0iter := range f1valiter.DropNullFields.Inputs {
+var f1valf17f0elem string
+f1valf17f0elem = *f1valf17f0iter
+f1valf17f0 = append(f1valf17f0, &f1valf17f0elem)
 }
-f1valf12.SetInputs(f1valf12f0)
+f1valf17.SetInputs(f1valf17f0)
 }
 if f1valiter.DropNullFields.Name != nil {
-f1valf12.SetName(*f1valiter.DropNullFields.Name)
+f1valf17.SetName(*f1valiter.DropNullFields.Name)
 }
 if f1valiter.DropNullFields.NullCheckBoxList != nil {
-f1valf12f2 := &svcsdk.NullCheckBoxList{}
+f1valf17f2 := &svcsdk.NullCheckBoxList{}
 if f1valiter.DropNullFields.NullCheckBoxList.IsEmpty != nil {
-f1valf12f2.SetIsEmpty(*f1valiter.DropNullFields.NullCheckBoxList.IsEmpty)
+f1valf17f2.SetIsEmpty(*f1valiter.DropNullFields.NullCheckBoxList.IsEmpty)
 }
 if f1valiter.DropNullFields.NullCheckBoxList.IsNegOne != nil {
-f1valf12f2.SetIsNegOne(*f1valiter.DropNullFields.NullCheckBoxList.IsNegOne)
+f1valf17f2.SetIsNegOne(*f1valiter.DropNullFields.NullCheckBoxList.IsNegOne)
 }
 if f1valiter.DropNullFields.NullCheckBoxList.IsNullString != nil {
-f1valf12f2.SetIsNullString(*f1valiter.DropNullFields.NullCheckBoxList.IsNullString)
+f1valf17f2.SetIsNullString(*f1valiter.DropNullFields.NullCheckBoxList.IsNullString)
 }
-f1valf12.SetNullCheckBoxList(f1valf12f2)
+f1valf17.SetNullCheckBoxList(f1valf17f2)
 }
 if f1valiter.DropNullFields.NullTextList != nil {
-f1valf12f3 := []*svcsdk.NullValueField{}
-for _, f1valf12f3iter := range f1valiter.DropNullFields.NullTextList {
-f1valf12f3elem := &svcsdk.NullValueField{}
-if f1valf12f3iter.Datatype != nil {
-f1valf12f3elemf0 := &svcsdk.Datatype{}
-if f1valf12f3iter.Datatype.ID != nil {
-f1valf12f3elemf0.SetId(*f1valf12f3iter.Datatype.ID)
+f1valf17f3 := []*svcsdk.NullValueField{}
+for _, f1valf17f3iter := range f1valiter.DropNullFields.NullTextList {
+f1valf17f3elem := &svcsdk.NullValueField{}
+if f1valf17f3iter.Datatype != nil {
+f1valf17f3elemf0 := &svcsdk.Datatype{}
+if f1valf17f3iter.Datatype.ID != nil {
+f1valf17f3elemf0.SetId(*f1valf17f3iter.Datatype.ID)
 }
-if f1valf12f3iter.Datatype.Label != nil {
-f1valf12f3elemf0.SetLabel(*f1valf12f3iter.Datatype.Label)
+if f1valf17f3iter.Datatype.Label != nil {
+f1valf17f3elemf0.SetLabel(*f1valf17f3iter.Datatype.Label)
 }
-f1valf12f3elem.SetDatatype(f1valf12f3elemf0)
+f1valf17f3elem.SetDatatype(f1valf17f3elemf0)
 }
-if f1valf12f3iter.Value != nil {
-f1valf12f3elem.SetValue(*f1valf12f3iter.Value)
+if f1valf17f3iter.Value != nil {
+f1valf17f3elem.SetValue(*f1valf17f3iter.Value)
 }
-f1valf12f3 = append(f1valf12f3, f1valf12f3elem)
+f1valf17f3 = append(f1valf17f3, f1valf17f3elem)
 }
-f1valf12.SetNullTextList(f1valf12f3)
+f1valf17.SetNullTextList(f1valf17f3)
 }
-f1val.SetDropNullFields(f1valf12)
+f1val.SetDropNullFields(f1valf17)
 }
 if f1valiter.DynamicTransform != nil {
-f1valf13 := &svcsdk.DynamicTransform{}
+f1valf18 := &svcsdk.DynamicTransform{}
 if f1valiter.DynamicTransform.FunctionName != nil {
-f1valf13.SetFunctionName(*f1valiter.DynamicTransform.FunctionName)
+f1valf18.SetFunctionName(*f1valiter.DynamicTransform.FunctionName)
 }
 if f1valiter.DynamicTransform.Inputs != nil {
-f1valf13f1 := []*string{}
-for _, f1valf13f1iter := range f1valiter.DynamicTransform.Inputs {
-var f1valf13f1elem string
-f1valf13f1elem = *f1valf13f1iter
-f1valf13f1 = append(f1valf13f1, &f1valf13f1elem)
+f1valf18f1 := []*string{}
+for _, f1valf18f1iter := range f1valiter.DynamicTransform.Inputs {
+var f1valf18f1elem string
+f1valf18f1elem = *f1valf18f1iter
+f1valf18f1 = append(f1valf18f1, &f1valf18f1elem)
 }
-f1valf13.SetInputs(f1valf13f1)
+f1valf18.SetInputs(f1valf18f1)
 }
 if f1valiter.DynamicTransform.Name != nil {
-f1valf13.SetName(*f1valiter.DynamicTransform.Name)
+f1valf18.SetName(*f1valiter.DynamicTransform.Name)
+}
+if f1valiter.DynamicTransform.OutputSchemas != nil {
+f1valf18f3 := []*svcsdk.GlueSchema{}
+for _, f1valf18f3iter := range f1valiter.DynamicTransform.OutputSchemas {
+f1valf18f3elem := &svcsdk.GlueSchema{}
+if f1valf18f3iter.Columns != nil {
+f1valf18f3elemf0 := []*svcsdk.GlueStudioSchemaColumn{}
+for _, f1valf18f3elemf0iter := range f1valf18f3iter.Columns {
+f1valf18f3elemf0elem := &svcsdk.GlueStudioSchemaColumn{}
+if f1valf18f3elemf0iter.Name != nil {
+f1valf18f3elemf0elem.SetName(*f1valf18f3elemf0iter.Name)
+}
+if f1valf18f3elemf0iter.Type != nil {
+f1valf18f3elemf0elem.SetType(*f1valf18f3elemf0iter.Type)
+}
+f1valf18f3elemf0 = append(f1valf18f3elemf0, f1valf18f3elemf0elem)
+}
+f1valf18f3elem.SetColumns(f1valf18f3elemf0)
+}
+f1valf18f3 = append(f1valf18f3, f1valf18f3elem)
+}
+f1valf18.SetOutputSchemas(f1valf18f3)
 }
 if f1valiter.DynamicTransform.Parameters != nil {
-f1valf13f3 := []*svcsdk.TransformConfigParameter{}
-for _, f1valf13f3iter := range f1valiter.DynamicTransform.Parameters {
-f1valf13f3elem := &svcsdk.TransformConfigParameter{}
-if f1valf13f3iter.IsOptional != nil {
-f1valf13f3elem.SetIsOptional(*f1valf13f3iter.IsOptional)
+f1valf18f4 := []*svcsdk.TransformConfigParameter{}
+for _, f1valf18f4iter := range f1valiter.DynamicTransform.Parameters {
+f1valf18f4elem := &svcsdk.TransformConfigParameter{}
+if f1valf18f4iter.IsOptional != nil {
+f1valf18f4elem.SetIsOptional(*f1valf18f4iter.IsOptional)
 }
-if f1valf13f3iter.ListType != nil {
-f1valf13f3elem.SetListType(*f1valf13f3iter.ListType)
+if f1valf18f4iter.ListType != nil {
+f1valf18f4elem.SetListType(*f1valf18f4iter.ListType)
 }
-if f1valf13f3iter.Name != nil {
-f1valf13f3elem.SetName(*f1valf13f3iter.Name)
+if f1valf18f4iter.Name != nil {
+f1valf18f4elem.SetName(*f1valf18f4iter.Name)
 }
-if f1valf13f3iter.Type != nil {
-f1valf13f3elem.SetType(*f1valf13f3iter.Type)
+if f1valf18f4iter.Type != nil {
+f1valf18f4elem.SetType(*f1valf18f4iter.Type)
 }
-if f1valf13f3iter.ValidationMessage != nil {
-f1valf13f3elem.SetValidationMessage(*f1valf13f3iter.ValidationMessage)
+if f1valf18f4iter.ValidationMessage != nil {
+f1valf18f4elem.SetValidationMessage(*f1valf18f4iter.ValidationMessage)
 }
-if f1valf13f3iter.ValidationRule != nil {
-f1valf13f3elem.SetValidationRule(*f1valf13f3iter.ValidationRule)
+if f1valf18f4iter.ValidationRule != nil {
+f1valf18f4elem.SetValidationRule(*f1valf18f4iter.ValidationRule)
 }
-if f1valf13f3iter.Value != nil {
-f1valf13f3elemf6 := []*string{}
-for _, f1valf13f3elemf6iter := range f1valf13f3iter.Value {
-var f1valf13f3elemf6elem string
-f1valf13f3elemf6elem = *f1valf13f3elemf6iter
-f1valf13f3elemf6 = append(f1valf13f3elemf6, &f1valf13f3elemf6elem)
+if f1valf18f4iter.Value != nil {
+f1valf18f4elemf6 := []*string{}
+for _, f1valf18f4elemf6iter := range f1valf18f4iter.Value {
+var f1valf18f4elemf6elem string
+f1valf18f4elemf6elem = *f1valf18f4elemf6iter
+f1valf18f4elemf6 = append(f1valf18f4elemf6, &f1valf18f4elemf6elem)
 }
-f1valf13f3elem.SetValue(f1valf13f3elemf6)
+f1valf18f4elem.SetValue(f1valf18f4elemf6)
 }
-f1valf13f3 = append(f1valf13f3, f1valf13f3elem)
+f1valf18f4 = append(f1valf18f4, f1valf18f4elem)
 }
-f1valf13.SetParameters(f1valf13f3)
+f1valf18.SetParameters(f1valf18f4)
 }
 if f1valiter.DynamicTransform.Path != nil {
-f1valf13.SetPath(*f1valiter.DynamicTransform.Path)
+f1valf18.SetPath(*f1valiter.DynamicTransform.Path)
 }
 if f1valiter.DynamicTransform.TransformName != nil {
-f1valf13.SetTransformName(*f1valiter.DynamicTransform.TransformName)
+f1valf18.SetTransformName(*f1valiter.DynamicTransform.TransformName)
 }
 if f1valiter.DynamicTransform.Version != nil {
-f1valf13.SetVersion(*f1valiter.DynamicTransform.Version)
+f1valf18.SetVersion(*f1valiter.DynamicTransform.Version)
 }
-f1val.SetDynamicTransform(f1valf13)
+f1val.SetDynamicTransform(f1valf18)
 }
 if f1valiter.DynamoDBCatalogSource != nil {
-f1valf14 := &svcsdk.DynamoDBCatalogSource{}
+f1valf19 := &svcsdk.DynamoDBCatalogSource{}
 if f1valiter.DynamoDBCatalogSource.Database != nil {
-f1valf14.SetDatabase(*f1valiter.DynamoDBCatalogSource.Database)
+f1valf19.SetDatabase(*f1valiter.DynamoDBCatalogSource.Database)
 }
 if f1valiter.DynamoDBCatalogSource.Name != nil {
-f1valf14.SetName(*f1valiter.DynamoDBCatalogSource.Name)
+f1valf19.SetName(*f1valiter.DynamoDBCatalogSource.Name)
 }
 if f1valiter.DynamoDBCatalogSource.Table != nil {
-f1valf14.SetTable(*f1valiter.DynamoDBCatalogSource.Table)
+f1valf19.SetTable(*f1valiter.DynamoDBCatalogSource.Table)
 }
-f1val.SetDynamoDBCatalogSource(f1valf14)
+f1val.SetDynamoDBCatalogSource(f1valf19)
 }
 if f1valiter.EvaluateDataQuality != nil {
-f1valf15 := &svcsdk.EvaluateDataQuality{}
+f1valf20 := &svcsdk.EvaluateDataQuality{}
 if f1valiter.EvaluateDataQuality.Inputs != nil {
-f1valf15f0 := []*string{}
-for _, f1valf15f0iter := range f1valiter.EvaluateDataQuality.Inputs {
-var f1valf15f0elem string
-f1valf15f0elem = *f1valf15f0iter
-f1valf15f0 = append(f1valf15f0, &f1valf15f0elem)
+f1valf20f0 := []*string{}
+for _, f1valf20f0iter := range f1valiter.EvaluateDataQuality.Inputs {
+var f1valf20f0elem string
+f1valf20f0elem = *f1valf20f0iter
+f1valf20f0 = append(f1valf20f0, &f1valf20f0elem)
 }
-f1valf15.SetInputs(f1valf15f0)
+f1valf20.SetInputs(f1valf20f0)
 }
 if f1valiter.EvaluateDataQuality.Name != nil {
-f1valf15.SetName(*f1valiter.EvaluateDataQuality.Name)
+f1valf20.SetName(*f1valiter.EvaluateDataQuality.Name)
 }
 if f1valiter.EvaluateDataQuality.Output != nil {
-f1valf15.SetOutput(*f1valiter.EvaluateDataQuality.Output)
+f1valf20.SetOutput(*f1valiter.EvaluateDataQuality.Output)
 }
 if f1valiter.EvaluateDataQuality.PublishingOptions != nil {
-f1valf15f3 := &svcsdk.DQResultsPublishingOptions{}
+f1valf20f3 := &svcsdk.DQResultsPublishingOptions{}
 if f1valiter.EvaluateDataQuality.PublishingOptions.CloudWatchMetricsEnabled != nil {
-f1valf15f3.SetCloudWatchMetricsEnabled(*f1valiter.EvaluateDataQuality.PublishingOptions.CloudWatchMetricsEnabled)
+f1valf20f3.SetCloudWatchMetricsEnabled(*f1valiter.EvaluateDataQuality.PublishingOptions.CloudWatchMetricsEnabled)
 }
 if f1valiter.EvaluateDataQuality.PublishingOptions.EvaluationContext != nil {
-f1valf15f3.SetEvaluationContext(*f1valiter.EvaluateDataQuality.PublishingOptions.EvaluationContext)
+f1valf20f3.SetEvaluationContext(*f1valiter.EvaluateDataQuality.PublishingOptions.EvaluationContext)
 }
 if f1valiter.EvaluateDataQuality.PublishingOptions.ResultsPublishingEnabled != nil {
-f1valf15f3.SetResultsPublishingEnabled(*f1valiter.EvaluateDataQuality.PublishingOptions.ResultsPublishingEnabled)
+f1valf20f3.SetResultsPublishingEnabled(*f1valiter.EvaluateDataQuality.PublishingOptions.ResultsPublishingEnabled)
 }
 if f1valiter.EvaluateDataQuality.PublishingOptions.ResultsS3Prefix != nil {
-f1valf15f3.SetResultsS3Prefix(*f1valiter.EvaluateDataQuality.PublishingOptions.ResultsS3Prefix)
+f1valf20f3.SetResultsS3Prefix(*f1valiter.EvaluateDataQuality.PublishingOptions.ResultsS3Prefix)
 }
-f1valf15.SetPublishingOptions(f1valf15f3)
+f1valf20.SetPublishingOptions(f1valf20f3)
 }
 if f1valiter.EvaluateDataQuality.Ruleset != nil {
-f1valf15.SetRuleset(*f1valiter.EvaluateDataQuality.Ruleset)
+f1valf20.SetRuleset(*f1valiter.EvaluateDataQuality.Ruleset)
 }
 if f1valiter.EvaluateDataQuality.StopJobOnFailureOptions != nil {
-f1valf15f5 := &svcsdk.DQStopJobOnFailureOptions{}
+f1valf20f5 := &svcsdk.DQStopJobOnFailureOptions{}
 if f1valiter.EvaluateDataQuality.StopJobOnFailureOptions.StopJobOnFailureTiming != nil {
-f1valf15f5.SetStopJobOnFailureTiming(*f1valiter.EvaluateDataQuality.StopJobOnFailureOptions.StopJobOnFailureTiming)
+f1valf20f5.SetStopJobOnFailureTiming(*f1valiter.EvaluateDataQuality.StopJobOnFailureOptions.StopJobOnFailureTiming)
+}
+f1valf20.SetStopJobOnFailureOptions(f1valf20f5)
+}
+f1val.SetEvaluateDataQuality(f1valf20)
+}
+if f1valiter.EvaluateDataQualityMultiFrame != nil {
+f1valf21 := &svcsdk.EvaluateDataQualityMultiFrame{}
+if f1valiter.EvaluateDataQualityMultiFrame.AdditionalDataSources != nil {
+f1valf21f0 := map[string]*string{}
+for f1valf21f0key, f1valf21f0valiter := range f1valiter.EvaluateDataQualityMultiFrame.AdditionalDataSources {
+var f1valf21f0val string
+f1valf21f0val = *f1valf21f0valiter
+f1valf21f0[f1valf21f0key] = &f1valf21f0val
+}
+f1valf21.SetAdditionalDataSources(f1valf21f0)
+}
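The new EvaluateDataQualityMultiFrame node above introduces two map[string]*string fields (AdditionalDataSources and, just below, AdditionalOptions), and the generator copies each with the same loop: a fresh local per iteration, so the target map never aliases the source. A runnable sketch of that copy (the helper name is ours, not the generator's):

package main

import "fmt"

// copyStringPtrMap mirrors the generated loop for map[string]*string
// fields: re-pointering through a per-iteration local yields a deep copy.
func copyStringPtrMap(in map[string]*string) map[string]*string {
	out := map[string]*string{}
	for k, v := range in {
		val := *v // fresh variable each iteration; &val is a distinct pointer
		out[k] = &val
	}
	return out
}

func main() {
	s := "primary"
	m := copyStringPtrMap(map[string]*string{"source": &s})
	s = "mutated"
	fmt.Println(*m["source"]) // still "primary": the copy does not alias
}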
+if f1valiter.EvaluateDataQualityMultiFrame.AdditionalOptions != nil {
+f1valf21f1 := map[string]*string{}
+for f1valf21f1key, f1valf21f1valiter := range f1valiter.EvaluateDataQualityMultiFrame.AdditionalOptions {
+var f1valf21f1val string
+f1valf21f1val = *f1valf21f1valiter
+f1valf21f1[f1valf21f1key] = &f1valf21f1val
+}
+f1valf21.SetAdditionalOptions(f1valf21f1)
+}
+if f1valiter.EvaluateDataQualityMultiFrame.Inputs != nil {
+f1valf21f2 := []*string{}
+for _, f1valf21f2iter := range f1valiter.EvaluateDataQualityMultiFrame.Inputs {
+var f1valf21f2elem string
+f1valf21f2elem = *f1valf21f2iter
+f1valf21f2 = append(f1valf21f2, &f1valf21f2elem)
+}
+f1valf21.SetInputs(f1valf21f2)
+}
+if f1valiter.EvaluateDataQualityMultiFrame.Name != nil {
+f1valf21.SetName(*f1valiter.EvaluateDataQualityMultiFrame.Name)
+}
+if f1valiter.EvaluateDataQualityMultiFrame.PublishingOptions != nil {
+f1valf21f4 := &svcsdk.DQResultsPublishingOptions{}
+if f1valiter.EvaluateDataQualityMultiFrame.PublishingOptions.CloudWatchMetricsEnabled != nil {
+f1valf21f4.SetCloudWatchMetricsEnabled(*f1valiter.EvaluateDataQualityMultiFrame.PublishingOptions.CloudWatchMetricsEnabled)
+}
+if f1valiter.EvaluateDataQualityMultiFrame.PublishingOptions.EvaluationContext != nil {
+f1valf21f4.SetEvaluationContext(*f1valiter.EvaluateDataQualityMultiFrame.PublishingOptions.EvaluationContext)
+}
+if f1valiter.EvaluateDataQualityMultiFrame.PublishingOptions.ResultsPublishingEnabled != nil {
+f1valf21f4.SetResultsPublishingEnabled(*f1valiter.EvaluateDataQualityMultiFrame.PublishingOptions.ResultsPublishingEnabled)
+}
+if f1valiter.EvaluateDataQualityMultiFrame.PublishingOptions.ResultsS3Prefix != nil {
+f1valf21f4.SetResultsS3Prefix(*f1valiter.EvaluateDataQualityMultiFrame.PublishingOptions.ResultsS3Prefix)
 }
-f1valf15.SetStopJobOnFailureOptions(f1valf15f5)
+f1valf21.SetPublishingOptions(f1valf21f4)
 }
-f1val.SetEvaluateDataQuality(f1valf15)
+if f1valiter.EvaluateDataQualityMultiFrame.Ruleset != nil {
+f1valf21.SetRuleset(*f1valiter.EvaluateDataQualityMultiFrame.Ruleset)
+}
+if f1valiter.EvaluateDataQualityMultiFrame.StopJobOnFailureOptions != nil {
+f1valf21f6 := &svcsdk.DQStopJobOnFailureOptions{}
+if f1valiter.EvaluateDataQualityMultiFrame.StopJobOnFailureOptions.StopJobOnFailureTiming != nil {
+f1valf21f6.SetStopJobOnFailureTiming(*f1valiter.EvaluateDataQualityMultiFrame.StopJobOnFailureOptions.StopJobOnFailureTiming)
+}
+f1valf21.SetStopJobOnFailureOptions(f1valf21f6)
+}
+f1val.SetEvaluateDataQualityMultiFrame(f1valf21)
 }
 if f1valiter.FillMissingValues != nil {
-f1valf16 := &svcsdk.FillMissingValues{}
+f1valf22 := &svcsdk.FillMissingValues{}
 if f1valiter.FillMissingValues.FilledPath != nil {
-f1valf16.SetFilledPath(*f1valiter.FillMissingValues.FilledPath)
+f1valf22.SetFilledPath(*f1valiter.FillMissingValues.FilledPath)
 }
 if f1valiter.FillMissingValues.ImputedPath != nil {
-f1valf16.SetImputedPath(*f1valiter.FillMissingValues.ImputedPath)
+f1valf22.SetImputedPath(*f1valiter.FillMissingValues.ImputedPath)
 }
 if f1valiter.FillMissingValues.Inputs != nil {
-f1valf16f2 := []*string{}
-for _, f1valf16f2iter := range f1valiter.FillMissingValues.Inputs {
-var f1valf16f2elem string
-f1valf16f2elem = *f1valf16f2iter
-f1valf16f2 = append(f1valf16f2, &f1valf16f2elem)
+f1valf22f2 := []*string{}
+for _, f1valf22f2iter := range f1valiter.FillMissingValues.Inputs {
+var f1valf22f2elem string
+f1valf22f2elem = *f1valf22f2iter
+f1valf22f2 = append(f1valf22f2, &f1valf22f2elem)
 }
-f1valf16.SetInputs(f1valf16f2)
+f1valf22.SetInputs(f1valf22f2)
 }
 if f1valiter.FillMissingValues.Name != nil {
-f1valf16.SetName(*f1valiter.FillMissingValues.Name)
+f1valf22.SetName(*f1valiter.FillMissingValues.Name)
 }
-f1val.SetFillMissingValues(f1valf16)
+f1val.SetFillMissingValues(f1valf22)
 }
 if f1valiter.Filter != nil {
-f1valf17 := &svcsdk.Filter{}
+f1valf23 := &svcsdk.Filter{}
 if f1valiter.Filter.Filters != nil {
-f1valf17f0 := []*svcsdk.FilterExpression{}
-for _, f1valf17f0iter := range f1valiter.Filter.Filters {
-f1valf17f0elem := &svcsdk.FilterExpression{}
-if f1valf17f0iter.Negated != nil {
-f1valf17f0elem.SetNegated(*f1valf17f0iter.Negated)
-}
-if f1valf17f0iter.Operation != nil {
-f1valf17f0elem.SetOperation(*f1valf17f0iter.Operation)
-}
-if f1valf17f0iter.Values != nil {
-f1valf17f0elemf2 := []*svcsdk.FilterValue{}
-for _, f1valf17f0elemf2iter := range f1valf17f0iter.Values {
-f1valf17f0elemf2elem := &svcsdk.FilterValue{}
-if f1valf17f0elemf2iter.Type != nil {
-f1valf17f0elemf2elem.SetType(*f1valf17f0elemf2iter.Type)
+f1valf23f0 := []*svcsdk.FilterExpression{}
+for _, f1valf23f0iter := range f1valiter.Filter.Filters {
+f1valf23f0elem := &svcsdk.FilterExpression{}
+if f1valf23f0iter.Negated != nil {
+f1valf23f0elem.SetNegated(*f1valf23f0iter.Negated)
+}
+if f1valf23f0iter.Operation != nil {
+f1valf23f0elem.SetOperation(*f1valf23f0iter.Operation)
+}
+if f1valf23f0iter.Values != nil {
+f1valf23f0elemf2 := []*svcsdk.FilterValue{}
+for _, f1valf23f0elemf2iter := range f1valf23f0iter.Values {
+f1valf23f0elemf2elem := &svcsdk.FilterValue{}
+if f1valf23f0elemf2iter.Type != nil {
+f1valf23f0elemf2elem.SetType(*f1valf23f0elemf2iter.Type)
 }
-if f1valf17f0elemf2iter.Value != nil {
-f1valf17f0elemf2elemf1 := []*string{}
-for _, f1valf17f0elemf2elemf1iter := range f1valf17f0elemf2iter.Value {
-var f1valf17f0elemf2elemf1elem string
-f1valf17f0elemf2elemf1elem = *f1valf17f0elemf2elemf1iter
-f1valf17f0elemf2elemf1 = append(f1valf17f0elemf2elemf1, &f1valf17f0elemf2elemf1elem)
+if f1valf23f0elemf2iter.Value != nil {
+f1valf23f0elemf2elemf1 := []*string{}
+for _, f1valf23f0elemf2elemf1iter := range f1valf23f0elemf2iter.Value {
+var f1valf23f0elemf2elemf1elem string
+f1valf23f0elemf2elemf1elem = *f1valf23f0elemf2elemf1iter
+f1valf23f0elemf2elemf1 = append(f1valf23f0elemf2elemf1, &f1valf23f0elemf2elemf1elem)
 }
-f1valf17f0elemf2elem.SetValue(f1valf17f0elemf2elemf1)
+f1valf23f0elemf2elem.SetValue(f1valf23f0elemf2elemf1)
 }
-f1valf17f0elemf2 = append(f1valf17f0elemf2, f1valf17f0elemf2elem)
+f1valf23f0elemf2 = append(f1valf23f0elemf2, f1valf23f0elemf2elem)
 }
-f1valf17f0elem.SetValues(f1valf17f0elemf2)
+f1valf23f0elem.SetValues(f1valf23f0elemf2)
 }
-f1valf17f0 = append(f1valf17f0, f1valf17f0elem)
+f1valf23f0 = append(f1valf23f0, f1valf23f0elem)
 }
-f1valf17.SetFilters(f1valf17f0)
+f1valf23.SetFilters(f1valf23f0)
 }
 if f1valiter.Filter.Inputs != nil {
-f1valf17f1 := []*string{}
-for _, f1valf17f1iter := range f1valiter.Filter.Inputs {
-var f1valf17f1elem string
-f1valf17f1elem = *f1valf17f1iter
-f1valf17f1 = append(f1valf17f1, &f1valf17f1elem)
+f1valf23f1 := []*string{}
+for _, f1valf23f1iter := range f1valiter.Filter.Inputs {
+var f1valf23f1elem string
+f1valf23f1elem = *f1valf23f1iter
+f1valf23f1 = append(f1valf23f1, &f1valf23f1elem)
 }
-f1valf17.SetInputs(f1valf17f1)
+f1valf23.SetInputs(f1valf23f1)
 }
 if f1valiter.Filter.LogicalOperator != nil {
-f1valf17.SetLogicalOperator(*f1valiter.Filter.LogicalOperator)
+f1valf23.SetLogicalOperator(*f1valiter.Filter.LogicalOperator)
 }
 if f1valiter.Filter.Name != nil {
-f1valf17.SetName(*f1valiter.Filter.Name)
+f1valf23.SetName(*f1valiter.Filter.Name)
 }
-f1val.SetFilter(f1valf17)
+f1val.SetFilter(f1valf23)
 }
 if f1valiter.GovernedCatalogSource != nil {
-f1valf18 := &svcsdk.GovernedCatalogSource{}
+f1valf24 := &svcsdk.GovernedCatalogSource{}
 if f1valiter.GovernedCatalogSource.AdditionalOptions != nil {
-f1valf18f0 := &svcsdk.S3SourceAdditionalOptions{}
+f1valf24f0 := &svcsdk.S3SourceAdditionalOptions{}
 if f1valiter.GovernedCatalogSource.AdditionalOptions.BoundedFiles != nil {
-f1valf18f0.SetBoundedFiles(*f1valiter.GovernedCatalogSource.AdditionalOptions.BoundedFiles)
+f1valf24f0.SetBoundedFiles(*f1valiter.GovernedCatalogSource.AdditionalOptions.BoundedFiles)
 }
 if f1valiter.GovernedCatalogSource.AdditionalOptions.BoundedSize != nil {
-f1valf18f0.SetBoundedSize(*f1valiter.GovernedCatalogSource.AdditionalOptions.BoundedSize)
+f1valf24f0.SetBoundedSize(*f1valiter.GovernedCatalogSource.AdditionalOptions.BoundedSize)
 }
-f1valf18.SetAdditionalOptions(f1valf18f0)
+f1valf24.SetAdditionalOptions(f1valf24f0)
 }
 if f1valiter.GovernedCatalogSource.Database != nil {
-f1valf18.SetDatabase(*f1valiter.GovernedCatalogSource.Database)
+f1valf24.SetDatabase(*f1valiter.GovernedCatalogSource.Database)
 }
 if f1valiter.GovernedCatalogSource.Name != nil {
-f1valf18.SetName(*f1valiter.GovernedCatalogSource.Name)
+f1valf24.SetName(*f1valiter.GovernedCatalogSource.Name)
 }
 if f1valiter.GovernedCatalogSource.PartitionPredicate != nil {
-f1valf18.SetPartitionPredicate(*f1valiter.GovernedCatalogSource.PartitionPredicate)
+f1valf24.SetPartitionPredicate(*f1valiter.GovernedCatalogSource.PartitionPredicate)
 }
 if f1valiter.GovernedCatalogSource.Table != nil {
-f1valf18.SetTable(*f1valiter.GovernedCatalogSource.Table)
+f1valf24.SetTable(*f1valiter.GovernedCatalogSource.Table)
 }
-f1val.SetGovernedCatalogSource(f1valf18)
+f1val.SetGovernedCatalogSource(f1valf24)
 }
 if f1valiter.GovernedCatalogTarget != nil {
-f1valf19 := &svcsdk.GovernedCatalogTarget{}
+f1valf25 := &svcsdk.GovernedCatalogTarget{}
 if f1valiter.GovernedCatalogTarget.Database != nil {
-f1valf19.SetDatabase(*f1valiter.GovernedCatalogTarget.Database)
+f1valf25.SetDatabase(*f1valiter.GovernedCatalogTarget.Database)
 }
 if f1valiter.GovernedCatalogTarget.Inputs != nil {
-f1valf19f1 := []*string{}
-for _, f1valf19f1iter := range f1valiter.GovernedCatalogTarget.Inputs {
-var f1valf19f1elem string
-f1valf19f1elem = *f1valf19f1iter
-f1valf19f1 = append(f1valf19f1, &f1valf19f1elem)
+f1valf25f1 := []*string{}
+for _, f1valf25f1iter := range f1valiter.GovernedCatalogTarget.Inputs {
+var f1valf25f1elem string
+f1valf25f1elem = *f1valf25f1iter
+f1valf25f1 = append(f1valf25f1, &f1valf25f1elem)
 }
-f1valf19.SetInputs(f1valf19f1)
+f1valf25.SetInputs(f1valf25f1)
 }
 if f1valiter.GovernedCatalogTarget.Name != nil {
-f1valf19.SetName(*f1valiter.GovernedCatalogTarget.Name)
+f1valf25.SetName(*f1valiter.GovernedCatalogTarget.Name)
 }
 if f1valiter.GovernedCatalogTarget.PartitionKeys != nil {
-f1valf19f3 := [][]*string{}
-for _, f1valf19f3iter := range f1valiter.GovernedCatalogTarget.PartitionKeys {
-f1valf19f3elem := []*string{}
-for _, f1valf19f3elemiter := range f1valf19f3iter {
-var f1valf19f3elemelem string
-f1valf19f3elemelem = *f1valf19f3elemiter
-f1valf19f3elem = append(f1valf19f3elem, &f1valf19f3elemelem)
+f1valf25f3 := [][]*string{}
+for _, f1valf25f3iter := range f1valiter.GovernedCatalogTarget.PartitionKeys {
+f1valf25f3elem := []*string{}
+for _, f1valf25f3elemiter := range f1valf25f3iter {
+var f1valf25f3elemelem string
+f1valf25f3elemelem = *f1valf25f3elemiter
+f1valf25f3elem = append(f1valf25f3elem, &f1valf25f3elemelem)
 }
-f1valf19f3 = append(f1valf19f3, f1valf19f3elem)
+f1valf25f3 = append(f1valf25f3, f1valf25f3elem)
 }
-f1valf19.SetPartitionKeys(f1valf19f3)
+f1valf25.SetPartitionKeys(f1valf25f3)
 }
 if f1valiter.GovernedCatalogTarget.SchemaChangePolicy != nil {
-f1valf19f4 := &svcsdk.CatalogSchemaChangePolicy{}
+f1valf25f4 := &svcsdk.CatalogSchemaChangePolicy{}
 if f1valiter.GovernedCatalogTarget.SchemaChangePolicy.EnableUpdateCatalog != nil {
-f1valf19f4.SetEnableUpdateCatalog(*f1valiter.GovernedCatalogTarget.SchemaChangePolicy.EnableUpdateCatalog)
+f1valf25f4.SetEnableUpdateCatalog(*f1valiter.GovernedCatalogTarget.SchemaChangePolicy.EnableUpdateCatalog)
 }
 if f1valiter.GovernedCatalogTarget.SchemaChangePolicy.UpdateBehavior != nil {
-f1valf19f4.SetUpdateBehavior(*f1valiter.GovernedCatalogTarget.SchemaChangePolicy.UpdateBehavior)
+f1valf25f4.SetUpdateBehavior(*f1valiter.GovernedCatalogTarget.SchemaChangePolicy.UpdateBehavior)
 }
-f1valf19.SetSchemaChangePolicy(f1valf19f4)
+f1valf25.SetSchemaChangePolicy(f1valf25f4)
 }
 if f1valiter.GovernedCatalogTarget.Table != nil {
-f1valf19.SetTable(*f1valiter.GovernedCatalogTarget.Table)
+f1valf25.SetTable(*f1valiter.GovernedCatalogTarget.Table)
 }
-f1val.SetGovernedCatalogTarget(f1valf19)
+f1val.SetGovernedCatalogTarget(f1valf25)
 }
 if f1valiter.JDBCConnectorSource != nil {
-f1valf20 := &svcsdk.JDBCConnectorSource{}
+f1valf26 := &svcsdk.JDBCConnectorSource{}
 if f1valiter.JDBCConnectorSource.AdditionalOptions != nil {
-f1valf20f0 := &svcsdk.JDBCConnectorOptions{}
+f1valf26f0 := &svcsdk.JDBCConnectorOptions{}
 if f1valiter.JDBCConnectorSource.AdditionalOptions.DataTypeMapping != nil {
-f1valf20f0f0 := map[string]*string{}
-for f1valf20f0f0key, f1valf20f0f0valiter := range f1valiter.JDBCConnectorSource.AdditionalOptions.DataTypeMapping {
-var f1valf20f0f0val string
-f1valf20f0f0val = *f1valf20f0f0valiter
-f1valf20f0f0[f1valf20f0f0key] = &f1valf20f0f0val
+f1valf26f0f0 := map[string]*string{}
+for f1valf26f0f0key, f1valf26f0f0valiter := range f1valiter.JDBCConnectorSource.AdditionalOptions.DataTypeMapping {
+var f1valf26f0f0val string
+f1valf26f0f0val = *f1valf26f0f0valiter
+f1valf26f0f0[f1valf26f0f0key] = &f1valf26f0f0val
 }
-f1valf20f0.SetDataTypeMapping(f1valf20f0f0)
+f1valf26f0.SetDataTypeMapping(f1valf26f0f0)
 }
 if f1valiter.JDBCConnectorSource.AdditionalOptions.FilterPredicate != nil {
-f1valf20f0.SetFilterPredicate(*f1valiter.JDBCConnectorSource.AdditionalOptions.FilterPredicate)
+f1valf26f0.SetFilterPredicate(*f1valiter.JDBCConnectorSource.AdditionalOptions.FilterPredicate)
 }
 if f1valiter.JDBCConnectorSource.AdditionalOptions.JobBookmarkKeys != nil {
-f1valf20f0f2 := []*string{}
-for _, f1valf20f0f2iter := range f1valiter.JDBCConnectorSource.AdditionalOptions.JobBookmarkKeys {
-var f1valf20f0f2elem string
-f1valf20f0f2elem = *f1valf20f0f2iter
-f1valf20f0f2 = append(f1valf20f0f2, &f1valf20f0f2elem)
+f1valf26f0f2 := []*string{}
+for _, f1valf26f0f2iter := range f1valiter.JDBCConnectorSource.AdditionalOptions.JobBookmarkKeys {
+var f1valf26f0f2elem string
+f1valf26f0f2elem = *f1valf26f0f2iter
+f1valf26f0f2 = append(f1valf26f0f2, &f1valf26f0f2elem)
 }
-f1valf20f0.SetJobBookmarkKeys(f1valf20f0f2)
+f1valf26f0.SetJobBookmarkKeys(f1valf26f0f2)
 }
 if f1valiter.JDBCConnectorSource.AdditionalOptions.JobBookmarkKeysSortOrder != nil {
-f1valf20f0.SetJobBookmarkKeysSortOrder(*f1valiter.JDBCConnectorSource.AdditionalOptions.JobBookmarkKeysSortOrder)
+f1valf26f0.SetJobBookmarkKeysSortOrder(*f1valiter.JDBCConnectorSource.AdditionalOptions.JobBookmarkKeysSortOrder)
 }
 if f1valiter.JDBCConnectorSource.AdditionalOptions.LowerBound != nil {
-f1valf20f0.SetLowerBound(*f1valiter.JDBCConnectorSource.AdditionalOptions.LowerBound)
+f1valf26f0.SetLowerBound(*f1valiter.JDBCConnectorSource.AdditionalOptions.LowerBound)
 }
 if f1valiter.JDBCConnectorSource.AdditionalOptions.NumPartitions != nil {
-f1valf20f0.SetNumPartitions(*f1valiter.JDBCConnectorSource.AdditionalOptions.NumPartitions)
+f1valf26f0.SetNumPartitions(*f1valiter.JDBCConnectorSource.AdditionalOptions.NumPartitions)
 }
 if f1valiter.JDBCConnectorSource.AdditionalOptions.PartitionColumn != nil {
-f1valf20f0.SetPartitionColumn(*f1valiter.JDBCConnectorSource.AdditionalOptions.PartitionColumn)
+f1valf26f0.SetPartitionColumn(*f1valiter.JDBCConnectorSource.AdditionalOptions.PartitionColumn)
 }
 if f1valiter.JDBCConnectorSource.AdditionalOptions.UpperBound != nil {
-f1valf20f0.SetUpperBound(*f1valiter.JDBCConnectorSource.AdditionalOptions.UpperBound)
+f1valf26f0.SetUpperBound(*f1valiter.JDBCConnectorSource.AdditionalOptions.UpperBound)
 }
-f1valf20.SetAdditionalOptions(f1valf20f0)
+f1valf26.SetAdditionalOptions(f1valf26f0)
 }
 if f1valiter.JDBCConnectorSource.ConnectionName != nil {
-f1valf20.SetConnectionName(*f1valiter.JDBCConnectorSource.ConnectionName)
+f1valf26.SetConnectionName(*f1valiter.JDBCConnectorSource.ConnectionName)
 }
 if f1valiter.JDBCConnectorSource.ConnectionTable != nil {
-f1valf20.SetConnectionTable(*f1valiter.JDBCConnectorSource.ConnectionTable)
+f1valf26.SetConnectionTable(*f1valiter.JDBCConnectorSource.ConnectionTable)
 }
 if f1valiter.JDBCConnectorSource.ConnectionType != nil {
-f1valf20.SetConnectionType(*f1valiter.JDBCConnectorSource.ConnectionType)
+f1valf26.SetConnectionType(*f1valiter.JDBCConnectorSource.ConnectionType)
 }
 if f1valiter.JDBCConnectorSource.ConnectorName != nil {
-f1valf20.SetConnectorName(*f1valiter.JDBCConnectorSource.ConnectorName)
+f1valf26.SetConnectorName(*f1valiter.JDBCConnectorSource.ConnectorName)
 }
 if f1valiter.JDBCConnectorSource.Name != nil {
-f1valf20.SetName(*f1valiter.JDBCConnectorSource.Name)
+f1valf26.SetName(*f1valiter.JDBCConnectorSource.Name)
 }
 if f1valiter.JDBCConnectorSource.OutputSchemas != nil {
-f1valf20f6 := []*svcsdk.GlueSchema{}
-for _, f1valf20f6iter := range f1valiter.JDBCConnectorSource.OutputSchemas {
-f1valf20f6elem := &svcsdk.GlueSchema{}
-if f1valf20f6iter.Columns != nil {
-f1valf20f6elemf0 := []*svcsdk.GlueStudioSchemaColumn{}
-for _, f1valf20f6elemf0iter := range f1valf20f6iter.Columns {
-f1valf20f6elemf0elem := &svcsdk.GlueStudioSchemaColumn{}
-if f1valf20f6elemf0iter.Name != nil {
-f1valf20f6elemf0elem.SetName(*f1valf20f6elemf0iter.Name)
+f1valf26f6 := []*svcsdk.GlueSchema{}
+for _, f1valf26f6iter := range f1valiter.JDBCConnectorSource.OutputSchemas {
+f1valf26f6elem := &svcsdk.GlueSchema{}
+if f1valf26f6iter.Columns != nil {
+f1valf26f6elemf0 := []*svcsdk.GlueStudioSchemaColumn{}
+for _, f1valf26f6elemf0iter := range f1valf26f6iter.Columns {
+f1valf26f6elemf0elem := &svcsdk.GlueStudioSchemaColumn{}
+if f1valf26f6elemf0iter.Name != nil {
+f1valf26f6elemf0elem.SetName(*f1valf26f6elemf0iter.Name)
 }
-if f1valf20f6elemf0iter.Type != nil {
-f1valf20f6elemf0elem.SetType(*f1valf20f6elemf0iter.Type)
+if f1valf26f6elemf0iter.Type != nil {
+f1valf26f6elemf0elem.SetType(*f1valf26f6elemf0iter.Type)
 }
-f1valf20f6elemf0 = append(f1valf20f6elemf0, f1valf20f6elemf0elem)
+f1valf26f6elemf0 = append(f1valf26f6elemf0, f1valf26f6elemf0elem)
 }
-f1valf20f6elem.SetColumns(f1valf20f6elemf0)
+f1valf26f6elem.SetColumns(f1valf26f6elemf0)
 }
-f1valf20f6 = append(f1valf20f6, f1valf20f6elem)
+f1valf26f6 = append(f1valf26f6, f1valf26f6elem)
 }
-f1valf20.SetOutputSchemas(f1valf20f6)
+f1valf26.SetOutputSchemas(f1valf26f6)
 }
 if f1valiter.JDBCConnectorSource.Query != nil {
-f1valf20.SetQuery(*f1valiter.JDBCConnectorSource.Query)
+f1valf26.SetQuery(*f1valiter.JDBCConnectorSource.Query)
 }
-f1val.SetJDBCConnectorSource(f1valf20)
+f1val.SetJDBCConnectorSource(f1valf26)
 }
 if f1valiter.JDBCConnectorTarget != nil {
-f1valf21 := &svcsdk.JDBCConnectorTarget{}
+f1valf27 := &svcsdk.JDBCConnectorTarget{}
 if f1valiter.JDBCConnectorTarget.AdditionalOptions != nil {
-f1valf21f0 := map[string]*string{}
-for f1valf21f0key, f1valf21f0valiter := range f1valiter.JDBCConnectorTarget.AdditionalOptions {
-var f1valf21f0val string
-f1valf21f0val = *f1valf21f0valiter
-f1valf21f0[f1valf21f0key] = &f1valf21f0val
+f1valf27f0 := map[string]*string{}
+for f1valf27f0key, f1valf27f0valiter := range f1valiter.JDBCConnectorTarget.AdditionalOptions {
+var f1valf27f0val string
+f1valf27f0val = *f1valf27f0valiter
+f1valf27f0[f1valf27f0key] = &f1valf27f0val
 }
-f1valf21.SetAdditionalOptions(f1valf21f0)
+f1valf27.SetAdditionalOptions(f1valf27f0)
 }
 if f1valiter.JDBCConnectorTarget.ConnectionName != nil {
-f1valf21.SetConnectionName(*f1valiter.JDBCConnectorTarget.ConnectionName)
+f1valf27.SetConnectionName(*f1valiter.JDBCConnectorTarget.ConnectionName)
 }
 if f1valiter.JDBCConnectorTarget.ConnectionTable != nil {
-f1valf21.SetConnectionTable(*f1valiter.JDBCConnectorTarget.ConnectionTable)
+f1valf27.SetConnectionTable(*f1valiter.JDBCConnectorTarget.ConnectionTable)
 }
 if f1valiter.JDBCConnectorTarget.ConnectionType != nil {
-f1valf21.SetConnectionType(*f1valiter.JDBCConnectorTarget.ConnectionType)
+f1valf27.SetConnectionType(*f1valiter.JDBCConnectorTarget.ConnectionType)
 }
 if f1valiter.JDBCConnectorTarget.ConnectorName != nil {
-f1valf21.SetConnectorName(*f1valiter.JDBCConnectorTarget.ConnectorName)
+f1valf27.SetConnectorName(*f1valiter.JDBCConnectorTarget.ConnectorName)
 }
 if f1valiter.JDBCConnectorTarget.Inputs != nil {
-f1valf21f5 := []*string{}
-for _, f1valf21f5iter := range f1valiter.JDBCConnectorTarget.Inputs {
-var f1valf21f5elem string
-f1valf21f5elem = *f1valf21f5iter
-f1valf21f5 = append(f1valf21f5, &f1valf21f5elem)
+f1valf27f5 := []*string{}
+for _, f1valf27f5iter := range f1valiter.JDBCConnectorTarget.Inputs {
+var f1valf27f5elem string
+f1valf27f5elem = *f1valf27f5iter
+f1valf27f5 = append(f1valf27f5, &f1valf27f5elem)
 }
-f1valf21.SetInputs(f1valf21f5)
+f1valf27.SetInputs(f1valf27f5)
 }
 if f1valiter.JDBCConnectorTarget.Name != nil {
-f1valf21.SetName(*f1valiter.JDBCConnectorTarget.Name)
+f1valf27.SetName(*f1valiter.JDBCConnectorTarget.Name)
 }
 if f1valiter.JDBCConnectorTarget.OutputSchemas != nil {
-f1valf21f7 := []*svcsdk.GlueSchema{}
-for _, f1valf21f7iter := range f1valiter.JDBCConnectorTarget.OutputSchemas {
-f1valf21f7elem := &svcsdk.GlueSchema{}
-if f1valf21f7iter.Columns != nil {
-f1valf21f7elemf0 := []*svcsdk.GlueStudioSchemaColumn{}
-for _, f1valf21f7elemf0iter := range f1valf21f7iter.Columns {
-f1valf21f7elemf0elem := &svcsdk.GlueStudioSchemaColumn{}
-if f1valf21f7elemf0iter.Name != nil {
-f1valf21f7elemf0elem.SetName(*f1valf21f7elemf0iter.Name)
+f1valf27f7 := []*svcsdk.GlueSchema{}
+for _, f1valf27f7iter := range f1valiter.JDBCConnectorTarget.OutputSchemas {
+f1valf27f7elem := &svcsdk.GlueSchema{}
+if f1valf27f7iter.Columns != nil {
+f1valf27f7elemf0 := []*svcsdk.GlueStudioSchemaColumn{}
+for _, f1valf27f7elemf0iter := range f1valf27f7iter.Columns {
+f1valf27f7elemf0elem := &svcsdk.GlueStudioSchemaColumn{}
+if f1valf27f7elemf0iter.Name != nil {
+f1valf27f7elemf0elem.SetName(*f1valf27f7elemf0iter.Name)
 }
-if f1valf21f7elemf0iter.Type != nil {
-f1valf21f7elemf0elem.SetType(*f1valf21f7elemf0iter.Type)
+if f1valf27f7elemf0iter.Type != nil {
+f1valf27f7elemf0elem.SetType(*f1valf27f7elemf0iter.Type)
 }
-f1valf21f7elemf0 = append(f1valf21f7elemf0, f1valf21f7elemf0elem)
+f1valf27f7elemf0 = append(f1valf27f7elemf0, f1valf27f7elemf0elem)
 }
-f1valf21f7elem.SetColumns(f1valf21f7elemf0)
+f1valf27f7elem.SetColumns(f1valf27f7elemf0)
 }
-f1valf21f7 = append(f1valf21f7, f1valf21f7elem)
+f1valf27f7 = append(f1valf27f7, f1valf27f7elem)
 }
-f1valf21.SetOutputSchemas(f1valf21f7)
+f1valf27.SetOutputSchemas(f1valf27f7)
 }
-f1val.SetJDBCConnectorTarget(f1valf21)
+f1val.SetJDBCConnectorTarget(f1valf27)
 }
 if f1valiter.Join != nil {
-f1valf22 := &svcsdk.Join{}
+f1valf28 := &svcsdk.Join{}
 if f1valiter.Join.Columns != nil {
-f1valf22f0 := []*svcsdk.JoinColumn{}
-for _, f1valf22f0iter := range f1valiter.Join.Columns {
-f1valf22f0elem := &svcsdk.JoinColumn{}
-if f1valf22f0iter.From != nil {
-f1valf22f0elem.SetFrom(*f1valf22f0iter.From)
-}
-if f1valf22f0iter.Keys != nil {
-f1valf22f0elemf1 := [][]*string{}
-for _, f1valf22f0elemf1iter := range f1valf22f0iter.Keys {
-f1valf22f0elemf1elem := []*string{}
-for _, f1valf22f0elemf1elemiter := range f1valf22f0elemf1iter {
-var f1valf22f0elemf1elemelem string
-f1valf22f0elemf1elemelem = *f1valf22f0elemf1elemiter
-f1valf22f0elemf1elem = append(f1valf22f0elemf1elem, &f1valf22f0elemf1elemelem)
+f1valf28f0 := []*svcsdk.JoinColumn{}
+for _, f1valf28f0iter := range f1valiter.Join.Columns {
+f1valf28f0elem := &svcsdk.JoinColumn{}
+if f1valf28f0iter.From != nil {
+f1valf28f0elem.SetFrom(*f1valf28f0iter.From)
+}
+if f1valf28f0iter.Keys != nil {
+f1valf28f0elemf1 := [][]*string{}
+for _, f1valf28f0elemf1iter := range f1valf28f0iter.Keys {
+f1valf28f0elemf1elem := []*string{}
+for _, f1valf28f0elemf1elemiter := range f1valf28f0elemf1iter {
+var f1valf28f0elemf1elemelem string
+f1valf28f0elemf1elemelem = *f1valf28f0elemf1elemiter
+f1valf28f0elemf1elem = append(f1valf28f0elemf1elem, &f1valf28f0elemf1elemelem)
 }
-f1valf22f0elemf1 = append(f1valf22f0elemf1, f1valf22f0elemf1elem)
+f1valf28f0elemf1 = append(f1valf28f0elemf1, f1valf28f0elemf1elem)
 }
-f1valf22f0elem.SetKeys(f1valf22f0elemf1)
+f1valf28f0elem.SetKeys(f1valf28f0elemf1)
 }
-f1valf22f0 = append(f1valf22f0, f1valf22f0elem)
+f1valf28f0 = append(f1valf28f0, f1valf28f0elem)
 }
-f1valf22.SetColumns(f1valf22f0)
+f1valf28.SetColumns(f1valf28f0)
 }
 if f1valiter.Join.Inputs != nil {
-f1valf22f1 := []*string{}
-for _, f1valf22f1iter := range f1valiter.Join.Inputs {
-var f1valf22f1elem string
-f1valf22f1elem = *f1valf22f1iter
-f1valf22f1 = append(f1valf22f1, &f1valf22f1elem)
+f1valf28f1 := []*string{}
+for _, f1valf28f1iter := range f1valiter.Join.Inputs {
+var f1valf28f1elem string
+f1valf28f1elem = *f1valf28f1iter
+f1valf28f1 = append(f1valf28f1, &f1valf28f1elem)
 }
-f1valf22.SetInputs(f1valf22f1)
+f1valf28.SetInputs(f1valf28f1)
 }
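JoinColumn.Keys above is a [][]*string, so the renumbered code runs the generator's doubly nested loop: it copies every inner slice and every string so the SDK input shares no pointers with the Kubernetes object. A compact runnable equivalent (the helper name is illustrative, not from the generator):

package main

import "fmt"

// copyKeys mirrors the generated double loop for [][]*string fields such
// as JoinColumn.Keys: inner slices and elements are all freshly allocated.
func copyKeys(in [][]*string) [][]*string {
	out := [][]*string{}
	for _, inner := range in {
		cp := []*string{}
		for _, p := range inner {
			elem := *p
			cp = append(cp, &elem)
		}
		out = append(out, cp)
	}
	return out
}

func main() {
	k := "order_id"
	fmt.Println(*copyKeys([][]*string{{&k}})[0][0]) // "order_id"
}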
 if f1valiter.Join.JoinType != nil {
-f1valf22.SetJoinType(*f1valiter.Join.JoinType)
+f1valf28.SetJoinType(*f1valiter.Join.JoinType)
 }
 if f1valiter.Join.Name != nil {
-f1valf22.SetName(*f1valiter.Join.Name)
+f1valf28.SetName(*f1valiter.Join.Name)
 }
-f1val.SetJoin(f1valf22)
+f1val.SetJoin(f1valf28)
 }
 if f1valiter.Merge != nil {
-f1valf23 := &svcsdk.Merge{}
+f1valf29 := &svcsdk.Merge{}
 if f1valiter.Merge.Inputs != nil {
-f1valf23f0 := []*string{}
-for _, f1valf23f0iter := range f1valiter.Merge.Inputs {
-var f1valf23f0elem string
-f1valf23f0elem = *f1valf23f0iter
-f1valf23f0 = append(f1valf23f0, &f1valf23f0elem)
+f1valf29f0 := []*string{}
+for _, f1valf29f0iter := range f1valiter.Merge.Inputs {
+var f1valf29f0elem string
+f1valf29f0elem = *f1valf29f0iter
+f1valf29f0 = append(f1valf29f0, &f1valf29f0elem)
 }
-f1valf23.SetInputs(f1valf23f0)
+f1valf29.SetInputs(f1valf29f0)
 }
 if f1valiter.Merge.Name != nil {
-f1valf23.SetName(*f1valiter.Merge.Name)
+f1valf29.SetName(*f1valiter.Merge.Name)
 }
 if f1valiter.Merge.PrimaryKeys != nil {
-f1valf23f2 := [][]*string{}
-for _, f1valf23f2iter := range f1valiter.Merge.PrimaryKeys {
-f1valf23f2elem := []*string{}
-for _, f1valf23f2elemiter := range f1valf23f2iter {
-var f1valf23f2elemelem string
-f1valf23f2elemelem = *f1valf23f2elemiter
-f1valf23f2elem = append(f1valf23f2elem, &f1valf23f2elemelem)
+f1valf29f2 := [][]*string{}
+for _, f1valf29f2iter := range f1valiter.Merge.PrimaryKeys {
+f1valf29f2elem := []*string{}
+for _, f1valf29f2elemiter := range f1valf29f2iter {
+var f1valf29f2elemelem string
+f1valf29f2elemelem = *f1valf29f2elemiter
+f1valf29f2elem = append(f1valf29f2elem, &f1valf29f2elemelem)
 }
-f1valf23f2 = append(f1valf23f2, f1valf23f2elem)
+f1valf29f2 = append(f1valf29f2, f1valf29f2elem)
 }
-f1valf23.SetPrimaryKeys(f1valf23f2)
+f1valf29.SetPrimaryKeys(f1valf29f2)
 }
 if f1valiter.Merge.Source != nil {
-f1valf23.SetSource(*f1valiter.Merge.Source)
+f1valf29.SetSource(*f1valiter.Merge.Source)
 }
-f1val.SetMerge(f1valf23)
+f1val.SetMerge(f1valf29)
 }
 if f1valiter.MicrosoftSQLServerCatalogSource != nil {
-f1valf24 := &svcsdk.MicrosoftSQLServerCatalogSource{}
+f1valf30 := &svcsdk.MicrosoftSQLServerCatalogSource{}
 if f1valiter.MicrosoftSQLServerCatalogSource.Database != nil {
-f1valf24.SetDatabase(*f1valiter.MicrosoftSQLServerCatalogSource.Database)
+f1valf30.SetDatabase(*f1valiter.MicrosoftSQLServerCatalogSource.Database)
 }
 if f1valiter.MicrosoftSQLServerCatalogSource.Name != nil {
-f1valf24.SetName(*f1valiter.MicrosoftSQLServerCatalogSource.Name)
+f1valf30.SetName(*f1valiter.MicrosoftSQLServerCatalogSource.Name)
 }
 if f1valiter.MicrosoftSQLServerCatalogSource.Table != nil {
-f1valf24.SetTable(*f1valiter.MicrosoftSQLServerCatalogSource.Table)
+f1valf30.SetTable(*f1valiter.MicrosoftSQLServerCatalogSource.Table)
 }
-f1val.SetMicrosoftSQLServerCatalogSource(f1valf24)
+f1val.SetMicrosoftSQLServerCatalogSource(f1valf30)
 }
 if f1valiter.MicrosoftSQLServerCatalogTarget != nil {
-f1valf25 := &svcsdk.MicrosoftSQLServerCatalogTarget{}
+f1valf31 := &svcsdk.MicrosoftSQLServerCatalogTarget{}
 if f1valiter.MicrosoftSQLServerCatalogTarget.Database != nil {
-f1valf25.SetDatabase(*f1valiter.MicrosoftSQLServerCatalogTarget.Database)
+f1valf31.SetDatabase(*f1valiter.MicrosoftSQLServerCatalogTarget.Database)
 }
 if f1valiter.MicrosoftSQLServerCatalogTarget.Inputs != nil {
-f1valf25f1 := []*string{}
-for _, f1valf25f1iter := range f1valiter.MicrosoftSQLServerCatalogTarget.Inputs {
-var f1valf25f1elem string
-f1valf25f1elem = *f1valf25f1iter
-f1valf25f1 = append(f1valf25f1, &f1valf25f1elem)
+f1valf31f1 := []*string{}
+for _, f1valf31f1iter := range f1valiter.MicrosoftSQLServerCatalogTarget.Inputs {
+var f1valf31f1elem string
+f1valf31f1elem = *f1valf31f1iter
+f1valf31f1 = append(f1valf31f1, &f1valf31f1elem)
 }
-f1valf25.SetInputs(f1valf25f1)
+f1valf31.SetInputs(f1valf31f1)
 }
 if f1valiter.MicrosoftSQLServerCatalogTarget.Name != nil {
-f1valf25.SetName(*f1valiter.MicrosoftSQLServerCatalogTarget.Name)
+f1valf31.SetName(*f1valiter.MicrosoftSQLServerCatalogTarget.Name)
 }
 if f1valiter.MicrosoftSQLServerCatalogTarget.Table != nil {
-f1valf25.SetTable(*f1valiter.MicrosoftSQLServerCatalogTarget.Table)
+f1valf31.SetTable(*f1valiter.MicrosoftSQLServerCatalogTarget.Table)
 }
-f1val.SetMicrosoftSQLServerCatalogTarget(f1valf25)
+f1val.SetMicrosoftSQLServerCatalogTarget(f1valf31)
 }
 if f1valiter.MySQLCatalogSource != nil {
-f1valf26 := &svcsdk.MySQLCatalogSource{}
+f1valf32 := &svcsdk.MySQLCatalogSource{}
 if f1valiter.MySQLCatalogSource.Database != nil {
-f1valf26.SetDatabase(*f1valiter.MySQLCatalogSource.Database)
+f1valf32.SetDatabase(*f1valiter.MySQLCatalogSource.Database)
 }
 if f1valiter.MySQLCatalogSource.Name != nil {
-f1valf26.SetName(*f1valiter.MySQLCatalogSource.Name)
+f1valf32.SetName(*f1valiter.MySQLCatalogSource.Name)
 }
 if f1valiter.MySQLCatalogSource.Table != nil {
-f1valf26.SetTable(*f1valiter.MySQLCatalogSource.Table)
+f1valf32.SetTable(*f1valiter.MySQLCatalogSource.Table)
 }
-f1val.SetMySQLCatalogSource(f1valf26)
+f1val.SetMySQLCatalogSource(f1valf32)
 }
 if f1valiter.MySQLCatalogTarget != nil {
-f1valf27 := &svcsdk.MySQLCatalogTarget{}
+f1valf33 := &svcsdk.MySQLCatalogTarget{}
 if f1valiter.MySQLCatalogTarget.Database != nil {
-f1valf27.SetDatabase(*f1valiter.MySQLCatalogTarget.Database)
+f1valf33.SetDatabase(*f1valiter.MySQLCatalogTarget.Database)
 }
 if f1valiter.MySQLCatalogTarget.Inputs != nil {
-f1valf27f1 := []*string{}
-for _, f1valf27f1iter := range f1valiter.MySQLCatalogTarget.Inputs {
-var f1valf27f1elem string
-f1valf27f1elem = *f1valf27f1iter
-f1valf27f1 = append(f1valf27f1, &f1valf27f1elem)
+f1valf33f1 := []*string{}
+for _, f1valf33f1iter := range f1valiter.MySQLCatalogTarget.Inputs {
+var f1valf33f1elem string
+f1valf33f1elem = *f1valf33f1iter
+f1valf33f1 = append(f1valf33f1, &f1valf33f1elem)
 }
-f1valf27.SetInputs(f1valf27f1)
+f1valf33.SetInputs(f1valf33f1)
 }
 if f1valiter.MySQLCatalogTarget.Name != nil {
-f1valf27.SetName(*f1valiter.MySQLCatalogTarget.Name)
+f1valf33.SetName(*f1valiter.MySQLCatalogTarget.Name)
 }
 if f1valiter.MySQLCatalogTarget.Table != nil {
-f1valf27.SetTable(*f1valiter.MySQLCatalogTarget.Table)
+f1valf33.SetTable(*f1valiter.MySQLCatalogTarget.Table)
 }
-f1val.SetMySQLCatalogTarget(f1valf27)
+f1val.SetMySQLCatalogTarget(f1valf33)
 }
 if f1valiter.OracleSQLCatalogSource != nil {
-f1valf28 := &svcsdk.OracleSQLCatalogSource{}
+f1valf34 := &svcsdk.OracleSQLCatalogSource{}
 if f1valiter.OracleSQLCatalogSource.Database != nil {
-f1valf28.SetDatabase(*f1valiter.OracleSQLCatalogSource.Database)
+f1valf34.SetDatabase(*f1valiter.OracleSQLCatalogSource.Database)
 }
 if f1valiter.OracleSQLCatalogSource.Name != nil {
-f1valf28.SetName(*f1valiter.OracleSQLCatalogSource.Name)
+f1valf34.SetName(*f1valiter.OracleSQLCatalogSource.Name)
 }
 if f1valiter.OracleSQLCatalogSource.Table != nil {
-f1valf28.SetTable(*f1valiter.OracleSQLCatalogSource.Table)
+f1valf34.SetTable(*f1valiter.OracleSQLCatalogSource.Table)
 }
-f1val.SetOracleSQLCatalogSource(f1valf28)
+f1val.SetOracleSQLCatalogSource(f1valf34)
 }
 if f1valiter.OracleSQLCatalogTarget != nil {
-f1valf29 := &svcsdk.OracleSQLCatalogTarget{}
+f1valf35 := &svcsdk.OracleSQLCatalogTarget{}
 if f1valiter.OracleSQLCatalogTarget.Database != nil {
-f1valf29.SetDatabase(*f1valiter.OracleSQLCatalogTarget.Database)
+f1valf35.SetDatabase(*f1valiter.OracleSQLCatalogTarget.Database)
 }
 if f1valiter.OracleSQLCatalogTarget.Inputs != nil {
-f1valf29f1 := []*string{}
-for _, f1valf29f1iter := range f1valiter.OracleSQLCatalogTarget.Inputs {
-var f1valf29f1elem string
-f1valf29f1elem = *f1valf29f1iter
-f1valf29f1 = append(f1valf29f1, &f1valf29f1elem)
+f1valf35f1 := []*string{}
+for _, f1valf35f1iter := range f1valiter.OracleSQLCatalogTarget.Inputs {
+var f1valf35f1elem string
+f1valf35f1elem = *f1valf35f1iter
+f1valf35f1 = append(f1valf35f1, &f1valf35f1elem)
 }
-f1valf29.SetInputs(f1valf29f1)
+f1valf35.SetInputs(f1valf35f1)
 }
 if f1valiter.OracleSQLCatalogTarget.Name != nil {
-f1valf29.SetName(*f1valiter.OracleSQLCatalogTarget.Name)
+f1valf35.SetName(*f1valiter.OracleSQLCatalogTarget.Name)
 }
 if f1valiter.OracleSQLCatalogTarget.Table != nil {
-f1valf29.SetTable(*f1valiter.OracleSQLCatalogTarget.Table)
+f1valf35.SetTable(*f1valiter.OracleSQLCatalogTarget.Table)
 }
-f1val.SetOracleSQLCatalogTarget(f1valf29)
+f1val.SetOracleSQLCatalogTarget(f1valf35)
 }
 if f1valiter.PIIDetection != nil {
-f1valf30 := &svcsdk.PIIDetection{}
+f1valf36 := &svcsdk.PIIDetection{}
 if f1valiter.PIIDetection.EntityTypesToDetect != nil {
-f1valf30f0 := []*string{}
-for _, f1valf30f0iter := range f1valiter.PIIDetection.EntityTypesToDetect {
-var f1valf30f0elem string
-f1valf30f0elem = *f1valf30f0iter
-f1valf30f0 = append(f1valf30f0, &f1valf30f0elem)
+f1valf36f0 := []*string{}
+for _, f1valf36f0iter := range f1valiter.PIIDetection.EntityTypesToDetect {
+var f1valf36f0elem string
+f1valf36f0elem = *f1valf36f0iter
+f1valf36f0 = append(f1valf36f0, &f1valf36f0elem)
 }
-f1valf30.SetEntityTypesToDetect(f1valf30f0)
+f1valf36.SetEntityTypesToDetect(f1valf36f0)
 }
 if f1valiter.PIIDetection.Inputs != nil {
-f1valf30f1 := []*string{}
-for _, f1valf30f1iter := range f1valiter.PIIDetection.Inputs {
-var f1valf30f1elem string
-f1valf30f1elem = *f1valf30f1iter
-f1valf30f1 = append(f1valf30f1, &f1valf30f1elem)
+f1valf36f1 := []*string{}
+for _, f1valf36f1iter := range f1valiter.PIIDetection.Inputs {
+var f1valf36f1elem string
+f1valf36f1elem = *f1valf36f1iter
+f1valf36f1 = append(f1valf36f1, &f1valf36f1elem)
 }
-f1valf30.SetInputs(f1valf30f1)
+f1valf36.SetInputs(f1valf36f1)
 }
 if f1valiter.PIIDetection.MaskValue != nil {
-f1valf30.SetMaskValue(*f1valiter.PIIDetection.MaskValue)
+f1valf36.SetMaskValue(*f1valiter.PIIDetection.MaskValue)
 }
 if f1valiter.PIIDetection.Name != nil {
-f1valf30.SetName(*f1valiter.PIIDetection.Name)
+f1valf36.SetName(*f1valiter.PIIDetection.Name)
 }
 if f1valiter.PIIDetection.OutputColumnName != nil {
-f1valf30.SetOutputColumnName(*f1valiter.PIIDetection.OutputColumnName)
+f1valf36.SetOutputColumnName(*f1valiter.PIIDetection.OutputColumnName)
 }
 if f1valiter.PIIDetection.PiiType != nil {
-f1valf30.SetPiiType(*f1valiter.PIIDetection.PiiType)
+f1valf36.SetPiiType(*f1valiter.PIIDetection.PiiType)
 }
 if f1valiter.PIIDetection.SampleFraction != nil {
-f1valf30.SetSampleFraction(*f1valiter.PIIDetection.SampleFraction)
+f1valf36.SetSampleFraction(*f1valiter.PIIDetection.SampleFraction)
 }
 if f1valiter.PIIDetection.ThresholdFraction != nil {
-f1valf30.SetThresholdFraction(*f1valiter.PIIDetection.ThresholdFraction)
+f1valf36.SetThresholdFraction(*f1valiter.PIIDetection.ThresholdFraction)
 }
-f1val.SetPIIDetection(f1valf30)
+f1val.SetPIIDetection(f1valf36)
 }
 if f1valiter.PostgreSQLCatalogSource != nil {
-f1valf31 := &svcsdk.PostgreSQLCatalogSource{}
+f1valf37 := &svcsdk.PostgreSQLCatalogSource{}
 if f1valiter.PostgreSQLCatalogSource.Database != nil {
-f1valf31.SetDatabase(*f1valiter.PostgreSQLCatalogSource.Database)
+f1valf37.SetDatabase(*f1valiter.PostgreSQLCatalogSource.Database)
 }
 if f1valiter.PostgreSQLCatalogSource.Name != nil {
-f1valf31.SetName(*f1valiter.PostgreSQLCatalogSource.Name)
+f1valf37.SetName(*f1valiter.PostgreSQLCatalogSource.Name)
 }
 if f1valiter.PostgreSQLCatalogSource.Table != nil {
-f1valf31.SetTable(*f1valiter.PostgreSQLCatalogSource.Table)
+f1valf37.SetTable(*f1valiter.PostgreSQLCatalogSource.Table)
 }
-f1val.SetPostgreSQLCatalogSource(f1valf31)
+f1val.SetPostgreSQLCatalogSource(f1valf37)
 }
 if f1valiter.PostgreSQLCatalogTarget != nil {
-f1valf32 := &svcsdk.PostgreSQLCatalogTarget{}
+f1valf38 := &svcsdk.PostgreSQLCatalogTarget{}
 if f1valiter.PostgreSQLCatalogTarget.Database != nil {
-f1valf32.SetDatabase(*f1valiter.PostgreSQLCatalogTarget.Database)
+f1valf38.SetDatabase(*f1valiter.PostgreSQLCatalogTarget.Database)
 }
 if f1valiter.PostgreSQLCatalogTarget.Inputs != nil {
-f1valf32f1 := []*string{}
-for _, f1valf32f1iter := range f1valiter.PostgreSQLCatalogTarget.Inputs {
-var f1valf32f1elem string
-f1valf32f1elem = *f1valf32f1iter
-f1valf32f1 = append(f1valf32f1, &f1valf32f1elem)
+f1valf38f1 := []*string{}
+for _, f1valf38f1iter := range f1valiter.PostgreSQLCatalogTarget.Inputs {
+var f1valf38f1elem string
+f1valf38f1elem = *f1valf38f1iter
+f1valf38f1 = append(f1valf38f1, &f1valf38f1elem)
 }
-f1valf32.SetInputs(f1valf32f1)
+f1valf38.SetInputs(f1valf38f1)
 }
 if f1valiter.PostgreSQLCatalogTarget.Name != nil {
-f1valf32.SetName(*f1valiter.PostgreSQLCatalogTarget.Name)
+f1valf38.SetName(*f1valiter.PostgreSQLCatalogTarget.Name)
 }
 if f1valiter.PostgreSQLCatalogTarget.Table != nil {
-f1valf32.SetTable(*f1valiter.PostgreSQLCatalogTarget.Table)
+f1valf38.SetTable(*f1valiter.PostgreSQLCatalogTarget.Table)
+}
+f1val.SetPostgreSQLCatalogTarget(f1valf38)
+}
+if f1valiter.Recipe != nil {
+f1valf39 := &svcsdk.Recipe{}
+if f1valiter.Recipe.Inputs != nil {
+f1valf39f0 := []*string{}
+for _, f1valf39f0iter := range f1valiter.Recipe.Inputs {
+var f1valf39f0elem string
+f1valf39f0elem = *f1valf39f0iter
+f1valf39f0 = append(f1valf39f0, &f1valf39f0elem)
+}
+f1valf39.SetInputs(f1valf39f0)
+}
+if f1valiter.Recipe.Name != nil {
+f1valf39.SetName(*f1valiter.Recipe.Name)
+}
+if f1valiter.Recipe.RecipeReference != nil {
+f1valf39f2 := &svcsdk.RecipeReference{}
+if f1valiter.Recipe.RecipeReference.RecipeARN != nil {
+f1valf39f2.SetRecipeArn(*f1valiter.Recipe.RecipeReference.RecipeARN)
+}
+if f1valiter.Recipe.RecipeReference.RecipeVersion != nil {
+f1valf39f2.SetRecipeVersion(*f1valiter.Recipe.RecipeReference.RecipeVersion)
+}
+f1valf39.SetRecipeReference(f1valf39f2)
 }
-f1val.SetPostgreSQLCatalogTarget(f1valf32)
+f1val.SetRecipe(f1valf39)
 }
 if f1valiter.RedshiftSource != nil {
-f1valf33 := &svcsdk.RedshiftSource{}
+f1valf40 := &svcsdk.RedshiftSource{}
 if f1valiter.RedshiftSource.Database != nil {
-f1valf33.SetDatabase(*f1valiter.RedshiftSource.Database)
+f1valf40.SetDatabase(*f1valiter.RedshiftSource.Database)
 }
 if f1valiter.RedshiftSource.Name != nil {
-f1valf33.SetName(*f1valiter.RedshiftSource.Name)
+f1valf40.SetName(*f1valiter.RedshiftSource.Name)
 }
 if f1valiter.RedshiftSource.RedshiftTmpDir != nil {
-f1valf33.SetRedshiftTmpDir(*f1valiter.RedshiftSource.RedshiftTmpDir)
+f1valf40.SetRedshiftTmpDir(*f1valiter.RedshiftSource.RedshiftTmpDir)
 }
 if f1valiter.RedshiftSource.Table != nil {
-f1valf33.SetTable(*f1valiter.RedshiftSource.Table)
+f1valf40.SetTable(*f1valiter.RedshiftSource.Table)
 }
 if f1valiter.RedshiftSource.TmpDirIAMRole != nil {
-f1valf33.SetTmpDirIAMRole(*f1valiter.RedshiftSource.TmpDirIAMRole)
+f1valf40.SetTmpDirIAMRole(*f1valiter.RedshiftSource.TmpDirIAMRole)
 }
-f1val.SetRedshiftSource(f1valf33)
+f1val.SetRedshiftSource(f1valf40)
 }
 if f1valiter.RedshiftTarget != nil {
-f1valf34 := &svcsdk.RedshiftTarget{}
+f1valf41 := &svcsdk.RedshiftTarget{}
 if f1valiter.RedshiftTarget.Database != nil {
-f1valf34.SetDatabase(*f1valiter.RedshiftTarget.Database)
+f1valf41.SetDatabase(*f1valiter.RedshiftTarget.Database)
 }
 if f1valiter.RedshiftTarget.Inputs != nil {
-f1valf34f1 := []*string{}
-for _, f1valf34f1iter := range f1valiter.RedshiftTarget.Inputs {
-var f1valf34f1elem string
-f1valf34f1elem = *f1valf34f1iter
-f1valf34f1 = append(f1valf34f1, &f1valf34f1elem)
+f1valf41f1 := []*string{}
+for _, f1valf41f1iter := range f1valiter.RedshiftTarget.Inputs {
+var f1valf41f1elem string
+f1valf41f1elem = *f1valf41f1iter
+f1valf41f1 = append(f1valf41f1, &f1valf41f1elem)
 }
-f1valf34.SetInputs(f1valf34f1)
+f1valf41.SetInputs(f1valf41f1)
 }
 if f1valiter.RedshiftTarget.Name != nil {
-f1valf34.SetName(*f1valiter.RedshiftTarget.Name)
+f1valf41.SetName(*f1valiter.RedshiftTarget.Name)
 }
 if f1valiter.RedshiftTarget.RedshiftTmpDir != nil {
-f1valf34.SetRedshiftTmpDir(*f1valiter.RedshiftTarget.RedshiftTmpDir)
+f1valf41.SetRedshiftTmpDir(*f1valiter.RedshiftTarget.RedshiftTmpDir)
 }
 if f1valiter.RedshiftTarget.Table != nil {
-f1valf34.SetTable(*f1valiter.RedshiftTarget.Table)
+f1valf41.SetTable(*f1valiter.RedshiftTarget.Table)
 }
 if f1valiter.RedshiftTarget.TmpDirIAMRole != nil {
-f1valf34.SetTmpDirIAMRole(*f1valiter.RedshiftTarget.TmpDirIAMRole)
+f1valf41.SetTmpDirIAMRole(*f1valiter.RedshiftTarget.TmpDirIAMRole)
 }
 if f1valiter.RedshiftTarget.UpsertRedshiftOptions != nil {
-f1valf34f6 := &svcsdk.UpsertRedshiftTargetOptions{}
+f1valf41f6 := &svcsdk.UpsertRedshiftTargetOptions{}
 if f1valiter.RedshiftTarget.UpsertRedshiftOptions.ConnectionName != nil {
-f1valf34f6.SetConnectionName(*f1valiter.RedshiftTarget.UpsertRedshiftOptions.ConnectionName)
+f1valf41f6.SetConnectionName(*f1valiter.RedshiftTarget.UpsertRedshiftOptions.ConnectionName)
 }
 if f1valiter.RedshiftTarget.UpsertRedshiftOptions.TableLocation != nil {
-f1valf34f6.SetTableLocation(*f1valiter.RedshiftTarget.UpsertRedshiftOptions.TableLocation)
+f1valf41f6.SetTableLocation(*f1valiter.RedshiftTarget.UpsertRedshiftOptions.TableLocation)
 }
 if f1valiter.RedshiftTarget.UpsertRedshiftOptions.UpsertKeys != nil {
-f1valf34f6f2 := []*string{}
-for _, f1valf34f6f2iter := range f1valiter.RedshiftTarget.UpsertRedshiftOptions.UpsertKeys {
-var f1valf34f6f2elem string
-f1valf34f6f2elem = *f1valf34f6f2iter
-f1valf34f6f2 = append(f1valf34f6f2, &f1valf34f6f2elem)
+f1valf41f6f2 := []*string{}
+for _, f1valf41f6f2iter := range f1valiter.RedshiftTarget.UpsertRedshiftOptions.UpsertKeys {
+var f1valf41f6f2elem string
+f1valf41f6f2elem = *f1valf41f6f2iter
+f1valf41f6f2 = append(f1valf41f6f2, &f1valf41f6f2elem)
 }
-f1valf34f6.SetUpsertKeys(f1valf34f6f2)
+f1valf41f6.SetUpsertKeys(f1valf41f6f2)
 }
-f1valf34.SetUpsertRedshiftOptions(f1valf34f6)
+f1valf41.SetUpsertRedshiftOptions(f1valf41f6)
 }
-f1val.SetRedshiftTarget(f1valf34)
+f1val.SetRedshiftTarget(f1valf41)
 }
 if f1valiter.RelationalCatalogSource != nil {
-f1valf35 := &svcsdk.RelationalCatalogSource{}
+f1valf42 := &svcsdk.RelationalCatalogSource{}
 if f1valiter.RelationalCatalogSource.Database != nil {
-f1valf35.SetDatabase(*f1valiter.RelationalCatalogSource.Database)
+f1valf42.SetDatabase(*f1valiter.RelationalCatalogSource.Database)
 }
 if f1valiter.RelationalCatalogSource.Name != nil {
-f1valf35.SetName(*f1valiter.RelationalCatalogSource.Name)
+f1valf42.SetName(*f1valiter.RelationalCatalogSource.Name)
 }
 if f1valiter.RelationalCatalogSource.Table != nil {
-f1valf35.SetTable(*f1valiter.RelationalCatalogSource.Table)
+f1valf42.SetTable(*f1valiter.RelationalCatalogSource.Table)
 }
-f1val.SetRelationalCatalogSource(f1valf35)
+f1val.SetRelationalCatalogSource(f1valf42)
 }
 if f1valiter.RenameField != nil {
-f1valf36 := &svcsdk.RenameField{}
+f1valf43 := &svcsdk.RenameField{}
 if f1valiter.RenameField.Inputs != nil {
-f1valf36f0 := []*string{}
-for _, f1valf36f0iter := range f1valiter.RenameField.Inputs {
-var f1valf36f0elem string
-f1valf36f0elem = *f1valf36f0iter
-f1valf36f0 = append(f1valf36f0, &f1valf36f0elem)
+f1valf43f0 := []*string{}
+for _, f1valf43f0iter := range f1valiter.RenameField.Inputs {
+var f1valf43f0elem string
+f1valf43f0elem = *f1valf43f0iter
+f1valf43f0 = append(f1valf43f0, &f1valf43f0elem)
 }
-f1valf36.SetInputs(f1valf36f0)
+f1valf43.SetInputs(f1valf43f0)
 }
 if f1valiter.RenameField.Name != nil {
-f1valf36.SetName(*f1valiter.RenameField.Name)
+f1valf43.SetName(*f1valiter.RenameField.Name)
 }
 if f1valiter.RenameField.SourcePath != nil {
-f1valf36f2 := []*string{}
-for _, f1valf36f2iter := range f1valiter.RenameField.SourcePath {
-var f1valf36f2elem string
-f1valf36f2elem = *f1valf36f2iter
-f1valf36f2 = append(f1valf36f2, &f1valf36f2elem)
+f1valf43f2 := []*string{}
+for _, f1valf43f2iter := range f1valiter.RenameField.SourcePath {
+var f1valf43f2elem string
+f1valf43f2elem = *f1valf43f2iter
+f1valf43f2 = append(f1valf43f2, &f1valf43f2elem)
 }
-f1valf36.SetSourcePath(f1valf36f2)
+f1valf43.SetSourcePath(f1valf43f2)
 }
 if f1valiter.RenameField.TargetPath != nil {
-f1valf36f3 := []*string{}
-for _, f1valf36f3iter := range f1valiter.RenameField.TargetPath {
-var f1valf36f3elem string
-f1valf36f3elem = *f1valf36f3iter
-f1valf36f3 = append(f1valf36f3, &f1valf36f3elem)
+f1valf43f3 := []*string{}
+for _, f1valf43f3iter := range f1valiter.RenameField.TargetPath {
+var f1valf43f3elem string
+f1valf43f3elem = *f1valf43f3iter
+f1valf43f3 = append(f1valf43f3, &f1valf43f3elem)
+}
+f1valf43.SetTargetPath(f1valf43f3)
+}
+f1val.SetRenameField(f1valf43)
+}
+if f1valiter.S3CatalogDeltaSource != nil {
+f1valf44 := &svcsdk.S3CatalogDeltaSource{}
+if f1valiter.S3CatalogDeltaSource.AdditionalDeltaOptions != nil {
+f1valf44f0 := map[string]*string{}
+for f1valf44f0key, f1valf44f0valiter := range f1valiter.S3CatalogDeltaSource.AdditionalDeltaOptions {
+var f1valf44f0val string
+f1valf44f0val = *f1valf44f0valiter
+f1valf44f0[f1valf44f0key] = &f1valf44f0val
+}
+f1valf44.SetAdditionalDeltaOptions(f1valf44f0)
+}
+if f1valiter.S3CatalogDeltaSource.Database != nil {
+f1valf44.SetDatabase(*f1valiter.S3CatalogDeltaSource.Database)
+}
+if f1valiter.S3CatalogDeltaSource.Name != nil {
+f1valf44.SetName(*f1valiter.S3CatalogDeltaSource.Name)
+}
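The added S3CatalogDeltaSource node above (and the S3CatalogHudiSource that follows) carries an OutputSchemas field, copied with the same nested GlueSchema/GlueStudioSchemaColumn walk that recurs throughout this file. A self-contained sketch of that walk, with hypothetical mirror types standing in for the SDK structs:

package main

import "fmt"

// Hypothetical mirrors of svcsdk.GlueSchema / svcsdk.GlueStudioSchemaColumn.
type column struct{ Name, Type *string }
type schema struct{ Columns []*column }

// copySchemas mirrors the generated OutputSchemas walk: one fresh schema
// per element, one fresh column per column, nil-guarding each field.
func copySchemas(in []*schema) []*schema {
	out := []*schema{}
	for _, s := range in {
		elem := &schema{}
		if s.Columns != nil {
			for _, c := range s.Columns {
				cc := &column{}
				if c.Name != nil {
					n := *c.Name
					cc.Name = &n
				}
				if c.Type != nil {
					t := *c.Type
					cc.Type = &t
				}
				elem.Columns = append(elem.Columns, cc)
			}
		}
		out = append(out, elem)
	}
	return out
}

func main() {
	n, t := "id", "string"
	fmt.Println(len(copySchemas([]*schema{{Columns: []*column{{&n, &t}}}})))
}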
+			if f1valiter.S3CatalogDeltaSource != nil {
+				f1valf44 := &svcsdk.S3CatalogDeltaSource{}
+				if f1valiter.S3CatalogDeltaSource.AdditionalDeltaOptions != nil {
+					f1valf44f0 := map[string]*string{}
+					for f1valf44f0key, f1valf44f0valiter := range f1valiter.S3CatalogDeltaSource.AdditionalDeltaOptions {
+						var f1valf44f0val string
+						f1valf44f0val = *f1valf44f0valiter
+						f1valf44f0[f1valf44f0key] = &f1valf44f0val
+					}
+					f1valf44.SetAdditionalDeltaOptions(f1valf44f0)
+				}
+				if f1valiter.S3CatalogDeltaSource.Database != nil {
+					f1valf44.SetDatabase(*f1valiter.S3CatalogDeltaSource.Database)
+				}
+				if f1valiter.S3CatalogDeltaSource.Name != nil {
+					f1valf44.SetName(*f1valiter.S3CatalogDeltaSource.Name)
+				}
+				if f1valiter.S3CatalogDeltaSource.OutputSchemas != nil {
+					f1valf44f3 := []*svcsdk.GlueSchema{}
+					for _, f1valf44f3iter := range f1valiter.S3CatalogDeltaSource.OutputSchemas {
+						f1valf44f3elem := &svcsdk.GlueSchema{}
+						if f1valf44f3iter.Columns != nil {
+							f1valf44f3elemf0 := []*svcsdk.GlueStudioSchemaColumn{}
+							for _, f1valf44f3elemf0iter := range f1valf44f3iter.Columns {
+								f1valf44f3elemf0elem := &svcsdk.GlueStudioSchemaColumn{}
+								if f1valf44f3elemf0iter.Name != nil {
+									f1valf44f3elemf0elem.SetName(*f1valf44f3elemf0iter.Name)
+								}
+								if f1valf44f3elemf0iter.Type != nil {
+									f1valf44f3elemf0elem.SetType(*f1valf44f3elemf0iter.Type)
+								}
+								f1valf44f3elemf0 = append(f1valf44f3elemf0, f1valf44f3elemf0elem)
+							}
+							f1valf44f3elem.SetColumns(f1valf44f3elemf0)
+						}
+						f1valf44f3 = append(f1valf44f3, f1valf44f3elem)
+					}
+					f1valf44.SetOutputSchemas(f1valf44f3)
+				}
+				if f1valiter.S3CatalogDeltaSource.Table != nil {
+					f1valf44.SetTable(*f1valiter.S3CatalogDeltaSource.Table)
+				}
+				f1val.SetS3CatalogDeltaSource(f1valf44)
+			}
+			if f1valiter.S3CatalogHudiSource != nil {
+				f1valf45 := &svcsdk.S3CatalogHudiSource{}
+				if f1valiter.S3CatalogHudiSource.AdditionalHudiOptions != nil {
+					f1valf45f0 := map[string]*string{}
+					for f1valf45f0key, f1valf45f0valiter := range f1valiter.S3CatalogHudiSource.AdditionalHudiOptions {
+						var f1valf45f0val string
+						f1valf45f0val = *f1valf45f0valiter
+						f1valf45f0[f1valf45f0key] = &f1valf45f0val
+					}
+					f1valf45.SetAdditionalHudiOptions(f1valf45f0)
+				}
+				if f1valiter.S3CatalogHudiSource.Database != nil {
+					f1valf45.SetDatabase(*f1valiter.S3CatalogHudiSource.Database)
+				}
+				if f1valiter.S3CatalogHudiSource.Name != nil {
+					f1valf45.SetName(*f1valiter.S3CatalogHudiSource.Name)
+				}
+				if f1valiter.S3CatalogHudiSource.OutputSchemas != nil {
+					f1valf45f3 := []*svcsdk.GlueSchema{}
+					for _, f1valf45f3iter := range f1valiter.S3CatalogHudiSource.OutputSchemas {
+						f1valf45f3elem := &svcsdk.GlueSchema{}
+						if f1valf45f3iter.Columns != nil {
+							f1valf45f3elemf0 := []*svcsdk.GlueStudioSchemaColumn{}
+							for _, f1valf45f3elemf0iter := range f1valf45f3iter.Columns {
+								f1valf45f3elemf0elem := &svcsdk.GlueStudioSchemaColumn{}
+								if f1valf45f3elemf0iter.Name != nil {
+									f1valf45f3elemf0elem.SetName(*f1valf45f3elemf0iter.Name)
+								}
+								if f1valf45f3elemf0iter.Type != nil {
+									f1valf45f3elemf0elem.SetType(*f1valf45f3elemf0iter.Type)
+								}
+								f1valf45f3elemf0 = append(f1valf45f3elemf0, f1valf45f3elemf0elem)
+							}
+							f1valf45f3elem.SetColumns(f1valf45f3elemf0)
+						}
+						f1valf45f3 = append(f1valf45f3, f1valf45f3elem)
 					}
-					f1valf36.SetTargetPath(f1valf36f3)
+					f1valf45.SetOutputSchemas(f1valf45f3)
 				}
-				f1val.SetRenameField(f1valf36)
+				if f1valiter.S3CatalogHudiSource.Table != nil {
+					f1valf45.SetTable(*f1valiter.S3CatalogHudiSource.Table)
+				}
+				f1val.SetS3CatalogHudiSource(f1valf45)
 			}
 			if f1valiter.S3CatalogSource != nil {
-				f1valf37 := &svcsdk.S3CatalogSource{}
+				f1valf46 := &svcsdk.S3CatalogSource{}
 				if f1valiter.S3CatalogSource.AdditionalOptions != nil {
-					f1valf37f0 := &svcsdk.S3SourceAdditionalOptions{}
+					f1valf46f0 := &svcsdk.S3SourceAdditionalOptions{}
 					if f1valiter.S3CatalogSource.AdditionalOptions.BoundedFiles != nil {
-						f1valf37f0.SetBoundedFiles(*f1valiter.S3CatalogSource.AdditionalOptions.BoundedFiles)
+						f1valf46f0.SetBoundedFiles(*f1valiter.S3CatalogSource.AdditionalOptions.BoundedFiles)
 					}
 					if f1valiter.S3CatalogSource.AdditionalOptions.BoundedSize != nil {
-						f1valf37f0.SetBoundedSize(*f1valiter.S3CatalogSource.AdditionalOptions.BoundedSize)
+						f1valf46f0.SetBoundedSize(*f1valiter.S3CatalogSource.AdditionalOptions.BoundedSize)
 					}
-					f1valf37.SetAdditionalOptions(f1valf37f0)
+					f1valf46.SetAdditionalOptions(f1valf46f0)
 				}
 				if f1valiter.S3CatalogSource.Database != nil {
-					f1valf37.SetDatabase(*f1valiter.S3CatalogSource.Database)
+					f1valf46.SetDatabase(*f1valiter.S3CatalogSource.Database)
 				}
 				if f1valiter.S3CatalogSource.Name != nil {
-					f1valf37.SetName(*f1valiter.S3CatalogSource.Name)
+					f1valf46.SetName(*f1valiter.S3CatalogSource.Name)
 				}
 				if f1valiter.S3CatalogSource.PartitionPredicate != nil {
-					f1valf37.SetPartitionPredicate(*f1valiter.S3CatalogSource.PartitionPredicate)
+					f1valf46.SetPartitionPredicate(*f1valiter.S3CatalogSource.PartitionPredicate)
 				}
 				if f1valiter.S3CatalogSource.Table != nil {
-					f1valf37.SetTable(*f1valiter.S3CatalogSource.Table)
+					f1valf46.SetTable(*f1valiter.S3CatalogSource.Table)
 				}
-				f1val.SetS3CatalogSource(f1valf37)
+				f1val.SetS3CatalogSource(f1valf46)
 			}
 			if f1valiter.S3CatalogTarget != nil {
-				f1valf38 := &svcsdk.S3CatalogTarget{}
+				f1valf47 := &svcsdk.S3CatalogTarget{}
 				if f1valiter.S3CatalogTarget.Database != nil {
-					f1valf38.SetDatabase(*f1valiter.S3CatalogTarget.Database)
+					f1valf47.SetDatabase(*f1valiter.S3CatalogTarget.Database)
 				}
 				if f1valiter.S3CatalogTarget.Inputs != nil {
-					f1valf38f1 := []*string{}
-					for _, f1valf38f1iter := range f1valiter.S3CatalogTarget.Inputs {
-						var f1valf38f1elem string
-						f1valf38f1elem = *f1valf38f1iter
-						f1valf38f1 = append(f1valf38f1, &f1valf38f1elem)
+					f1valf47f1 := []*string{}
+					for _, f1valf47f1iter := range f1valiter.S3CatalogTarget.Inputs {
+						var f1valf47f1elem string
+						f1valf47f1elem = *f1valf47f1iter
+						f1valf47f1 = append(f1valf47f1, &f1valf47f1elem)
 					}
-					f1valf38.SetInputs(f1valf38f1)
+					f1valf47.SetInputs(f1valf47f1)
 				}
 				if f1valiter.S3CatalogTarget.Name != nil {
-					f1valf38.SetName(*f1valiter.S3CatalogTarget.Name)
+					f1valf47.SetName(*f1valiter.S3CatalogTarget.Name)
 				}
 				if f1valiter.S3CatalogTarget.PartitionKeys != nil {
-					f1valf38f3 := [][]*string{}
-					for _, f1valf38f3iter := range f1valiter.S3CatalogTarget.PartitionKeys {
-						f1valf38f3elem := []*string{}
-						for _, f1valf38f3elemiter := range f1valf38f3iter {
-							var f1valf38f3elemelem string
-							f1valf38f3elemelem = *f1valf38f3elemiter
-							f1valf38f3elem = append(f1valf38f3elem, &f1valf38f3elemelem)
+					f1valf47f3 := [][]*string{}
+					for _, f1valf47f3iter := range f1valiter.S3CatalogTarget.PartitionKeys {
+						f1valf47f3elem := []*string{}
+						for _, f1valf47f3elemiter := range f1valf47f3iter {
+							var f1valf47f3elemelem string
+							f1valf47f3elemelem = *f1valf47f3elemiter
+							f1valf47f3elem = append(f1valf47f3elem, &f1valf47f3elemelem)
 						}
-						f1valf38f3 = append(f1valf38f3, f1valf38f3elem)
+						f1valf47f3 = append(f1valf47f3, f1valf47f3elem)
 					}
-					f1valf38.SetPartitionKeys(f1valf38f3)
+					f1valf47.SetPartitionKeys(f1valf47f3)
 				}
 				if f1valiter.S3CatalogTarget.SchemaChangePolicy != nil {
-					f1valf38f4 := &svcsdk.CatalogSchemaChangePolicy{}
+					f1valf47f4 := &svcsdk.CatalogSchemaChangePolicy{}
 					if f1valiter.S3CatalogTarget.SchemaChangePolicy.EnableUpdateCatalog != nil {
-						f1valf38f4.SetEnableUpdateCatalog(*f1valiter.S3CatalogTarget.SchemaChangePolicy.EnableUpdateCatalog)
+						f1valf47f4.SetEnableUpdateCatalog(*f1valiter.S3CatalogTarget.SchemaChangePolicy.EnableUpdateCatalog)
 					}
 					if f1valiter.S3CatalogTarget.SchemaChangePolicy.UpdateBehavior != nil {
-						f1valf38f4.SetUpdateBehavior(*f1valiter.S3CatalogTarget.SchemaChangePolicy.UpdateBehavior)
+						f1valf47f4.SetUpdateBehavior(*f1valiter.S3CatalogTarget.SchemaChangePolicy.UpdateBehavior)
 					}
-					f1valf38.SetSchemaChangePolicy(f1valf38f4)
+					f1valf47.SetSchemaChangePolicy(f1valf47f4)
 				}
 				if f1valiter.S3CatalogTarget.Table != nil {
-					f1valf38.SetTable(*f1valiter.S3CatalogTarget.Table)
+					f1valf47.SetTable(*f1valiter.S3CatalogTarget.Table)
 				}
-				f1val.SetS3CatalogTarget(f1valf38)
+				f1val.SetS3CatalogTarget(f1valf47)
 			}
 			if f1valiter.S3CsvSource != nil {
-				f1valf39 := &svcsdk.S3CsvSource{}
+				f1valf48 := &svcsdk.S3CsvSource{}
 				if f1valiter.S3CsvSource.AdditionalOptions != nil {
-					f1valf39f0 := &svcsdk.S3DirectSourceAdditionalOptions{}
+					f1valf48f0 := &svcsdk.S3DirectSourceAdditionalOptions{}
 					if f1valiter.S3CsvSource.AdditionalOptions.BoundedFiles != nil {
-						f1valf39f0.SetBoundedFiles(*f1valiter.S3CsvSource.AdditionalOptions.BoundedFiles)
+						f1valf48f0.SetBoundedFiles(*f1valiter.S3CsvSource.AdditionalOptions.BoundedFiles)
 					}
 					if f1valiter.S3CsvSource.AdditionalOptions.BoundedSize != nil {
-						f1valf39f0.SetBoundedSize(*f1valiter.S3CsvSource.AdditionalOptions.BoundedSize)
+						f1valf48f0.SetBoundedSize(*f1valiter.S3CsvSource.AdditionalOptions.BoundedSize)
 					}
 					if f1valiter.S3CsvSource.AdditionalOptions.EnableSamplePath != nil {
-						f1valf39f0.SetEnableSamplePath(*f1valiter.S3CsvSource.AdditionalOptions.EnableSamplePath)
+						f1valf48f0.SetEnableSamplePath(*f1valiter.S3CsvSource.AdditionalOptions.EnableSamplePath)
 					}
 					if f1valiter.S3CsvSource.AdditionalOptions.SamplePath != nil {
-						f1valf39f0.SetSamplePath(*f1valiter.S3CsvSource.AdditionalOptions.SamplePath)
+						f1valf48f0.SetSamplePath(*f1valiter.S3CsvSource.AdditionalOptions.SamplePath)
 					}
-					f1valf39.SetAdditionalOptions(f1valf39f0)
+					f1valf48.SetAdditionalOptions(f1valf48f0)
 				}
 				if f1valiter.S3CsvSource.CompressionType != nil {
-					f1valf39.SetCompressionType(*f1valiter.S3CsvSource.CompressionType)
+					f1valf48.SetCompressionType(*f1valiter.S3CsvSource.CompressionType)
 				}
 				if f1valiter.S3CsvSource.Escaper != nil {
-					f1valf39.SetEscaper(*f1valiter.S3CsvSource.Escaper)
+					f1valf48.SetEscaper(*f1valiter.S3CsvSource.Escaper)
 				}
 				if f1valiter.S3CsvSource.Exclusions != nil {
-					f1valf39f3 := []*string{}
-					for _, f1valf39f3iter := range f1valiter.S3CsvSource.Exclusions {
-						var f1valf39f3elem string
-						f1valf39f3elem = *f1valf39f3iter
-						f1valf39f3 = append(f1valf39f3, &f1valf39f3elem)
+					f1valf48f3 := []*string{}
+					for _, f1valf48f3iter := range f1valiter.S3CsvSource.Exclusions {
+						var f1valf48f3elem string
+						f1valf48f3elem = *f1valf48f3iter
+						f1valf48f3 = append(f1valf48f3, &f1valf48f3elem)
 					}
-					f1valf39.SetExclusions(f1valf39f3)
+					f1valf48.SetExclusions(f1valf48f3)
 				}
 				if f1valiter.S3CsvSource.GroupFiles != nil {
-					f1valf39.SetGroupFiles(*f1valiter.S3CsvSource.GroupFiles)
+					f1valf48.SetGroupFiles(*f1valiter.S3CsvSource.GroupFiles)
 				}
 				if f1valiter.S3CsvSource.GroupSize != nil {
-					f1valf39.SetGroupSize(*f1valiter.S3CsvSource.GroupSize)
+					f1valf48.SetGroupSize(*f1valiter.S3CsvSource.GroupSize)
 				}
 				if f1valiter.S3CsvSource.MaxBand != nil {
-					f1valf39.SetMaxBand(*f1valiter.S3CsvSource.MaxBand)
+					f1valf48.SetMaxBand(*f1valiter.S3CsvSource.MaxBand)
 				}
 				if f1valiter.S3CsvSource.MaxFilesInBand != nil {
-					f1valf39.SetMaxFilesInBand(*f1valiter.S3CsvSource.MaxFilesInBand)
+					f1valf48.SetMaxFilesInBand(*f1valiter.S3CsvSource.MaxFilesInBand)
 				}
 				if f1valiter.S3CsvSource.Multiline != nil {
-					f1valf39.SetMultiline(*f1valiter.S3CsvSource.Multiline)
+					f1valf48.SetMultiline(*f1valiter.S3CsvSource.Multiline)
 				}
 				if f1valiter.S3CsvSource.Name != nil {
-					f1valf39.SetName(*f1valiter.S3CsvSource.Name)
+					f1valf48.SetName(*f1valiter.S3CsvSource.Name)
 				}
 				if f1valiter.S3CsvSource.OptimizePerformance != nil {
-					f1valf39.SetOptimizePerformance(*f1valiter.S3CsvSource.OptimizePerformance)
+					f1valf48.SetOptimizePerformance(*f1valiter.S3CsvSource.OptimizePerformance)
 				}
 				if f1valiter.S3CsvSource.OutputSchemas != nil {
-					f1valf39f11 := []*svcsdk.GlueSchema{}
-					for _, f1valf39f11iter := range f1valiter.S3CsvSource.OutputSchemas {
-						f1valf39f11elem := &svcsdk.GlueSchema{}
-						if f1valf39f11iter.Columns != nil {
-							f1valf39f11elemf0 := []*svcsdk.GlueStudioSchemaColumn{}
-							for _, f1valf39f11elemf0iter := range f1valf39f11iter.Columns {
-								f1valf39f11elemf0elem := &svcsdk.GlueStudioSchemaColumn{}
-								if f1valf39f11elemf0iter.Name != nil {
-									f1valf39f11elemf0elem.SetName(*f1valf39f11elemf0iter.Name)
+					f1valf48f11 := []*svcsdk.GlueSchema{}
+					for _, f1valf48f11iter := range f1valiter.S3CsvSource.OutputSchemas {
+						f1valf48f11elem := &svcsdk.GlueSchema{}
+						if f1valf48f11iter.Columns != nil {
+							f1valf48f11elemf0 := []*svcsdk.GlueStudioSchemaColumn{}
+							for _, f1valf48f11elemf0iter := range f1valf48f11iter.Columns {
+								f1valf48f11elemf0elem := &svcsdk.GlueStudioSchemaColumn{}
+								if f1valf48f11elemf0iter.Name != nil {
+									f1valf48f11elemf0elem.SetName(*f1valf48f11elemf0iter.Name)
 								}
-								if f1valf39f11elemf0iter.Type != nil {
-									f1valf39f11elemf0elem.SetType(*f1valf39f11elemf0iter.Type)
+								if f1valf48f11elemf0iter.Type != nil {
+									f1valf48f11elemf0elem.SetType(*f1valf48f11elemf0iter.Type)
 								}
-								f1valf39f11elemf0 = append(f1valf39f11elemf0, f1valf39f11elemf0elem)
+								f1valf48f11elemf0 = append(f1valf48f11elemf0, f1valf48f11elemf0elem)
 							}
-							f1valf39f11elem.SetColumns(f1valf39f11elemf0)
+							f1valf48f11elem.SetColumns(f1valf48f11elemf0)
 						}
-						f1valf39f11 = append(f1valf39f11, f1valf39f11elem)
+						f1valf48f11 = append(f1valf48f11, f1valf48f11elem)
 					}
-					f1valf39.SetOutputSchemas(f1valf39f11)
+					f1valf48.SetOutputSchemas(f1valf48f11)
 				}
 				if f1valiter.S3CsvSource.Paths != nil {
-					f1valf39f12 := []*string{}
-					for _, f1valf39f12iter := range f1valiter.S3CsvSource.Paths {
-						var f1valf39f12elem string
-						f1valf39f12elem = *f1valf39f12iter
-						f1valf39f12 = append(f1valf39f12, &f1valf39f12elem)
+					f1valf48f12 := []*string{}
+					for _, f1valf48f12iter := range f1valiter.S3CsvSource.Paths {
+						var f1valf48f12elem string
+						f1valf48f12elem = *f1valf48f12iter
+						f1valf48f12 = append(f1valf48f12, &f1valf48f12elem)
 					}
-					f1valf39.SetPaths(f1valf39f12)
+					f1valf48.SetPaths(f1valf48f12)
 				}
 				if f1valiter.S3CsvSource.QuoteChar != nil {
-					f1valf39.SetQuoteChar(*f1valiter.S3CsvSource.QuoteChar)
+					f1valf48.SetQuoteChar(*f1valiter.S3CsvSource.QuoteChar)
 				}
 				if f1valiter.S3CsvSource.Recurse != nil {
-					f1valf39.SetRecurse(*f1valiter.S3CsvSource.Recurse)
+					f1valf48.SetRecurse(*f1valiter.S3CsvSource.Recurse)
 				}
 				if f1valiter.S3CsvSource.Separator != nil {
-					f1valf39.SetSeparator(*f1valiter.S3CsvSource.Separator)
+					f1valf48.SetSeparator(*f1valiter.S3CsvSource.Separator)
 				}
 				if f1valiter.S3CsvSource.SkipFirst != nil {
-					f1valf39.SetSkipFirst(*f1valiter.S3CsvSource.SkipFirst)
+					f1valf48.SetSkipFirst(*f1valiter.S3CsvSource.SkipFirst)
 				}
 				if f1valiter.S3CsvSource.WithHeader != nil {
-					f1valf39.SetWithHeader(*f1valiter.S3CsvSource.WithHeader)
+					f1valf48.SetWithHeader(*f1valiter.S3CsvSource.WithHeader)
 				}
 				if f1valiter.S3CsvSource.WriteHeader != nil {
-					f1valf39.SetWriteHeader(*f1valiter.S3CsvSource.WriteHeader)
+					f1valf48.SetWriteHeader(*f1valiter.S3CsvSource.WriteHeader)
+				}
+				f1val.SetS3CsvSource(f1valf48)
+			}
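+			// Delta Lake targets and the direct S3 Delta source (S3DeltaCatalogTarget, S3DeltaDirectTarget, S3DeltaSource) follow the same nil-checked copy pattern.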
+			if f1valiter.S3DeltaCatalogTarget != nil {
+				f1valf49 := &svcsdk.S3DeltaCatalogTarget{}
+				if f1valiter.S3DeltaCatalogTarget.AdditionalOptions != nil {
+					f1valf49f0 := map[string]*string{}
+					for f1valf49f0key, f1valf49f0valiter := range f1valiter.S3DeltaCatalogTarget.AdditionalOptions {
+						var f1valf49f0val string
+						f1valf49f0val = *f1valf49f0valiter
+						f1valf49f0[f1valf49f0key] = &f1valf49f0val
+					}
+					f1valf49.SetAdditionalOptions(f1valf49f0)
+				}
+				if f1valiter.S3DeltaCatalogTarget.Database != nil {
+					f1valf49.SetDatabase(*f1valiter.S3DeltaCatalogTarget.Database)
+				}
+				if f1valiter.S3DeltaCatalogTarget.Inputs != nil {
+					f1valf49f2 := []*string{}
+					for _, f1valf49f2iter := range f1valiter.S3DeltaCatalogTarget.Inputs {
+						var f1valf49f2elem string
+						f1valf49f2elem = *f1valf49f2iter
+						f1valf49f2 = append(f1valf49f2, &f1valf49f2elem)
+					}
+					f1valf49.SetInputs(f1valf49f2)
+				}
+				if f1valiter.S3DeltaCatalogTarget.Name != nil {
+					f1valf49.SetName(*f1valiter.S3DeltaCatalogTarget.Name)
+				}
+				if f1valiter.S3DeltaCatalogTarget.PartitionKeys != nil {
+					f1valf49f4 := [][]*string{}
+					for _, f1valf49f4iter := range f1valiter.S3DeltaCatalogTarget.PartitionKeys {
+						f1valf49f4elem := []*string{}
+						for _, f1valf49f4elemiter := range f1valf49f4iter {
+							var f1valf49f4elemelem string
+							f1valf49f4elemelem = *f1valf49f4elemiter
+							f1valf49f4elem = append(f1valf49f4elem, &f1valf49f4elemelem)
+						}
+						f1valf49f4 = append(f1valf49f4, f1valf49f4elem)
+					}
+					f1valf49.SetPartitionKeys(f1valf49f4)
+				}
+				if f1valiter.S3DeltaCatalogTarget.SchemaChangePolicy != nil {
+					f1valf49f5 := &svcsdk.CatalogSchemaChangePolicy{}
+					if f1valiter.S3DeltaCatalogTarget.SchemaChangePolicy.EnableUpdateCatalog != nil {
+						f1valf49f5.SetEnableUpdateCatalog(*f1valiter.S3DeltaCatalogTarget.SchemaChangePolicy.EnableUpdateCatalog)
+					}
+					if f1valiter.S3DeltaCatalogTarget.SchemaChangePolicy.UpdateBehavior != nil {
+						f1valf49f5.SetUpdateBehavior(*f1valiter.S3DeltaCatalogTarget.SchemaChangePolicy.UpdateBehavior)
+					}
+					f1valf49.SetSchemaChangePolicy(f1valf49f5)
+				}
+				if f1valiter.S3DeltaCatalogTarget.Table != nil {
+					f1valf49.SetTable(*f1valiter.S3DeltaCatalogTarget.Table)
+				}
+				f1val.SetS3DeltaCatalogTarget(f1valf49)
+			}
+			if f1valiter.S3DeltaDirectTarget != nil {
+				f1valf50 := &svcsdk.S3DeltaDirectTarget{}
+				if f1valiter.S3DeltaDirectTarget.AdditionalOptions != nil {
+					f1valf50f0 := map[string]*string{}
+					for f1valf50f0key, f1valf50f0valiter := range f1valiter.S3DeltaDirectTarget.AdditionalOptions {
+						var f1valf50f0val string
+						f1valf50f0val = *f1valf50f0valiter
+						f1valf50f0[f1valf50f0key] = &f1valf50f0val
+					}
+					f1valf50.SetAdditionalOptions(f1valf50f0)
+				}
+				if f1valiter.S3DeltaDirectTarget.Compression != nil {
+					f1valf50.SetCompression(*f1valiter.S3DeltaDirectTarget.Compression)
+				}
+				if f1valiter.S3DeltaDirectTarget.Format != nil {
+					f1valf50.SetFormat(*f1valiter.S3DeltaDirectTarget.Format)
+				}
+				if f1valiter.S3DeltaDirectTarget.Inputs != nil {
+					f1valf50f3 := []*string{}
+					for _, f1valf50f3iter := range f1valiter.S3DeltaDirectTarget.Inputs {
+						var f1valf50f3elem string
+						f1valf50f3elem = *f1valf50f3iter
+						f1valf50f3 = append(f1valf50f3, &f1valf50f3elem)
+					}
+					f1valf50.SetInputs(f1valf50f3)
+				}
+				if f1valiter.S3DeltaDirectTarget.Name != nil {
+					f1valf50.SetName(*f1valiter.S3DeltaDirectTarget.Name)
+				}
+				if f1valiter.S3DeltaDirectTarget.PartitionKeys != nil {
+					f1valf50f5 := [][]*string{}
+					for _, f1valf50f5iter := range f1valiter.S3DeltaDirectTarget.PartitionKeys {
+						f1valf50f5elem := []*string{}
+						for _, f1valf50f5elemiter := range f1valf50f5iter {
+							var f1valf50f5elemelem string
+							f1valf50f5elemelem = *f1valf50f5elemiter
+							f1valf50f5elem = append(f1valf50f5elem, &f1valf50f5elemelem)
+						}
+						f1valf50f5 = append(f1valf50f5, f1valf50f5elem)
+					}
+					f1valf50.SetPartitionKeys(f1valf50f5)
+				}
+				if f1valiter.S3DeltaDirectTarget.Path != nil {
+					f1valf50.SetPath(*f1valiter.S3DeltaDirectTarget.Path)
+				}
+				if f1valiter.S3DeltaDirectTarget.SchemaChangePolicy != nil {
+					f1valf50f7 := &svcsdk.DirectSchemaChangePolicy{}
+					if f1valiter.S3DeltaDirectTarget.SchemaChangePolicy.Database != nil {
+						f1valf50f7.SetDatabase(*f1valiter.S3DeltaDirectTarget.SchemaChangePolicy.Database)
+					}
+					if f1valiter.S3DeltaDirectTarget.SchemaChangePolicy.EnableUpdateCatalog != nil {
+						f1valf50f7.SetEnableUpdateCatalog(*f1valiter.S3DeltaDirectTarget.SchemaChangePolicy.EnableUpdateCatalog)
+					}
+					if f1valiter.S3DeltaDirectTarget.SchemaChangePolicy.Table != nil {
+						f1valf50f7.SetTable(*f1valiter.S3DeltaDirectTarget.SchemaChangePolicy.Table)
+					}
+					if f1valiter.S3DeltaDirectTarget.SchemaChangePolicy.UpdateBehavior != nil {
+						f1valf50f7.SetUpdateBehavior(*f1valiter.S3DeltaDirectTarget.SchemaChangePolicy.UpdateBehavior)
+					}
+					f1valf50.SetSchemaChangePolicy(f1valf50f7)
+				}
+				f1val.SetS3DeltaDirectTarget(f1valf50)
+			}
+			if f1valiter.S3DeltaSource != nil {
+				f1valf51 := &svcsdk.S3DeltaSource{}
+				if f1valiter.S3DeltaSource.AdditionalDeltaOptions != nil {
+					f1valf51f0 := map[string]*string{}
+					for f1valf51f0key, f1valf51f0valiter := range f1valiter.S3DeltaSource.AdditionalDeltaOptions {
+						var f1valf51f0val string
+						f1valf51f0val = *f1valf51f0valiter
+						f1valf51f0[f1valf51f0key] = &f1valf51f0val
+					}
+					f1valf51.SetAdditionalDeltaOptions(f1valf51f0)
+				}
+				if f1valiter.S3DeltaSource.AdditionalOptions != nil {
+					f1valf51f1 := &svcsdk.S3DirectSourceAdditionalOptions{}
+					if f1valiter.S3DeltaSource.AdditionalOptions.BoundedFiles != nil {
+						f1valf51f1.SetBoundedFiles(*f1valiter.S3DeltaSource.AdditionalOptions.BoundedFiles)
+					}
+					if f1valiter.S3DeltaSource.AdditionalOptions.BoundedSize != nil {
+						f1valf51f1.SetBoundedSize(*f1valiter.S3DeltaSource.AdditionalOptions.BoundedSize)
+					}
+					if f1valiter.S3DeltaSource.AdditionalOptions.EnableSamplePath != nil {
+						f1valf51f1.SetEnableSamplePath(*f1valiter.S3DeltaSource.AdditionalOptions.EnableSamplePath)
+					}
+					if f1valiter.S3DeltaSource.AdditionalOptions.SamplePath != nil {
+						f1valf51f1.SetSamplePath(*f1valiter.S3DeltaSource.AdditionalOptions.SamplePath)
+					}
+					f1valf51.SetAdditionalOptions(f1valf51f1)
+				}
+				if f1valiter.S3DeltaSource.Name != nil {
+					f1valf51.SetName(*f1valiter.S3DeltaSource.Name)
+				}
+				if f1valiter.S3DeltaSource.OutputSchemas != nil {
+					f1valf51f3 := []*svcsdk.GlueSchema{}
+					for _, f1valf51f3iter := range f1valiter.S3DeltaSource.OutputSchemas {
+						f1valf51f3elem := &svcsdk.GlueSchema{}
+						if f1valf51f3iter.Columns != nil {
+							f1valf51f3elemf0 := []*svcsdk.GlueStudioSchemaColumn{}
+							for _, f1valf51f3elemf0iter := range f1valf51f3iter.Columns {
+								f1valf51f3elemf0elem := &svcsdk.GlueStudioSchemaColumn{}
+								if f1valf51f3elemf0iter.Name != nil {
+									f1valf51f3elemf0elem.SetName(*f1valf51f3elemf0iter.Name)
+								}
+								if f1valf51f3elemf0iter.Type != nil {
+									f1valf51f3elemf0elem.SetType(*f1valf51f3elemf0iter.Type)
+								}
+								f1valf51f3elemf0 = append(f1valf51f3elemf0, f1valf51f3elemf0elem)
+							}
+							f1valf51f3elem.SetColumns(f1valf51f3elemf0)
+						}
+						f1valf51f3 = append(f1valf51f3, f1valf51f3elem)
+					}
+					f1valf51.SetOutputSchemas(f1valf51f3)
 				}
-				f1val.SetS3CsvSource(f1valf39)
+				if f1valiter.S3DeltaSource.Paths != nil {
+					f1valf51f4 := []*string{}
+					for _, f1valf51f4iter := range f1valiter.S3DeltaSource.Paths {
+						var f1valf51f4elem string
+						f1valf51f4elem = *f1valf51f4iter
+						f1valf51f4 = append(f1valf51f4, &f1valf51f4elem)
+					}
+					f1valf51.SetPaths(f1valf51f4)
+				}
+				f1val.SetS3DeltaSource(f1valf51)
 			}
 			if f1valiter.S3DirectTarget != nil {
-				f1valf40 := &svcsdk.S3DirectTarget{}
+				f1valf52 := &svcsdk.S3DirectTarget{}
 				if f1valiter.S3DirectTarget.Compression != nil {
-					f1valf40.SetCompression(*f1valiter.S3DirectTarget.Compression)
+					f1valf52.SetCompression(*f1valiter.S3DirectTarget.Compression)
 				}
 				if f1valiter.S3DirectTarget.Format != nil {
-					f1valf40.SetFormat(*f1valiter.S3DirectTarget.Format)
+					f1valf52.SetFormat(*f1valiter.S3DirectTarget.Format)
 				}
 				if f1valiter.S3DirectTarget.Inputs != nil {
-					f1valf40f2 := []*string{}
-					for _, f1valf40f2iter := range f1valiter.S3DirectTarget.Inputs {
-						var f1valf40f2elem string
-						f1valf40f2elem = *f1valf40f2iter
-						f1valf40f2 = append(f1valf40f2, &f1valf40f2elem)
+					f1valf52f2 := []*string{}
+					for _, f1valf52f2iter := range f1valiter.S3DirectTarget.Inputs {
+						var f1valf52f2elem string
+						f1valf52f2elem = *f1valf52f2iter
+						f1valf52f2 = append(f1valf52f2, &f1valf52f2elem)
 					}
-					f1valf40.SetInputs(f1valf40f2)
+					f1valf52.SetInputs(f1valf52f2)
 				}
 				if f1valiter.S3DirectTarget.Name != nil {
-					f1valf40.SetName(*f1valiter.S3DirectTarget.Name)
+					f1valf52.SetName(*f1valiter.S3DirectTarget.Name)
 				}
 				if f1valiter.S3DirectTarget.PartitionKeys != nil {
-					f1valf40f4 := [][]*string{}
-					for _, f1valf40f4iter := range f1valiter.S3DirectTarget.PartitionKeys {
-						f1valf40f4elem := []*string{}
-						for _, f1valf40f4elemiter := range f1valf40f4iter {
-							var f1valf40f4elemelem string
-							f1valf40f4elemelem = *f1valf40f4elemiter
-							f1valf40f4elem = append(f1valf40f4elem, &f1valf40f4elemelem)
+					f1valf52f4 := [][]*string{}
+					for _, f1valf52f4iter := range f1valiter.S3DirectTarget.PartitionKeys {
+						f1valf52f4elem := []*string{}
+						for _, f1valf52f4elemiter := range f1valf52f4iter {
+							var f1valf52f4elemelem string
+							f1valf52f4elemelem = *f1valf52f4elemiter
+							f1valf52f4elem = append(f1valf52f4elem, &f1valf52f4elemelem)
 						}
-						f1valf40f4 = append(f1valf40f4, f1valf40f4elem)
+						f1valf52f4 = append(f1valf52f4, f1valf52f4elem)
 					}
-					f1valf40.SetPartitionKeys(f1valf40f4)
+					f1valf52.SetPartitionKeys(f1valf52f4)
 				}
 				if f1valiter.S3DirectTarget.Path != nil {
-					f1valf40.SetPath(*f1valiter.S3DirectTarget.Path)
+					f1valf52.SetPath(*f1valiter.S3DirectTarget.Path)
 				}
 				if f1valiter.S3DirectTarget.SchemaChangePolicy != nil {
-					f1valf40f6 := &svcsdk.DirectSchemaChangePolicy{}
+					f1valf52f6 := &svcsdk.DirectSchemaChangePolicy{}
 					if f1valiter.S3DirectTarget.SchemaChangePolicy.Database != nil {
-						f1valf40f6.SetDatabase(*f1valiter.S3DirectTarget.SchemaChangePolicy.Database)
+						f1valf52f6.SetDatabase(*f1valiter.S3DirectTarget.SchemaChangePolicy.Database)
 					}
 					if f1valiter.S3DirectTarget.SchemaChangePolicy.EnableUpdateCatalog != nil {
-						f1valf40f6.SetEnableUpdateCatalog(*f1valiter.S3DirectTarget.SchemaChangePolicy.EnableUpdateCatalog)
+						f1valf52f6.SetEnableUpdateCatalog(*f1valiter.S3DirectTarget.SchemaChangePolicy.EnableUpdateCatalog)
 					}
 					if f1valiter.S3DirectTarget.SchemaChangePolicy.Table != nil {
-						f1valf40f6.SetTable(*f1valiter.S3DirectTarget.SchemaChangePolicy.Table)
+						f1valf52f6.SetTable(*f1valiter.S3DirectTarget.SchemaChangePolicy.Table)
 					}
 					if f1valiter.S3DirectTarget.SchemaChangePolicy.UpdateBehavior != nil {
-						f1valf40f6.SetUpdateBehavior(*f1valiter.S3DirectTarget.SchemaChangePolicy.UpdateBehavior)
+						f1valf52f6.SetUpdateBehavior(*f1valiter.S3DirectTarget.SchemaChangePolicy.UpdateBehavior)
 					}
-					f1valf40.SetSchemaChangePolicy(f1valf40f6)
+					f1valf52.SetSchemaChangePolicy(f1valf52f6)
 				}
-				f1val.SetS3DirectTarget(f1valf40)
+				f1val.SetS3DirectTarget(f1valf52)
 			}
 			if f1valiter.S3GlueParquetTarget != nil {
-				f1valf41 := &svcsdk.S3GlueParquetTarget{}
+				f1valf53 := &svcsdk.S3GlueParquetTarget{}
 				if f1valiter.S3GlueParquetTarget.Compression != nil {
-					f1valf41.SetCompression(*f1valiter.S3GlueParquetTarget.Compression)
+					f1valf53.SetCompression(*f1valiter.S3GlueParquetTarget.Compression)
 				}
 				if f1valiter.S3GlueParquetTarget.Inputs != nil {
-					f1valf41f1 := []*string{}
-					for _, f1valf41f1iter := range f1valiter.S3GlueParquetTarget.Inputs {
-						var f1valf41f1elem string
-						f1valf41f1elem = *f1valf41f1iter
-						f1valf41f1 = append(f1valf41f1, &f1valf41f1elem)
+					f1valf53f1 := []*string{}
+					for _, f1valf53f1iter := range f1valiter.S3GlueParquetTarget.Inputs {
+						var f1valf53f1elem string
+						f1valf53f1elem = *f1valf53f1iter
+						f1valf53f1 = append(f1valf53f1, &f1valf53f1elem)
 					}
-					f1valf41.SetInputs(f1valf41f1)
+					f1valf53.SetInputs(f1valf53f1)
 				}
 				if f1valiter.S3GlueParquetTarget.Name != nil {
-					f1valf41.SetName(*f1valiter.S3GlueParquetTarget.Name)
+					f1valf53.SetName(*f1valiter.S3GlueParquetTarget.Name)
 				}
 				if f1valiter.S3GlueParquetTarget.PartitionKeys != nil {
-					f1valf41f3 := [][]*string{}
-					for _, f1valf41f3iter := range f1valiter.S3GlueParquetTarget.PartitionKeys {
-						f1valf41f3elem := []*string{}
-						for _, f1valf41f3elemiter := range f1valf41f3iter {
-							var f1valf41f3elemelem string
-							f1valf41f3elemelem = *f1valf41f3elemiter
-							f1valf41f3elem = append(f1valf41f3elem, &f1valf41f3elemelem)
+					f1valf53f3 := [][]*string{}
+					for _, f1valf53f3iter := range f1valiter.S3GlueParquetTarget.PartitionKeys {
+						f1valf53f3elem := []*string{}
+						for _, f1valf53f3elemiter := range f1valf53f3iter {
+							var f1valf53f3elemelem string
+							f1valf53f3elemelem = *f1valf53f3elemiter
+							f1valf53f3elem = append(f1valf53f3elem, &f1valf53f3elemelem)
 						}
-						f1valf41f3 = append(f1valf41f3, f1valf41f3elem)
+						f1valf53f3 = append(f1valf53f3, f1valf53f3elem)
 					}
-					f1valf41.SetPartitionKeys(f1valf41f3)
+					f1valf53.SetPartitionKeys(f1valf53f3)
 				}
 				if f1valiter.S3GlueParquetTarget.Path != nil {
-					f1valf41.SetPath(*f1valiter.S3GlueParquetTarget.Path)
+					f1valf53.SetPath(*f1valiter.S3GlueParquetTarget.Path)
 				}
 				if f1valiter.S3GlueParquetTarget.SchemaChangePolicy != nil {
-					f1valf41f5 := &svcsdk.DirectSchemaChangePolicy{}
+					f1valf53f5 := &svcsdk.DirectSchemaChangePolicy{}
 					if f1valiter.S3GlueParquetTarget.SchemaChangePolicy.Database != nil {
-						f1valf41f5.SetDatabase(*f1valiter.S3GlueParquetTarget.SchemaChangePolicy.Database)
+						f1valf53f5.SetDatabase(*f1valiter.S3GlueParquetTarget.SchemaChangePolicy.Database)
 					}
 					if f1valiter.S3GlueParquetTarget.SchemaChangePolicy.EnableUpdateCatalog != nil {
-						f1valf41f5.SetEnableUpdateCatalog(*f1valiter.S3GlueParquetTarget.SchemaChangePolicy.EnableUpdateCatalog)
+						f1valf53f5.SetEnableUpdateCatalog(*f1valiter.S3GlueParquetTarget.SchemaChangePolicy.EnableUpdateCatalog)
 					}
 					if f1valiter.S3GlueParquetTarget.SchemaChangePolicy.Table != nil {
-						f1valf41f5.SetTable(*f1valiter.S3GlueParquetTarget.SchemaChangePolicy.Table)
+						f1valf53f5.SetTable(*f1valiter.S3GlueParquetTarget.SchemaChangePolicy.Table)
 					}
 					if f1valiter.S3GlueParquetTarget.SchemaChangePolicy.UpdateBehavior != nil {
-						f1valf41f5.SetUpdateBehavior(*f1valiter.S3GlueParquetTarget.SchemaChangePolicy.UpdateBehavior)
+						f1valf53f5.SetUpdateBehavior(*f1valiter.S3GlueParquetTarget.SchemaChangePolicy.UpdateBehavior)
+					}
+					f1valf53.SetSchemaChangePolicy(f1valf53f5)
+				}
+				f1val.SetS3GlueParquetTarget(f1valf53)
+			}
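+			// Hudi counterparts of the Delta nodes above (S3HudiCatalogTarget, S3HudiDirectTarget, S3HudiSource).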
+			if f1valiter.S3HudiCatalogTarget != nil {
+				f1valf54 := &svcsdk.S3HudiCatalogTarget{}
+				if f1valiter.S3HudiCatalogTarget.AdditionalOptions != nil {
+					f1valf54f0 := map[string]*string{}
+					for f1valf54f0key, f1valf54f0valiter := range f1valiter.S3HudiCatalogTarget.AdditionalOptions {
+						var f1valf54f0val string
+						f1valf54f0val = *f1valf54f0valiter
+						f1valf54f0[f1valf54f0key] = &f1valf54f0val
+					}
+					f1valf54.SetAdditionalOptions(f1valf54f0)
+				}
+				if f1valiter.S3HudiCatalogTarget.Database != nil {
+					f1valf54.SetDatabase(*f1valiter.S3HudiCatalogTarget.Database)
+				}
+				if f1valiter.S3HudiCatalogTarget.Inputs != nil {
+					f1valf54f2 := []*string{}
+					for _, f1valf54f2iter := range f1valiter.S3HudiCatalogTarget.Inputs {
+						var f1valf54f2elem string
+						f1valf54f2elem = *f1valf54f2iter
+						f1valf54f2 = append(f1valf54f2, &f1valf54f2elem)
+					}
+					f1valf54.SetInputs(f1valf54f2)
+				}
+				if f1valiter.S3HudiCatalogTarget.Name != nil {
+					f1valf54.SetName(*f1valiter.S3HudiCatalogTarget.Name)
+				}
+				if f1valiter.S3HudiCatalogTarget.PartitionKeys != nil {
+					f1valf54f4 := [][]*string{}
+					for _, f1valf54f4iter := range f1valiter.S3HudiCatalogTarget.PartitionKeys {
+						f1valf54f4elem := []*string{}
+						for _, f1valf54f4elemiter := range f1valf54f4iter {
+							var f1valf54f4elemelem string
+							f1valf54f4elemelem = *f1valf54f4elemiter
+							f1valf54f4elem = append(f1valf54f4elem, &f1valf54f4elemelem)
+						}
+						f1valf54f4 = append(f1valf54f4, f1valf54f4elem)
+					}
+					f1valf54.SetPartitionKeys(f1valf54f4)
+				}
+				if f1valiter.S3HudiCatalogTarget.SchemaChangePolicy != nil {
+					f1valf54f5 := &svcsdk.CatalogSchemaChangePolicy{}
+					if f1valiter.S3HudiCatalogTarget.SchemaChangePolicy.EnableUpdateCatalog != nil {
+						f1valf54f5.SetEnableUpdateCatalog(*f1valiter.S3HudiCatalogTarget.SchemaChangePolicy.EnableUpdateCatalog)
+					}
+					if f1valiter.S3HudiCatalogTarget.SchemaChangePolicy.UpdateBehavior != nil {
+						f1valf54f5.SetUpdateBehavior(*f1valiter.S3HudiCatalogTarget.SchemaChangePolicy.UpdateBehavior)
+					}
+					f1valf54.SetSchemaChangePolicy(f1valf54f5)
+				}
+				if f1valiter.S3HudiCatalogTarget.Table != nil {
+					f1valf54.SetTable(*f1valiter.S3HudiCatalogTarget.Table)
+				}
+				f1val.SetS3HudiCatalogTarget(f1valf54)
+			}
+			if f1valiter.S3HudiDirectTarget != nil {
+				f1valf55 := &svcsdk.S3HudiDirectTarget{}
+				if f1valiter.S3HudiDirectTarget.AdditionalOptions != nil {
+					f1valf55f0 := map[string]*string{}
+					for f1valf55f0key, f1valf55f0valiter := range f1valiter.S3HudiDirectTarget.AdditionalOptions {
+						var f1valf55f0val string
+						f1valf55f0val = *f1valf55f0valiter
+						f1valf55f0[f1valf55f0key] = &f1valf55f0val
+					}
+					f1valf55.SetAdditionalOptions(f1valf55f0)
+				}
+				if f1valiter.S3HudiDirectTarget.Compression != nil {
+					f1valf55.SetCompression(*f1valiter.S3HudiDirectTarget.Compression)
+				}
+				if f1valiter.S3HudiDirectTarget.Format != nil {
+					f1valf55.SetFormat(*f1valiter.S3HudiDirectTarget.Format)
+				}
+				if f1valiter.S3HudiDirectTarget.Inputs != nil {
+					f1valf55f3 := []*string{}
+					for _, f1valf55f3iter := range f1valiter.S3HudiDirectTarget.Inputs {
+						var f1valf55f3elem string
+						f1valf55f3elem = *f1valf55f3iter
+						f1valf55f3 = append(f1valf55f3, &f1valf55f3elem)
+					}
+					f1valf55.SetInputs(f1valf55f3)
+				}
+				if f1valiter.S3HudiDirectTarget.Name != nil {
+					f1valf55.SetName(*f1valiter.S3HudiDirectTarget.Name)
+				}
+				if f1valiter.S3HudiDirectTarget.PartitionKeys != nil {
+					f1valf55f5 := [][]*string{}
+					for _, f1valf55f5iter := range f1valiter.S3HudiDirectTarget.PartitionKeys {
+						f1valf55f5elem := []*string{}
+						for _, f1valf55f5elemiter := range f1valf55f5iter {
+							var f1valf55f5elemelem string
+							f1valf55f5elemelem = *f1valf55f5elemiter
+							f1valf55f5elem = append(f1valf55f5elem, &f1valf55f5elemelem)
+						}
+						f1valf55f5 = append(f1valf55f5, f1valf55f5elem)
+					}
+					f1valf55.SetPartitionKeys(f1valf55f5)
+				}
+				if f1valiter.S3HudiDirectTarget.Path != nil {
+					f1valf55.SetPath(*f1valiter.S3HudiDirectTarget.Path)
+				}
+				if f1valiter.S3HudiDirectTarget.SchemaChangePolicy != nil {
+					f1valf55f7 := &svcsdk.DirectSchemaChangePolicy{}
+					if f1valiter.S3HudiDirectTarget.SchemaChangePolicy.Database != nil {
+						f1valf55f7.SetDatabase(*f1valiter.S3HudiDirectTarget.SchemaChangePolicy.Database)
+					}
+					if f1valiter.S3HudiDirectTarget.SchemaChangePolicy.EnableUpdateCatalog != nil {
+						f1valf55f7.SetEnableUpdateCatalog(*f1valiter.S3HudiDirectTarget.SchemaChangePolicy.EnableUpdateCatalog)
+					}
+					if f1valiter.S3HudiDirectTarget.SchemaChangePolicy.Table != nil {
+						f1valf55f7.SetTable(*f1valiter.S3HudiDirectTarget.SchemaChangePolicy.Table)
+					}
+					if f1valiter.S3HudiDirectTarget.SchemaChangePolicy.UpdateBehavior != nil {
+						f1valf55f7.SetUpdateBehavior(*f1valiter.S3HudiDirectTarget.SchemaChangePolicy.UpdateBehavior)
+					}
+					f1valf55.SetSchemaChangePolicy(f1valf55f7)
+				}
+				f1val.SetS3HudiDirectTarget(f1valf55)
+			}
+			if f1valiter.S3HudiSource != nil {
+				f1valf56 := &svcsdk.S3HudiSource{}
+				if f1valiter.S3HudiSource.AdditionalHudiOptions != nil {
+					f1valf56f0 := map[string]*string{}
+					for f1valf56f0key, f1valf56f0valiter := range f1valiter.S3HudiSource.AdditionalHudiOptions {
+						var f1valf56f0val string
+						f1valf56f0val = *f1valf56f0valiter
+						f1valf56f0[f1valf56f0key] = &f1valf56f0val
+					}
+					f1valf56.SetAdditionalHudiOptions(f1valf56f0)
+				}
+				if f1valiter.S3HudiSource.AdditionalOptions != nil {
+					f1valf56f1 := &svcsdk.S3DirectSourceAdditionalOptions{}
+					if f1valiter.S3HudiSource.AdditionalOptions.BoundedFiles != nil {
+						f1valf56f1.SetBoundedFiles(*f1valiter.S3HudiSource.AdditionalOptions.BoundedFiles)
+					}
+					if f1valiter.S3HudiSource.AdditionalOptions.BoundedSize != nil {
+						f1valf56f1.SetBoundedSize(*f1valiter.S3HudiSource.AdditionalOptions.BoundedSize)
+					}
+					if f1valiter.S3HudiSource.AdditionalOptions.EnableSamplePath != nil {
+						f1valf56f1.SetEnableSamplePath(*f1valiter.S3HudiSource.AdditionalOptions.EnableSamplePath)
+					}
+					if f1valiter.S3HudiSource.AdditionalOptions.SamplePath != nil {
+						f1valf56f1.SetSamplePath(*f1valiter.S3HudiSource.AdditionalOptions.SamplePath)
+					}
+					f1valf56.SetAdditionalOptions(f1valf56f1)
+				}
+				if f1valiter.S3HudiSource.Name != nil {
+					f1valf56.SetName(*f1valiter.S3HudiSource.Name)
+				}
+				if f1valiter.S3HudiSource.OutputSchemas != nil {
+					f1valf56f3 := []*svcsdk.GlueSchema{}
+					for _, f1valf56f3iter := range f1valiter.S3HudiSource.OutputSchemas {
+						f1valf56f3elem := &svcsdk.GlueSchema{}
+						if f1valf56f3iter.Columns != nil {
+							f1valf56f3elemf0 := []*svcsdk.GlueStudioSchemaColumn{}
+							for _, f1valf56f3elemf0iter := range f1valf56f3iter.Columns {
+								f1valf56f3elemf0elem := &svcsdk.GlueStudioSchemaColumn{}
+								if f1valf56f3elemf0iter.Name != nil {
+									f1valf56f3elemf0elem.SetName(*f1valf56f3elemf0iter.Name)
+								}
+								if f1valf56f3elemf0iter.Type != nil {
+									f1valf56f3elemf0elem.SetType(*f1valf56f3elemf0iter.Type)
+								}
+								f1valf56f3elemf0 = append(f1valf56f3elemf0, f1valf56f3elemf0elem)
+							}
+							f1valf56f3elem.SetColumns(f1valf56f3elemf0)
+						}
+						f1valf56f3 = append(f1valf56f3, f1valf56f3elem)
 					}
-					f1valf41.SetSchemaChangePolicy(f1valf41f5)
+					f1valf56.SetOutputSchemas(f1valf56f3)
 				}
-				f1val.SetS3GlueParquetTarget(f1valf41)
+				if f1valiter.S3HudiSource.Paths != nil {
+					f1valf56f4 := []*string{}
+					for _, f1valf56f4iter := range f1valiter.S3HudiSource.Paths {
+						var f1valf56f4elem string
+						f1valf56f4elem = *f1valf56f4iter
+						f1valf56f4 = append(f1valf56f4, &f1valf56f4elem)
+					}
+					f1valf56.SetPaths(f1valf56f4)
+				}
+				f1val.SetS3HudiSource(f1valf56)
 			}
 			if f1valiter.S3JSONSource != nil {
-				f1valf42 := &svcsdk.S3JsonSource{}
+				f1valf57 := &svcsdk.S3JsonSource{}
 				if f1valiter.S3JSONSource.AdditionalOptions != nil {
-					f1valf42f0 := &svcsdk.S3DirectSourceAdditionalOptions{}
+					f1valf57f0 := &svcsdk.S3DirectSourceAdditionalOptions{}
 					if f1valiter.S3JSONSource.AdditionalOptions.BoundedFiles != nil {
-						f1valf42f0.SetBoundedFiles(*f1valiter.S3JSONSource.AdditionalOptions.BoundedFiles)
+						f1valf57f0.SetBoundedFiles(*f1valiter.S3JSONSource.AdditionalOptions.BoundedFiles)
 					}
 					if f1valiter.S3JSONSource.AdditionalOptions.BoundedSize != nil {
-						f1valf42f0.SetBoundedSize(*f1valiter.S3JSONSource.AdditionalOptions.BoundedSize)
+						f1valf57f0.SetBoundedSize(*f1valiter.S3JSONSource.AdditionalOptions.BoundedSize)
 					}
 					if f1valiter.S3JSONSource.AdditionalOptions.EnableSamplePath != nil {
-						f1valf42f0.SetEnableSamplePath(*f1valiter.S3JSONSource.AdditionalOptions.EnableSamplePath)
+						f1valf57f0.SetEnableSamplePath(*f1valiter.S3JSONSource.AdditionalOptions.EnableSamplePath)
 					}
 					if f1valiter.S3JSONSource.AdditionalOptions.SamplePath != nil {
-						f1valf42f0.SetSamplePath(*f1valiter.S3JSONSource.AdditionalOptions.SamplePath)
+						f1valf57f0.SetSamplePath(*f1valiter.S3JSONSource.AdditionalOptions.SamplePath)
 					}
-					f1valf42.SetAdditionalOptions(f1valf42f0)
+					f1valf57.SetAdditionalOptions(f1valf57f0)
 				}
 				if f1valiter.S3JSONSource.CompressionType != nil {
-					f1valf42.SetCompressionType(*f1valiter.S3JSONSource.CompressionType)
+					f1valf57.SetCompressionType(*f1valiter.S3JSONSource.CompressionType)
 				}
 				if f1valiter.S3JSONSource.Exclusions != nil {
-					f1valf42f2 := []*string{}
-					for _, f1valf42f2iter := range f1valiter.S3JSONSource.Exclusions {
-						var f1valf42f2elem string
-						f1valf42f2elem = *f1valf42f2iter
-						f1valf42f2 = append(f1valf42f2, &f1valf42f2elem)
+					f1valf57f2 := []*string{}
+					for _, f1valf57f2iter := range f1valiter.S3JSONSource.Exclusions {
+						var f1valf57f2elem string
+						f1valf57f2elem = *f1valf57f2iter
+						f1valf57f2 = append(f1valf57f2, &f1valf57f2elem)
 					}
-					f1valf42.SetExclusions(f1valf42f2)
+					f1valf57.SetExclusions(f1valf57f2)
 				}
 				if f1valiter.S3JSONSource.GroupFiles != nil {
-					f1valf42.SetGroupFiles(*f1valiter.S3JSONSource.GroupFiles)
+					f1valf57.SetGroupFiles(*f1valiter.S3JSONSource.GroupFiles)
 				}
 				if f1valiter.S3JSONSource.GroupSize != nil {
-					f1valf42.SetGroupSize(*f1valiter.S3JSONSource.GroupSize)
+					f1valf57.SetGroupSize(*f1valiter.S3JSONSource.GroupSize)
 				}
 				if f1valiter.S3JSONSource.JSONPath != nil {
-					f1valf42.SetJsonPath(*f1valiter.S3JSONSource.JSONPath)
+					f1valf57.SetJsonPath(*f1valiter.S3JSONSource.JSONPath)
 				}
 				if f1valiter.S3JSONSource.MaxBand != nil {
-					f1valf42.SetMaxBand(*f1valiter.S3JSONSource.MaxBand)
+					f1valf57.SetMaxBand(*f1valiter.S3JSONSource.MaxBand)
 				}
 				if f1valiter.S3JSONSource.MaxFilesInBand != nil {
-					f1valf42.SetMaxFilesInBand(*f1valiter.S3JSONSource.MaxFilesInBand)
+					f1valf57.SetMaxFilesInBand(*f1valiter.S3JSONSource.MaxFilesInBand)
 				}
 				if f1valiter.S3JSONSource.Multiline != nil {
-					f1valf42.SetMultiline(*f1valiter.S3JSONSource.Multiline)
+					f1valf57.SetMultiline(*f1valiter.S3JSONSource.Multiline)
 				}
 				if f1valiter.S3JSONSource.Name != nil {
-					f1valf42.SetName(*f1valiter.S3JSONSource.Name)
+					f1valf57.SetName(*f1valiter.S3JSONSource.Name)
 				}
 				if f1valiter.S3JSONSource.OutputSchemas != nil {
-					f1valf42f10 := []*svcsdk.GlueSchema{}
-					for _, f1valf42f10iter := range f1valiter.S3JSONSource.OutputSchemas {
-						f1valf42f10elem := &svcsdk.GlueSchema{}
-						if f1valf42f10iter.Columns != nil {
-							f1valf42f10elemf0 := []*svcsdk.GlueStudioSchemaColumn{}
-							for _, f1valf42f10elemf0iter := range f1valf42f10iter.Columns {
-								f1valf42f10elemf0elem := &svcsdk.GlueStudioSchemaColumn{}
-								if f1valf42f10elemf0iter.Name != nil {
-									f1valf42f10elemf0elem.SetName(*f1valf42f10elemf0iter.Name)
+					f1valf57f10 := []*svcsdk.GlueSchema{}
+					for _, f1valf57f10iter := range f1valiter.S3JSONSource.OutputSchemas {
+						f1valf57f10elem := &svcsdk.GlueSchema{}
+						if f1valf57f10iter.Columns != nil {
+							f1valf57f10elemf0 := []*svcsdk.GlueStudioSchemaColumn{}
+							for _, f1valf57f10elemf0iter := range f1valf57f10iter.Columns {
+								f1valf57f10elemf0elem := &svcsdk.GlueStudioSchemaColumn{}
+								if f1valf57f10elemf0iter.Name != nil {
+									f1valf57f10elemf0elem.SetName(*f1valf57f10elemf0iter.Name)
 								}
-								if f1valf42f10elemf0iter.Type != nil {
-									f1valf42f10elemf0elem.SetType(*f1valf42f10elemf0iter.Type)
+								if f1valf57f10elemf0iter.Type != nil {
+									f1valf57f10elemf0elem.SetType(*f1valf57f10elemf0iter.Type)
 								}
-								f1valf42f10elemf0 = append(f1valf42f10elemf0, f1valf42f10elemf0elem)
+								f1valf57f10elemf0 = append(f1valf57f10elemf0, f1valf57f10elemf0elem)
 							}
-							f1valf42f10elem.SetColumns(f1valf42f10elemf0)
+							f1valf57f10elem.SetColumns(f1valf57f10elemf0)
 						}
-						f1valf42f10 = append(f1valf42f10, f1valf42f10elem)
+						f1valf57f10 = append(f1valf57f10, f1valf57f10elem)
 					}
-					f1valf42.SetOutputSchemas(f1valf42f10)
+					f1valf57.SetOutputSchemas(f1valf57f10)
 				}
 				if f1valiter.S3JSONSource.Paths != nil {
-					f1valf42f11 := []*string{}
-					for _, f1valf42f11iter := range f1valiter.S3JSONSource.Paths {
-						var f1valf42f11elem string
-						f1valf42f11elem = *f1valf42f11iter
-						f1valf42f11 = append(f1valf42f11, &f1valf42f11elem)
+					f1valf57f11 := []*string{}
+					for _, f1valf57f11iter := range f1valiter.S3JSONSource.Paths {
+						var f1valf57f11elem string
+						f1valf57f11elem = *f1valf57f11iter
+						f1valf57f11 = append(f1valf57f11, &f1valf57f11elem)
 					}
-					f1valf42.SetPaths(f1valf42f11)
+					f1valf57.SetPaths(f1valf57f11)
 				}
 				if f1valiter.S3JSONSource.Recurse != nil {
-					f1valf42.SetRecurse(*f1valiter.S3JSONSource.Recurse)
+					f1valf57.SetRecurse(*f1valiter.S3JSONSource.Recurse)
 				}
-				f1val.SetS3JsonSource(f1valf42)
+				f1val.SetS3JsonSource(f1valf57)
 			}
 			if f1valiter.S3ParquetSource != nil {
-				f1valf43 := &svcsdk.S3ParquetSource{}
+				f1valf58 := &svcsdk.S3ParquetSource{}
 				if f1valiter.S3ParquetSource.AdditionalOptions != nil {
-					f1valf43f0 := &svcsdk.S3DirectSourceAdditionalOptions{}
+					f1valf58f0 := &svcsdk.S3DirectSourceAdditionalOptions{}
 					if f1valiter.S3ParquetSource.AdditionalOptions.BoundedFiles != nil {
-						f1valf43f0.SetBoundedFiles(*f1valiter.S3ParquetSource.AdditionalOptions.BoundedFiles)
+						f1valf58f0.SetBoundedFiles(*f1valiter.S3ParquetSource.AdditionalOptions.BoundedFiles)
 					}
 					if f1valiter.S3ParquetSource.AdditionalOptions.BoundedSize != nil {
-						f1valf43f0.SetBoundedSize(*f1valiter.S3ParquetSource.AdditionalOptions.BoundedSize)
+						f1valf58f0.SetBoundedSize(*f1valiter.S3ParquetSource.AdditionalOptions.BoundedSize)
 					}
 					if f1valiter.S3ParquetSource.AdditionalOptions.EnableSamplePath != nil {
-						f1valf43f0.SetEnableSamplePath(*f1valiter.S3ParquetSource.AdditionalOptions.EnableSamplePath)
+						f1valf58f0.SetEnableSamplePath(*f1valiter.S3ParquetSource.AdditionalOptions.EnableSamplePath)
 					}
 					if f1valiter.S3ParquetSource.AdditionalOptions.SamplePath != nil {
-						f1valf43f0.SetSamplePath(*f1valiter.S3ParquetSource.AdditionalOptions.SamplePath)
+						f1valf58f0.SetSamplePath(*f1valiter.S3ParquetSource.AdditionalOptions.SamplePath)
 					}
-					f1valf43.SetAdditionalOptions(f1valf43f0)
+					f1valf58.SetAdditionalOptions(f1valf58f0)
 				}
 				if f1valiter.S3ParquetSource.CompressionType != nil {
-					f1valf43.SetCompressionType(*f1valiter.S3ParquetSource.CompressionType)
+					f1valf58.SetCompressionType(*f1valiter.S3ParquetSource.CompressionType)
 				}
 				if f1valiter.S3ParquetSource.Exclusions != nil {
-					f1valf43f2 := []*string{}
-					for _, f1valf43f2iter := range f1valiter.S3ParquetSource.Exclusions {
-						var f1valf43f2elem string
-						f1valf43f2elem = *f1valf43f2iter
-						f1valf43f2 = append(f1valf43f2, &f1valf43f2elem)
+					f1valf58f2 := []*string{}
+					for _, f1valf58f2iter := range f1valiter.S3ParquetSource.Exclusions {
+						var f1valf58f2elem string
+						f1valf58f2elem = *f1valf58f2iter
+						f1valf58f2 = append(f1valf58f2, &f1valf58f2elem)
 					}
-					f1valf43.SetExclusions(f1valf43f2)
+					f1valf58.SetExclusions(f1valf58f2)
 				}
 				if f1valiter.S3ParquetSource.GroupFiles != nil {
-					f1valf43.SetGroupFiles(*f1valiter.S3ParquetSource.GroupFiles)
+					f1valf58.SetGroupFiles(*f1valiter.S3ParquetSource.GroupFiles)
 				}
 				if f1valiter.S3ParquetSource.GroupSize != nil {
-					f1valf43.SetGroupSize(*f1valiter.S3ParquetSource.GroupSize)
+					f1valf58.SetGroupSize(*f1valiter.S3ParquetSource.GroupSize)
 				}
 				if f1valiter.S3ParquetSource.MaxBand != nil {
-					f1valf43.SetMaxBand(*f1valiter.S3ParquetSource.MaxBand)
+					f1valf58.SetMaxBand(*f1valiter.S3ParquetSource.MaxBand)
 				}
 				if f1valiter.S3ParquetSource.MaxFilesInBand != nil {
-					f1valf43.SetMaxFilesInBand(*f1valiter.S3ParquetSource.MaxFilesInBand)
+					f1valf58.SetMaxFilesInBand(*f1valiter.S3ParquetSource.MaxFilesInBand)
 				}
 				if f1valiter.S3ParquetSource.Name != nil {
-					f1valf43.SetName(*f1valiter.S3ParquetSource.Name)
+					f1valf58.SetName(*f1valiter.S3ParquetSource.Name)
 				}
 				if f1valiter.S3ParquetSource.OutputSchemas != nil {
-					f1valf43f8 := []*svcsdk.GlueSchema{}
-					for _, f1valf43f8iter := range f1valiter.S3ParquetSource.OutputSchemas {
-						f1valf43f8elem := &svcsdk.GlueSchema{}
-						if f1valf43f8iter.Columns != nil {
-							f1valf43f8elemf0 := []*svcsdk.GlueStudioSchemaColumn{}
-							for _, f1valf43f8elemf0iter := range f1valf43f8iter.Columns {
-								f1valf43f8elemf0elem := &svcsdk.GlueStudioSchemaColumn{}
-								if f1valf43f8elemf0iter.Name != nil {
-									f1valf43f8elemf0elem.SetName(*f1valf43f8elemf0iter.Name)
+					f1valf58f8 := []*svcsdk.GlueSchema{}
+					for _, f1valf58f8iter := range f1valiter.S3ParquetSource.OutputSchemas {
+						f1valf58f8elem := &svcsdk.GlueSchema{}
+						if f1valf58f8iter.Columns != nil {
+							f1valf58f8elemf0 := []*svcsdk.GlueStudioSchemaColumn{}
+							for _, f1valf58f8elemf0iter := range f1valf58f8iter.Columns {
+								f1valf58f8elemf0elem := &svcsdk.GlueStudioSchemaColumn{}
+								if f1valf58f8elemf0iter.Name != nil {
+									f1valf58f8elemf0elem.SetName(*f1valf58f8elemf0iter.Name)
 								}
-								if f1valf43f8elemf0iter.Type != nil {
-									f1valf43f8elemf0elem.SetType(*f1valf43f8elemf0iter.Type)
+								if f1valf58f8elemf0iter.Type != nil {
+									f1valf58f8elemf0elem.SetType(*f1valf58f8elemf0iter.Type)
 								}
-								f1valf43f8elemf0 = append(f1valf43f8elemf0, f1valf43f8elemf0elem)
+								f1valf58f8elemf0 = append(f1valf58f8elemf0, f1valf58f8elemf0elem)
 							}
-							f1valf43f8elem.SetColumns(f1valf43f8elemf0)
+							f1valf58f8elem.SetColumns(f1valf58f8elemf0)
 						}
-						f1valf43f8 = append(f1valf43f8, f1valf43f8elem)
+						f1valf58f8 = append(f1valf58f8, f1valf58f8elem)
 					}
-					f1valf43.SetOutputSchemas(f1valf43f8)
+					f1valf58.SetOutputSchemas(f1valf58f8)
 				}
 				if f1valiter.S3ParquetSource.Paths != nil {
-					f1valf43f9 := []*string{}
-					for _, f1valf43f9iter := range f1valiter.S3ParquetSource.Paths {
-						var f1valf43f9elem string
-						f1valf43f9elem = *f1valf43f9iter
-						f1valf43f9 = append(f1valf43f9, &f1valf43f9elem)
+					f1valf58f9 := []*string{}
+					for _, f1valf58f9iter := range f1valiter.S3ParquetSource.Paths {
+						var f1valf58f9elem string
+						f1valf58f9elem = *f1valf58f9iter
+						f1valf58f9 = append(f1valf58f9, &f1valf58f9elem)
 					}
-					f1valf43.SetPaths(f1valf43f9)
+					f1valf58.SetPaths(f1valf58f9)
 				}
 				if f1valiter.S3ParquetSource.Recurse != nil {
-					f1valf43.SetRecurse(*f1valiter.S3ParquetSource.Recurse)
+					f1valf58.SetRecurse(*f1valiter.S3ParquetSource.Recurse)
 				}
-				f1val.SetS3ParquetSource(f1valf43)
+				f1val.SetS3ParquetSource(f1valf58)
 			}
 			if f1valiter.SelectFields != nil {
-				f1valf44 := &svcsdk.SelectFields{}
+				f1valf59 := &svcsdk.SelectFields{}
 				if f1valiter.SelectFields.Inputs != nil {
-					f1valf44f0 := []*string{}
-					for _, f1valf44f0iter := range f1valiter.SelectFields.Inputs {
-						var f1valf44f0elem string
-						f1valf44f0elem = *f1valf44f0iter
-						f1valf44f0 = append(f1valf44f0, &f1valf44f0elem)
+					f1valf59f0 := []*string{}
+					for _, f1valf59f0iter := range f1valiter.SelectFields.Inputs {
+						var f1valf59f0elem string
+						f1valf59f0elem = *f1valf59f0iter
+						f1valf59f0 = append(f1valf59f0, &f1valf59f0elem)
 					}
-					f1valf44.SetInputs(f1valf44f0)
+					f1valf59.SetInputs(f1valf59f0)
 				}
 				if f1valiter.SelectFields.Name != nil {
-					f1valf44.SetName(*f1valiter.SelectFields.Name)
+					f1valf59.SetName(*f1valiter.SelectFields.Name)
 				}
 				if f1valiter.SelectFields.Paths != nil {
-					f1valf44f2 := [][]*string{}
-					for _, f1valf44f2iter := range f1valiter.SelectFields.Paths {
-						f1valf44f2elem := []*string{}
-						for _, f1valf44f2elemiter := range f1valf44f2iter {
-							var f1valf44f2elemelem string
-							f1valf44f2elemelem = *f1valf44f2elemiter
-							f1valf44f2elem = append(f1valf44f2elem, &f1valf44f2elemelem)
+					f1valf59f2 := [][]*string{}
+					for _, f1valf59f2iter := range f1valiter.SelectFields.Paths {
+						f1valf59f2elem := []*string{}
+						for _, f1valf59f2elemiter := range f1valf59f2iter {
+							var f1valf59f2elemelem string
+							f1valf59f2elemelem = *f1valf59f2elemiter
+							f1valf59f2elem = append(f1valf59f2elem, &f1valf59f2elemelem)
 						}
-						f1valf44f2 = append(f1valf44f2, f1valf44f2elem)
+						f1valf59f2 = append(f1valf59f2, f1valf59f2elem)
 					}
-					f1valf44.SetPaths(f1valf44f2)
+					f1valf59.SetPaths(f1valf59f2)
 				}
-				f1val.SetSelectFields(f1valf44)
+				f1val.SetSelectFields(f1valf59)
 			}
 			if f1valiter.SelectFromCollection != nil {
-				f1valf45 := &svcsdk.SelectFromCollection{}
+				f1valf60 := &svcsdk.SelectFromCollection{}
 				if f1valiter.SelectFromCollection.Index != nil {
-					f1valf45.SetIndex(*f1valiter.SelectFromCollection.Index)
+					f1valf60.SetIndex(*f1valiter.SelectFromCollection.Index)
 				}
 				if f1valiter.SelectFromCollection.Inputs != nil {
-					f1valf45f1 := []*string{}
-					for _, f1valf45f1iter := range f1valiter.SelectFromCollection.Inputs {
-						var f1valf45f1elem string
-						f1valf45f1elem = *f1valf45f1iter
-						f1valf45f1 = append(f1valf45f1, &f1valf45f1elem)
+					f1valf60f1 := []*string{}
+					for _, f1valf60f1iter := range f1valiter.SelectFromCollection.Inputs {
+						var f1valf60f1elem string
+						f1valf60f1elem = *f1valf60f1iter
+						f1valf60f1 = append(f1valf60f1, &f1valf60f1elem)
 					}
-					f1valf45.SetInputs(f1valf45f1)
+					f1valf60.SetInputs(f1valf60f1)
 				}
 				if f1valiter.SelectFromCollection.Name != nil {
-					f1valf45.SetName(*f1valiter.SelectFromCollection.Name)
+					f1valf60.SetName(*f1valiter.SelectFromCollection.Name)
+				}
+				f1val.SetSelectFromCollection(f1valf60)
+			}
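+			// New Snowflake source/target nodes; both carry their settings in a shared SnowflakeNodeData struct.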
f1valf61f0f1key, f1valf61f0f1valiter := range f1valiter.SnowflakeSource.Data.AdditionalOptions { + var f1valf61f0f1val string + f1valf61f0f1val = *f1valf61f0f1valiter + f1valf61f0f1[f1valf61f0f1key] = &f1valf61f0f1val + } + f1valf61f0.SetAdditionalOptions(f1valf61f0f1) + } + if f1valiter.SnowflakeSource.Data.AutoPushdown != nil { + f1valf61f0.SetAutoPushdown(*f1valiter.SnowflakeSource.Data.AutoPushdown) + } + if f1valiter.SnowflakeSource.Data.Connection != nil { + f1valf61f0f3 := &svcsdk.Option{} + if f1valiter.SnowflakeSource.Data.Connection.Description != nil { + f1valf61f0f3.SetDescription(*f1valiter.SnowflakeSource.Data.Connection.Description) + } + if f1valiter.SnowflakeSource.Data.Connection.Label != nil { + f1valf61f0f3.SetLabel(*f1valiter.SnowflakeSource.Data.Connection.Label) + } + if f1valiter.SnowflakeSource.Data.Connection.Value != nil { + f1valf61f0f3.SetValue(*f1valiter.SnowflakeSource.Data.Connection.Value) + } + f1valf61f0.SetConnection(f1valf61f0f3) + } + if f1valiter.SnowflakeSource.Data.Database != nil { + f1valf61f0.SetDatabase(*f1valiter.SnowflakeSource.Data.Database) + } + if f1valiter.SnowflakeSource.Data.IAMRole != nil { + f1valf61f0f5 := &svcsdk.Option{} + if f1valiter.SnowflakeSource.Data.IAMRole.Description != nil { + f1valf61f0f5.SetDescription(*f1valiter.SnowflakeSource.Data.IAMRole.Description) + } + if f1valiter.SnowflakeSource.Data.IAMRole.Label != nil { + f1valf61f0f5.SetLabel(*f1valiter.SnowflakeSource.Data.IAMRole.Label) + } + if f1valiter.SnowflakeSource.Data.IAMRole.Value != nil { + f1valf61f0f5.SetValue(*f1valiter.SnowflakeSource.Data.IAMRole.Value) + } + f1valf61f0.SetIamRole(f1valf61f0f5) + } + if f1valiter.SnowflakeSource.Data.MergeAction != nil { + f1valf61f0.SetMergeAction(*f1valiter.SnowflakeSource.Data.MergeAction) + } + if f1valiter.SnowflakeSource.Data.MergeClause != nil { + f1valf61f0.SetMergeClause(*f1valiter.SnowflakeSource.Data.MergeClause) + } + if f1valiter.SnowflakeSource.Data.MergeWhenMatched != nil { + f1valf61f0.SetMergeWhenMatched(*f1valiter.SnowflakeSource.Data.MergeWhenMatched) + } + if f1valiter.SnowflakeSource.Data.MergeWhenNotMatched != nil { + f1valf61f0.SetMergeWhenNotMatched(*f1valiter.SnowflakeSource.Data.MergeWhenNotMatched) + } + if f1valiter.SnowflakeSource.Data.PostAction != nil { + f1valf61f0.SetPostAction(*f1valiter.SnowflakeSource.Data.PostAction) + } + if f1valiter.SnowflakeSource.Data.PreAction != nil { + f1valf61f0.SetPreAction(*f1valiter.SnowflakeSource.Data.PreAction) + } + if f1valiter.SnowflakeSource.Data.SampleQuery != nil { + f1valf61f0.SetSampleQuery(*f1valiter.SnowflakeSource.Data.SampleQuery) + } + if f1valiter.SnowflakeSource.Data.Schema != nil { + f1valf61f0.SetSchema(*f1valiter.SnowflakeSource.Data.Schema) + } + if f1valiter.SnowflakeSource.Data.SelectedColumns != nil { + f1valf61f0f14 := []*svcsdk.Option{} + for _, f1valf61f0f14iter := range f1valiter.SnowflakeSource.Data.SelectedColumns { + f1valf61f0f14elem := &svcsdk.Option{} + if f1valf61f0f14iter.Description != nil { + f1valf61f0f14elem.SetDescription(*f1valf61f0f14iter.Description) + } + if f1valf61f0f14iter.Label != nil { + f1valf61f0f14elem.SetLabel(*f1valf61f0f14iter.Label) + } + if f1valf61f0f14iter.Value != nil { + f1valf61f0f14elem.SetValue(*f1valf61f0f14iter.Value) + } + f1valf61f0f14 = append(f1valf61f0f14, f1valf61f0f14elem) + } + f1valf61f0.SetSelectedColumns(f1valf61f0f14) + } + if f1valiter.SnowflakeSource.Data.SourceType != nil { + f1valf61f0.SetSourceType(*f1valiter.SnowflakeSource.Data.SourceType) + } + if 
f1valiter.SnowflakeSource.Data.StagingTable != nil { + f1valf61f0.SetStagingTable(*f1valiter.SnowflakeSource.Data.StagingTable) + } + if f1valiter.SnowflakeSource.Data.Table != nil { + f1valf61f0.SetTable(*f1valiter.SnowflakeSource.Data.Table) + } + if f1valiter.SnowflakeSource.Data.TableSchema != nil { + f1valf61f0f18 := []*svcsdk.Option{} + for _, f1valf61f0f18iter := range f1valiter.SnowflakeSource.Data.TableSchema { + f1valf61f0f18elem := &svcsdk.Option{} + if f1valf61f0f18iter.Description != nil { + f1valf61f0f18elem.SetDescription(*f1valf61f0f18iter.Description) + } + if f1valf61f0f18iter.Label != nil { + f1valf61f0f18elem.SetLabel(*f1valf61f0f18iter.Label) + } + if f1valf61f0f18iter.Value != nil { + f1valf61f0f18elem.SetValue(*f1valf61f0f18iter.Value) + } + f1valf61f0f18 = append(f1valf61f0f18, f1valf61f0f18elem) + } + f1valf61f0.SetTableSchema(f1valf61f0f18) + } + if f1valiter.SnowflakeSource.Data.TempDir != nil { + f1valf61f0.SetTempDir(*f1valiter.SnowflakeSource.Data.TempDir) + } + if f1valiter.SnowflakeSource.Data.Upsert != nil { + f1valf61f0.SetUpsert(*f1valiter.SnowflakeSource.Data.Upsert) + } + f1valf61.SetData(f1valf61f0) + } + if f1valiter.SnowflakeSource.Name != nil { + f1valf61.SetName(*f1valiter.SnowflakeSource.Name) + } + if f1valiter.SnowflakeSource.OutputSchemas != nil { + f1valf61f2 := []*svcsdk.GlueSchema{} + for _, f1valf61f2iter := range f1valiter.SnowflakeSource.OutputSchemas { + f1valf61f2elem := &svcsdk.GlueSchema{} + if f1valf61f2iter.Columns != nil { + f1valf61f2elemf0 := []*svcsdk.GlueStudioSchemaColumn{} + for _, f1valf61f2elemf0iter := range f1valf61f2iter.Columns { + f1valf61f2elemf0elem := &svcsdk.GlueStudioSchemaColumn{} + if f1valf61f2elemf0iter.Name != nil { + f1valf61f2elemf0elem.SetName(*f1valf61f2elemf0iter.Name) + } + if f1valf61f2elemf0iter.Type != nil { + f1valf61f2elemf0elem.SetType(*f1valf61f2elemf0iter.Type) + } + f1valf61f2elemf0 = append(f1valf61f2elemf0, f1valf61f2elemf0elem) + } + f1valf61f2elem.SetColumns(f1valf61f2elemf0) + } + f1valf61f2 = append(f1valf61f2, f1valf61f2elem) + } + f1valf61.SetOutputSchemas(f1valf61f2) + } + f1val.SetSnowflakeSource(f1valf61) + } + if f1valiter.SnowflakeTarget != nil { + f1valf62 := &svcsdk.SnowflakeTarget{} + if f1valiter.SnowflakeTarget.Data != nil { + f1valf62f0 := &svcsdk.SnowflakeNodeData{} + if f1valiter.SnowflakeTarget.Data.Action != nil { + f1valf62f0.SetAction(*f1valiter.SnowflakeTarget.Data.Action) + } + if f1valiter.SnowflakeTarget.Data.AdditionalOptions != nil { + f1valf62f0f1 := map[string]*string{} + for f1valf62f0f1key, f1valf62f0f1valiter := range f1valiter.SnowflakeTarget.Data.AdditionalOptions { + var f1valf62f0f1val string + f1valf62f0f1val = *f1valf62f0f1valiter + f1valf62f0f1[f1valf62f0f1key] = &f1valf62f0f1val + } + f1valf62f0.SetAdditionalOptions(f1valf62f0f1) + } + if f1valiter.SnowflakeTarget.Data.AutoPushdown != nil { + f1valf62f0.SetAutoPushdown(*f1valiter.SnowflakeTarget.Data.AutoPushdown) + } + if f1valiter.SnowflakeTarget.Data.Connection != nil { + f1valf62f0f3 := &svcsdk.Option{} + if f1valiter.SnowflakeTarget.Data.Connection.Description != nil { + f1valf62f0f3.SetDescription(*f1valiter.SnowflakeTarget.Data.Connection.Description) + } + if f1valiter.SnowflakeTarget.Data.Connection.Label != nil { + f1valf62f0f3.SetLabel(*f1valiter.SnowflakeTarget.Data.Connection.Label) + } + if f1valiter.SnowflakeTarget.Data.Connection.Value != nil { + f1valf62f0f3.SetValue(*f1valiter.SnowflakeTarget.Data.Connection.Value) + } + f1valf62f0.SetConnection(f1valf62f0f3) + } + if 
f1valiter.SnowflakeTarget.Data.Database != nil { + f1valf62f0.SetDatabase(*f1valiter.SnowflakeTarget.Data.Database) + } + if f1valiter.SnowflakeTarget.Data.IAMRole != nil { + f1valf62f0f5 := &svcsdk.Option{} + if f1valiter.SnowflakeTarget.Data.IAMRole.Description != nil { + f1valf62f0f5.SetDescription(*f1valiter.SnowflakeTarget.Data.IAMRole.Description) + } + if f1valiter.SnowflakeTarget.Data.IAMRole.Label != nil { + f1valf62f0f5.SetLabel(*f1valiter.SnowflakeTarget.Data.IAMRole.Label) + } + if f1valiter.SnowflakeTarget.Data.IAMRole.Value != nil { + f1valf62f0f5.SetValue(*f1valiter.SnowflakeTarget.Data.IAMRole.Value) + } + f1valf62f0.SetIamRole(f1valf62f0f5) + } + if f1valiter.SnowflakeTarget.Data.MergeAction != nil { + f1valf62f0.SetMergeAction(*f1valiter.SnowflakeTarget.Data.MergeAction) + } + if f1valiter.SnowflakeTarget.Data.MergeClause != nil { + f1valf62f0.SetMergeClause(*f1valiter.SnowflakeTarget.Data.MergeClause) + } + if f1valiter.SnowflakeTarget.Data.MergeWhenMatched != nil { + f1valf62f0.SetMergeWhenMatched(*f1valiter.SnowflakeTarget.Data.MergeWhenMatched) + } + if f1valiter.SnowflakeTarget.Data.MergeWhenNotMatched != nil { + f1valf62f0.SetMergeWhenNotMatched(*f1valiter.SnowflakeTarget.Data.MergeWhenNotMatched) + } + if f1valiter.SnowflakeTarget.Data.PostAction != nil { + f1valf62f0.SetPostAction(*f1valiter.SnowflakeTarget.Data.PostAction) + } + if f1valiter.SnowflakeTarget.Data.PreAction != nil { + f1valf62f0.SetPreAction(*f1valiter.SnowflakeTarget.Data.PreAction) + } + if f1valiter.SnowflakeTarget.Data.SampleQuery != nil { + f1valf62f0.SetSampleQuery(*f1valiter.SnowflakeTarget.Data.SampleQuery) + } + if f1valiter.SnowflakeTarget.Data.Schema != nil { + f1valf62f0.SetSchema(*f1valiter.SnowflakeTarget.Data.Schema) + } + if f1valiter.SnowflakeTarget.Data.SelectedColumns != nil { + f1valf62f0f14 := []*svcsdk.Option{} + for _, f1valf62f0f14iter := range f1valiter.SnowflakeTarget.Data.SelectedColumns { + f1valf62f0f14elem := &svcsdk.Option{} + if f1valf62f0f14iter.Description != nil { + f1valf62f0f14elem.SetDescription(*f1valf62f0f14iter.Description) + } + if f1valf62f0f14iter.Label != nil { + f1valf62f0f14elem.SetLabel(*f1valf62f0f14iter.Label) + } + if f1valf62f0f14iter.Value != nil { + f1valf62f0f14elem.SetValue(*f1valf62f0f14iter.Value) + } + f1valf62f0f14 = append(f1valf62f0f14, f1valf62f0f14elem) + } + f1valf62f0.SetSelectedColumns(f1valf62f0f14) + } + if f1valiter.SnowflakeTarget.Data.SourceType != nil { + f1valf62f0.SetSourceType(*f1valiter.SnowflakeTarget.Data.SourceType) + } + if f1valiter.SnowflakeTarget.Data.StagingTable != nil { + f1valf62f0.SetStagingTable(*f1valiter.SnowflakeTarget.Data.StagingTable) + } + if f1valiter.SnowflakeTarget.Data.Table != nil { + f1valf62f0.SetTable(*f1valiter.SnowflakeTarget.Data.Table) + } + if f1valiter.SnowflakeTarget.Data.TableSchema != nil { + f1valf62f0f18 := []*svcsdk.Option{} + for _, f1valf62f0f18iter := range f1valiter.SnowflakeTarget.Data.TableSchema { + f1valf62f0f18elem := &svcsdk.Option{} + if f1valf62f0f18iter.Description != nil { + f1valf62f0f18elem.SetDescription(*f1valf62f0f18iter.Description) + } + if f1valf62f0f18iter.Label != nil { + f1valf62f0f18elem.SetLabel(*f1valf62f0f18iter.Label) + } + if f1valf62f0f18iter.Value != nil { + f1valf62f0f18elem.SetValue(*f1valf62f0f18iter.Value) + } + f1valf62f0f18 = append(f1valf62f0f18, f1valf62f0f18elem) + } + f1valf62f0.SetTableSchema(f1valf62f0f18) + } + if f1valiter.SnowflakeTarget.Data.TempDir != nil { + f1valf62f0.SetTempDir(*f1valiter.SnowflakeTarget.Data.TempDir) + } + if 
f1valiter.SnowflakeTarget.Data.Upsert != nil { + f1valf62f0.SetUpsert(*f1valiter.SnowflakeTarget.Data.Upsert) + } + f1valf62.SetData(f1valf62f0) + } + if f1valiter.SnowflakeTarget.Inputs != nil { + f1valf62f1 := []*string{} + for _, f1valf62f1iter := range f1valiter.SnowflakeTarget.Inputs { + var f1valf62f1elem string + f1valf62f1elem = *f1valf62f1iter + f1valf62f1 = append(f1valf62f1, &f1valf62f1elem) + } + f1valf62.SetInputs(f1valf62f1) + } + if f1valiter.SnowflakeTarget.Name != nil { + f1valf62.SetName(*f1valiter.SnowflakeTarget.Name) } - f1val.SetSelectFromCollection(f1valf45) + f1val.SetSnowflakeTarget(f1valf62) } if f1valiter.SparkConnectorSource != nil { - f1valf46 := &svcsdk.SparkConnectorSource{} + f1valf63 := &svcsdk.SparkConnectorSource{} if f1valiter.SparkConnectorSource.AdditionalOptions != nil { - f1valf46f0 := map[string]*string{} - for f1valf46f0key, f1valf46f0valiter := range f1valiter.SparkConnectorSource.AdditionalOptions { - var f1valf46f0val string - f1valf46f0val = *f1valf46f0valiter - f1valf46f0[f1valf46f0key] = &f1valf46f0val + f1valf63f0 := map[string]*string{} + for f1valf63f0key, f1valf63f0valiter := range f1valiter.SparkConnectorSource.AdditionalOptions { + var f1valf63f0val string + f1valf63f0val = *f1valf63f0valiter + f1valf63f0[f1valf63f0key] = &f1valf63f0val } - f1valf46.SetAdditionalOptions(f1valf46f0) + f1valf63.SetAdditionalOptions(f1valf63f0) } if f1valiter.SparkConnectorSource.ConnectionName != nil { - f1valf46.SetConnectionName(*f1valiter.SparkConnectorSource.ConnectionName) + f1valf63.SetConnectionName(*f1valiter.SparkConnectorSource.ConnectionName) } if f1valiter.SparkConnectorSource.ConnectionType != nil { - f1valf46.SetConnectionType(*f1valiter.SparkConnectorSource.ConnectionType) + f1valf63.SetConnectionType(*f1valiter.SparkConnectorSource.ConnectionType) } if f1valiter.SparkConnectorSource.ConnectorName != nil { - f1valf46.SetConnectorName(*f1valiter.SparkConnectorSource.ConnectorName) + f1valf63.SetConnectorName(*f1valiter.SparkConnectorSource.ConnectorName) } if f1valiter.SparkConnectorSource.Name != nil { - f1valf46.SetName(*f1valiter.SparkConnectorSource.Name) + f1valf63.SetName(*f1valiter.SparkConnectorSource.Name) } if f1valiter.SparkConnectorSource.OutputSchemas != nil { - f1valf46f5 := []*svcsdk.GlueSchema{} - for _, f1valf46f5iter := range f1valiter.SparkConnectorSource.OutputSchemas { - f1valf46f5elem := &svcsdk.GlueSchema{} - if f1valf46f5iter.Columns != nil { - f1valf46f5elemf0 := []*svcsdk.GlueStudioSchemaColumn{} - for _, f1valf46f5elemf0iter := range f1valf46f5iter.Columns { - f1valf46f5elemf0elem := &svcsdk.GlueStudioSchemaColumn{} - if f1valf46f5elemf0iter.Name != nil { - f1valf46f5elemf0elem.SetName(*f1valf46f5elemf0iter.Name) + f1valf63f5 := []*svcsdk.GlueSchema{} + for _, f1valf63f5iter := range f1valiter.SparkConnectorSource.OutputSchemas { + f1valf63f5elem := &svcsdk.GlueSchema{} + if f1valf63f5iter.Columns != nil { + f1valf63f5elemf0 := []*svcsdk.GlueStudioSchemaColumn{} + for _, f1valf63f5elemf0iter := range f1valf63f5iter.Columns { + f1valf63f5elemf0elem := &svcsdk.GlueStudioSchemaColumn{} + if f1valf63f5elemf0iter.Name != nil { + f1valf63f5elemf0elem.SetName(*f1valf63f5elemf0iter.Name) } - if f1valf46f5elemf0iter.Type != nil { - f1valf46f5elemf0elem.SetType(*f1valf46f5elemf0iter.Type) + if f1valf63f5elemf0iter.Type != nil { + f1valf63f5elemf0elem.SetType(*f1valf63f5elemf0iter.Type) } - f1valf46f5elemf0 = append(f1valf46f5elemf0, f1valf46f5elemf0elem) + f1valf63f5elemf0 = append(f1valf63f5elemf0, f1valf63f5elemf0elem) } - 
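// All of the AdditionalOptions maps above are copied with the same three-line
// dance: the pointee is copied into a fresh local before its address is taken,
// because taking the address of the range variable directly would alias one
// variable across every iteration (in the Go versions this SDK supports,
// before the 1.22 loop-variable change). A hand-written sketch of the pattern;
// copyStringPtrMap is a hypothetical helper, not part of the generated code:
func copyStringPtrMap(in map[string]*string) map[string]*string {
	if in == nil {
		return nil
	}
	out := map[string]*string{}
	for k, v := range in {
		elem := *v     // copy the pointee into a fresh local
		out[k] = &elem // safe: each iteration gets its own address
	}
	return out
}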
f1valf46f5elem.SetColumns(f1valf46f5elemf0) + f1valf63f5elem.SetColumns(f1valf63f5elemf0) } - f1valf46f5 = append(f1valf46f5, f1valf46f5elem) + f1valf63f5 = append(f1valf63f5, f1valf63f5elem) } - f1valf46.SetOutputSchemas(f1valf46f5) + f1valf63.SetOutputSchemas(f1valf63f5) } - f1val.SetSparkConnectorSource(f1valf46) + f1val.SetSparkConnectorSource(f1valf63) } if f1valiter.SparkConnectorTarget != nil { - f1valf47 := &svcsdk.SparkConnectorTarget{} + f1valf64 := &svcsdk.SparkConnectorTarget{} if f1valiter.SparkConnectorTarget.AdditionalOptions != nil { - f1valf47f0 := map[string]*string{} - for f1valf47f0key, f1valf47f0valiter := range f1valiter.SparkConnectorTarget.AdditionalOptions { - var f1valf47f0val string - f1valf47f0val = *f1valf47f0valiter - f1valf47f0[f1valf47f0key] = &f1valf47f0val + f1valf64f0 := map[string]*string{} + for f1valf64f0key, f1valf64f0valiter := range f1valiter.SparkConnectorTarget.AdditionalOptions { + var f1valf64f0val string + f1valf64f0val = *f1valf64f0valiter + f1valf64f0[f1valf64f0key] = &f1valf64f0val } - f1valf47.SetAdditionalOptions(f1valf47f0) + f1valf64.SetAdditionalOptions(f1valf64f0) } if f1valiter.SparkConnectorTarget.ConnectionName != nil { - f1valf47.SetConnectionName(*f1valiter.SparkConnectorTarget.ConnectionName) + f1valf64.SetConnectionName(*f1valiter.SparkConnectorTarget.ConnectionName) } if f1valiter.SparkConnectorTarget.ConnectionType != nil { - f1valf47.SetConnectionType(*f1valiter.SparkConnectorTarget.ConnectionType) + f1valf64.SetConnectionType(*f1valiter.SparkConnectorTarget.ConnectionType) } if f1valiter.SparkConnectorTarget.ConnectorName != nil { - f1valf47.SetConnectorName(*f1valiter.SparkConnectorTarget.ConnectorName) + f1valf64.SetConnectorName(*f1valiter.SparkConnectorTarget.ConnectorName) } if f1valiter.SparkConnectorTarget.Inputs != nil { - f1valf47f4 := []*string{} - for _, f1valf47f4iter := range f1valiter.SparkConnectorTarget.Inputs { - var f1valf47f4elem string - f1valf47f4elem = *f1valf47f4iter - f1valf47f4 = append(f1valf47f4, &f1valf47f4elem) + f1valf64f4 := []*string{} + for _, f1valf64f4iter := range f1valiter.SparkConnectorTarget.Inputs { + var f1valf64f4elem string + f1valf64f4elem = *f1valf64f4iter + f1valf64f4 = append(f1valf64f4, &f1valf64f4elem) } - f1valf47.SetInputs(f1valf47f4) + f1valf64.SetInputs(f1valf64f4) } if f1valiter.SparkConnectorTarget.Name != nil { - f1valf47.SetName(*f1valiter.SparkConnectorTarget.Name) + f1valf64.SetName(*f1valiter.SparkConnectorTarget.Name) } if f1valiter.SparkConnectorTarget.OutputSchemas != nil { - f1valf47f6 := []*svcsdk.GlueSchema{} - for _, f1valf47f6iter := range f1valiter.SparkConnectorTarget.OutputSchemas { - f1valf47f6elem := &svcsdk.GlueSchema{} - if f1valf47f6iter.Columns != nil { - f1valf47f6elemf0 := []*svcsdk.GlueStudioSchemaColumn{} - for _, f1valf47f6elemf0iter := range f1valf47f6iter.Columns { - f1valf47f6elemf0elem := &svcsdk.GlueStudioSchemaColumn{} - if f1valf47f6elemf0iter.Name != nil { - f1valf47f6elemf0elem.SetName(*f1valf47f6elemf0iter.Name) + f1valf64f6 := []*svcsdk.GlueSchema{} + for _, f1valf64f6iter := range f1valiter.SparkConnectorTarget.OutputSchemas { + f1valf64f6elem := &svcsdk.GlueSchema{} + if f1valf64f6iter.Columns != nil { + f1valf64f6elemf0 := []*svcsdk.GlueStudioSchemaColumn{} + for _, f1valf64f6elemf0iter := range f1valf64f6iter.Columns { + f1valf64f6elemf0elem := &svcsdk.GlueStudioSchemaColumn{} + if f1valf64f6elemf0iter.Name != nil { + f1valf64f6elemf0elem.SetName(*f1valf64f6elemf0iter.Name) } - if f1valf47f6elemf0iter.Type != nil { - 
f1valf47f6elemf0elem.SetType(*f1valf47f6elemf0iter.Type) + if f1valf64f6elemf0iter.Type != nil { + f1valf64f6elemf0elem.SetType(*f1valf64f6elemf0iter.Type) } - f1valf47f6elemf0 = append(f1valf47f6elemf0, f1valf47f6elemf0elem) + f1valf64f6elemf0 = append(f1valf64f6elemf0, f1valf64f6elemf0elem) } - f1valf47f6elem.SetColumns(f1valf47f6elemf0) + f1valf64f6elem.SetColumns(f1valf64f6elemf0) } - f1valf47f6 = append(f1valf47f6, f1valf47f6elem) + f1valf64f6 = append(f1valf64f6, f1valf64f6elem) } - f1valf47.SetOutputSchemas(f1valf47f6) + f1valf64.SetOutputSchemas(f1valf64f6) } - f1val.SetSparkConnectorTarget(f1valf47) + f1val.SetSparkConnectorTarget(f1valf64) } if f1valiter.SparkSQL != nil { - f1valf48 := &svcsdk.SparkSQL{} + f1valf65 := &svcsdk.SparkSQL{} if f1valiter.SparkSQL.Inputs != nil { - f1valf48f0 := []*string{} - for _, f1valf48f0iter := range f1valiter.SparkSQL.Inputs { - var f1valf48f0elem string - f1valf48f0elem = *f1valf48f0iter - f1valf48f0 = append(f1valf48f0, &f1valf48f0elem) + f1valf65f0 := []*string{} + for _, f1valf65f0iter := range f1valiter.SparkSQL.Inputs { + var f1valf65f0elem string + f1valf65f0elem = *f1valf65f0iter + f1valf65f0 = append(f1valf65f0, &f1valf65f0elem) } - f1valf48.SetInputs(f1valf48f0) + f1valf65.SetInputs(f1valf65f0) } if f1valiter.SparkSQL.Name != nil { - f1valf48.SetName(*f1valiter.SparkSQL.Name) + f1valf65.SetName(*f1valiter.SparkSQL.Name) } if f1valiter.SparkSQL.OutputSchemas != nil { - f1valf48f2 := []*svcsdk.GlueSchema{} - for _, f1valf48f2iter := range f1valiter.SparkSQL.OutputSchemas { - f1valf48f2elem := &svcsdk.GlueSchema{} - if f1valf48f2iter.Columns != nil { - f1valf48f2elemf0 := []*svcsdk.GlueStudioSchemaColumn{} - for _, f1valf48f2elemf0iter := range f1valf48f2iter.Columns { - f1valf48f2elemf0elem := &svcsdk.GlueStudioSchemaColumn{} - if f1valf48f2elemf0iter.Name != nil { - f1valf48f2elemf0elem.SetName(*f1valf48f2elemf0iter.Name) + f1valf65f2 := []*svcsdk.GlueSchema{} + for _, f1valf65f2iter := range f1valiter.SparkSQL.OutputSchemas { + f1valf65f2elem := &svcsdk.GlueSchema{} + if f1valf65f2iter.Columns != nil { + f1valf65f2elemf0 := []*svcsdk.GlueStudioSchemaColumn{} + for _, f1valf65f2elemf0iter := range f1valf65f2iter.Columns { + f1valf65f2elemf0elem := &svcsdk.GlueStudioSchemaColumn{} + if f1valf65f2elemf0iter.Name != nil { + f1valf65f2elemf0elem.SetName(*f1valf65f2elemf0iter.Name) } - if f1valf48f2elemf0iter.Type != nil { - f1valf48f2elemf0elem.SetType(*f1valf48f2elemf0iter.Type) + if f1valf65f2elemf0iter.Type != nil { + f1valf65f2elemf0elem.SetType(*f1valf65f2elemf0iter.Type) } - f1valf48f2elemf0 = append(f1valf48f2elemf0, f1valf48f2elemf0elem) + f1valf65f2elemf0 = append(f1valf65f2elemf0, f1valf65f2elemf0elem) } - f1valf48f2elem.SetColumns(f1valf48f2elemf0) + f1valf65f2elem.SetColumns(f1valf65f2elemf0) } - f1valf48f2 = append(f1valf48f2, f1valf48f2elem) + f1valf65f2 = append(f1valf65f2, f1valf65f2elem) } - f1valf48.SetOutputSchemas(f1valf48f2) + f1valf65.SetOutputSchemas(f1valf65f2) } if f1valiter.SparkSQL.SQLAliases != nil { - f1valf48f3 := []*svcsdk.SqlAlias{} - for _, f1valf48f3iter := range f1valiter.SparkSQL.SQLAliases { - f1valf48f3elem := &svcsdk.SqlAlias{} - if f1valf48f3iter.Alias != nil { - f1valf48f3elem.SetAlias(*f1valf48f3iter.Alias) + f1valf65f3 := []*svcsdk.SqlAlias{} + for _, f1valf65f3iter := range f1valiter.SparkSQL.SQLAliases { + f1valf65f3elem := &svcsdk.SqlAlias{} + if f1valf65f3iter.Alias != nil { + f1valf65f3elem.SetAlias(*f1valf65f3iter.Alias) } - if f1valf48f3iter.From != nil { - 
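// The OutputSchemas blocks repeat one shape for every node type: a slice of
// GlueSchema, each holding GlueStudioSchemaColumn entries, copied with a nil
// guard at each level. A condensed sketch, using simplified stand-in types
// with the same Name/Type fields as the SDK structs:
type glueColumn struct{ Name, Type *string }
type glueSchema struct{ Columns []*glueColumn }

func copySchemas(in []*glueSchema) []*glueSchema {
	out := []*glueSchema{}
	for _, s := range in {
		elem := &glueSchema{}
		for _, c := range s.Columns { // ranging over a nil slice is a no-op
			cc := &glueColumn{}
			if c.Name != nil {
				name := *c.Name
				cc.Name = &name
			}
			if c.Type != nil {
				typ := *c.Type
				cc.Type = &typ
			}
			elem.Columns = append(elem.Columns, cc)
		}
		out = append(out, elem)
	}
	return out
}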
f1valf48f3elem.SetFrom(*f1valf48f3iter.From) + if f1valf65f3iter.From != nil { + f1valf65f3elem.SetFrom(*f1valf65f3iter.From) } - f1valf48f3 = append(f1valf48f3, f1valf48f3elem) + f1valf65f3 = append(f1valf65f3, f1valf65f3elem) } - f1valf48.SetSqlAliases(f1valf48f3) + f1valf65.SetSqlAliases(f1valf65f3) } if f1valiter.SparkSQL.SQLQuery != nil { - f1valf48.SetSqlQuery(*f1valiter.SparkSQL.SQLQuery) + f1valf65.SetSqlQuery(*f1valiter.SparkSQL.SQLQuery) } - f1val.SetSparkSQL(f1valf48) + f1val.SetSparkSQL(f1valf65) } if f1valiter.Spigot != nil { - f1valf49 := &svcsdk.Spigot{} + f1valf66 := &svcsdk.Spigot{} if f1valiter.Spigot.Inputs != nil { - f1valf49f0 := []*string{} - for _, f1valf49f0iter := range f1valiter.Spigot.Inputs { - var f1valf49f0elem string - f1valf49f0elem = *f1valf49f0iter - f1valf49f0 = append(f1valf49f0, &f1valf49f0elem) + f1valf66f0 := []*string{} + for _, f1valf66f0iter := range f1valiter.Spigot.Inputs { + var f1valf66f0elem string + f1valf66f0elem = *f1valf66f0iter + f1valf66f0 = append(f1valf66f0, &f1valf66f0elem) } - f1valf49.SetInputs(f1valf49f0) + f1valf66.SetInputs(f1valf66f0) } if f1valiter.Spigot.Name != nil { - f1valf49.SetName(*f1valiter.Spigot.Name) + f1valf66.SetName(*f1valiter.Spigot.Name) } if f1valiter.Spigot.Path != nil { - f1valf49.SetPath(*f1valiter.Spigot.Path) + f1valf66.SetPath(*f1valiter.Spigot.Path) } if f1valiter.Spigot.Prob != nil { - f1valf49.SetProb(*f1valiter.Spigot.Prob) + f1valf66.SetProb(*f1valiter.Spigot.Prob) } if f1valiter.Spigot.Topk != nil { - f1valf49.SetTopk(*f1valiter.Spigot.Topk) + f1valf66.SetTopk(*f1valiter.Spigot.Topk) } - f1val.SetSpigot(f1valf49) + f1val.SetSpigot(f1valf66) } if f1valiter.SplitFields != nil { - f1valf50 := &svcsdk.SplitFields{} + f1valf67 := &svcsdk.SplitFields{} if f1valiter.SplitFields.Inputs != nil { - f1valf50f0 := []*string{} - for _, f1valf50f0iter := range f1valiter.SplitFields.Inputs { - var f1valf50f0elem string - f1valf50f0elem = *f1valf50f0iter - f1valf50f0 = append(f1valf50f0, &f1valf50f0elem) + f1valf67f0 := []*string{} + for _, f1valf67f0iter := range f1valiter.SplitFields.Inputs { + var f1valf67f0elem string + f1valf67f0elem = *f1valf67f0iter + f1valf67f0 = append(f1valf67f0, &f1valf67f0elem) } - f1valf50.SetInputs(f1valf50f0) + f1valf67.SetInputs(f1valf67f0) } if f1valiter.SplitFields.Name != nil { - f1valf50.SetName(*f1valiter.SplitFields.Name) + f1valf67.SetName(*f1valiter.SplitFields.Name) } if f1valiter.SplitFields.Paths != nil { - f1valf50f2 := [][]*string{} - for _, f1valf50f2iter := range f1valiter.SplitFields.Paths { - f1valf50f2elem := []*string{} - for _, f1valf50f2elemiter := range f1valf50f2iter { - var f1valf50f2elemelem string - f1valf50f2elemelem = *f1valf50f2elemiter - f1valf50f2elem = append(f1valf50f2elem, &f1valf50f2elemelem) + f1valf67f2 := [][]*string{} + for _, f1valf67f2iter := range f1valiter.SplitFields.Paths { + f1valf67f2elem := []*string{} + for _, f1valf67f2elemiter := range f1valf67f2iter { + var f1valf67f2elemelem string + f1valf67f2elemelem = *f1valf67f2elemiter + f1valf67f2elem = append(f1valf67f2elem, &f1valf67f2elemelem) } - f1valf50f2 = append(f1valf50f2, f1valf50f2elem) + f1valf67f2 = append(f1valf67f2, f1valf67f2elem) } - f1valf50.SetPaths(f1valf50f2) + f1valf67.SetPaths(f1valf67f2) } - f1val.SetSplitFields(f1valf50) + f1val.SetSplitFields(f1valf67) } if f1valiter.Union != nil { - f1valf51 := &svcsdk.Union{} + f1valf68 := &svcsdk.Union{} if f1valiter.Union.Inputs != nil { - f1valf51f0 := []*string{} - for _, f1valf51f0iter := range f1valiter.Union.Inputs { - var 
f1valf51f0elem string - f1valf51f0elem = *f1valf51f0iter - f1valf51f0 = append(f1valf51f0, &f1valf51f0elem) + f1valf68f0 := []*string{} + for _, f1valf68f0iter := range f1valiter.Union.Inputs { + var f1valf68f0elem string + f1valf68f0elem = *f1valf68f0iter + f1valf68f0 = append(f1valf68f0, &f1valf68f0elem) } - f1valf51.SetInputs(f1valf51f0) + f1valf68.SetInputs(f1valf68f0) } if f1valiter.Union.Name != nil { - f1valf51.SetName(*f1valiter.Union.Name) + f1valf68.SetName(*f1valiter.Union.Name) } if f1valiter.Union.UnionType != nil { - f1valf51.SetUnionType(*f1valiter.Union.UnionType) + f1valf68.SetUnionType(*f1valiter.Union.UnionType) } - f1val.SetUnion(f1valf51) + f1val.SetUnion(f1valf68) } f1[f1key] = f1val } @@ -4526,6 +7289,9 @@ func GenerateCreateJobInput(cr *svcapitypes.Job) *svcsdk.CreateJobInput { if cr.Spec.ForProvider.Command.PythonVersion != nil { f2.SetPythonVersion(*cr.Spec.ForProvider.Command.PythonVersion) } + if cr.Spec.ForProvider.Command.Runtime != nil { + f2.SetRuntime(*cr.Spec.ForProvider.Command.Runtime) + } if cr.Spec.ForProvider.Command.ScriptLocation != nil { f2.SetScriptLocation(*cr.Spec.ForProvider.Command.ScriptLocation) } diff --git a/pkg/controller/lambda/function/zz_controller.go b/pkg/controller/lambda/function/zz_controller.go index 75028b7692..f2afec5355 100644 --- a/pkg/controller/lambda/function/zz_controller.go +++ b/pkg/controller/lambda/function/zz_controller.go @@ -301,6 +301,25 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E } else { cr.Spec.ForProvider.Runtime = nil } + if resp.RuntimeVersionConfig != nil { + f23 := &svcapitypes.RuntimeVersionConfig{} + if resp.RuntimeVersionConfig.Error != nil { + f23f0 := &svcapitypes.RuntimeVersionError{} + if resp.RuntimeVersionConfig.Error.ErrorCode != nil { + f23f0.ErrorCode = resp.RuntimeVersionConfig.Error.ErrorCode + } + if resp.RuntimeVersionConfig.Error.Message != nil { + f23f0.Message = resp.RuntimeVersionConfig.Error.Message + } + f23.Error = f23f0 + } + if resp.RuntimeVersionConfig.RuntimeVersionArn != nil { + f23.RuntimeVersionARN = resp.RuntimeVersionConfig.RuntimeVersionArn + } + cr.Status.AtProvider.RuntimeVersionConfig = f23 + } else { + cr.Status.AtProvider.RuntimeVersionConfig = nil + } if resp.SigningJobArn != nil { cr.Status.AtProvider.SigningJobARN = resp.SigningJobArn } else { @@ -312,11 +331,11 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E cr.Status.AtProvider.SigningProfileVersionARN = nil } if resp.SnapStart != nil { - f25 := &svcapitypes.SnapStart{} + f26 := &svcapitypes.SnapStart{} if resp.SnapStart.ApplyOn != nil { - f25.ApplyOn = resp.SnapStart.ApplyOn + f26.ApplyOn = resp.SnapStart.ApplyOn } - cr.Spec.ForProvider.SnapStart = f25 + cr.Spec.ForProvider.SnapStart = f26 } else { cr.Spec.ForProvider.SnapStart = nil } @@ -341,11 +360,11 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E cr.Spec.ForProvider.Timeout = nil } if resp.TracingConfig != nil { - f30 := &svcapitypes.TracingConfig{} + f31 := &svcapitypes.TracingConfig{} if resp.TracingConfig.Mode != nil { - f30.Mode = resp.TracingConfig.Mode + f31.Mode = resp.TracingConfig.Mode } - cr.Spec.ForProvider.TracingConfig = f30 + cr.Spec.ForProvider.TracingConfig = f31 } else { cr.Spec.ForProvider.TracingConfig = nil } @@ -355,29 +374,29 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E cr.Status.AtProvider.Version = nil } if resp.VpcConfig != nil { - f32 := &svcapitypes.VPCConfigResponse{} + f33 := 
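// The pure-rename churn above (f1valf46 -> f1valf63, f25 -> f26, f30 -> f31,
// f32 -> f33, ...) carries no behaviour change: the generator derives each
// temporary's name from the positional index of the field it copies, so the
// members this SDK bump adds (the Snowflake source/target nodes in Glue,
// RuntimeVersionConfig in Lambda) shift every later index. The copy itself
// moves unchanged with its field, e.g.:
//
//	f1valf46 := &svcsdk.SparkConnectorSource{} // before the bump: index 46
//	f1valf63 := &svcsdk.SparkConnectorSource{} // after the bump:  index 63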
&svcapitypes.VPCConfigResponse{} if resp.VpcConfig.SecurityGroupIds != nil { - f32f0 := []*string{} - for _, f32f0iter := range resp.VpcConfig.SecurityGroupIds { - var f32f0elem string - f32f0elem = *f32f0iter - f32f0 = append(f32f0, &f32f0elem) + f33f0 := []*string{} + for _, f33f0iter := range resp.VpcConfig.SecurityGroupIds { + var f33f0elem string + f33f0elem = *f33f0iter + f33f0 = append(f33f0, &f33f0elem) } - f32.SecurityGroupIDs = f32f0 + f33.SecurityGroupIDs = f33f0 } if resp.VpcConfig.SubnetIds != nil { - f32f1 := []*string{} - for _, f32f1iter := range resp.VpcConfig.SubnetIds { - var f32f1elem string - f32f1elem = *f32f1iter - f32f1 = append(f32f1, &f32f1elem) + f33f1 := []*string{} + for _, f33f1iter := range resp.VpcConfig.SubnetIds { + var f33f1elem string + f33f1elem = *f33f1iter + f33f1 = append(f33f1, &f33f1elem) } - f32.SubnetIDs = f32f1 + f33.SubnetIDs = f33f1 } if resp.VpcConfig.VpcId != nil { - f32.VPCID = resp.VpcConfig.VpcId + f33.VPCID = resp.VpcConfig.VpcId } - cr.Status.AtProvider.VPCConfig = f32 + cr.Status.AtProvider.VPCConfig = f33 } else { cr.Status.AtProvider.VPCConfig = nil } diff --git a/pkg/controller/lambda/functionurlconfig/zz_controller.go b/pkg/controller/lambda/functionurlconfig/zz_controller.go index 3beea9e90e..7c10bfd40d 100644 --- a/pkg/controller/lambda/functionurlconfig/zz_controller.go +++ b/pkg/controller/lambda/functionurlconfig/zz_controller.go @@ -183,6 +183,11 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E } else { cr.Status.AtProvider.FunctionURL = nil } + if resp.InvokeMode != nil { + cr.Spec.ForProvider.InvokeMode = resp.InvokeMode + } else { + cr.Spec.ForProvider.InvokeMode = nil + } return e.postCreate(ctx, cr, resp, managed.ExternalCreation{}, err) } diff --git a/pkg/controller/lambda/functionurlconfig/zz_conversions.go b/pkg/controller/lambda/functionurlconfig/zz_conversions.go index 2ed147712c..28c4ee6f24 100644 --- a/pkg/controller/lambda/functionurlconfig/zz_conversions.go +++ b/pkg/controller/lambda/functionurlconfig/zz_conversions.go @@ -112,6 +112,11 @@ func GenerateFunctionURLConfig(resp *svcsdk.GetFunctionUrlConfigOutput) *svcapit } else { cr.Status.AtProvider.FunctionURL = nil } + if resp.InvokeMode != nil { + cr.Spec.ForProvider.InvokeMode = resp.InvokeMode + } else { + cr.Spec.ForProvider.InvokeMode = nil + } return cr } @@ -169,6 +174,9 @@ func GenerateCreateFunctionUrlConfigInput(cr *svcapitypes.FunctionURLConfig) *sv } res.SetCors(f1) } + if cr.Spec.ForProvider.InvokeMode != nil { + res.SetInvokeMode(*cr.Spec.ForProvider.InvokeMode) + } if cr.Spec.ForProvider.Qualifier != nil { res.SetQualifier(*cr.Spec.ForProvider.Qualifier) } @@ -229,6 +237,9 @@ func GenerateUpdateFunctionUrlConfigInput(cr *svcapitypes.FunctionURLConfig) *sv } res.SetCors(f1) } + if cr.Spec.ForProvider.InvokeMode != nil { + res.SetInvokeMode(*cr.Spec.ForProvider.InvokeMode) + } if cr.Spec.ForProvider.Qualifier != nil { res.SetQualifier(*cr.Spec.ForProvider.Qualifier) } diff --git a/pkg/controller/mq/broker/zz_conversions.go b/pkg/controller/mq/broker/zz_conversions.go index 05e7b27e45..60282e8426 100644 --- a/pkg/controller/mq/broker/zz_conversions.go +++ b/pkg/controller/mq/broker/zz_conversions.go @@ -140,20 +140,25 @@ func GenerateBroker(resp *svcsdk.DescribeBrokerResponse) *svcapitypes.Broker { } else { cr.Status.AtProvider.Created = nil } + if resp.DataReplicationMode != nil { + cr.Spec.ForProvider.DataReplicationMode = resp.DataReplicationMode + } else { + cr.Spec.ForProvider.DataReplicationMode = nil + } if 
resp.DeploymentMode != nil { cr.Spec.ForProvider.DeploymentMode = resp.DeploymentMode } else { cr.Spec.ForProvider.DeploymentMode = nil } if resp.EncryptionOptions != nil { - f11 := &svcapitypes.EncryptionOptions{} + f13 := &svcapitypes.EncryptionOptions{} if resp.EncryptionOptions.KmsKeyId != nil { - f11.KMSKeyID = resp.EncryptionOptions.KmsKeyId + f13.KMSKeyID = resp.EncryptionOptions.KmsKeyId } if resp.EncryptionOptions.UseAwsOwnedKey != nil { - f11.UseAWSOwnedKey = resp.EncryptionOptions.UseAwsOwnedKey + f13.UseAWSOwnedKey = resp.EncryptionOptions.UseAwsOwnedKey } - cr.Spec.ForProvider.EncryptionOptions = f11 + cr.Spec.ForProvider.EncryptionOptions = f13 } else { cr.Spec.ForProvider.EncryptionOptions = nil } @@ -173,71 +178,71 @@ func GenerateBroker(resp *svcsdk.DescribeBrokerResponse) *svcapitypes.Broker { cr.Spec.ForProvider.HostInstanceType = nil } if resp.LdapServerMetadata != nil { - f15 := &svcapitypes.LDAPServerMetadataInput{} + f17 := &svcapitypes.LDAPServerMetadataInput{} if resp.LdapServerMetadata.Hosts != nil { - f15f0 := []*string{} - for _, f15f0iter := range resp.LdapServerMetadata.Hosts { - var f15f0elem string - f15f0elem = *f15f0iter - f15f0 = append(f15f0, &f15f0elem) + f17f0 := []*string{} + for _, f17f0iter := range resp.LdapServerMetadata.Hosts { + var f17f0elem string + f17f0elem = *f17f0iter + f17f0 = append(f17f0, &f17f0elem) } - f15.Hosts = f15f0 + f17.Hosts = f17f0 } if resp.LdapServerMetadata.RoleBase != nil { - f15.RoleBase = resp.LdapServerMetadata.RoleBase + f17.RoleBase = resp.LdapServerMetadata.RoleBase } if resp.LdapServerMetadata.RoleName != nil { - f15.RoleName = resp.LdapServerMetadata.RoleName + f17.RoleName = resp.LdapServerMetadata.RoleName } if resp.LdapServerMetadata.RoleSearchMatching != nil { - f15.RoleSearchMatching = resp.LdapServerMetadata.RoleSearchMatching + f17.RoleSearchMatching = resp.LdapServerMetadata.RoleSearchMatching } if resp.LdapServerMetadata.RoleSearchSubtree != nil { - f15.RoleSearchSubtree = resp.LdapServerMetadata.RoleSearchSubtree + f17.RoleSearchSubtree = resp.LdapServerMetadata.RoleSearchSubtree } if resp.LdapServerMetadata.ServiceAccountUsername != nil { - f15.ServiceAccountUsername = resp.LdapServerMetadata.ServiceAccountUsername + f17.ServiceAccountUsername = resp.LdapServerMetadata.ServiceAccountUsername } if resp.LdapServerMetadata.UserBase != nil { - f15.UserBase = resp.LdapServerMetadata.UserBase + f17.UserBase = resp.LdapServerMetadata.UserBase } if resp.LdapServerMetadata.UserRoleName != nil { - f15.UserRoleName = resp.LdapServerMetadata.UserRoleName + f17.UserRoleName = resp.LdapServerMetadata.UserRoleName } if resp.LdapServerMetadata.UserSearchMatching != nil { - f15.UserSearchMatching = resp.LdapServerMetadata.UserSearchMatching + f17.UserSearchMatching = resp.LdapServerMetadata.UserSearchMatching } if resp.LdapServerMetadata.UserSearchSubtree != nil { - f15.UserSearchSubtree = resp.LdapServerMetadata.UserSearchSubtree + f17.UserSearchSubtree = resp.LdapServerMetadata.UserSearchSubtree } - cr.Spec.ForProvider.LDAPServerMetadata = f15 + cr.Spec.ForProvider.LDAPServerMetadata = f17 } else { cr.Spec.ForProvider.LDAPServerMetadata = nil } if resp.Logs != nil { - f16 := &svcapitypes.Logs{} + f18 := &svcapitypes.Logs{} if resp.Logs.Audit != nil { - f16.Audit = resp.Logs.Audit + f18.Audit = resp.Logs.Audit } if resp.Logs.General != nil { - f16.General = resp.Logs.General + f18.General = resp.Logs.General } - cr.Spec.ForProvider.Logs = f16 + cr.Spec.ForProvider.Logs = f18 } else { cr.Spec.ForProvider.Logs = nil } if 
resp.MaintenanceWindowStartTime != nil { - f17 := &svcapitypes.WeeklyStartTime{} + f19 := &svcapitypes.WeeklyStartTime{} if resp.MaintenanceWindowStartTime.DayOfWeek != nil { - f17.DayOfWeek = resp.MaintenanceWindowStartTime.DayOfWeek + f19.DayOfWeek = resp.MaintenanceWindowStartTime.DayOfWeek } if resp.MaintenanceWindowStartTime.TimeOfDay != nil { - f17.TimeOfDay = resp.MaintenanceWindowStartTime.TimeOfDay + f19.TimeOfDay = resp.MaintenanceWindowStartTime.TimeOfDay } if resp.MaintenanceWindowStartTime.TimeZone != nil { - f17.TimeZone = resp.MaintenanceWindowStartTime.TimeZone + f19.TimeZone = resp.MaintenanceWindowStartTime.TimeZone } - cr.Spec.ForProvider.MaintenanceWindowStartTime = f17 + cr.Spec.ForProvider.MaintenanceWindowStartTime = f19 } else { cr.Spec.ForProvider.MaintenanceWindowStartTime = nil } @@ -257,55 +262,55 @@ func GenerateBroker(resp *svcsdk.DescribeBrokerResponse) *svcapitypes.Broker { cr.Status.AtProvider.PendingHostInstanceType = nil } if resp.PendingLdapServerMetadata != nil { - f21 := &svcapitypes.LDAPServerMetadataOutput{} + f25 := &svcapitypes.LDAPServerMetadataOutput{} if resp.PendingLdapServerMetadata.Hosts != nil { - f21f0 := []*string{} - for _, f21f0iter := range resp.PendingLdapServerMetadata.Hosts { - var f21f0elem string - f21f0elem = *f21f0iter - f21f0 = append(f21f0, &f21f0elem) + f25f0 := []*string{} + for _, f25f0iter := range resp.PendingLdapServerMetadata.Hosts { + var f25f0elem string + f25f0elem = *f25f0iter + f25f0 = append(f25f0, &f25f0elem) } - f21.Hosts = f21f0 + f25.Hosts = f25f0 } if resp.PendingLdapServerMetadata.RoleBase != nil { - f21.RoleBase = resp.PendingLdapServerMetadata.RoleBase + f25.RoleBase = resp.PendingLdapServerMetadata.RoleBase } if resp.PendingLdapServerMetadata.RoleName != nil { - f21.RoleName = resp.PendingLdapServerMetadata.RoleName + f25.RoleName = resp.PendingLdapServerMetadata.RoleName } if resp.PendingLdapServerMetadata.RoleSearchMatching != nil { - f21.RoleSearchMatching = resp.PendingLdapServerMetadata.RoleSearchMatching + f25.RoleSearchMatching = resp.PendingLdapServerMetadata.RoleSearchMatching } if resp.PendingLdapServerMetadata.RoleSearchSubtree != nil { - f21.RoleSearchSubtree = resp.PendingLdapServerMetadata.RoleSearchSubtree + f25.RoleSearchSubtree = resp.PendingLdapServerMetadata.RoleSearchSubtree } if resp.PendingLdapServerMetadata.ServiceAccountUsername != nil { - f21.ServiceAccountUsername = resp.PendingLdapServerMetadata.ServiceAccountUsername + f25.ServiceAccountUsername = resp.PendingLdapServerMetadata.ServiceAccountUsername } if resp.PendingLdapServerMetadata.UserBase != nil { - f21.UserBase = resp.PendingLdapServerMetadata.UserBase + f25.UserBase = resp.PendingLdapServerMetadata.UserBase } if resp.PendingLdapServerMetadata.UserRoleName != nil { - f21.UserRoleName = resp.PendingLdapServerMetadata.UserRoleName + f25.UserRoleName = resp.PendingLdapServerMetadata.UserRoleName } if resp.PendingLdapServerMetadata.UserSearchMatching != nil { - f21.UserSearchMatching = resp.PendingLdapServerMetadata.UserSearchMatching + f25.UserSearchMatching = resp.PendingLdapServerMetadata.UserSearchMatching } if resp.PendingLdapServerMetadata.UserSearchSubtree != nil { - f21.UserSearchSubtree = resp.PendingLdapServerMetadata.UserSearchSubtree + f25.UserSearchSubtree = resp.PendingLdapServerMetadata.UserSearchSubtree } - cr.Status.AtProvider.PendingLDAPServerMetadata = f21 + cr.Status.AtProvider.PendingLDAPServerMetadata = f25 } else { cr.Status.AtProvider.PendingLDAPServerMetadata = nil } if resp.PendingSecurityGroups != 
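// Worth noting in the broker conversion: the generator routes fields by
// mutability. User-settable members (LdapServerMetadata, Logs,
// MaintenanceWindowStartTime) land in Spec.ForProvider, while read-only
// pending state (PendingLdapServerMetadata, PendingSecurityGroups) lands in
// Status.AtProvider. A schematic sketch with hypothetical local types:
type ldapMetadata struct{ Hosts []*string }

type brokerParameters struct{ LDAPServerMetadata *ldapMetadata }         // desired state
type brokerObservation struct{ PendingLDAPServerMetadata *ldapMetadata } // observed state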
nil { - f22 := []*string{} - for _, f22iter := range resp.PendingSecurityGroups { - var f22elem string - f22elem = *f22iter - f22 = append(f22, &f22elem) + f26 := []*string{} + for _, f26iter := range resp.PendingSecurityGroups { + var f26elem string + f26elem = *f26iter + f26 = append(f26, &f26elem) } - cr.Status.AtProvider.PendingSecurityGroups = f22 + cr.Status.AtProvider.PendingSecurityGroups = f26 } else { cr.Status.AtProvider.PendingSecurityGroups = nil } @@ -320,29 +325,29 @@ func GenerateBroker(resp *svcsdk.DescribeBrokerResponse) *svcapitypes.Broker { cr.Spec.ForProvider.StorageType = nil } if resp.Tags != nil { - f27 := map[string]*string{} - for f27key, f27valiter := range resp.Tags { - var f27val string - f27val = *f27valiter - f27[f27key] = &f27val + f31 := map[string]*string{} + for f31key, f31valiter := range resp.Tags { + var f31val string + f31val = *f31valiter + f31[f31key] = &f31val } - cr.Spec.ForProvider.Tags = f27 + cr.Spec.ForProvider.Tags = f31 } else { cr.Spec.ForProvider.Tags = nil } if resp.Users != nil { - f28 := []*svcapitypes.UserSummary{} - for _, f28iter := range resp.Users { - f28elem := &svcapitypes.UserSummary{} - if f28iter.PendingChange != nil { - f28elem.PendingChange = f28iter.PendingChange + f32 := []*svcapitypes.UserSummary{} + for _, f32iter := range resp.Users { + f32elem := &svcapitypes.UserSummary{} + if f32iter.PendingChange != nil { + f32elem.PendingChange = f32iter.PendingChange } - if f28iter.Username != nil { - f28elem.Username = f28iter.Username + if f32iter.Username != nil { + f32elem.Username = f32iter.Username } - f28 = append(f28, f28elem) + f32 = append(f32, f32elem) } - cr.Status.AtProvider.Users = f28 + cr.Status.AtProvider.Users = f32 } else { cr.Status.AtProvider.Users = nil } @@ -373,18 +378,24 @@ func GenerateCreateBrokerRequest(cr *svcapitypes.Broker) *svcsdk.CreateBrokerReq if cr.Spec.ForProvider.CreatorRequestID != nil { res.SetCreatorRequestId(*cr.Spec.ForProvider.CreatorRequestID) } + if cr.Spec.ForProvider.DataReplicationMode != nil { + res.SetDataReplicationMode(*cr.Spec.ForProvider.DataReplicationMode) + } + if cr.Spec.ForProvider.DataReplicationPrimaryBrokerARN != nil { + res.SetDataReplicationPrimaryBrokerArn(*cr.Spec.ForProvider.DataReplicationPrimaryBrokerARN) + } if cr.Spec.ForProvider.DeploymentMode != nil { res.SetDeploymentMode(*cr.Spec.ForProvider.DeploymentMode) } if cr.Spec.ForProvider.EncryptionOptions != nil { - f5 := &svcsdk.EncryptionOptions{} + f7 := &svcsdk.EncryptionOptions{} if cr.Spec.ForProvider.EncryptionOptions.KMSKeyID != nil { - f5.SetKmsKeyId(*cr.Spec.ForProvider.EncryptionOptions.KMSKeyID) + f7.SetKmsKeyId(*cr.Spec.ForProvider.EncryptionOptions.KMSKeyID) } if cr.Spec.ForProvider.EncryptionOptions.UseAWSOwnedKey != nil { - f5.SetUseAwsOwnedKey(*cr.Spec.ForProvider.EncryptionOptions.UseAWSOwnedKey) + f7.SetUseAwsOwnedKey(*cr.Spec.ForProvider.EncryptionOptions.UseAWSOwnedKey) } - res.SetEncryptionOptions(f5) + res.SetEncryptionOptions(f7) } if cr.Spec.ForProvider.EngineType != nil { res.SetEngineType(*cr.Spec.ForProvider.EngineType) @@ -396,70 +407,70 @@ func GenerateCreateBrokerRequest(cr *svcapitypes.Broker) *svcsdk.CreateBrokerReq res.SetHostInstanceType(*cr.Spec.ForProvider.HostInstanceType) } if cr.Spec.ForProvider.LDAPServerMetadata != nil { - f9 := &svcsdk.LdapServerMetadataInput{} + f11 := &svcsdk.LdapServerMetadataInput{} if cr.Spec.ForProvider.LDAPServerMetadata.Hosts != nil { - f9f0 := []*string{} - for _, f9f0iter := range cr.Spec.ForProvider.LDAPServerMetadata.Hosts { - var f9f0elem 
string - f9f0elem = *f9f0iter - f9f0 = append(f9f0, &f9f0elem) + f11f0 := []*string{} + for _, f11f0iter := range cr.Spec.ForProvider.LDAPServerMetadata.Hosts { + var f11f0elem string + f11f0elem = *f11f0iter + f11f0 = append(f11f0, &f11f0elem) } - f9.SetHosts(f9f0) + f11.SetHosts(f11f0) } if cr.Spec.ForProvider.LDAPServerMetadata.RoleBase != nil { - f9.SetRoleBase(*cr.Spec.ForProvider.LDAPServerMetadata.RoleBase) + f11.SetRoleBase(*cr.Spec.ForProvider.LDAPServerMetadata.RoleBase) } if cr.Spec.ForProvider.LDAPServerMetadata.RoleName != nil { - f9.SetRoleName(*cr.Spec.ForProvider.LDAPServerMetadata.RoleName) + f11.SetRoleName(*cr.Spec.ForProvider.LDAPServerMetadata.RoleName) } if cr.Spec.ForProvider.LDAPServerMetadata.RoleSearchMatching != nil { - f9.SetRoleSearchMatching(*cr.Spec.ForProvider.LDAPServerMetadata.RoleSearchMatching) + f11.SetRoleSearchMatching(*cr.Spec.ForProvider.LDAPServerMetadata.RoleSearchMatching) } if cr.Spec.ForProvider.LDAPServerMetadata.RoleSearchSubtree != nil { - f9.SetRoleSearchSubtree(*cr.Spec.ForProvider.LDAPServerMetadata.RoleSearchSubtree) + f11.SetRoleSearchSubtree(*cr.Spec.ForProvider.LDAPServerMetadata.RoleSearchSubtree) } if cr.Spec.ForProvider.LDAPServerMetadata.ServiceAccountPassword != nil { - f9.SetServiceAccountPassword(*cr.Spec.ForProvider.LDAPServerMetadata.ServiceAccountPassword) + f11.SetServiceAccountPassword(*cr.Spec.ForProvider.LDAPServerMetadata.ServiceAccountPassword) } if cr.Spec.ForProvider.LDAPServerMetadata.ServiceAccountUsername != nil { - f9.SetServiceAccountUsername(*cr.Spec.ForProvider.LDAPServerMetadata.ServiceAccountUsername) + f11.SetServiceAccountUsername(*cr.Spec.ForProvider.LDAPServerMetadata.ServiceAccountUsername) } if cr.Spec.ForProvider.LDAPServerMetadata.UserBase != nil { - f9.SetUserBase(*cr.Spec.ForProvider.LDAPServerMetadata.UserBase) + f11.SetUserBase(*cr.Spec.ForProvider.LDAPServerMetadata.UserBase) } if cr.Spec.ForProvider.LDAPServerMetadata.UserRoleName != nil { - f9.SetUserRoleName(*cr.Spec.ForProvider.LDAPServerMetadata.UserRoleName) + f11.SetUserRoleName(*cr.Spec.ForProvider.LDAPServerMetadata.UserRoleName) } if cr.Spec.ForProvider.LDAPServerMetadata.UserSearchMatching != nil { - f9.SetUserSearchMatching(*cr.Spec.ForProvider.LDAPServerMetadata.UserSearchMatching) + f11.SetUserSearchMatching(*cr.Spec.ForProvider.LDAPServerMetadata.UserSearchMatching) } if cr.Spec.ForProvider.LDAPServerMetadata.UserSearchSubtree != nil { - f9.SetUserSearchSubtree(*cr.Spec.ForProvider.LDAPServerMetadata.UserSearchSubtree) + f11.SetUserSearchSubtree(*cr.Spec.ForProvider.LDAPServerMetadata.UserSearchSubtree) } - res.SetLdapServerMetadata(f9) + res.SetLdapServerMetadata(f11) } if cr.Spec.ForProvider.Logs != nil { - f10 := &svcsdk.Logs{} + f12 := &svcsdk.Logs{} if cr.Spec.ForProvider.Logs.Audit != nil { - f10.SetAudit(*cr.Spec.ForProvider.Logs.Audit) + f12.SetAudit(*cr.Spec.ForProvider.Logs.Audit) } if cr.Spec.ForProvider.Logs.General != nil { - f10.SetGeneral(*cr.Spec.ForProvider.Logs.General) + f12.SetGeneral(*cr.Spec.ForProvider.Logs.General) } - res.SetLogs(f10) + res.SetLogs(f12) } if cr.Spec.ForProvider.MaintenanceWindowStartTime != nil { - f11 := &svcsdk.WeeklyStartTime{} + f13 := &svcsdk.WeeklyStartTime{} if cr.Spec.ForProvider.MaintenanceWindowStartTime.DayOfWeek != nil { - f11.SetDayOfWeek(*cr.Spec.ForProvider.MaintenanceWindowStartTime.DayOfWeek) + f13.SetDayOfWeek(*cr.Spec.ForProvider.MaintenanceWindowStartTime.DayOfWeek) } if cr.Spec.ForProvider.MaintenanceWindowStartTime.TimeOfDay != nil { - 
f11.SetTimeOfDay(*cr.Spec.ForProvider.MaintenanceWindowStartTime.TimeOfDay) + f13.SetTimeOfDay(*cr.Spec.ForProvider.MaintenanceWindowStartTime.TimeOfDay) } if cr.Spec.ForProvider.MaintenanceWindowStartTime.TimeZone != nil { - f11.SetTimeZone(*cr.Spec.ForProvider.MaintenanceWindowStartTime.TimeZone) + f13.SetTimeZone(*cr.Spec.ForProvider.MaintenanceWindowStartTime.TimeZone) } - res.SetMaintenanceWindowStartTime(f11) + res.SetMaintenanceWindowStartTime(f13) } if cr.Spec.ForProvider.PubliclyAccessible != nil { res.SetPubliclyAccessible(*cr.Spec.ForProvider.PubliclyAccessible) @@ -468,13 +479,13 @@ func GenerateCreateBrokerRequest(cr *svcapitypes.Broker) *svcsdk.CreateBrokerReq res.SetStorageType(*cr.Spec.ForProvider.StorageType) } if cr.Spec.ForProvider.Tags != nil { - f14 := map[string]*string{} - for f14key, f14valiter := range cr.Spec.ForProvider.Tags { - var f14val string - f14val = *f14valiter - f14[f14key] = &f14val + f16 := map[string]*string{} + for f16key, f16valiter := range cr.Spec.ForProvider.Tags { + var f16val string + f16val = *f16valiter + f16[f16key] = &f16val } - res.SetTags(f14) + res.SetTags(f16) } return res @@ -503,6 +514,9 @@ func GenerateUpdateBrokerRequest(cr *svcapitypes.Broker) *svcsdk.UpdateBrokerReq } res.SetConfiguration(f3) } + if cr.Spec.ForProvider.DataReplicationMode != nil { + res.SetDataReplicationMode(*cr.Spec.ForProvider.DataReplicationMode) + } if cr.Spec.ForProvider.EngineVersion != nil { res.SetEngineVersion(*cr.Spec.ForProvider.EngineVersion) } @@ -510,70 +524,70 @@ func GenerateUpdateBrokerRequest(cr *svcapitypes.Broker) *svcsdk.UpdateBrokerReq res.SetHostInstanceType(*cr.Spec.ForProvider.HostInstanceType) } if cr.Spec.ForProvider.LDAPServerMetadata != nil { - f6 := &svcsdk.LdapServerMetadataInput{} + f7 := &svcsdk.LdapServerMetadataInput{} if cr.Spec.ForProvider.LDAPServerMetadata.Hosts != nil { - f6f0 := []*string{} - for _, f6f0iter := range cr.Spec.ForProvider.LDAPServerMetadata.Hosts { - var f6f0elem string - f6f0elem = *f6f0iter - f6f0 = append(f6f0, &f6f0elem) + f7f0 := []*string{} + for _, f7f0iter := range cr.Spec.ForProvider.LDAPServerMetadata.Hosts { + var f7f0elem string + f7f0elem = *f7f0iter + f7f0 = append(f7f0, &f7f0elem) } - f6.SetHosts(f6f0) + f7.SetHosts(f7f0) } if cr.Spec.ForProvider.LDAPServerMetadata.RoleBase != nil { - f6.SetRoleBase(*cr.Spec.ForProvider.LDAPServerMetadata.RoleBase) + f7.SetRoleBase(*cr.Spec.ForProvider.LDAPServerMetadata.RoleBase) } if cr.Spec.ForProvider.LDAPServerMetadata.RoleName != nil { - f6.SetRoleName(*cr.Spec.ForProvider.LDAPServerMetadata.RoleName) + f7.SetRoleName(*cr.Spec.ForProvider.LDAPServerMetadata.RoleName) } if cr.Spec.ForProvider.LDAPServerMetadata.RoleSearchMatching != nil { - f6.SetRoleSearchMatching(*cr.Spec.ForProvider.LDAPServerMetadata.RoleSearchMatching) + f7.SetRoleSearchMatching(*cr.Spec.ForProvider.LDAPServerMetadata.RoleSearchMatching) } if cr.Spec.ForProvider.LDAPServerMetadata.RoleSearchSubtree != nil { - f6.SetRoleSearchSubtree(*cr.Spec.ForProvider.LDAPServerMetadata.RoleSearchSubtree) + f7.SetRoleSearchSubtree(*cr.Spec.ForProvider.LDAPServerMetadata.RoleSearchSubtree) } if cr.Spec.ForProvider.LDAPServerMetadata.ServiceAccountPassword != nil { - f6.SetServiceAccountPassword(*cr.Spec.ForProvider.LDAPServerMetadata.ServiceAccountPassword) + f7.SetServiceAccountPassword(*cr.Spec.ForProvider.LDAPServerMetadata.ServiceAccountPassword) } if cr.Spec.ForProvider.LDAPServerMetadata.ServiceAccountUsername != nil { - 
f6.SetServiceAccountUsername(*cr.Spec.ForProvider.LDAPServerMetadata.ServiceAccountUsername) + f7.SetServiceAccountUsername(*cr.Spec.ForProvider.LDAPServerMetadata.ServiceAccountUsername) } if cr.Spec.ForProvider.LDAPServerMetadata.UserBase != nil { - f6.SetUserBase(*cr.Spec.ForProvider.LDAPServerMetadata.UserBase) + f7.SetUserBase(*cr.Spec.ForProvider.LDAPServerMetadata.UserBase) } if cr.Spec.ForProvider.LDAPServerMetadata.UserRoleName != nil { - f6.SetUserRoleName(*cr.Spec.ForProvider.LDAPServerMetadata.UserRoleName) + f7.SetUserRoleName(*cr.Spec.ForProvider.LDAPServerMetadata.UserRoleName) } if cr.Spec.ForProvider.LDAPServerMetadata.UserSearchMatching != nil { - f6.SetUserSearchMatching(*cr.Spec.ForProvider.LDAPServerMetadata.UserSearchMatching) + f7.SetUserSearchMatching(*cr.Spec.ForProvider.LDAPServerMetadata.UserSearchMatching) } if cr.Spec.ForProvider.LDAPServerMetadata.UserSearchSubtree != nil { - f6.SetUserSearchSubtree(*cr.Spec.ForProvider.LDAPServerMetadata.UserSearchSubtree) + f7.SetUserSearchSubtree(*cr.Spec.ForProvider.LDAPServerMetadata.UserSearchSubtree) } - res.SetLdapServerMetadata(f6) + res.SetLdapServerMetadata(f7) } if cr.Spec.ForProvider.Logs != nil { - f7 := &svcsdk.Logs{} + f8 := &svcsdk.Logs{} if cr.Spec.ForProvider.Logs.Audit != nil { - f7.SetAudit(*cr.Spec.ForProvider.Logs.Audit) + f8.SetAudit(*cr.Spec.ForProvider.Logs.Audit) } if cr.Spec.ForProvider.Logs.General != nil { - f7.SetGeneral(*cr.Spec.ForProvider.Logs.General) + f8.SetGeneral(*cr.Spec.ForProvider.Logs.General) } - res.SetLogs(f7) + res.SetLogs(f8) } if cr.Spec.ForProvider.MaintenanceWindowStartTime != nil { - f8 := &svcsdk.WeeklyStartTime{} + f9 := &svcsdk.WeeklyStartTime{} if cr.Spec.ForProvider.MaintenanceWindowStartTime.DayOfWeek != nil { - f8.SetDayOfWeek(*cr.Spec.ForProvider.MaintenanceWindowStartTime.DayOfWeek) + f9.SetDayOfWeek(*cr.Spec.ForProvider.MaintenanceWindowStartTime.DayOfWeek) } if cr.Spec.ForProvider.MaintenanceWindowStartTime.TimeOfDay != nil { - f8.SetTimeOfDay(*cr.Spec.ForProvider.MaintenanceWindowStartTime.TimeOfDay) + f9.SetTimeOfDay(*cr.Spec.ForProvider.MaintenanceWindowStartTime.TimeOfDay) } if cr.Spec.ForProvider.MaintenanceWindowStartTime.TimeZone != nil { - f8.SetTimeZone(*cr.Spec.ForProvider.MaintenanceWindowStartTime.TimeZone) + f9.SetTimeZone(*cr.Spec.ForProvider.MaintenanceWindowStartTime.TimeZone) } - res.SetMaintenanceWindowStartTime(f8) + res.SetMaintenanceWindowStartTime(f9) } return res diff --git a/pkg/controller/mq/user/zz_conversions.go b/pkg/controller/mq/user/zz_conversions.go index 026d99bc9c..d7fb3a8a44 100644 --- a/pkg/controller/mq/user/zz_conversions.go +++ b/pkg/controller/mq/user/zz_conversions.go @@ -56,6 +56,11 @@ func GenerateUser(resp *svcsdk.DescribeUserResponse) *svcapitypes.User { } else { cr.Spec.ForProvider.Groups = nil } + if resp.ReplicationUser != nil { + cr.Spec.ForProvider.ReplicationUser = resp.ReplicationUser + } else { + cr.Spec.ForProvider.ReplicationUser = nil + } return cr } @@ -76,6 +81,9 @@ func GenerateCreateUserRequest(cr *svcapitypes.User) *svcsdk.CreateUserRequest { } res.SetGroups(f1) } + if cr.Spec.ForProvider.ReplicationUser != nil { + res.SetReplicationUser(*cr.Spec.ForProvider.ReplicationUser) + } return res } @@ -96,6 +104,9 @@ func GenerateUpdateUserRequest(cr *svcapitypes.User) *svcsdk.UpdateUserRequest { } res.SetGroups(f2) } + if cr.Spec.ForProvider.ReplicationUser != nil { + res.SetReplicationUser(*cr.Spec.ForProvider.ReplicationUser) + } return res } diff --git a/pkg/controller/mwaa/environment/zz_conversions.go 
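// ReplicationUser (a boolean flag on the MQ user shapes after this bump) is
// wired through all three generated paths: GenerateUser (DescribeUser -> spec)
// and the create/update request builders (spec -> request). Each site inlines
// the same guard, sketched here against a hypothetical request type:
type userRequest struct{ ReplicationUser *bool }

func applyReplicationUser(req *userRequest, spec *bool) {
	if spec == nil {
		return // field unset: leave it out of the request entirely
	}
	v := *spec
	req.ReplicationUser = &v
}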
b/pkg/controller/mwaa/environment/zz_conversions.go index 9debbbb89d..e9a5005f61 100644 --- a/pkg/controller/mwaa/environment/zz_conversions.go +++ b/pkg/controller/mwaa/environment/zz_conversions.go @@ -184,19 +184,29 @@ func GenerateEnvironment(resp *svcsdk.GetEnvironmentOutput) *svcapitypes.Environ } else { cr.Spec.ForProvider.Schedulers = nil } + if resp.Environment.StartupScriptS3ObjectVersion != nil { + cr.Spec.ForProvider.StartupScriptS3ObjectVersion = resp.Environment.StartupScriptS3ObjectVersion + } else { + cr.Spec.ForProvider.StartupScriptS3ObjectVersion = nil + } + if resp.Environment.StartupScriptS3Path != nil { + cr.Spec.ForProvider.StartupScriptS3Path = resp.Environment.StartupScriptS3Path + } else { + cr.Spec.ForProvider.StartupScriptS3Path = nil + } if resp.Environment.Status != nil { cr.Status.AtProvider.Status = resp.Environment.Status } else { cr.Status.AtProvider.Status = nil } if resp.Environment.Tags != nil { - f21 := map[string]*string{} - for f21key, f21valiter := range resp.Environment.Tags { - var f21val string - f21val = *f21valiter - f21[f21key] = &f21val + f23 := map[string]*string{} + for f23key, f23valiter := range resp.Environment.Tags { + var f23val string + f23val = *f23valiter + f23[f23key] = &f23val } - cr.Spec.ForProvider.Tags = f21 + cr.Spec.ForProvider.Tags = f23 } else { cr.Spec.ForProvider.Tags = nil } @@ -311,14 +321,20 @@ func GenerateCreateEnvironmentInput(cr *svcapitypes.Environment) *svcsdk.CreateE if cr.Spec.ForProvider.Schedulers != nil { res.SetSchedulers(*cr.Spec.ForProvider.Schedulers) } + if cr.Spec.ForProvider.StartupScriptS3ObjectVersion != nil { + res.SetStartupScriptS3ObjectVersion(*cr.Spec.ForProvider.StartupScriptS3ObjectVersion) + } + if cr.Spec.ForProvider.StartupScriptS3Path != nil { + res.SetStartupScriptS3Path(*cr.Spec.ForProvider.StartupScriptS3Path) + } if cr.Spec.ForProvider.Tags != nil { - f12 := map[string]*string{} - for f12key, f12valiter := range cr.Spec.ForProvider.Tags { - var f12val string - f12val = *f12valiter - f12[f12key] = &f12val + f14 := map[string]*string{} + for f14key, f14valiter := range cr.Spec.ForProvider.Tags { + var f14val string + f14val = *f14valiter + f14[f14key] = &f14val } - res.SetTags(f12) + res.SetTags(f14) } if cr.Spec.ForProvider.WebserverAccessMode != nil { res.SetWebserverAccessMode(*cr.Spec.ForProvider.WebserverAccessMode) @@ -427,6 +443,12 @@ func GenerateUpdateEnvironmentInput(cr *svcapitypes.Environment) *svcsdk.UpdateE if cr.Spec.ForProvider.Schedulers != nil { res.SetSchedulers(*cr.Spec.ForProvider.Schedulers) } + if cr.Spec.ForProvider.StartupScriptS3ObjectVersion != nil { + res.SetStartupScriptS3ObjectVersion(*cr.Spec.ForProvider.StartupScriptS3ObjectVersion) + } + if cr.Spec.ForProvider.StartupScriptS3Path != nil { + res.SetStartupScriptS3Path(*cr.Spec.ForProvider.StartupScriptS3Path) + } if cr.Spec.ForProvider.WebserverAccessMode != nil { res.SetWebserverAccessMode(*cr.Spec.ForProvider.WebserverAccessMode) } diff --git a/pkg/controller/neptune/dbcluster/zz_controller.go b/pkg/controller/neptune/dbcluster/zz_controller.go index efa71c72fc..b75d5e9e4a 100644 --- a/pkg/controller/neptune/dbcluster/zz_controller.go +++ b/pkg/controller/neptune/dbcluster/zz_controller.go @@ -294,6 +294,11 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E } else { cr.Spec.ForProvider.EngineVersion = nil } + if resp.DBCluster.GlobalClusterIdentifier != nil { + cr.Spec.ForProvider.GlobalClusterIdentifier = resp.DBCluster.GlobalClusterIdentifier + } else { + 
cr.Spec.ForProvider.GlobalClusterIdentifier = nil + } if resp.DBCluster.HostedZoneId != nil { cr.Status.AtProvider.HostedZoneID = resp.DBCluster.HostedZoneId } else { @@ -324,6 +329,52 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E } else { cr.Status.AtProvider.MultiAZ = nil } + if resp.DBCluster.PendingModifiedValues != nil { + f31 := &svcapitypes.ClusterPendingModifiedValues{} + if resp.DBCluster.PendingModifiedValues.AllocatedStorage != nil { + f31.AllocatedStorage = resp.DBCluster.PendingModifiedValues.AllocatedStorage + } + if resp.DBCluster.PendingModifiedValues.BackupRetentionPeriod != nil { + f31.BackupRetentionPeriod = resp.DBCluster.PendingModifiedValues.BackupRetentionPeriod + } + if resp.DBCluster.PendingModifiedValues.DBClusterIdentifier != nil { + f31.DBClusterIdentifier = resp.DBCluster.PendingModifiedValues.DBClusterIdentifier + } + if resp.DBCluster.PendingModifiedValues.EngineVersion != nil { + f31.EngineVersion = resp.DBCluster.PendingModifiedValues.EngineVersion + } + if resp.DBCluster.PendingModifiedValues.IAMDatabaseAuthenticationEnabled != nil { + f31.IAMDatabaseAuthenticationEnabled = resp.DBCluster.PendingModifiedValues.IAMDatabaseAuthenticationEnabled + } + if resp.DBCluster.PendingModifiedValues.Iops != nil { + f31.IOPS = resp.DBCluster.PendingModifiedValues.Iops + } + if resp.DBCluster.PendingModifiedValues.PendingCloudwatchLogsExports != nil { + f31f6 := &svcapitypes.PendingCloudwatchLogsExports{} + if resp.DBCluster.PendingModifiedValues.PendingCloudwatchLogsExports.LogTypesToDisable != nil { + f31f6f0 := []*string{} + for _, f31f6f0iter := range resp.DBCluster.PendingModifiedValues.PendingCloudwatchLogsExports.LogTypesToDisable { + var f31f6f0elem string + f31f6f0elem = *f31f6f0iter + f31f6f0 = append(f31f6f0, &f31f6f0elem) + } + f31f6.LogTypesToDisable = f31f6f0 + } + if resp.DBCluster.PendingModifiedValues.PendingCloudwatchLogsExports.LogTypesToEnable != nil { + f31f6f1 := []*string{} + for _, f31f6f1iter := range resp.DBCluster.PendingModifiedValues.PendingCloudwatchLogsExports.LogTypesToEnable { + var f31f6f1elem string + f31f6f1elem = *f31f6f1iter + f31f6f1 = append(f31f6f1, &f31f6f1elem) + } + f31f6.LogTypesToEnable = f31f6f1 + } + f31.PendingCloudwatchLogsExports = f31f6 + } + cr.Status.AtProvider.PendingModifiedValues = f31 + } else { + cr.Status.AtProvider.PendingModifiedValues = nil + } if resp.DBCluster.PercentProgress != nil { cr.Status.AtProvider.PercentProgress = resp.DBCluster.PercentProgress } else { @@ -345,13 +396,13 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E cr.Spec.ForProvider.PreferredMaintenanceWindow = nil } if resp.DBCluster.ReadReplicaIdentifiers != nil { - f34 := []*string{} - for _, f34iter := range resp.DBCluster.ReadReplicaIdentifiers { - var f34elem string - f34elem = *f34iter - f34 = append(f34, &f34elem) + f36 := []*string{} + for _, f36iter := range resp.DBCluster.ReadReplicaIdentifiers { + var f36elem string + f36elem = *f36iter + f36 = append(f36, &f36elem) } - cr.Status.AtProvider.ReadReplicaIdentifiers = f34 + cr.Status.AtProvider.ReadReplicaIdentifiers = f36 } else { cr.Status.AtProvider.ReadReplicaIdentifiers = nil } @@ -366,14 +417,14 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E cr.Spec.ForProvider.ReplicationSourceIdentifier = nil } if resp.DBCluster.ServerlessV2ScalingConfiguration != nil { - f37 := &svcapitypes.ServerlessV2ScalingConfiguration{} + f39 := &svcapitypes.ServerlessV2ScalingConfiguration{} 
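// PendingModifiedValues is pure observed state, so the whole tree -- including
// the PendingCloudwatchLogsExports log-type lists -- is deep-copied into
// Status.AtProvider with a nil check per level, in both the Create observer
// and GenerateDBCluster. A condensed sketch with stand-in types:
type pendingLogExports struct{ LogTypesToDisable, LogTypesToEnable []*string }

func copyPendingLogExports(disable, enable []*string) *pendingLogExports {
	out := &pendingLogExports{}
	for _, p := range disable {
		v := *p
		out.LogTypesToDisable = append(out.LogTypesToDisable, &v)
	}
	for _, p := range enable {
		v := *p
		out.LogTypesToEnable = append(out.LogTypesToEnable, &v)
	}
	return out
}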
if resp.DBCluster.ServerlessV2ScalingConfiguration.MaxCapacity != nil { - f37.MaxCapacity = resp.DBCluster.ServerlessV2ScalingConfiguration.MaxCapacity + f39.MaxCapacity = resp.DBCluster.ServerlessV2ScalingConfiguration.MaxCapacity } if resp.DBCluster.ServerlessV2ScalingConfiguration.MinCapacity != nil { - f37.MinCapacity = resp.DBCluster.ServerlessV2ScalingConfiguration.MinCapacity + f39.MinCapacity = resp.DBCluster.ServerlessV2ScalingConfiguration.MinCapacity } - cr.Spec.ForProvider.ServerlessV2ScalingConfiguration = f37 + cr.Spec.ForProvider.ServerlessV2ScalingConfiguration = f39 } else { cr.Spec.ForProvider.ServerlessV2ScalingConfiguration = nil } @@ -388,18 +439,18 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E cr.Spec.ForProvider.StorageEncrypted = nil } if resp.DBCluster.VpcSecurityGroups != nil { - f40 := []*svcapitypes.VPCSecurityGroupMembership{} - for _, f40iter := range resp.DBCluster.VpcSecurityGroups { - f40elem := &svcapitypes.VPCSecurityGroupMembership{} - if f40iter.Status != nil { - f40elem.Status = f40iter.Status + f42 := []*svcapitypes.VPCSecurityGroupMembership{} + for _, f42iter := range resp.DBCluster.VpcSecurityGroups { + f42elem := &svcapitypes.VPCSecurityGroupMembership{} + if f42iter.Status != nil { + f42elem.Status = f42iter.Status } - if f40iter.VpcSecurityGroupId != nil { - f40elem.VPCSecurityGroupID = f40iter.VpcSecurityGroupId + if f42iter.VpcSecurityGroupId != nil { + f42elem.VPCSecurityGroupID = f42iter.VpcSecurityGroupId } - f40 = append(f40, f40elem) + f42 = append(f42, f42elem) } - cr.Status.AtProvider.VPCSecurityGroups = f40 + cr.Status.AtProvider.VPCSecurityGroups = f42 } else { cr.Status.AtProvider.VPCSecurityGroups = nil } diff --git a/pkg/controller/neptune/dbcluster/zz_conversions.go b/pkg/controller/neptune/dbcluster/zz_conversions.go index a072ae2c63..191a8e266b 100644 --- a/pkg/controller/neptune/dbcluster/zz_conversions.go +++ b/pkg/controller/neptune/dbcluster/zz_conversions.go @@ -221,6 +221,11 @@ func GenerateDBCluster(resp *svcsdk.DescribeDBClustersOutput) *svcapitypes.DBClu } else { cr.Spec.ForProvider.EngineVersion = nil } + if elem.GlobalClusterIdentifier != nil { + cr.Spec.ForProvider.GlobalClusterIdentifier = elem.GlobalClusterIdentifier + } else { + cr.Spec.ForProvider.GlobalClusterIdentifier = nil + } if elem.HostedZoneId != nil { cr.Status.AtProvider.HostedZoneID = elem.HostedZoneId } else { @@ -251,6 +256,52 @@ func GenerateDBCluster(resp *svcsdk.DescribeDBClustersOutput) *svcapitypes.DBClu } else { cr.Status.AtProvider.MultiAZ = nil } + if elem.PendingModifiedValues != nil { + f31 := &svcapitypes.ClusterPendingModifiedValues{} + if elem.PendingModifiedValues.AllocatedStorage != nil { + f31.AllocatedStorage = elem.PendingModifiedValues.AllocatedStorage + } + if elem.PendingModifiedValues.BackupRetentionPeriod != nil { + f31.BackupRetentionPeriod = elem.PendingModifiedValues.BackupRetentionPeriod + } + if elem.PendingModifiedValues.DBClusterIdentifier != nil { + f31.DBClusterIdentifier = elem.PendingModifiedValues.DBClusterIdentifier + } + if elem.PendingModifiedValues.EngineVersion != nil { + f31.EngineVersion = elem.PendingModifiedValues.EngineVersion + } + if elem.PendingModifiedValues.IAMDatabaseAuthenticationEnabled != nil { + f31.IAMDatabaseAuthenticationEnabled = elem.PendingModifiedValues.IAMDatabaseAuthenticationEnabled + } + if elem.PendingModifiedValues.Iops != nil { + f31.IOPS = elem.PendingModifiedValues.Iops + } + if elem.PendingModifiedValues.PendingCloudwatchLogsExports != nil { 
+ f31f6 := &svcapitypes.PendingCloudwatchLogsExports{} + if elem.PendingModifiedValues.PendingCloudwatchLogsExports.LogTypesToDisable != nil { + f31f6f0 := []*string{} + for _, f31f6f0iter := range elem.PendingModifiedValues.PendingCloudwatchLogsExports.LogTypesToDisable { + var f31f6f0elem string + f31f6f0elem = *f31f6f0iter + f31f6f0 = append(f31f6f0, &f31f6f0elem) + } + f31f6.LogTypesToDisable = f31f6f0 + } + if elem.PendingModifiedValues.PendingCloudwatchLogsExports.LogTypesToEnable != nil { + f31f6f1 := []*string{} + for _, f31f6f1iter := range elem.PendingModifiedValues.PendingCloudwatchLogsExports.LogTypesToEnable { + var f31f6f1elem string + f31f6f1elem = *f31f6f1iter + f31f6f1 = append(f31f6f1, &f31f6f1elem) + } + f31f6.LogTypesToEnable = f31f6f1 + } + f31.PendingCloudwatchLogsExports = f31f6 + } + cr.Status.AtProvider.PendingModifiedValues = f31 + } else { + cr.Status.AtProvider.PendingModifiedValues = nil + } if elem.PercentProgress != nil { cr.Status.AtProvider.PercentProgress = elem.PercentProgress } else { @@ -272,13 +323,13 @@ func GenerateDBCluster(resp *svcsdk.DescribeDBClustersOutput) *svcapitypes.DBClu cr.Spec.ForProvider.PreferredMaintenanceWindow = nil } if elem.ReadReplicaIdentifiers != nil { - f34 := []*string{} - for _, f34iter := range elem.ReadReplicaIdentifiers { - var f34elem string - f34elem = *f34iter - f34 = append(f34, &f34elem) + f36 := []*string{} + for _, f36iter := range elem.ReadReplicaIdentifiers { + var f36elem string + f36elem = *f36iter + f36 = append(f36, &f36elem) } - cr.Status.AtProvider.ReadReplicaIdentifiers = f34 + cr.Status.AtProvider.ReadReplicaIdentifiers = f36 } else { cr.Status.AtProvider.ReadReplicaIdentifiers = nil } @@ -293,14 +344,14 @@ func GenerateDBCluster(resp *svcsdk.DescribeDBClustersOutput) *svcapitypes.DBClu cr.Spec.ForProvider.ReplicationSourceIdentifier = nil } if elem.ServerlessV2ScalingConfiguration != nil { - f37 := &svcapitypes.ServerlessV2ScalingConfiguration{} + f39 := &svcapitypes.ServerlessV2ScalingConfiguration{} if elem.ServerlessV2ScalingConfiguration.MaxCapacity != nil { - f37.MaxCapacity = elem.ServerlessV2ScalingConfiguration.MaxCapacity + f39.MaxCapacity = elem.ServerlessV2ScalingConfiguration.MaxCapacity } if elem.ServerlessV2ScalingConfiguration.MinCapacity != nil { - f37.MinCapacity = elem.ServerlessV2ScalingConfiguration.MinCapacity + f39.MinCapacity = elem.ServerlessV2ScalingConfiguration.MinCapacity } - cr.Spec.ForProvider.ServerlessV2ScalingConfiguration = f37 + cr.Spec.ForProvider.ServerlessV2ScalingConfiguration = f39 } else { cr.Spec.ForProvider.ServerlessV2ScalingConfiguration = nil } @@ -315,18 +366,18 @@ func GenerateDBCluster(resp *svcsdk.DescribeDBClustersOutput) *svcapitypes.DBClu cr.Spec.ForProvider.StorageEncrypted = nil } if elem.VpcSecurityGroups != nil { - f40 := []*svcapitypes.VPCSecurityGroupMembership{} - for _, f40iter := range elem.VpcSecurityGroups { - f40elem := &svcapitypes.VPCSecurityGroupMembership{} - if f40iter.Status != nil { - f40elem.Status = f40iter.Status + f42 := []*svcapitypes.VPCSecurityGroupMembership{} + for _, f42iter := range elem.VpcSecurityGroups { + f42elem := &svcapitypes.VPCSecurityGroupMembership{} + if f42iter.Status != nil { + f42elem.Status = f42iter.Status } - if f40iter.VpcSecurityGroupId != nil { - f40elem.VPCSecurityGroupID = f40iter.VpcSecurityGroupId + if f42iter.VpcSecurityGroupId != nil { + f42elem.VPCSecurityGroupID = f42iter.VpcSecurityGroupId } - f40 = append(f40, f40elem) + f42 = append(f42, f42elem) } - cr.Status.AtProvider.VPCSecurityGroups = 
f40 + cr.Status.AtProvider.VPCSecurityGroups = f42 } else { cr.Status.AtProvider.VPCSecurityGroups = nil } diff --git a/pkg/controller/opensearchservice/domain/zz_controller.go b/pkg/controller/opensearchservice/domain/zz_controller.go index 065c2b4363..cb55d8fe37 100644 --- a/pkg/controller/opensearchservice/domain/zz_controller.go +++ b/pkg/controller/opensearchservice/domain/zz_controller.go @@ -180,6 +180,9 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E } if resp.DomainStatus.AutoTuneOptions != nil { f4 := &svcapitypes.AutoTuneOptionsInput{} + if resp.DomainStatus.AutoTuneOptions.UseOffPeakWindow != nil { + f4.UseOffPeakWindow = resp.DomainStatus.AutoTuneOptions.UseOffPeakWindow + } cr.Spec.ForProvider.AutoTuneOptions = f4 } else { cr.Spec.ForProvider.AutoTuneOptions = nil @@ -220,6 +223,9 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E if resp.DomainStatus.ClusterConfig.InstanceType != nil { f6.InstanceType = resp.DomainStatus.ClusterConfig.InstanceType } + if resp.DomainStatus.ClusterConfig.MultiAZWithStandbyEnabled != nil { + f6.MultiAZWithStandbyEnabled = resp.DomainStatus.ClusterConfig.MultiAZWithStandbyEnabled + } if resp.DomainStatus.ClusterConfig.WarmCount != nil { f6.WarmCount = resp.DomainStatus.ClusterConfig.WarmCount } @@ -230,11 +236,11 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E f6.WarmType = resp.DomainStatus.ClusterConfig.WarmType } if resp.DomainStatus.ClusterConfig.ZoneAwarenessConfig != nil { - f6f9 := &svcapitypes.ZoneAwarenessConfig{} + f6f10 := &svcapitypes.ZoneAwarenessConfig{} if resp.DomainStatus.ClusterConfig.ZoneAwarenessConfig.AvailabilityZoneCount != nil { - f6f9.AvailabilityZoneCount = resp.DomainStatus.ClusterConfig.ZoneAwarenessConfig.AvailabilityZoneCount + f6f10.AvailabilityZoneCount = resp.DomainStatus.ClusterConfig.ZoneAwarenessConfig.AvailabilityZoneCount } - f6.ZoneAwarenessConfig = f6f9 + f6.ZoneAwarenessConfig = f6f10 } if resp.DomainStatus.ClusterConfig.ZoneAwarenessEnabled != nil { f6.ZoneAwarenessEnabled = resp.DomainStatus.ClusterConfig.ZoneAwarenessEnabled @@ -381,88 +387,120 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E } else { cr.Spec.ForProvider.NodeToNodeEncryptionOptions = nil } + if resp.DomainStatus.OffPeakWindowOptions != nil { + f20 := &svcapitypes.OffPeakWindowOptions{} + if resp.DomainStatus.OffPeakWindowOptions.Enabled != nil { + f20.Enabled = resp.DomainStatus.OffPeakWindowOptions.Enabled + } + if resp.DomainStatus.OffPeakWindowOptions.OffPeakWindow != nil { + f20f1 := &svcapitypes.OffPeakWindow{} + if resp.DomainStatus.OffPeakWindowOptions.OffPeakWindow.WindowStartTime != nil { + f20f1f0 := &svcapitypes.WindowStartTime{} + if resp.DomainStatus.OffPeakWindowOptions.OffPeakWindow.WindowStartTime.Hours != nil { + f20f1f0.Hours = resp.DomainStatus.OffPeakWindowOptions.OffPeakWindow.WindowStartTime.Hours + } + if resp.DomainStatus.OffPeakWindowOptions.OffPeakWindow.WindowStartTime.Minutes != nil { + f20f1f0.Minutes = resp.DomainStatus.OffPeakWindowOptions.OffPeakWindow.WindowStartTime.Minutes + } + f20f1.WindowStartTime = f20f1f0 + } + f20.OffPeakWindow = f20f1 + } + cr.Spec.ForProvider.OffPeakWindowOptions = f20 + } else { + cr.Spec.ForProvider.OffPeakWindowOptions = nil + } if resp.DomainStatus.Processing != nil { cr.Status.AtProvider.Processing = resp.DomainStatus.Processing } else { cr.Status.AtProvider.Processing = nil } if resp.DomainStatus.ServiceSoftwareOptions != nil { - f21 
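// The two new OpenSearch knobs surfaced by this bump
// (AutoTuneOptions.UseOffPeakWindow, ClusterConfig.MultiAZWithStandbyEnabled)
// are plain optional booleans and follow the usual rule: copy the observed
// value into the spec only when the API returned one. A one-line sketch of
// that rule, as a hypothetical helper:
func lateInitBool(spec **bool, observed *bool) {
	if observed != nil {
		*spec = observed
	}
}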
:= &svcapitypes.ServiceSoftwareOptions{} + f22 := &svcapitypes.ServiceSoftwareOptions{} if resp.DomainStatus.ServiceSoftwareOptions.AutomatedUpdateDate != nil { - f21.AutomatedUpdateDate = &metav1.Time{*resp.DomainStatus.ServiceSoftwareOptions.AutomatedUpdateDate} + f22.AutomatedUpdateDate = &metav1.Time{*resp.DomainStatus.ServiceSoftwareOptions.AutomatedUpdateDate} } if resp.DomainStatus.ServiceSoftwareOptions.Cancellable != nil { - f21.Cancellable = resp.DomainStatus.ServiceSoftwareOptions.Cancellable + f22.Cancellable = resp.DomainStatus.ServiceSoftwareOptions.Cancellable } if resp.DomainStatus.ServiceSoftwareOptions.CurrentVersion != nil { - f21.CurrentVersion = resp.DomainStatus.ServiceSoftwareOptions.CurrentVersion + f22.CurrentVersion = resp.DomainStatus.ServiceSoftwareOptions.CurrentVersion } if resp.DomainStatus.ServiceSoftwareOptions.Description != nil { - f21.Description = resp.DomainStatus.ServiceSoftwareOptions.Description + f22.Description = resp.DomainStatus.ServiceSoftwareOptions.Description } if resp.DomainStatus.ServiceSoftwareOptions.NewVersion != nil { - f21.NewVersion = resp.DomainStatus.ServiceSoftwareOptions.NewVersion + f22.NewVersion = resp.DomainStatus.ServiceSoftwareOptions.NewVersion } if resp.DomainStatus.ServiceSoftwareOptions.OptionalDeployment != nil { - f21.OptionalDeployment = resp.DomainStatus.ServiceSoftwareOptions.OptionalDeployment + f22.OptionalDeployment = resp.DomainStatus.ServiceSoftwareOptions.OptionalDeployment } if resp.DomainStatus.ServiceSoftwareOptions.UpdateAvailable != nil { - f21.UpdateAvailable = resp.DomainStatus.ServiceSoftwareOptions.UpdateAvailable + f22.UpdateAvailable = resp.DomainStatus.ServiceSoftwareOptions.UpdateAvailable } if resp.DomainStatus.ServiceSoftwareOptions.UpdateStatus != nil { - f21.UpdateStatus = resp.DomainStatus.ServiceSoftwareOptions.UpdateStatus + f22.UpdateStatus = resp.DomainStatus.ServiceSoftwareOptions.UpdateStatus } - cr.Status.AtProvider.ServiceSoftwareOptions = f21 + cr.Status.AtProvider.ServiceSoftwareOptions = f22 } else { cr.Status.AtProvider.ServiceSoftwareOptions = nil } if resp.DomainStatus.SnapshotOptions != nil { - f22 := &svcapitypes.SnapshotOptions{} + f23 := &svcapitypes.SnapshotOptions{} if resp.DomainStatus.SnapshotOptions.AutomatedSnapshotStartHour != nil { - f22.AutomatedSnapshotStartHour = resp.DomainStatus.SnapshotOptions.AutomatedSnapshotStartHour + f23.AutomatedSnapshotStartHour = resp.DomainStatus.SnapshotOptions.AutomatedSnapshotStartHour } - cr.Status.AtProvider.SnapshotOptions = f22 + cr.Status.AtProvider.SnapshotOptions = f23 } else { cr.Status.AtProvider.SnapshotOptions = nil } + if resp.DomainStatus.SoftwareUpdateOptions != nil { + f24 := &svcapitypes.SoftwareUpdateOptions{} + if resp.DomainStatus.SoftwareUpdateOptions.AutoSoftwareUpdateEnabled != nil { + f24.AutoSoftwareUpdateEnabled = resp.DomainStatus.SoftwareUpdateOptions.AutoSoftwareUpdateEnabled + } + cr.Spec.ForProvider.SoftwareUpdateOptions = f24 + } else { + cr.Spec.ForProvider.SoftwareUpdateOptions = nil + } if resp.DomainStatus.UpgradeProcessing != nil { cr.Status.AtProvider.UpgradeProcessing = resp.DomainStatus.UpgradeProcessing } else { cr.Status.AtProvider.UpgradeProcessing = nil } if resp.DomainStatus.VPCOptions != nil { - f24 := &svcapitypes.VPCDerivedInfo{} + f26 := &svcapitypes.VPCDerivedInfo{} if resp.DomainStatus.VPCOptions.AvailabilityZones != nil { - f24f0 := []*string{} - for _, f24f0iter := range resp.DomainStatus.VPCOptions.AvailabilityZones { - var f24f0elem string - f24f0elem = *f24f0iter - f24f0 = 
append(f24f0, &f24f0elem) + f26f0 := []*string{} + for _, f26f0iter := range resp.DomainStatus.VPCOptions.AvailabilityZones { + var f26f0elem string + f26f0elem = *f26f0iter + f26f0 = append(f26f0, &f26f0elem) } - f24.AvailabilityZones = f24f0 + f26.AvailabilityZones = f26f0 } if resp.DomainStatus.VPCOptions.SecurityGroupIds != nil { - f24f1 := []*string{} - for _, f24f1iter := range resp.DomainStatus.VPCOptions.SecurityGroupIds { - var f24f1elem string - f24f1elem = *f24f1iter - f24f1 = append(f24f1, &f24f1elem) + f26f1 := []*string{} + for _, f26f1iter := range resp.DomainStatus.VPCOptions.SecurityGroupIds { + var f26f1elem string + f26f1elem = *f26f1iter + f26f1 = append(f26f1, &f26f1elem) } - f24.SecurityGroupIDs = f24f1 + f26.SecurityGroupIDs = f26f1 } if resp.DomainStatus.VPCOptions.SubnetIds != nil { - f24f2 := []*string{} - for _, f24f2iter := range resp.DomainStatus.VPCOptions.SubnetIds { - var f24f2elem string - f24f2elem = *f24f2iter - f24f2 = append(f24f2, &f24f2elem) + f26f2 := []*string{} + for _, f26f2iter := range resp.DomainStatus.VPCOptions.SubnetIds { + var f26f2elem string + f26f2elem = *f26f2iter + f26f2 = append(f26f2, &f26f2elem) } - f24.SubnetIDs = f24f2 + f26.SubnetIDs = f26f2 } if resp.DomainStatus.VPCOptions.VPCId != nil { - f24.VPCID = resp.DomainStatus.VPCOptions.VPCId + f26.VPCID = resp.DomainStatus.VPCOptions.VPCId } - cr.Status.AtProvider.VPCOptions = f24 + cr.Status.AtProvider.VPCOptions = f26 } else { cr.Status.AtProvider.VPCOptions = nil } diff --git a/pkg/controller/opensearchservice/domain/zz_conversions.go b/pkg/controller/opensearchservice/domain/zz_conversions.go index 4a004f4962..55e3484196 100644 --- a/pkg/controller/opensearchservice/domain/zz_conversions.go +++ b/pkg/controller/opensearchservice/domain/zz_conversions.go @@ -105,6 +105,9 @@ func GenerateDomain(resp *svcsdk.DescribeDomainOutput) *svcapitypes.Domain { } if resp.DomainStatus.AutoTuneOptions != nil { f4 := &svcapitypes.AutoTuneOptionsInput{} + if resp.DomainStatus.AutoTuneOptions.UseOffPeakWindow != nil { + f4.UseOffPeakWindow = resp.DomainStatus.AutoTuneOptions.UseOffPeakWindow + } cr.Spec.ForProvider.AutoTuneOptions = f4 } else { cr.Spec.ForProvider.AutoTuneOptions = nil @@ -145,6 +148,9 @@ func GenerateDomain(resp *svcsdk.DescribeDomainOutput) *svcapitypes.Domain { if resp.DomainStatus.ClusterConfig.InstanceType != nil { f6.InstanceType = resp.DomainStatus.ClusterConfig.InstanceType } + if resp.DomainStatus.ClusterConfig.MultiAZWithStandbyEnabled != nil { + f6.MultiAZWithStandbyEnabled = resp.DomainStatus.ClusterConfig.MultiAZWithStandbyEnabled + } if resp.DomainStatus.ClusterConfig.WarmCount != nil { f6.WarmCount = resp.DomainStatus.ClusterConfig.WarmCount } @@ -155,11 +161,11 @@ func GenerateDomain(resp *svcsdk.DescribeDomainOutput) *svcapitypes.Domain { f6.WarmType = resp.DomainStatus.ClusterConfig.WarmType } if resp.DomainStatus.ClusterConfig.ZoneAwarenessConfig != nil { - f6f9 := &svcapitypes.ZoneAwarenessConfig{} + f6f10 := &svcapitypes.ZoneAwarenessConfig{} if resp.DomainStatus.ClusterConfig.ZoneAwarenessConfig.AvailabilityZoneCount != nil { - f6f9.AvailabilityZoneCount = resp.DomainStatus.ClusterConfig.ZoneAwarenessConfig.AvailabilityZoneCount + f6f10.AvailabilityZoneCount = resp.DomainStatus.ClusterConfig.ZoneAwarenessConfig.AvailabilityZoneCount } - f6.ZoneAwarenessConfig = f6f9 + f6.ZoneAwarenessConfig = f6f10 } if resp.DomainStatus.ClusterConfig.ZoneAwarenessEnabled != nil { f6.ZoneAwarenessEnabled = resp.DomainStatus.ClusterConfig.ZoneAwarenessEnabled @@ -306,88 
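The generated Create hook above copies each field of the DescribeDomain response onto the managed resource only through an explicit nil guard, so fields absent from the response never dereference a nil pointer and simply stay nil on the CR. A minimal, self-contained sketch of that idiom; sdkOptions and apiOptions are hypothetical stand-ins, not the provider's types:

package main

import "fmt"

// Hypothetical SDK response fragment and matching CRD fragment; the
// generated hooks apply the same guard to every field.
type sdkOptions struct{ UseOffPeakWindow *bool }
type apiOptions struct{ UseOffPeakWindow *bool }

func copyOptions(in *sdkOptions) *apiOptions {
	if in == nil {
		return nil // absent in the response maps to nil on the CR
	}
	out := &apiOptions{}
	if in.UseOffPeakWindow != nil {
		out.UseOffPeakWindow = in.UseOffPeakWindow
	}
	return out
}

func main() {
	on := true
	fmt.Println(*copyOptions(&sdkOptions{UseOffPeakWindow: &on}).UseOffPeakWindow) // true
	fmt.Println(copyOptions(nil) == nil)                                           // true
}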
diff --git a/pkg/controller/opensearchservice/domain/zz_conversions.go b/pkg/controller/opensearchservice/domain/zz_conversions.go
index 4a004f4962..55e3484196 100644
--- a/pkg/controller/opensearchservice/domain/zz_conversions.go
+++ b/pkg/controller/opensearchservice/domain/zz_conversions.go
@@ -105,6 +105,9 @@ func GenerateDomain(resp *svcsdk.DescribeDomainOutput) *svcapitypes.Domain {
 	}
 	if resp.DomainStatus.AutoTuneOptions != nil {
 		f4 := &svcapitypes.AutoTuneOptionsInput{}
+		if resp.DomainStatus.AutoTuneOptions.UseOffPeakWindow != nil {
+			f4.UseOffPeakWindow = resp.DomainStatus.AutoTuneOptions.UseOffPeakWindow
+		}
 		cr.Spec.ForProvider.AutoTuneOptions = f4
 	} else {
 		cr.Spec.ForProvider.AutoTuneOptions = nil
@@ -145,6 +148,9 @@ func GenerateDomain(resp *svcsdk.DescribeDomainOutput) *svcapitypes.Domain {
 		if resp.DomainStatus.ClusterConfig.InstanceType != nil {
 			f6.InstanceType = resp.DomainStatus.ClusterConfig.InstanceType
 		}
+		if resp.DomainStatus.ClusterConfig.MultiAZWithStandbyEnabled != nil {
+			f6.MultiAZWithStandbyEnabled = resp.DomainStatus.ClusterConfig.MultiAZWithStandbyEnabled
+		}
 		if resp.DomainStatus.ClusterConfig.WarmCount != nil {
 			f6.WarmCount = resp.DomainStatus.ClusterConfig.WarmCount
 		}
@@ -155,11 +161,11 @@ func GenerateDomain(resp *svcsdk.DescribeDomainOutput) *svcapitypes.Domain {
 			f6.WarmType = resp.DomainStatus.ClusterConfig.WarmType
 		}
 		if resp.DomainStatus.ClusterConfig.ZoneAwarenessConfig != nil {
-			f6f9 := &svcapitypes.ZoneAwarenessConfig{}
+			f6f10 := &svcapitypes.ZoneAwarenessConfig{}
 			if resp.DomainStatus.ClusterConfig.ZoneAwarenessConfig.AvailabilityZoneCount != nil {
-				f6f9.AvailabilityZoneCount = resp.DomainStatus.ClusterConfig.ZoneAwarenessConfig.AvailabilityZoneCount
+				f6f10.AvailabilityZoneCount = resp.DomainStatus.ClusterConfig.ZoneAwarenessConfig.AvailabilityZoneCount
 			}
-			f6.ZoneAwarenessConfig = f6f9
+			f6.ZoneAwarenessConfig = f6f10
 		}
 		if resp.DomainStatus.ClusterConfig.ZoneAwarenessEnabled != nil {
 			f6.ZoneAwarenessEnabled = resp.DomainStatus.ClusterConfig.ZoneAwarenessEnabled
@@ -306,88 +312,120 @@ func GenerateDomain(resp *svcsdk.DescribeDomainOutput) *svcapitypes.Domain {
 	} else {
 		cr.Spec.ForProvider.NodeToNodeEncryptionOptions = nil
 	}
+	if resp.DomainStatus.OffPeakWindowOptions != nil {
+		f20 := &svcapitypes.OffPeakWindowOptions{}
+		if resp.DomainStatus.OffPeakWindowOptions.Enabled != nil {
+			f20.Enabled = resp.DomainStatus.OffPeakWindowOptions.Enabled
+		}
+		if resp.DomainStatus.OffPeakWindowOptions.OffPeakWindow != nil {
+			f20f1 := &svcapitypes.OffPeakWindow{}
+			if resp.DomainStatus.OffPeakWindowOptions.OffPeakWindow.WindowStartTime != nil {
+				f20f1f0 := &svcapitypes.WindowStartTime{}
+				if resp.DomainStatus.OffPeakWindowOptions.OffPeakWindow.WindowStartTime.Hours != nil {
+					f20f1f0.Hours = resp.DomainStatus.OffPeakWindowOptions.OffPeakWindow.WindowStartTime.Hours
+				}
+				if resp.DomainStatus.OffPeakWindowOptions.OffPeakWindow.WindowStartTime.Minutes != nil {
+					f20f1f0.Minutes = resp.DomainStatus.OffPeakWindowOptions.OffPeakWindow.WindowStartTime.Minutes
+				}
+				f20f1.WindowStartTime = f20f1f0
+			}
+			f20.OffPeakWindow = f20f1
+		}
+		cr.Spec.ForProvider.OffPeakWindowOptions = f20
+	} else {
+		cr.Spec.ForProvider.OffPeakWindowOptions = nil
+	}
 	if resp.DomainStatus.Processing != nil {
 		cr.Status.AtProvider.Processing = resp.DomainStatus.Processing
 	} else {
 		cr.Status.AtProvider.Processing = nil
 	}
 	if resp.DomainStatus.ServiceSoftwareOptions != nil {
-		f21 := &svcapitypes.ServiceSoftwareOptions{}
+		f22 := &svcapitypes.ServiceSoftwareOptions{}
 		if resp.DomainStatus.ServiceSoftwareOptions.AutomatedUpdateDate != nil {
-			f21.AutomatedUpdateDate = &metav1.Time{*resp.DomainStatus.ServiceSoftwareOptions.AutomatedUpdateDate}
+			f22.AutomatedUpdateDate = &metav1.Time{*resp.DomainStatus.ServiceSoftwareOptions.AutomatedUpdateDate}
 		}
 		if resp.DomainStatus.ServiceSoftwareOptions.Cancellable != nil {
-			f21.Cancellable = resp.DomainStatus.ServiceSoftwareOptions.Cancellable
+			f22.Cancellable = resp.DomainStatus.ServiceSoftwareOptions.Cancellable
 		}
 		if resp.DomainStatus.ServiceSoftwareOptions.CurrentVersion != nil {
-			f21.CurrentVersion = resp.DomainStatus.ServiceSoftwareOptions.CurrentVersion
+			f22.CurrentVersion = resp.DomainStatus.ServiceSoftwareOptions.CurrentVersion
 		}
 		if resp.DomainStatus.ServiceSoftwareOptions.Description != nil {
-			f21.Description = resp.DomainStatus.ServiceSoftwareOptions.Description
+			f22.Description = resp.DomainStatus.ServiceSoftwareOptions.Description
 		}
 		if resp.DomainStatus.ServiceSoftwareOptions.NewVersion != nil {
-			f21.NewVersion = resp.DomainStatus.ServiceSoftwareOptions.NewVersion
+			f22.NewVersion = resp.DomainStatus.ServiceSoftwareOptions.NewVersion
 		}
 		if resp.DomainStatus.ServiceSoftwareOptions.OptionalDeployment != nil {
-			f21.OptionalDeployment = resp.DomainStatus.ServiceSoftwareOptions.OptionalDeployment
+			f22.OptionalDeployment = resp.DomainStatus.ServiceSoftwareOptions.OptionalDeployment
 		}
 		if resp.DomainStatus.ServiceSoftwareOptions.UpdateAvailable != nil {
-			f21.UpdateAvailable = resp.DomainStatus.ServiceSoftwareOptions.UpdateAvailable
+			f22.UpdateAvailable = resp.DomainStatus.ServiceSoftwareOptions.UpdateAvailable
 		}
 		if resp.DomainStatus.ServiceSoftwareOptions.UpdateStatus != nil {
-			f21.UpdateStatus = resp.DomainStatus.ServiceSoftwareOptions.UpdateStatus
+			f22.UpdateStatus = resp.DomainStatus.ServiceSoftwareOptions.UpdateStatus
 		}
-		cr.Status.AtProvider.ServiceSoftwareOptions = f21
+		cr.Status.AtProvider.ServiceSoftwareOptions = f22
 	} else {
 		cr.Status.AtProvider.ServiceSoftwareOptions = nil
 	}
 	if resp.DomainStatus.SnapshotOptions != nil {
-		f22 := &svcapitypes.SnapshotOptions{}
+		f23 := &svcapitypes.SnapshotOptions{}
 		if resp.DomainStatus.SnapshotOptions.AutomatedSnapshotStartHour != nil {
-			f22.AutomatedSnapshotStartHour = resp.DomainStatus.SnapshotOptions.AutomatedSnapshotStartHour
+			f23.AutomatedSnapshotStartHour = resp.DomainStatus.SnapshotOptions.AutomatedSnapshotStartHour
 		}
-		cr.Status.AtProvider.SnapshotOptions = f22
+		cr.Status.AtProvider.SnapshotOptions = f23
 	} else {
 		cr.Status.AtProvider.SnapshotOptions = nil
 	}
+	if resp.DomainStatus.SoftwareUpdateOptions != nil {
+		f24 := &svcapitypes.SoftwareUpdateOptions{}
+		if resp.DomainStatus.SoftwareUpdateOptions.AutoSoftwareUpdateEnabled != nil {
+			f24.AutoSoftwareUpdateEnabled = resp.DomainStatus.SoftwareUpdateOptions.AutoSoftwareUpdateEnabled
+		}
+		cr.Spec.ForProvider.SoftwareUpdateOptions = f24
+	} else {
+		cr.Spec.ForProvider.SoftwareUpdateOptions = nil
+	}
 	if resp.DomainStatus.UpgradeProcessing != nil {
 		cr.Status.AtProvider.UpgradeProcessing = resp.DomainStatus.UpgradeProcessing
 	} else {
 		cr.Status.AtProvider.UpgradeProcessing = nil
 	}
 	if resp.DomainStatus.VPCOptions != nil {
-		f24 := &svcapitypes.VPCDerivedInfo{}
+		f26 := &svcapitypes.VPCDerivedInfo{}
 		if resp.DomainStatus.VPCOptions.AvailabilityZones != nil {
-			f24f0 := []*string{}
-			for _, f24f0iter := range resp.DomainStatus.VPCOptions.AvailabilityZones {
-				var f24f0elem string
-				f24f0elem = *f24f0iter
-				f24f0 = append(f24f0, &f24f0elem)
+			f26f0 := []*string{}
+			for _, f26f0iter := range resp.DomainStatus.VPCOptions.AvailabilityZones {
+				var f26f0elem string
+				f26f0elem = *f26f0iter
+				f26f0 = append(f26f0, &f26f0elem)
 			}
-			f24.AvailabilityZones = f24f0
+			f26.AvailabilityZones = f26f0
 		}
 		if resp.DomainStatus.VPCOptions.SecurityGroupIds != nil {
-			f24f1 := []*string{}
-			for _, f24f1iter := range resp.DomainStatus.VPCOptions.SecurityGroupIds {
-				var f24f1elem string
-				f24f1elem = *f24f1iter
-				f24f1 = append(f24f1, &f24f1elem)
+			f26f1 := []*string{}
+			for _, f26f1iter := range resp.DomainStatus.VPCOptions.SecurityGroupIds {
+				var f26f1elem string
+				f26f1elem = *f26f1iter
+				f26f1 = append(f26f1, &f26f1elem)
 			}
-			f24.SecurityGroupIDs = f24f1
+			f26.SecurityGroupIDs = f26f1
 		}
 		if resp.DomainStatus.VPCOptions.SubnetIds != nil {
-			f24f2 := []*string{}
-			for _, f24f2iter := range resp.DomainStatus.VPCOptions.SubnetIds {
-				var f24f2elem string
-				f24f2elem = *f24f2iter
-				f24f2 = append(f24f2, &f24f2elem)
+			f26f2 := []*string{}
+			for _, f26f2iter := range resp.DomainStatus.VPCOptions.SubnetIds {
+				var f26f2elem string
+				f26f2elem = *f26f2iter
+				f26f2 = append(f26f2, &f26f2elem)
 			}
-			f24.SubnetIDs = f24f2
+			f26.SubnetIDs = f26f2
 		}
 		if resp.DomainStatus.VPCOptions.VPCId != nil {
-			f24.VPCID = resp.DomainStatus.VPCOptions.VPCId
+			f26.VPCID = resp.DomainStatus.VPCOptions.VPCId
 		}
-		cr.Status.AtProvider.VPCOptions = f24
+		cr.Status.AtProvider.VPCOptions = f26
 	} else {
 		cr.Status.AtProvider.VPCOptions = nil
 	}
@@ -498,6 +536,9 @@ func GenerateCreateDomainInput(cr *svcapitypes.Domain) *svcsdk.CreateDomainInput
 			}
 			f3.SetMaintenanceSchedules(f3f1)
 		}
+		if cr.Spec.ForProvider.AutoTuneOptions.UseOffPeakWindow != nil {
+			f3.SetUseOffPeakWindow(*cr.Spec.ForProvider.AutoTuneOptions.UseOffPeakWindow)
+		}
 		res.SetAutoTuneOptions(f3)
 	}
 	if cr.Spec.ForProvider.ClusterConfig != nil {
@@ -524,6 +565,9 @@ func GenerateCreateDomainInput(cr *svcapitypes.Domain) *svcsdk.CreateDomainInput
 		if cr.Spec.ForProvider.ClusterConfig.InstanceType != nil {
 			f4.SetInstanceType(*cr.Spec.ForProvider.ClusterConfig.InstanceType)
 		}
+		if cr.Spec.ForProvider.ClusterConfig.MultiAZWithStandbyEnabled != nil {
+			f4.SetMultiAZWithStandbyEnabled(*cr.Spec.ForProvider.ClusterConfig.MultiAZWithStandbyEnabled)
+		}
 		if cr.Spec.ForProvider.ClusterConfig.WarmCount != nil {
 			f4.SetWarmCount(*cr.Spec.ForProvider.ClusterConfig.WarmCount)
 		}
@@ -534,11 +578,11 @@ func GenerateCreateDomainInput(cr *svcapitypes.Domain) *svcsdk.CreateDomainInput
 			f4.SetWarmType(*cr.Spec.ForProvider.ClusterConfig.WarmType)
 		}
 		if cr.Spec.ForProvider.ClusterConfig.ZoneAwarenessConfig != nil {
-			f4f9 := &svcsdk.ZoneAwarenessConfig{}
+			f4f10 := &svcsdk.ZoneAwarenessConfig{}
 			if cr.Spec.ForProvider.ClusterConfig.ZoneAwarenessConfig.AvailabilityZoneCount != nil {
-				f4f9.SetAvailabilityZoneCount(*cr.Spec.ForProvider.ClusterConfig.ZoneAwarenessConfig.AvailabilityZoneCount)
+				f4f10.SetAvailabilityZoneCount(*cr.Spec.ForProvider.ClusterConfig.ZoneAwarenessConfig.AvailabilityZoneCount)
 			}
-			f4.SetZoneAwarenessConfig(f4f9)
+			f4.SetZoneAwarenessConfig(f4f10)
 		}
 		if cr.Spec.ForProvider.ClusterConfig.ZoneAwarenessEnabled != nil {
 			f4.SetZoneAwarenessEnabled(*cr.Spec.ForProvider.ClusterConfig.ZoneAwarenessEnabled)
@@ -623,19 +667,47 @@ func GenerateCreateDomainInput(cr *svcapitypes.Domain) *svcsdk.CreateDomainInput
 		}
 		res.SetNodeToNodeEncryptionOptions(f10)
 	}
+	if cr.Spec.ForProvider.OffPeakWindowOptions != nil {
+		f11 := &svcsdk.OffPeakWindowOptions{}
+		if cr.Spec.ForProvider.OffPeakWindowOptions.Enabled != nil {
+			f11.SetEnabled(*cr.Spec.ForProvider.OffPeakWindowOptions.Enabled)
+		}
+		if cr.Spec.ForProvider.OffPeakWindowOptions.OffPeakWindow != nil {
+			f11f1 := &svcsdk.OffPeakWindow{}
+			if cr.Spec.ForProvider.OffPeakWindowOptions.OffPeakWindow.WindowStartTime != nil {
+				f11f1f0 := &svcsdk.WindowStartTime{}
+				if cr.Spec.ForProvider.OffPeakWindowOptions.OffPeakWindow.WindowStartTime.Hours != nil {
+					f11f1f0.SetHours(*cr.Spec.ForProvider.OffPeakWindowOptions.OffPeakWindow.WindowStartTime.Hours)
+				}
+				if cr.Spec.ForProvider.OffPeakWindowOptions.OffPeakWindow.WindowStartTime.Minutes != nil {
+					f11f1f0.SetMinutes(*cr.Spec.ForProvider.OffPeakWindowOptions.OffPeakWindow.WindowStartTime.Minutes)
+				}
+				f11f1.SetWindowStartTime(f11f1f0)
+			}
+			f11.SetOffPeakWindow(f11f1)
+		}
+		res.SetOffPeakWindowOptions(f11)
+	}
+	if cr.Spec.ForProvider.SoftwareUpdateOptions != nil {
+		f12 := &svcsdk.SoftwareUpdateOptions{}
+		if cr.Spec.ForProvider.SoftwareUpdateOptions.AutoSoftwareUpdateEnabled != nil {
+			f12.SetAutoSoftwareUpdateEnabled(*cr.Spec.ForProvider.SoftwareUpdateOptions.AutoSoftwareUpdateEnabled)
+		}
+		res.SetSoftwareUpdateOptions(f12)
+	}
 	if cr.Spec.ForProvider.Tags != nil {
-		f11 := []*svcsdk.Tag{}
-		for _, f11iter := range cr.Spec.ForProvider.Tags {
-			f11elem := &svcsdk.Tag{}
-			if f11iter.Key != nil {
-				f11elem.SetKey(*f11iter.Key)
+		f13 := []*svcsdk.Tag{}
+		for _, f13iter := range cr.Spec.ForProvider.Tags {
+			f13elem := &svcsdk.Tag{}
+			if f13iter.Key != nil {
+				f13elem.SetKey(*f13iter.Key)
 			}
-			if f11iter.Value != nil {
-				f11elem.SetValue(*f11iter.Value)
+			if f13iter.Value != nil {
+				f13elem.SetValue(*f13iter.Value)
 			}
-			f11 = append(f11, f11elem)
+			f13 = append(f13, f13elem)
 		}
-		res.SetTagList(f11)
+		res.SetTagList(f13)
 	}
 	return res
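GenerateCreateDomainInput goes the other way: it dereferences the CR's pointer fields and feeds plain values to the SDK's generated Set* methods, again only behind a nil check. A runnable sketch of that builder pattern; windowStartTime is an illustrative stand-in for an aws-sdk-go v1 input struct, not the real type:

package main

import "fmt"

// Stand-in for an SDK input type whose Set* methods take values, not pointers.
type windowStartTime struct {
	Hours   *int64
	Minutes *int64
}

func (w *windowStartTime) SetHours(v int64) *windowStartTime   { w.Hours = &v; return w }
func (w *windowStartTime) SetMinutes(v int64) *windowStartTime { w.Minutes = &v; return w }

func main() {
	var hours *int64 // unset on the CR: no Set call, no nil dereference
	minutes := int64(30)
	minutesPtr := &minutes

	w := &windowStartTime{}
	if hours != nil {
		w.SetHours(*hours)
	}
	if minutesPtr != nil {
		w.SetMinutes(*minutesPtr)
	}
	fmt.Println(w.Hours == nil, *w.Minutes) // true 30
}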
diff --git a/pkg/controller/ram/resourceshare/zz_conversions.go b/pkg/controller/ram/resourceshare/zz_conversions.go
index f9a54b32ad..fc8011f80f 100644
--- a/pkg/controller/ram/resourceshare/zz_conversions.go
+++ b/pkg/controller/ram/resourceshare/zz_conversions.go
@@ -122,19 +122,28 @@ func GenerateCreateResourceShareInput(cr *svcapitypes.ResourceShare) *svcsdk.Cre
 		}
 		res.SetResourceArns(f5)
 	}
+	if cr.Spec.ForProvider.Sources != nil {
+		f6 := []*string{}
+		for _, f6iter := range cr.Spec.ForProvider.Sources {
+			var f6elem string
+			f6elem = *f6iter
+			f6 = append(f6, &f6elem)
+		}
+		res.SetSources(f6)
+	}
 	if cr.Spec.ForProvider.Tags != nil {
-		f6 := []*svcsdk.Tag{}
-		for _, f6iter := range cr.Spec.ForProvider.Tags {
-			f6elem := &svcsdk.Tag{}
-			if f6iter.Key != nil {
-				f6elem.SetKey(*f6iter.Key)
+		f7 := []*svcsdk.Tag{}
+		for _, f7iter := range cr.Spec.ForProvider.Tags {
+			f7elem := &svcsdk.Tag{}
+			if f7iter.Key != nil {
+				f7elem.SetKey(*f7iter.Key)
 			}
-			if f6iter.Value != nil {
-				f6elem.SetValue(*f6iter.Value)
+			if f7iter.Value != nil {
+				f7elem.SetValue(*f7iter.Value)
 			}
-			f6 = append(f6, f6elem)
+			f7 = append(f7, f7elem)
 		}
-		res.SetTags(f6)
+		res.SetTags(f7)
 	}
 	return res
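The new Sources block repeats the generator's deep-copy idiom for []*string: each pointed-to value is copied into a fresh variable before its address is appended, so the SDK input never aliases memory owned by the CR. A sketch under that reading; deepCopy is an illustrative helper, not a provider function:

package main

import "fmt"

// Deep-copies a []*string the way the generated loops above do: copy the
// value into a per-iteration variable, then append that variable's address.
func deepCopy(in []*string) []*string {
	if in == nil {
		return nil
	}
	out := []*string{}
	for _, iter := range in {
		elem := *iter // fresh copy per element
		out = append(out, &elem)
	}
	return out
}

func main() {
	a := "arn:aws:ram:example" // placeholder value
	src := []*string{&a}
	dst := deepCopy(src)
	a = "changed"
	fmt.Println(*dst[0]) // still "arn:aws:ram:example": no aliasing
}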
diff --git a/pkg/controller/rds/dbcluster/zz_controller.go b/pkg/controller/rds/dbcluster/zz_controller.go
index 579ce502c6..29bb96bb9c 100644
--- a/pkg/controller/rds/dbcluster/zz_controller.go
+++ b/pkg/controller/rds/dbcluster/zz_controller.go
@@ -328,6 +328,18 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E
 		f30 := []*svcapitypes.DomainMembership{}
 		for _, f30iter := range resp.DBCluster.DomainMemberships {
 			f30elem := &svcapitypes.DomainMembership{}
+			if f30iter.AuthSecretArn != nil {
+				f30elem.AuthSecretARN = f30iter.AuthSecretArn
+			}
+			if f30iter.DnsIps != nil {
+				f30elemf1 := []*string{}
+				for _, f30elemf1iter := range f30iter.DnsIps {
+					var f30elemf1elem string
+					f30elemf1elem = *f30elemf1iter
+					f30elemf1 = append(f30elemf1, &f30elemf1elem)
+				}
+				f30elem.DNSIPs = f30elemf1
+			}
 			if f30iter.Domain != nil {
 				f30elem.Domain = f30iter.Domain
 			}
@@ -337,6 +349,9 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E
 			if f30iter.IAMRoleName != nil {
 				f30elem.IAMRoleName = f30iter.IAMRoleName
 			}
+			if f30iter.OU != nil {
+				f30elem.OU = f30iter.OU
+			}
 			if f30iter.Status != nil {
 				f30elem.Status = f30iter.Status
 			}
@@ -412,6 +427,11 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E
 	} else {
 		cr.Status.AtProvider.IAMDatabaseAuthenticationEnabled = nil
 	}
+	if resp.DBCluster.IOOptimizedNextAllowedModificationTime != nil {
+		cr.Status.AtProvider.IOOptimizedNextAllowedModificationTime = &metav1.Time{*resp.DBCluster.IOOptimizedNextAllowedModificationTime}
+	} else {
+		cr.Status.AtProvider.IOOptimizedNextAllowedModificationTime = nil
+	}
 	if resp.DBCluster.Iops != nil {
 		cr.Spec.ForProvider.IOPS = resp.DBCluster.Iops
 	} else {
@@ -427,18 +447,23 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E
 	} else {
 		cr.Status.AtProvider.LatestRestorableTime = nil
 	}
+	if resp.DBCluster.LocalWriteForwardingStatus != nil {
+		cr.Status.AtProvider.LocalWriteForwardingStatus = resp.DBCluster.LocalWriteForwardingStatus
+	} else {
+		cr.Status.AtProvider.LocalWriteForwardingStatus = nil
+	}
 	if resp.DBCluster.MasterUserSecret != nil {
-		f46 := &svcapitypes.MasterUserSecret{}
+		f48 := &svcapitypes.MasterUserSecret{}
 		if resp.DBCluster.MasterUserSecret.KmsKeyId != nil {
-			f46.KMSKeyID = resp.DBCluster.MasterUserSecret.KmsKeyId
+			f48.KMSKeyID = resp.DBCluster.MasterUserSecret.KmsKeyId
 		}
 		if resp.DBCluster.MasterUserSecret.SecretArn != nil {
-			f46.SecretARN = resp.DBCluster.MasterUserSecret.SecretArn
+			f48.SecretARN = resp.DBCluster.MasterUserSecret.SecretArn
 		}
 		if resp.DBCluster.MasterUserSecret.SecretStatus != nil {
-			f46.SecretStatus = resp.DBCluster.MasterUserSecret.SecretStatus
+			f48.SecretStatus = resp.DBCluster.MasterUserSecret.SecretStatus
 		}
-		cr.Status.AtProvider.MasterUserSecret = f46
+		cr.Status.AtProvider.MasterUserSecret = f48
 	} else {
 		cr.Status.AtProvider.MasterUserSecret = nil
 	}
@@ -508,13 +533,13 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E
 		cr.Spec.ForProvider.PubliclyAccessible = nil
 	}
 	if resp.DBCluster.ReadReplicaIdentifiers != nil {
-		f60 := []*string{}
-		for _, f60iter := range resp.DBCluster.ReadReplicaIdentifiers {
-			var f60elem string
-			f60elem = *f60iter
-			f60 = append(f60, &f60elem)
+		f62 := []*string{}
+		for _, f62iter := range resp.DBCluster.ReadReplicaIdentifiers {
+			var f62elem string
+			f62elem = *f62iter
+			f62 = append(f62, &f62elem)
 		}
-		cr.Status.AtProvider.ReadReplicaIdentifiers = f60
+		cr.Status.AtProvider.ReadReplicaIdentifiers = f62
 	} else {
 		cr.Status.AtProvider.ReadReplicaIdentifiers = nil
 	}
@@ -529,38 +554,38 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E
 		cr.Spec.ForProvider.ReplicationSourceIdentifier = nil
 	}
 	if resp.DBCluster.ScalingConfigurationInfo != nil {
-		f63 := &svcapitypes.ScalingConfigurationInfo{}
+		f65 := &svcapitypes.ScalingConfigurationInfo{}
 		if resp.DBCluster.ScalingConfigurationInfo.AutoPause != nil {
-			f63.AutoPause = resp.DBCluster.ScalingConfigurationInfo.AutoPause
+			f65.AutoPause = resp.DBCluster.ScalingConfigurationInfo.AutoPause
 		}
 		if resp.DBCluster.ScalingConfigurationInfo.MaxCapacity != nil {
-			f63.MaxCapacity = resp.DBCluster.ScalingConfigurationInfo.MaxCapacity
+			f65.MaxCapacity = resp.DBCluster.ScalingConfigurationInfo.MaxCapacity
 		}
 		if resp.DBCluster.ScalingConfigurationInfo.MinCapacity != nil {
-			f63.MinCapacity = resp.DBCluster.ScalingConfigurationInfo.MinCapacity
+			f65.MinCapacity = resp.DBCluster.ScalingConfigurationInfo.MinCapacity
 		}
 		if resp.DBCluster.ScalingConfigurationInfo.SecondsBeforeTimeout != nil {
-			f63.SecondsBeforeTimeout = resp.DBCluster.ScalingConfigurationInfo.SecondsBeforeTimeout
+			f65.SecondsBeforeTimeout = resp.DBCluster.ScalingConfigurationInfo.SecondsBeforeTimeout
 		}
 		if resp.DBCluster.ScalingConfigurationInfo.SecondsUntilAutoPause != nil {
-			f63.SecondsUntilAutoPause = resp.DBCluster.ScalingConfigurationInfo.SecondsUntilAutoPause
+			f65.SecondsUntilAutoPause = resp.DBCluster.ScalingConfigurationInfo.SecondsUntilAutoPause
 		}
 		if resp.DBCluster.ScalingConfigurationInfo.TimeoutAction != nil {
-			f63.TimeoutAction = resp.DBCluster.ScalingConfigurationInfo.TimeoutAction
+			f65.TimeoutAction = resp.DBCluster.ScalingConfigurationInfo.TimeoutAction
 		}
-		cr.Status.AtProvider.ScalingConfigurationInfo = f63
+		cr.Status.AtProvider.ScalingConfigurationInfo = f65
 	} else {
 		cr.Status.AtProvider.ScalingConfigurationInfo = nil
 	}
 	if resp.DBCluster.ServerlessV2ScalingConfiguration != nil {
-		f64 := &svcapitypes.ServerlessV2ScalingConfiguration{}
+		f66 := &svcapitypes.ServerlessV2ScalingConfiguration{}
 		if resp.DBCluster.ServerlessV2ScalingConfiguration.MaxCapacity != nil {
-			f64.MaxCapacity = resp.DBCluster.ServerlessV2ScalingConfiguration.MaxCapacity
+			f66.MaxCapacity = resp.DBCluster.ServerlessV2ScalingConfiguration.MaxCapacity
 		}
 		if resp.DBCluster.ServerlessV2ScalingConfiguration.MinCapacity != nil {
-			f64.MinCapacity = resp.DBCluster.ServerlessV2ScalingConfiguration.MinCapacity
+			f66.MinCapacity = resp.DBCluster.ServerlessV2ScalingConfiguration.MinCapacity
 		}
-		cr.Spec.ForProvider.ServerlessV2ScalingConfiguration = f64
+		cr.Spec.ForProvider.ServerlessV2ScalingConfiguration = f66
 	} else {
 		cr.Spec.ForProvider.ServerlessV2ScalingConfiguration = nil
 	}
@@ -580,34 +605,34 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E
 		cr.Spec.ForProvider.StorageType = nil
 	}
 	if resp.DBCluster.TagList != nil {
-		f68 := []*svcapitypes.Tag{}
-		for _, f68iter := range resp.DBCluster.TagList {
-			f68elem := &svcapitypes.Tag{}
-			if f68iter.Key != nil {
-				f68elem.Key = f68iter.Key
+		f70 := []*svcapitypes.Tag{}
+		for _, f70iter := range resp.DBCluster.TagList {
+			f70elem := &svcapitypes.Tag{}
+			if f70iter.Key != nil {
+				f70elem.Key = f70iter.Key
 			}
-			if f68iter.Value != nil {
-				f68elem.Value = f68iter.Value
+			if f70iter.Value != nil {
+				f70elem.Value = f70iter.Value
 			}
-			f68 = append(f68, f68elem)
+			f70 = append(f70, f70elem)
 		}
-		cr.Status.AtProvider.TagList = f68
+		cr.Status.AtProvider.TagList = f70
 	} else {
 		cr.Status.AtProvider.TagList = nil
 	}
 	if resp.DBCluster.VpcSecurityGroups != nil {
-		f69 := []*svcapitypes.VPCSecurityGroupMembership{}
-		for _, f69iter := range resp.DBCluster.VpcSecurityGroups {
-			f69elem := &svcapitypes.VPCSecurityGroupMembership{}
-			if f69iter.Status != nil {
-				f69elem.Status = f69iter.Status
+		f71 := []*svcapitypes.VPCSecurityGroupMembership{}
+		for _, f71iter := range resp.DBCluster.VpcSecurityGroups {
+			f71elem := &svcapitypes.VPCSecurityGroupMembership{}
+			if f71iter.Status != nil {
+				f71elem.Status = f71iter.Status
 			}
-			if f69iter.VpcSecurityGroupId != nil {
-				f69elem.VPCSecurityGroupID = f69iter.VpcSecurityGroupId
+			if f71iter.VpcSecurityGroupId != nil {
+				f71elem.VPCSecurityGroupID = f71iter.VpcSecurityGroupId
 			}
-			f69 = append(f69, f69elem)
+			f71 = append(f71, f71elem)
 		}
-		cr.Status.AtProvider.VPCSecurityGroups = f69
+		cr.Status.AtProvider.VPCSecurityGroups = f71
 	} else {
 		cr.Status.AtProvider.VPCSecurityGroups = nil
 	}
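Timestamp fields such as the new IOOptimizedNextAllowedModificationTime are wrapped from the SDK's *time.Time into Kubernetes' *metav1.Time, nil staying nil. A sketch under that assumption; metav1Time stands in for k8s.io/apimachinery/pkg/apis/meta/v1.Time:

package main

import (
	"fmt"
	"time"
)

// Stand-in for metav1.Time, which embeds time.Time the same way.
type metav1Time struct{ time.Time }

// Mirrors &metav1.Time{*t} in the generated code, guarded against nil.
func toMetaTime(t *time.Time) *metav1Time {
	if t == nil {
		return nil
	}
	return &metav1Time{*t}
}

func main() {
	now := time.Now()
	fmt.Println(toMetaTime(&now) != nil, toMetaTime(nil) == nil) // true true
}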
diff --git a/pkg/controller/rds/dbcluster/zz_conversions.go b/pkg/controller/rds/dbcluster/zz_conversions.go
index 8d48b15995..6893dba0ee 100644
--- a/pkg/controller/rds/dbcluster/zz_conversions.go
+++ b/pkg/controller/rds/dbcluster/zz_conversions.go
@@ -251,6 +251,18 @@ func GenerateDBCluster(resp *svcsdk.DescribeDBClustersOutput) *svcapitypes.DBClu
 		f30 := []*svcapitypes.DomainMembership{}
 		for _, f30iter := range elem.DomainMemberships {
 			f30elem := &svcapitypes.DomainMembership{}
+			if f30iter.AuthSecretArn != nil {
+				f30elem.AuthSecretARN = f30iter.AuthSecretArn
+			}
+			if f30iter.DnsIps != nil {
+				f30elemf1 := []*string{}
+				for _, f30elemf1iter := range f30iter.DnsIps {
+					var f30elemf1elem string
+					f30elemf1elem = *f30elemf1iter
+					f30elemf1 = append(f30elemf1, &f30elemf1elem)
+				}
+				f30elem.DNSIPs = f30elemf1
+			}
 			if f30iter.Domain != nil {
 				f30elem.Domain = f30iter.Domain
 			}
@@ -260,6 +272,9 @@ func GenerateDBCluster(resp *svcsdk.DescribeDBClustersOutput) *svcapitypes.DBClu
 			if f30iter.IAMRoleName != nil {
 				f30elem.IAMRoleName = f30iter.IAMRoleName
 			}
+			if f30iter.OU != nil {
+				f30elem.OU = f30iter.OU
+			}
 			if f30iter.Status != nil {
 				f30elem.Status = f30iter.Status
 			}
@@ -335,6 +350,11 @@ func GenerateDBCluster(resp *svcsdk.DescribeDBClustersOutput) *svcapitypes.DBClu
 	} else {
 		cr.Status.AtProvider.IAMDatabaseAuthenticationEnabled = nil
 	}
+	if elem.IOOptimizedNextAllowedModificationTime != nil {
+		cr.Status.AtProvider.IOOptimizedNextAllowedModificationTime = &metav1.Time{*elem.IOOptimizedNextAllowedModificationTime}
+	} else {
+		cr.Status.AtProvider.IOOptimizedNextAllowedModificationTime = nil
+	}
 	if elem.Iops != nil {
 		cr.Spec.ForProvider.IOPS = elem.Iops
 	} else {
@@ -350,18 +370,23 @@ func GenerateDBCluster(resp *svcsdk.DescribeDBClustersOutput) *svcapitypes.DBClu
 	} else {
 		cr.Status.AtProvider.LatestRestorableTime = nil
 	}
+	if elem.LocalWriteForwardingStatus != nil {
+		cr.Status.AtProvider.LocalWriteForwardingStatus = elem.LocalWriteForwardingStatus
+	} else {
+		cr.Status.AtProvider.LocalWriteForwardingStatus = nil
+	}
 	if elem.MasterUserSecret != nil {
-		f46 := &svcapitypes.MasterUserSecret{}
+		f48 := &svcapitypes.MasterUserSecret{}
 		if elem.MasterUserSecret.KmsKeyId != nil {
-			f46.KMSKeyID = elem.MasterUserSecret.KmsKeyId
+			f48.KMSKeyID = elem.MasterUserSecret.KmsKeyId
 		}
 		if elem.MasterUserSecret.SecretArn != nil {
-			f46.SecretARN = elem.MasterUserSecret.SecretArn
+			f48.SecretARN = elem.MasterUserSecret.SecretArn
 		}
 		if elem.MasterUserSecret.SecretStatus != nil {
-			f46.SecretStatus = elem.MasterUserSecret.SecretStatus
+			f48.SecretStatus = elem.MasterUserSecret.SecretStatus
 		}
-		cr.Status.AtProvider.MasterUserSecret = f46
+		cr.Status.AtProvider.MasterUserSecret = f48
 	} else {
 		cr.Status.AtProvider.MasterUserSecret = nil
 	}
@@ -431,13 +456,13 @@ func GenerateDBCluster(resp *svcsdk.DescribeDBClustersOutput) *svcapitypes.DBClu
 		cr.Spec.ForProvider.PubliclyAccessible = nil
 	}
 	if elem.ReadReplicaIdentifiers != nil {
-		f60 := []*string{}
-		for _, f60iter := range elem.ReadReplicaIdentifiers {
-			var f60elem string
-			f60elem = *f60iter
-			f60 = append(f60, &f60elem)
+		f62 := []*string{}
+		for _, f62iter := range elem.ReadReplicaIdentifiers {
+			var f62elem string
+			f62elem = *f62iter
+			f62 = append(f62, &f62elem)
 		}
-		cr.Status.AtProvider.ReadReplicaIdentifiers = f60
+		cr.Status.AtProvider.ReadReplicaIdentifiers = f62
 	} else {
 		cr.Status.AtProvider.ReadReplicaIdentifiers = nil
 	}
@@ -452,38 +477,38 @@ func GenerateDBCluster(resp *svcsdk.DescribeDBClustersOutput) *svcapitypes.DBClu
 		cr.Spec.ForProvider.ReplicationSourceIdentifier = nil
 	}
 	if elem.ScalingConfigurationInfo != nil {
-		f63 := &svcapitypes.ScalingConfigurationInfo{}
+		f65 := &svcapitypes.ScalingConfigurationInfo{}
 		if elem.ScalingConfigurationInfo.AutoPause != nil {
-			f63.AutoPause = elem.ScalingConfigurationInfo.AutoPause
+			f65.AutoPause = elem.ScalingConfigurationInfo.AutoPause
 		}
 		if elem.ScalingConfigurationInfo.MaxCapacity != nil {
-			f63.MaxCapacity = elem.ScalingConfigurationInfo.MaxCapacity
+			f65.MaxCapacity = elem.ScalingConfigurationInfo.MaxCapacity
 		}
 		if elem.ScalingConfigurationInfo.MinCapacity != nil {
-			f63.MinCapacity = elem.ScalingConfigurationInfo.MinCapacity
+			f65.MinCapacity = elem.ScalingConfigurationInfo.MinCapacity
 		}
 		if elem.ScalingConfigurationInfo.SecondsBeforeTimeout != nil {
-			f63.SecondsBeforeTimeout = elem.ScalingConfigurationInfo.SecondsBeforeTimeout
+			f65.SecondsBeforeTimeout = elem.ScalingConfigurationInfo.SecondsBeforeTimeout
 		}
 		if elem.ScalingConfigurationInfo.SecondsUntilAutoPause != nil {
-			f63.SecondsUntilAutoPause = elem.ScalingConfigurationInfo.SecondsUntilAutoPause
+			f65.SecondsUntilAutoPause = elem.ScalingConfigurationInfo.SecondsUntilAutoPause
 		}
 		if elem.ScalingConfigurationInfo.TimeoutAction != nil {
-			f63.TimeoutAction = elem.ScalingConfigurationInfo.TimeoutAction
+			f65.TimeoutAction = elem.ScalingConfigurationInfo.TimeoutAction
 		}
-		cr.Status.AtProvider.ScalingConfigurationInfo = f63
+		cr.Status.AtProvider.ScalingConfigurationInfo = f65
 	} else {
 		cr.Status.AtProvider.ScalingConfigurationInfo = nil
 	}
 	if elem.ServerlessV2ScalingConfiguration != nil {
-		f64 := &svcapitypes.ServerlessV2ScalingConfiguration{}
+		f66 := &svcapitypes.ServerlessV2ScalingConfiguration{}
 		if elem.ServerlessV2ScalingConfiguration.MaxCapacity != nil {
-			f64.MaxCapacity = elem.ServerlessV2ScalingConfiguration.MaxCapacity
+			f66.MaxCapacity = elem.ServerlessV2ScalingConfiguration.MaxCapacity
 		}
 		if elem.ServerlessV2ScalingConfiguration.MinCapacity != nil {
-			f64.MinCapacity = elem.ServerlessV2ScalingConfiguration.MinCapacity
+			f66.MinCapacity = elem.ServerlessV2ScalingConfiguration.MinCapacity
 		}
-		cr.Spec.ForProvider.ServerlessV2ScalingConfiguration = f64
+		cr.Spec.ForProvider.ServerlessV2ScalingConfiguration = f66
 	} else {
 		cr.Spec.ForProvider.ServerlessV2ScalingConfiguration = nil
 	}
@@ -503,34 +528,34 @@ func GenerateDBCluster(resp *svcsdk.DescribeDBClustersOutput) *svcapitypes.DBClu
 		cr.Spec.ForProvider.StorageType = nil
 	}
 	if elem.TagList != nil {
-		f68 := []*svcapitypes.Tag{}
-		for _, f68iter := range elem.TagList {
-			f68elem := &svcapitypes.Tag{}
-			if f68iter.Key != nil {
-				f68elem.Key = f68iter.Key
+		f70 := []*svcapitypes.Tag{}
+		for _, f70iter := range elem.TagList {
+			f70elem := &svcapitypes.Tag{}
+			if f70iter.Key != nil {
+				f70elem.Key = f70iter.Key
 			}
-			if f68iter.Value != nil {
-				f68elem.Value = f68iter.Value
+			if f70iter.Value != nil {
+				f70elem.Value = f70iter.Value
 			}
-			f68 = append(f68, f68elem)
+			f70 = append(f70, f70elem)
 		}
-		cr.Status.AtProvider.TagList = f68
+		cr.Status.AtProvider.TagList = f70
 	} else {
 		cr.Status.AtProvider.TagList = nil
 	}
 	if elem.VpcSecurityGroups != nil {
-		f69 := []*svcapitypes.VPCSecurityGroupMembership{}
-		for _, f69iter := range elem.VpcSecurityGroups {
-			f69elem := &svcapitypes.VPCSecurityGroupMembership{}
-			if f69iter.Status != nil {
-				f69elem.Status = f69iter.Status
+		f71 := []*svcapitypes.VPCSecurityGroupMembership{}
+		for _, f71iter := range elem.VpcSecurityGroups {
+			f71elem := &svcapitypes.VPCSecurityGroupMembership{}
+			if f71iter.Status != nil {
+				f71elem.Status = f71iter.Status
 			}
-			if f69iter.VpcSecurityGroupId != nil {
-				f69elem.VPCSecurityGroupID = f69iter.VpcSecurityGroupId
+			if f71iter.VpcSecurityGroupId != nil {
+				f71elem.VPCSecurityGroupID = f71iter.VpcSecurityGroupId
 			}
-			f69 = append(f69, f69elem)
+			f71 = append(f71, f71elem)
 		}
-		cr.Status.AtProvider.VPCSecurityGroups = f69
+		cr.Status.AtProvider.VPCSecurityGroups = f71
 	} else {
 		cr.Status.AtProvider.VPCSecurityGroups = nil
 	}
@@ -620,6 +645,9 @@ func GenerateCreateDBClusterInput(cr *svcapitypes.DBCluster) *svcsdk.CreateDBClu
 	if cr.Spec.ForProvider.EnableIAMDatabaseAuthentication != nil {
 		res.SetEnableIAMDatabaseAuthentication(*cr.Spec.ForProvider.EnableIAMDatabaseAuthentication)
 	}
+	if cr.Spec.ForProvider.EnableLocalWriteForwarding != nil {
+		res.SetEnableLocalWriteForwarding(*cr.Spec.ForProvider.EnableLocalWriteForwarding)
+	}
 	if cr.Spec.ForProvider.EnablePerformanceInsights != nil {
 		res.SetEnablePerformanceInsights(*cr.Spec.ForProvider.EnablePerformanceInsights)
 	}
@@ -684,36 +712,36 @@ func GenerateCreateDBClusterInput(cr *svcapitypes.DBCluster) *svcsdk.CreateDBClu
 		res.SetReplicationSourceIdentifier(*cr.Spec.ForProvider.ReplicationSourceIdentifier)
 	}
 	if cr.Spec.ForProvider.ScalingConfiguration != nil {
-		f41 := &svcsdk.ScalingConfiguration{}
+		f42 := &svcsdk.ScalingConfiguration{}
 		if cr.Spec.ForProvider.ScalingConfiguration.AutoPause != nil {
-			f41.SetAutoPause(*cr.Spec.ForProvider.ScalingConfiguration.AutoPause)
+			f42.SetAutoPause(*cr.Spec.ForProvider.ScalingConfiguration.AutoPause)
 		}
 		if cr.Spec.ForProvider.ScalingConfiguration.MaxCapacity != nil {
-			f41.SetMaxCapacity(*cr.Spec.ForProvider.ScalingConfiguration.MaxCapacity)
+			f42.SetMaxCapacity(*cr.Spec.ForProvider.ScalingConfiguration.MaxCapacity)
 		}
 		if cr.Spec.ForProvider.ScalingConfiguration.MinCapacity != nil {
-			f41.SetMinCapacity(*cr.Spec.ForProvider.ScalingConfiguration.MinCapacity)
+			f42.SetMinCapacity(*cr.Spec.ForProvider.ScalingConfiguration.MinCapacity)
 		}
 		if cr.Spec.ForProvider.ScalingConfiguration.SecondsBeforeTimeout != nil {
-			f41.SetSecondsBeforeTimeout(*cr.Spec.ForProvider.ScalingConfiguration.SecondsBeforeTimeout)
+			f42.SetSecondsBeforeTimeout(*cr.Spec.ForProvider.ScalingConfiguration.SecondsBeforeTimeout)
 		}
 		if cr.Spec.ForProvider.ScalingConfiguration.SecondsUntilAutoPause != nil {
-			f41.SetSecondsUntilAutoPause(*cr.Spec.ForProvider.ScalingConfiguration.SecondsUntilAutoPause)
+			f42.SetSecondsUntilAutoPause(*cr.Spec.ForProvider.ScalingConfiguration.SecondsUntilAutoPause)
 		}
 		if cr.Spec.ForProvider.ScalingConfiguration.TimeoutAction != nil {
-			f41.SetTimeoutAction(*cr.Spec.ForProvider.ScalingConfiguration.TimeoutAction)
+			f42.SetTimeoutAction(*cr.Spec.ForProvider.ScalingConfiguration.TimeoutAction)
 		}
-		res.SetScalingConfiguration(f41)
+		res.SetScalingConfiguration(f42)
 	}
 	if cr.Spec.ForProvider.ServerlessV2ScalingConfiguration != nil {
-		f42 := &svcsdk.ServerlessV2ScalingConfiguration{}
+		f43 := &svcsdk.ServerlessV2ScalingConfiguration{}
 		if cr.Spec.ForProvider.ServerlessV2ScalingConfiguration.MaxCapacity != nil {
-			f42.SetMaxCapacity(*cr.Spec.ForProvider.ServerlessV2ScalingConfiguration.MaxCapacity)
+			f43.SetMaxCapacity(*cr.Spec.ForProvider.ServerlessV2ScalingConfiguration.MaxCapacity)
 		}
 		if cr.Spec.ForProvider.ServerlessV2ScalingConfiguration.MinCapacity != nil {
-			f42.SetMinCapacity(*cr.Spec.ForProvider.ServerlessV2ScalingConfiguration.MinCapacity)
+			f43.SetMinCapacity(*cr.Spec.ForProvider.ServerlessV2ScalingConfiguration.MinCapacity)
 		}
-		res.SetServerlessV2ScalingConfiguration(f42)
+		res.SetServerlessV2ScalingConfiguration(f43)
 	}
 	if cr.Spec.ForProvider.SourceRegion != nil {
 		res.SetSourceRegion(*cr.Spec.ForProvider.SourceRegion)
@@ -725,18 +753,18 @@ func GenerateCreateDBClusterInput(cr *svcapitypes.DBCluster) *svcsdk.CreateDBClu
 		res.SetStorageType(*cr.Spec.ForProvider.StorageType)
 	}
 	if cr.Spec.ForProvider.Tags != nil {
-		f46 := []*svcsdk.Tag{}
-		for _, f46iter := range cr.Spec.ForProvider.Tags {
-			f46elem := &svcsdk.Tag{}
-			if f46iter.Key != nil {
-				f46elem.SetKey(*f46iter.Key)
+		f47 := []*svcsdk.Tag{}
+		for _, f47iter := range cr.Spec.ForProvider.Tags {
+			f47elem := &svcsdk.Tag{}
+			if f47iter.Key != nil {
+				f47elem.SetKey(*f47iter.Key)
 			}
-			if f46iter.Value != nil {
-				f46elem.SetValue(*f46iter.Value)
+			if f47iter.Value != nil {
+				f47elem.SetValue(*f47iter.Value)
 			}
-			f46 = append(f46, f46elem)
+			f47 = append(f47, f47elem)
 		}
-		res.SetTags(f46)
+		res.SetTags(f47)
 	}
 	return res
@@ -788,9 +816,15 @@ func GenerateModifyDBClusterInput(cr *svcapitypes.DBCluster) *svcsdk.ModifyDBClu
 	if cr.Spec.ForProvider.EnableIAMDatabaseAuthentication != nil {
 		res.SetEnableIAMDatabaseAuthentication(*cr.Spec.ForProvider.EnableIAMDatabaseAuthentication)
 	}
+	if cr.Spec.ForProvider.EnableLocalWriteForwarding != nil {
+		res.SetEnableLocalWriteForwarding(*cr.Spec.ForProvider.EnableLocalWriteForwarding)
+	}
 	if cr.Spec.ForProvider.EnablePerformanceInsights != nil {
 		res.SetEnablePerformanceInsights(*cr.Spec.ForProvider.EnablePerformanceInsights)
 	}
+	if cr.Spec.ForProvider.EngineMode != nil {
+		res.SetEngineMode(*cr.Spec.ForProvider.EngineMode)
+	}
 	if cr.Spec.ForProvider.IOPS != nil {
 		res.SetIops(*cr.Spec.ForProvider.IOPS)
 	}
@@ -828,36 +862,36 @@ func GenerateModifyDBClusterInput(cr *svcapitypes.DBCluster) *svcsdk.ModifyDBClu
 		res.SetPreferredMaintenanceWindow(*cr.Spec.ForProvider.PreferredMaintenanceWindow)
 	}
 	if cr.Spec.ForProvider.ScalingConfiguration != nil {
-		f32 := &svcsdk.ScalingConfiguration{}
+		f35 := &svcsdk.ScalingConfiguration{}
 		if cr.Spec.ForProvider.ScalingConfiguration.AutoPause != nil {
-			f32.SetAutoPause(*cr.Spec.ForProvider.ScalingConfiguration.AutoPause)
+			f35.SetAutoPause(*cr.Spec.ForProvider.ScalingConfiguration.AutoPause)
 		}
 		if cr.Spec.ForProvider.ScalingConfiguration.MaxCapacity != nil {
-			f32.SetMaxCapacity(*cr.Spec.ForProvider.ScalingConfiguration.MaxCapacity)
+			f35.SetMaxCapacity(*cr.Spec.ForProvider.ScalingConfiguration.MaxCapacity)
 		}
 		if cr.Spec.ForProvider.ScalingConfiguration.MinCapacity != nil {
-			f32.SetMinCapacity(*cr.Spec.ForProvider.ScalingConfiguration.MinCapacity)
+			f35.SetMinCapacity(*cr.Spec.ForProvider.ScalingConfiguration.MinCapacity)
 		}
 		if cr.Spec.ForProvider.ScalingConfiguration.SecondsBeforeTimeout != nil {
-			f32.SetSecondsBeforeTimeout(*cr.Spec.ForProvider.ScalingConfiguration.SecondsBeforeTimeout)
+			f35.SetSecondsBeforeTimeout(*cr.Spec.ForProvider.ScalingConfiguration.SecondsBeforeTimeout)
 		}
 		if cr.Spec.ForProvider.ScalingConfiguration.SecondsUntilAutoPause != nil {
-			f32.SetSecondsUntilAutoPause(*cr.Spec.ForProvider.ScalingConfiguration.SecondsUntilAutoPause)
+			f35.SetSecondsUntilAutoPause(*cr.Spec.ForProvider.ScalingConfiguration.SecondsUntilAutoPause)
 		}
 		if cr.Spec.ForProvider.ScalingConfiguration.TimeoutAction != nil {
-			f32.SetTimeoutAction(*cr.Spec.ForProvider.ScalingConfiguration.TimeoutAction)
+			f35.SetTimeoutAction(*cr.Spec.ForProvider.ScalingConfiguration.TimeoutAction)
 		}
-		res.SetScalingConfiguration(f32)
+		res.SetScalingConfiguration(f35)
 	}
 	if cr.Spec.ForProvider.ServerlessV2ScalingConfiguration != nil {
-		f33 := &svcsdk.ServerlessV2ScalingConfiguration{}
+		f36 := &svcsdk.ServerlessV2ScalingConfiguration{}
 		if cr.Spec.ForProvider.ServerlessV2ScalingConfiguration.MaxCapacity != nil {
-			f33.SetMaxCapacity(*cr.Spec.ForProvider.ServerlessV2ScalingConfiguration.MaxCapacity)
+			f36.SetMaxCapacity(*cr.Spec.ForProvider.ServerlessV2ScalingConfiguration.MaxCapacity)
 		}
 		if cr.Spec.ForProvider.ServerlessV2ScalingConfiguration.MinCapacity != nil {
-			f33.SetMinCapacity(*cr.Spec.ForProvider.ServerlessV2ScalingConfiguration.MinCapacity)
+			f36.SetMinCapacity(*cr.Spec.ForProvider.ServerlessV2ScalingConfiguration.MinCapacity)
 		}
-		res.SetServerlessV2ScalingConfiguration(f33)
+		res.SetServerlessV2ScalingConfiguration(f36)
 	}
 	if cr.Spec.ForProvider.StorageType != nil {
 		res.SetStorageType(*cr.Spec.ForProvider.StorageType)
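Most of the churn above is mechanical: adding EnableLocalWriteForwarding and EngineMode shifts every later generated temporary (f41 becomes f42, f46 becomes f47, and so on), so logic that did not change still reappears in the diff under new names. The substantive pattern is small; a compressed, runnable sketch, where modifyDBClusterInput is an illustrative stand-in for svcsdk.ModifyDBClusterInput:

package main

import "fmt"

// Stand-in for the aws-sdk-go v1 input struct and its generated setters.
type modifyDBClusterInput struct {
	EnableLocalWriteForwarding *bool
	EngineMode                 *string
}

func (m *modifyDBClusterInput) SetEnableLocalWriteForwarding(v bool) { m.EnableLocalWriteForwarding = &v }
func (m *modifyDBClusterInput) SetEngineMode(v string)               { m.EngineMode = &v }

// Optional spec fields are propagated only when set, as in
// GenerateModifyDBClusterInput above.
func buildModifyInput(enableLWF *bool, engineMode *string) *modifyDBClusterInput {
	res := &modifyDBClusterInput{}
	if enableLWF != nil {
		res.SetEnableLocalWriteForwarding(*enableLWF)
	}
	if engineMode != nil {
		res.SetEngineMode(*engineMode)
	}
	return res
}

func main() {
	mode := "provisioned"
	res := buildModifyInput(nil, &mode)
	fmt.Println(res.EnableLocalWriteForwarding == nil, *res.EngineMode) // true provisioned
}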
diff --git a/pkg/controller/rds/dbinstance/zz_controller.go b/pkg/controller/rds/dbinstance/zz_controller.go
index 2ffbf0a335..586d6e4872 100644
--- a/pkg/controller/rds/dbinstance/zz_controller.go
+++ b/pkg/controller/rds/dbinstance/zz_controller.go
@@ -380,9 +380,9 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E
 		cr.Status.AtProvider.DBSubnetGroup = nil
 	}
 	if resp.DBInstance.DBSystemId != nil {
-		cr.Status.AtProvider.DBSystemID = resp.DBInstance.DBSystemId
+		cr.Spec.ForProvider.DBSystemID = resp.DBInstance.DBSystemId
 	} else {
-		cr.Status.AtProvider.DBSystemID = nil
+		cr.Spec.ForProvider.DBSystemID = nil
 	}
 	if resp.DBInstance.DbInstancePort != nil {
 		cr.Status.AtProvider.DBInstancePort = resp.DBInstance.DbInstancePort
@@ -403,6 +403,18 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E
 		f35 := []*svcapitypes.DomainMembership{}
 		for _, f35iter := range resp.DBInstance.DomainMemberships {
 			f35elem := &svcapitypes.DomainMembership{}
+			if f35iter.AuthSecretArn != nil {
+				f35elem.AuthSecretARN = f35iter.AuthSecretArn
+			}
+			if f35iter.DnsIps != nil {
+				f35elemf1 := []*string{}
+				for _, f35elemf1iter := range f35iter.DnsIps {
+					var f35elemf1elem string
+					f35elemf1elem = *f35elemf1iter
+					f35elemf1 = append(f35elemf1, &f35elemf1elem)
+				}
+				f35elem.DNSIPs = f35elemf1
+			}
 			if f35iter.Domain != nil {
 				f35elem.Domain = f35iter.Domain
 			}
@@ -412,6 +424,9 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E
 			if f35iter.IAMRoleName != nil {
 				f35elem.IAMRoleName = f35iter.IAMRoleName
 			}
+			if f35iter.OU != nil {
+				f35elem.OU = f35iter.OU
+			}
 			if f35iter.Status != nil {
 				f35elem.Status = f35iter.Status
 			}
@@ -596,6 +611,9 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E
 		if resp.DBInstance.PendingModifiedValues.DBSubnetGroupName != nil {
 			f57.DBSubnetGroupName = resp.DBInstance.PendingModifiedValues.DBSubnetGroupName
 		}
+		if resp.DBInstance.PendingModifiedValues.Engine != nil {
+			f57.Engine = resp.DBInstance.PendingModifiedValues.Engine
+		}
 		if resp.DBInstance.PendingModifiedValues.EngineVersion != nil {
 			f57.EngineVersion = resp.DBInstance.PendingModifiedValues.EngineVersion
 		}
@@ -615,43 +633,43 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E
 			f57.MultiAZ = resp.DBInstance.PendingModifiedValues.MultiAZ
 		}
 		if resp.DBInstance.PendingModifiedValues.PendingCloudwatchLogsExports != nil {
-			f57f13 := &svcapitypes.PendingCloudwatchLogsExports{}
+			f57f14 := &svcapitypes.PendingCloudwatchLogsExports{}
 			if resp.DBInstance.PendingModifiedValues.PendingCloudwatchLogsExports.LogTypesToDisable != nil {
-				f57f13f0 := []*string{}
-				for _, f57f13f0iter := range resp.DBInstance.PendingModifiedValues.PendingCloudwatchLogsExports.LogTypesToDisable {
-					var f57f13f0elem string
-					f57f13f0elem = *f57f13f0iter
-					f57f13f0 = append(f57f13f0, &f57f13f0elem)
+				f57f14f0 := []*string{}
+				for _, f57f14f0iter := range resp.DBInstance.PendingModifiedValues.PendingCloudwatchLogsExports.LogTypesToDisable {
+					var f57f14f0elem string
+					f57f14f0elem = *f57f14f0iter
+					f57f14f0 = append(f57f14f0, &f57f14f0elem)
 				}
-				f57f13.LogTypesToDisable = f57f13f0
+				f57f14.LogTypesToDisable = f57f14f0
 			}
 			if resp.DBInstance.PendingModifiedValues.PendingCloudwatchLogsExports.LogTypesToEnable != nil {
-				f57f13f1 := []*string{}
-				for _, f57f13f1iter := range resp.DBInstance.PendingModifiedValues.PendingCloudwatchLogsExports.LogTypesToEnable {
-					var f57f13f1elem string
-					f57f13f1elem = *f57f13f1iter
-					f57f13f1 = append(f57f13f1, &f57f13f1elem)
+				f57f14f1 := []*string{}
+				for _, f57f14f1iter := range resp.DBInstance.PendingModifiedValues.PendingCloudwatchLogsExports.LogTypesToEnable {
+					var f57f14f1elem string
+					f57f14f1elem = *f57f14f1iter
+					f57f14f1 = append(f57f14f1, &f57f14f1elem)
 				}
-				f57f13.LogTypesToEnable = f57f13f1
+				f57f14.LogTypesToEnable = f57f14f1
 			}
-			f57.PendingCloudwatchLogsExports = f57f13
+			f57.PendingCloudwatchLogsExports = f57f14
 		}
 		if resp.DBInstance.PendingModifiedValues.Port != nil {
 			f57.Port = resp.DBInstance.PendingModifiedValues.Port
 		}
 		if resp.DBInstance.PendingModifiedValues.ProcessorFeatures != nil {
-			f57f15 := []*svcapitypes.ProcessorFeature{}
-			for _, f57f15iter := range resp.DBInstance.PendingModifiedValues.ProcessorFeatures {
-				f57f15elem := &svcapitypes.ProcessorFeature{}
-				if f57f15iter.Name != nil {
-					f57f15elem.Name = f57f15iter.Name
+			f57f16 := []*svcapitypes.ProcessorFeature{}
+			for _, f57f16iter := range resp.DBInstance.PendingModifiedValues.ProcessorFeatures {
+				f57f16elem := &svcapitypes.ProcessorFeature{}
+				if f57f16iter.Name != nil {
+					f57f16elem.Name = f57f16iter.Name
 				}
-				if f57f15iter.Value != nil {
-					f57f15elem.Value = f57f15iter.Value
+				if f57f16iter.Value != nil {
+					f57f16elem.Value = f57f16iter.Value
 				}
-				f57f15 = append(f57f15, f57f15elem)
+				f57f16 = append(f57f16, f57f16elem)
 			}
-			f57.ProcessorFeatures = f57f15
+			f57.ProcessorFeatures = f57f16
 		}
 		if resp.DBInstance.PendingModifiedValues.ResumeFullAutomationModeTime != nil {
 			f57.ResumeFullAutomationModeTime = &metav1.Time{*resp.DBInstance.PendingModifiedValues.ResumeFullAutomationModeTime}
@@ -666,6 +684,11 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E
 	} else {
 		cr.Status.AtProvider.PendingModifiedValues = nil
 	}
+	if resp.DBInstance.PercentProgress != nil {
+		cr.Status.AtProvider.PercentProgress = resp.DBInstance.PercentProgress
+	} else {
+		cr.Status.AtProvider.PercentProgress = nil
+	}
 	if resp.DBInstance.PerformanceInsightsEnabled != nil {
 		cr.Status.AtProvider.PerformanceInsightsEnabled = resp.DBInstance.PerformanceInsightsEnabled
 	} else {
@@ -692,18 +715,18 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E
 		cr.Spec.ForProvider.PreferredMaintenanceWindow = nil
 	}
 	if resp.DBInstance.ProcessorFeatures != nil {
-		f63 := []*svcapitypes.ProcessorFeature{}
-		for _, f63iter := range resp.DBInstance.ProcessorFeatures {
-			f63elem := &svcapitypes.ProcessorFeature{}
-			if f63iter.Name != nil {
-				f63elem.Name = f63iter.Name
+		f64 := []*svcapitypes.ProcessorFeature{}
+		for _, f64iter := range resp.DBInstance.ProcessorFeatures {
+			f64elem := &svcapitypes.ProcessorFeature{}
+			if f64iter.Name != nil {
+				f64elem.Name = f64iter.Name
 			}
-			if f63iter.Value != nil {
-				f63elem.Value = f63iter.Value
+			if f64iter.Value != nil {
+				f64elem.Value = f64iter.Value
 			}
-			f63 = append(f63, f63elem)
+			f64 = append(f64, f64elem)
 		}
-		cr.Spec.ForProvider.ProcessorFeatures = f63
+		cr.Spec.ForProvider.ProcessorFeatures = f64
 	} else {
 		cr.Spec.ForProvider.ProcessorFeatures = nil
 	}
@@ -718,27 +741,32 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E
 		cr.Spec.ForProvider.PubliclyAccessible = nil
 	}
 	if resp.DBInstance.ReadReplicaDBClusterIdentifiers != nil {
-		f66 := []*string{}
-		for _, f66iter := range resp.DBInstance.ReadReplicaDBClusterIdentifiers {
-			var f66elem string
-			f66elem = *f66iter
-			f66 = append(f66, &f66elem)
+		f67 := []*string{}
+		for _, f67iter := range resp.DBInstance.ReadReplicaDBClusterIdentifiers {
+			var f67elem string
+			f67elem = *f67iter
+			f67 = append(f67, &f67elem)
 		}
-		cr.Status.AtProvider.ReadReplicaDBClusterIdentifiers = f66
+		cr.Status.AtProvider.ReadReplicaDBClusterIdentifiers = f67
 	} else {
 		cr.Status.AtProvider.ReadReplicaDBClusterIdentifiers = nil
 	}
 	if resp.DBInstance.ReadReplicaDBInstanceIdentifiers != nil {
-		f67 := []*string{}
-		for _, f67iter := range resp.DBInstance.ReadReplicaDBInstanceIdentifiers {
-			var f67elem string
-			f67elem = *f67iter
-			f67 = append(f67, &f67elem)
+		f68 := []*string{}
+		for _, f68iter := range resp.DBInstance.ReadReplicaDBInstanceIdentifiers {
+			var f68elem string
+			f68elem = *f68iter
+			f68 = append(f68, &f68elem)
 		}
-		cr.Status.AtProvider.ReadReplicaDBInstanceIdentifiers = f67
+		cr.Status.AtProvider.ReadReplicaDBInstanceIdentifiers = f68
 	} else {
 		cr.Status.AtProvider.ReadReplicaDBInstanceIdentifiers = nil
 	}
+	if resp.DBInstance.ReadReplicaSourceDBClusterIdentifier != nil {
+		cr.Status.AtProvider.ReadReplicaSourceDBClusterIdentifier = resp.DBInstance.ReadReplicaSourceDBClusterIdentifier
+	} else {
+		cr.Status.AtProvider.ReadReplicaSourceDBClusterIdentifier = nil
+	}
 	if resp.DBInstance.ReadReplicaSourceDBInstanceIdentifier != nil {
 		cr.Status.AtProvider.ReadReplicaSourceDBInstanceIdentifier = resp.DBInstance.ReadReplicaSourceDBInstanceIdentifier
 	} else {
@@ -760,24 +788,24 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E
 		cr.Status.AtProvider.SecondaryAvailabilityZone = nil
 	}
 	if resp.DBInstance.StatusInfos != nil {
-		f72 := []*svcapitypes.DBInstanceStatusInfo{}
-		for _, f72iter := range resp.DBInstance.StatusInfos {
-			f72elem := &svcapitypes.DBInstanceStatusInfo{}
-			if f72iter.Message != nil {
-				f72elem.Message = f72iter.Message
+		f74 := []*svcapitypes.DBInstanceStatusInfo{}
+		for _, f74iter := range resp.DBInstance.StatusInfos {
+			f74elem := &svcapitypes.DBInstanceStatusInfo{}
+			if f74iter.Message != nil {
+				f74elem.Message = f74iter.Message
 			}
-			if f72iter.Normal != nil {
-				f72elem.Normal = f72iter.Normal
+			if f74iter.Normal != nil {
+				f74elem.Normal = f74iter.Normal
 			}
-			if f72iter.Status != nil {
-				f72elem.Status = f72iter.Status
+			if f74iter.Status != nil {
+				f74elem.Status = f74iter.Status
 			}
-			if f72iter.StatusType != nil {
-				f72elem.StatusType = f72iter.StatusType
+			if f74iter.StatusType != nil {
+				f74elem.StatusType = f74iter.StatusType
 			}
-			f72 = append(f72, f72elem)
+			f74 = append(f74, f74elem)
 		}
-		cr.Status.AtProvider.StatusInfos = f72
+		cr.Status.AtProvider.StatusInfos = f74
 	} else {
 		cr.Status.AtProvider.StatusInfos = nil
 	}
@@ -797,18 +825,18 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E
 		cr.Spec.ForProvider.StorageType = nil
 	}
 	if resp.DBInstance.TagList != nil {
-		f76 := []*svcapitypes.Tag{}
-		for _, f76iter := range resp.DBInstance.TagList {
-			f76elem := &svcapitypes.Tag{}
-			if f76iter.Key != nil {
-				f76elem.Key = f76iter.Key
+		f78 := []*svcapitypes.Tag{}
+		for _, f78iter := range resp.DBInstance.TagList {
+			f78elem := &svcapitypes.Tag{}
+			if f78iter.Key != nil {
+				f78elem.Key = f78iter.Key
 			}
-			if f76iter.Value != nil {
-				f76elem.Value = f76iter.Value
+			if f78iter.Value != nil {
+				f78elem.Value = f78iter.Value
 			}
-			f76 = append(f76, f76elem)
+			f78 = append(f78, f78elem)
 		}
-		cr.Status.AtProvider.TagList = f76
+		cr.Status.AtProvider.TagList = f78
 	} else {
 		cr.Status.AtProvider.TagList = nil
 	}
@@ -823,18 +851,18 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E
 		cr.Spec.ForProvider.Timezone = nil
 	}
 	if resp.DBInstance.VpcSecurityGroups != nil {
-		f79 := []*svcapitypes.VPCSecurityGroupMembership{}
-		for _, f79iter := range resp.DBInstance.VpcSecurityGroups {
-			f79elem := &svcapitypes.VPCSecurityGroupMembership{}
-			if f79iter.Status != nil {
-				f79elem.Status = f79iter.Status
+		f81 := []*svcapitypes.VPCSecurityGroupMembership{}
+		for _, f81iter := range resp.DBInstance.VpcSecurityGroups {
+			f81elem := &svcapitypes.VPCSecurityGroupMembership{}
+			if f81iter.Status != nil {
+				f81elem.Status = f81iter.Status
 			}
-			if f79iter.VpcSecurityGroupId != nil {
-				f79elem.VPCSecurityGroupID = f79iter.VpcSecurityGroupId
+			if f81iter.VpcSecurityGroupId != nil {
+				f81elem.VPCSecurityGroupID = f81iter.VpcSecurityGroupId
 			}
-			f79 = append(f79, f79elem)
+			f81 = append(f81, f81elem)
 		}
-		cr.Status.AtProvider.VPCSecurityGroups = f79
+		cr.Status.AtProvider.VPCSecurityGroups = f81
 	} else {
 		cr.Status.AtProvider.VPCSecurityGroups = nil
 	}
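Note the semantic change at the top of this file's diff: DBSystemId now lands on spec.forProvider rather than status.atProvider, i.e. it is treated as a user-settable field synchronized from the observed instance. A sketch of that observed-to-spec routing; the struct fragments are illustrative, not the full generated API types:

package main

import "fmt"

// Illustrative fragments of the generated API types.
type forProvider struct{ DBSystemID *string }

// Mirrors the hunk above: the observed value is written to the spec
// (nil when absent) instead of the status.
func syncDBSystemID(spec *forProvider, observed *string) {
	if observed != nil {
		spec.DBSystemID = observed
	} else {
		spec.DBSystemID = nil
	}
}

func main() {
	spec := &forProvider{}
	id := "ORCL"
	syncDBSystemID(spec, &id)
	fmt.Println(*spec.DBSystemID) // ORCL
}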
diff --git a/pkg/controller/rds/dbinstance/zz_conversions.go b/pkg/controller/rds/dbinstance/zz_conversions.go
index 0f6e189407..18cb164426 100644
--- a/pkg/controller/rds/dbinstance/zz_conversions.go
+++ b/pkg/controller/rds/dbinstance/zz_conversions.go
@@ -307,9 +307,9 @@ func GenerateDBInstance(resp *svcsdk.DescribeDBInstancesOutput) *svcapitypes.DBI
 		cr.Status.AtProvider.DBSubnetGroup = nil
 	}
 	if elem.DBSystemId != nil {
-		cr.Status.AtProvider.DBSystemID = elem.DBSystemId
+		cr.Spec.ForProvider.DBSystemID = elem.DBSystemId
 	} else {
-		cr.Status.AtProvider.DBSystemID = nil
+		cr.Spec.ForProvider.DBSystemID = nil
 	}
 	if elem.DbInstancePort != nil {
 		cr.Status.AtProvider.DBInstancePort = elem.DbInstancePort
@@ -330,6 +330,18 @@ func GenerateDBInstance(resp *svcsdk.DescribeDBInstancesOutput) *svcapitypes.DBI
 		f35 := []*svcapitypes.DomainMembership{}
 		for _, f35iter := range elem.DomainMemberships {
 			f35elem := &svcapitypes.DomainMembership{}
+			if f35iter.AuthSecretArn != nil {
+				f35elem.AuthSecretARN = f35iter.AuthSecretArn
+			}
+			if f35iter.DnsIps != nil {
+				f35elemf1 := []*string{}
+				for _, f35elemf1iter := range f35iter.DnsIps {
+					var f35elemf1elem string
+					f35elemf1elem = *f35elemf1iter
+					f35elemf1 = append(f35elemf1, &f35elemf1elem)
+				}
+				f35elem.DNSIPs = f35elemf1
+			}
 			if f35iter.Domain != nil {
 				f35elem.Domain = f35iter.Domain
 			}
@@ -339,6 +351,9 @@ func GenerateDBInstance(resp *svcsdk.DescribeDBInstancesOutput) *svcapitypes.DBI
 			if f35iter.IAMRoleName != nil {
 				f35elem.IAMRoleName = f35iter.IAMRoleName
 			}
+			if f35iter.OU != nil {
+				f35elem.OU = f35iter.OU
+			}
 			if f35iter.Status != nil {
 				f35elem.Status = f35iter.Status
 			}
@@ -523,6 +538,9 @@ func GenerateDBInstance(resp *svcsdk.DescribeDBInstancesOutput) *svcapitypes.DBI
 		if elem.PendingModifiedValues.DBSubnetGroupName != nil {
 			f57.DBSubnetGroupName = elem.PendingModifiedValues.DBSubnetGroupName
 		}
+		if elem.PendingModifiedValues.Engine != nil {
+			f57.Engine = elem.PendingModifiedValues.Engine
+		}
 		if elem.PendingModifiedValues.EngineVersion != nil {
 			f57.EngineVersion = elem.PendingModifiedValues.EngineVersion
 		}
@@ -542,43 +560,43 @@ func GenerateDBInstance(resp *svcsdk.DescribeDBInstancesOutput) *svcapitypes.DBI
 			f57.MultiAZ = elem.PendingModifiedValues.MultiAZ
 		}
 		if elem.PendingModifiedValues.PendingCloudwatchLogsExports != nil {
-			f57f13 := &svcapitypes.PendingCloudwatchLogsExports{}
+			f57f14 := &svcapitypes.PendingCloudwatchLogsExports{}
 			if elem.PendingModifiedValues.PendingCloudwatchLogsExports.LogTypesToDisable != nil {
-				f57f13f0 := []*string{}
-				for _, f57f13f0iter := range elem.PendingModifiedValues.PendingCloudwatchLogsExports.LogTypesToDisable {
-					var f57f13f0elem string
-					f57f13f0elem = *f57f13f0iter
-					f57f13f0 = append(f57f13f0, &f57f13f0elem)
+				f57f14f0 := []*string{}
+				for _, f57f14f0iter := range elem.PendingModifiedValues.PendingCloudwatchLogsExports.LogTypesToDisable {
+					var f57f14f0elem string
+					f57f14f0elem = *f57f14f0iter
+					f57f14f0 = append(f57f14f0, &f57f14f0elem)
 				}
-				f57f13.LogTypesToDisable = f57f13f0
+				f57f14.LogTypesToDisable = f57f14f0
 			}
 			if elem.PendingModifiedValues.PendingCloudwatchLogsExports.LogTypesToEnable != nil {
-				f57f13f1 := []*string{}
-				for _, f57f13f1iter := range elem.PendingModifiedValues.PendingCloudwatchLogsExports.LogTypesToEnable {
-					var f57f13f1elem string
-					f57f13f1elem = *f57f13f1iter
-					f57f13f1 = append(f57f13f1, &f57f13f1elem)
+				f57f14f1 := []*string{}
+				for _, f57f14f1iter := range elem.PendingModifiedValues.PendingCloudwatchLogsExports.LogTypesToEnable {
+					var f57f14f1elem string
+					f57f14f1elem = *f57f14f1iter
+					f57f14f1 = append(f57f14f1, &f57f14f1elem)
 				}
-				f57f13.LogTypesToEnable = f57f13f1
+				f57f14.LogTypesToEnable = f57f14f1
 			}
-			f57.PendingCloudwatchLogsExports = f57f13
+			f57.PendingCloudwatchLogsExports = f57f14
 		}
 		if elem.PendingModifiedValues.Port != nil {
 			f57.Port = elem.PendingModifiedValues.Port
 		}
 		if elem.PendingModifiedValues.ProcessorFeatures != nil {
-			f57f15 := []*svcapitypes.ProcessorFeature{}
-			for _, f57f15iter := range elem.PendingModifiedValues.ProcessorFeatures {
-				f57f15elem := &svcapitypes.ProcessorFeature{}
-				if f57f15iter.Name != nil {
-					f57f15elem.Name = f57f15iter.Name
+			f57f16 := []*svcapitypes.ProcessorFeature{}
+			for _, f57f16iter := range elem.PendingModifiedValues.ProcessorFeatures {
+				f57f16elem := &svcapitypes.ProcessorFeature{}
+				if f57f16iter.Name != nil {
+					f57f16elem.Name = f57f16iter.Name
 				}
-				if f57f15iter.Value != nil {
-					f57f15elem.Value = f57f15iter.Value
+				if f57f16iter.Value != nil {
+					f57f16elem.Value = f57f16iter.Value
 				}
-				f57f15 = append(f57f15, f57f15elem)
+				f57f16 = append(f57f16, f57f16elem)
 			}
-			f57.ProcessorFeatures = f57f15
+			f57.ProcessorFeatures = f57f16
 		}
 		if elem.PendingModifiedValues.ResumeFullAutomationModeTime != nil {
 			f57.ResumeFullAutomationModeTime = &metav1.Time{*elem.PendingModifiedValues.ResumeFullAutomationModeTime}
@@ -593,6 +611,11 @@ func GenerateDBInstance(resp *svcsdk.DescribeDBInstancesOutput) *svcapitypes.DBI
 	} else {
 		cr.Status.AtProvider.PendingModifiedValues = nil
 	}
+	if elem.PercentProgress != nil {
+		cr.Status.AtProvider.PercentProgress = elem.PercentProgress
+	} else {
+		cr.Status.AtProvider.PercentProgress = nil
+	}
 	if elem.PerformanceInsightsEnabled != nil {
 		cr.Status.AtProvider.PerformanceInsightsEnabled = elem.PerformanceInsightsEnabled
 	} else {
@@ -619,18 +642,18 @@ func GenerateDBInstance(resp *svcsdk.DescribeDBInstancesOutput) *svcapitypes.DBI
 		cr.Spec.ForProvider.PreferredMaintenanceWindow = nil
 	}
 	if elem.ProcessorFeatures != nil {
-		f63 := []*svcapitypes.ProcessorFeature{}
-		for _, f63iter := range elem.ProcessorFeatures {
-			f63elem := &svcapitypes.ProcessorFeature{}
-			if f63iter.Name != nil {
-				f63elem.Name = f63iter.Name
+		f64 := []*svcapitypes.ProcessorFeature{}
+		for _, f64iter := range elem.ProcessorFeatures {
+			f64elem := &svcapitypes.ProcessorFeature{}
+			if f64iter.Name != nil {
+				f64elem.Name = f64iter.Name
 			}
-			if f63iter.Value != nil {
-				f63elem.Value = f63iter.Value
+			if f64iter.Value != nil {
+				f64elem.Value = f64iter.Value
 			}
-			f63 = append(f63, f63elem)
+			f64 = append(f64, f64elem)
 		}
-		cr.Spec.ForProvider.ProcessorFeatures = f63
+		cr.Spec.ForProvider.ProcessorFeatures = f64
 	} else {
 		cr.Spec.ForProvider.ProcessorFeatures = nil
 	}
@@ -645,27 +668,32 @@ func GenerateDBInstance(resp *svcsdk.DescribeDBInstancesOutput) *svcapitypes.DBI
 		cr.Spec.ForProvider.PubliclyAccessible = nil
 	}
 	if elem.ReadReplicaDBClusterIdentifiers != nil {
-		f66 := []*string{}
-		for _, f66iter := range elem.ReadReplicaDBClusterIdentifiers {
-			var f66elem string
-			f66elem = *f66iter
-			f66 = append(f66, &f66elem)
+		f67 := []*string{}
+		for _, f67iter := range elem.ReadReplicaDBClusterIdentifiers {
+			var f67elem string
+			f67elem = *f67iter
+			f67 = append(f67, &f67elem)
 		}
-		cr.Status.AtProvider.ReadReplicaDBClusterIdentifiers = f66
+		cr.Status.AtProvider.ReadReplicaDBClusterIdentifiers = f67
 	} else {
 		cr.Status.AtProvider.ReadReplicaDBClusterIdentifiers = nil
 	}
 	if elem.ReadReplicaDBInstanceIdentifiers != nil {
-		f67 := []*string{}
-		for _, f67iter := range elem.ReadReplicaDBInstanceIdentifiers {
-			var f67elem string
-			f67elem = *f67iter
-			f67 = append(f67, &f67elem)
+		f68 := []*string{}
+		for _, f68iter := range elem.ReadReplicaDBInstanceIdentifiers {
+			var f68elem string
+			f68elem = *f68iter
+			f68 = append(f68, &f68elem)
 		}
-		cr.Status.AtProvider.ReadReplicaDBInstanceIdentifiers = f67
+		cr.Status.AtProvider.ReadReplicaDBInstanceIdentifiers = f68
 	} else {
 		cr.Status.AtProvider.ReadReplicaDBInstanceIdentifiers = nil
 	}
+	if elem.ReadReplicaSourceDBClusterIdentifier != nil {
+		cr.Status.AtProvider.ReadReplicaSourceDBClusterIdentifier = elem.ReadReplicaSourceDBClusterIdentifier
+	} else {
+		cr.Status.AtProvider.ReadReplicaSourceDBClusterIdentifier = nil
+	}
 	if elem.ReadReplicaSourceDBInstanceIdentifier != nil {
 		cr.Status.AtProvider.ReadReplicaSourceDBInstanceIdentifier = elem.ReadReplicaSourceDBInstanceIdentifier
 	} else {
@@ -687,24 +715,24 @@ func GenerateDBInstance(resp *svcsdk.DescribeDBInstancesOutput) *svcapitypes.DBI
 		cr.Status.AtProvider.SecondaryAvailabilityZone = nil
 	}
 	if elem.StatusInfos != nil {
-		f72 := []*svcapitypes.DBInstanceStatusInfo{}
-		for _, f72iter := range elem.StatusInfos {
-			f72elem := &svcapitypes.DBInstanceStatusInfo{}
-			if f72iter.Message != nil {
-				f72elem.Message = f72iter.Message
+		f74 := []*svcapitypes.DBInstanceStatusInfo{}
+		for _, f74iter := range elem.StatusInfos {
+			f74elem := &svcapitypes.DBInstanceStatusInfo{}
+			if f74iter.Message != nil {
+				f74elem.Message = f74iter.Message
 			}
-			if f72iter.Normal != nil {
-				f72elem.Normal = f72iter.Normal
+			if f74iter.Normal != nil {
+				f74elem.Normal = f74iter.Normal
 			}
-			if f72iter.Status != nil {
-				f72elem.Status = f72iter.Status
+			if f74iter.Status != nil {
+				f74elem.Status = f74iter.Status
 			}
-			if f72iter.StatusType != nil {
-				f72elem.StatusType = f72iter.StatusType
+			if f74iter.StatusType != nil {
+				f74elem.StatusType = f74iter.StatusType
 			}
-			f72 = append(f72, f72elem)
+			f74 = append(f74, f74elem)
 		}
-		cr.Status.AtProvider.StatusInfos = f72
+		cr.Status.AtProvider.StatusInfos = f74
 	} else {
 		cr.Status.AtProvider.StatusInfos = nil
 	}
@@ -724,18 +752,18 @@ func GenerateDBInstance(resp *svcsdk.DescribeDBInstancesOutput) *svcapitypes.DBI
 		cr.Spec.ForProvider.StorageType = nil
 	}
 	if elem.TagList != nil {
-		f76 := []*svcapitypes.Tag{}
-		for _, f76iter := range elem.TagList {
-			f76elem := &svcapitypes.Tag{}
-			if f76iter.Key != nil {
-				f76elem.Key = f76iter.Key
+		f78 := []*svcapitypes.Tag{}
+		for _, f78iter := range elem.TagList {
+			f78elem := &svcapitypes.Tag{}
+			if f78iter.Key != nil {
+				f78elem.Key = f78iter.Key
 			}
-			if f76iter.Value != nil {
-				f76elem.Value = f76iter.Value
+			if f78iter.Value != nil {
+				f78elem.Value = f78iter.Value
 			}
-			f76 = append(f76, f76elem)
+			f78 = append(f78, f78elem)
 		}
-		cr.Status.AtProvider.TagList = f76
+		cr.Status.AtProvider.TagList = f78
 	} else {
 		cr.Status.AtProvider.TagList = nil
 	}
@@ -750,18 +778,18 @@ func GenerateDBInstance(resp *svcsdk.DescribeDBInstancesOutput) *svcapitypes.DBI
 		cr.Spec.ForProvider.Timezone = nil
 	}
 	if elem.VpcSecurityGroups != nil {
-		f79 := []*svcapitypes.VPCSecurityGroupMembership{}
-		for _, f79iter := range elem.VpcSecurityGroups {
-			f79elem := &svcapitypes.VPCSecurityGroupMembership{}
-			if f79iter.Status != nil {
-				f79elem.Status = f79iter.Status
+		f81 := []*svcapitypes.VPCSecurityGroupMembership{}
+		for _, f81iter := range elem.VpcSecurityGroups {
+			f81elem := &svcapitypes.VPCSecurityGroupMembership{}
+			if f81iter.Status != nil {
+				f81elem.Status = f81iter.Status
 			}
-			if f79iter.VpcSecurityGroupId != nil {
-				f79elem.VPCSecurityGroupID = f79iter.VpcSecurityGroupId
+			if f81iter.VpcSecurityGroupId != nil {
+				f81elem.VPCSecurityGroupID = f81iter.VpcSecurityGroupId
 			}
-			f79 = append(f79, f79elem)
+			f81 = append(f81, f81elem)
 		}
-		cr.Status.AtProvider.VPCSecurityGroups = f79
+		cr.Status.AtProvider.VPCSecurityGroups = f81
 	} else {
 		cr.Status.AtProvider.VPCSecurityGroups = nil
 	}
@@ -821,23 +849,44 @@ func GenerateCreateDBInstanceInput(cr *svcapitypes.DBInstance) *svcsdk.CreateDBI
 	if cr.Spec.ForProvider.DBSubnetGroupName != nil {
 		res.SetDBSubnetGroupName(*cr.Spec.ForProvider.DBSubnetGroupName)
 	}
+	if cr.Spec.ForProvider.DBSystemID != nil {
+		res.SetDBSystemId(*cr.Spec.ForProvider.DBSystemID)
+	}
 	if cr.Spec.ForProvider.DeletionProtection != nil {
 		res.SetDeletionProtection(*cr.Spec.ForProvider.DeletionProtection)
 	}
 	if cr.Spec.ForProvider.Domain != nil {
 		res.SetDomain(*cr.Spec.ForProvider.Domain)
 	}
+	if cr.Spec.ForProvider.DomainAuthSecretARN != nil {
+		res.SetDomainAuthSecretArn(*cr.Spec.ForProvider.DomainAuthSecretARN)
+	}
+	if cr.Spec.ForProvider.DomainDNSIPs != nil {
+		f18 := []*string{}
+		for _, f18iter := range cr.Spec.ForProvider.DomainDNSIPs {
+			var f18elem string
+			f18elem = *f18iter
+			f18 = append(f18, &f18elem)
+		}
+		res.SetDomainDnsIps(f18)
+	}
+	if cr.Spec.ForProvider.DomainFqdn != nil {
+		res.SetDomainFqdn(*cr.Spec.ForProvider.DomainFqdn)
+	}
 	if cr.Spec.ForProvider.DomainIAMRoleName != nil {
 		res.SetDomainIAMRoleName(*cr.Spec.ForProvider.DomainIAMRoleName)
 	}
+	if cr.Spec.ForProvider.DomainOu != nil {
+		res.SetDomainOu(*cr.Spec.ForProvider.DomainOu)
+	}
 	if cr.Spec.ForProvider.EnableCloudwatchLogsExports != nil {
-		f17 := []*string{}
-		for _, f17iter := range cr.Spec.ForProvider.EnableCloudwatchLogsExports {
-			var f17elem string
-			f17elem = *f17iter
-			f17 = append(f17, &f17elem)
+		f22 := []*string{}
+		for _, f22iter := range cr.Spec.ForProvider.EnableCloudwatchLogsExports {
+			var f22elem string
+			f22elem = *f22iter
+			f22 = append(f22, &f22elem)
 		}
-		res.SetEnableCloudwatchLogsExports(f17)
+		res.SetEnableCloudwatchLogsExports(f22)
 	}
 	if cr.Spec.ForProvider.EnableCustomerOwnedIP != nil {
 		res.SetEnableCustomerOwnedIp(*cr.Spec.ForProvider.EnableCustomerOwnedIP)
@@ -906,18 +955,18 @@ func GenerateCreateDBInstanceInput(cr *svcapitypes.DBInstance) *svcsdk.CreateDBI
 		res.SetPreferredMaintenanceWindow(*cr.Spec.ForProvider.PreferredMaintenanceWindow)
 	}
 	if cr.Spec.ForProvider.ProcessorFeatures != nil {
-		f40 := []*svcsdk.ProcessorFeature{}
-		for _, f40iter := range cr.Spec.ForProvider.ProcessorFeatures {
-			f40elem := &svcsdk.ProcessorFeature{}
-			if f40iter.Name != nil {
-				f40elem.SetName(*f40iter.Name)
+		f45 := []*svcsdk.ProcessorFeature{}
+		for _, f45iter := range cr.Spec.ForProvider.ProcessorFeatures {
+			f45elem := &svcsdk.ProcessorFeature{}
+			if f45iter.Name != nil {
+				f45elem.SetName(*f45iter.Name)
 			}
-			if f40iter.Value != nil {
-				f40elem.SetValue(*f40iter.Value)
+			if f45iter.Value != nil {
+				f45elem.SetValue(*f45iter.Value)
 			}
-			f40 = append(f40, f40elem)
+			f45 = append(f45, f45elem)
 		}
-		res.SetProcessorFeatures(f40)
+		res.SetProcessorFeatures(f45)
 	}
 	if cr.Spec.ForProvider.PromotionTier != nil {
 		res.SetPromotionTier(*cr.Spec.ForProvider.PromotionTier)
@@ -935,18 +984,18 @@ func GenerateCreateDBInstanceInput(cr *svcapitypes.DBInstance) *svcsdk.CreateDBI
 		res.SetStorageType(*cr.Spec.ForProvider.StorageType)
 	}
 	if cr.Spec.ForProvider.Tags != nil {
-		f46 := []*svcsdk.Tag{}
-		for _, f46iter := range cr.Spec.ForProvider.Tags {
-			f46elem := &svcsdk.Tag{}
-			if f46iter.Key != nil {
-				f46elem.SetKey(*f46iter.Key)
+		f51 := []*svcsdk.Tag{}
+		for _, f51iter := range cr.Spec.ForProvider.Tags {
+			f51elem := &svcsdk.Tag{}
+			if f51iter.Key != nil {
+				f51elem.SetKey(*f51iter.Key)
 			}
-			if f46iter.Value != nil {
-
f46elem.SetValue(*f46iter.Value) + if f51iter.Value != nil { + f51elem.SetValue(*f51iter.Value) } - f46 = append(f46, f46elem) + f51 = append(f51, f51elem) } - res.SetTags(f46) + res.SetTags(f51) } if cr.Spec.ForProvider.TDECredentialARN != nil { res.SetTdeCredentialArn(*cr.Spec.ForProvider.TDECredentialARN) @@ -1001,9 +1050,27 @@ func GenerateModifyDBInstanceInput(cr *svcapitypes.DBInstance) *svcsdk.ModifyDBI if cr.Spec.ForProvider.Domain != nil { res.SetDomain(*cr.Spec.ForProvider.Domain) } + if cr.Spec.ForProvider.DomainAuthSecretARN != nil { + res.SetDomainAuthSecretArn(*cr.Spec.ForProvider.DomainAuthSecretARN) + } + if cr.Spec.ForProvider.DomainDNSIPs != nil { + f18 := []*string{} + for _, f18iter := range cr.Spec.ForProvider.DomainDNSIPs { + var f18elem string + f18elem = *f18iter + f18 = append(f18, &f18elem) + } + res.SetDomainDnsIps(f18) + } + if cr.Spec.ForProvider.DomainFqdn != nil { + res.SetDomainFqdn(*cr.Spec.ForProvider.DomainFqdn) + } if cr.Spec.ForProvider.DomainIAMRoleName != nil { res.SetDomainIAMRoleName(*cr.Spec.ForProvider.DomainIAMRoleName) } + if cr.Spec.ForProvider.DomainOu != nil { + res.SetDomainOu(*cr.Spec.ForProvider.DomainOu) + } if cr.Spec.ForProvider.EnableCustomerOwnedIP != nil { res.SetEnableCustomerOwnedIp(*cr.Spec.ForProvider.EnableCustomerOwnedIP) } @@ -1013,6 +1080,9 @@ func GenerateModifyDBInstanceInput(cr *svcapitypes.DBInstance) *svcsdk.ModifyDBI if cr.Spec.ForProvider.EnablePerformanceInsights != nil { res.SetEnablePerformanceInsights(*cr.Spec.ForProvider.EnablePerformanceInsights) } + if cr.Spec.ForProvider.Engine != nil { + res.SetEngine(*cr.Spec.ForProvider.Engine) + } if cr.Spec.ForProvider.IOPS != nil { res.SetIops(*cr.Spec.ForProvider.IOPS) } @@ -1056,18 +1126,18 @@ func GenerateModifyDBInstanceInput(cr *svcapitypes.DBInstance) *svcsdk.ModifyDBI res.SetPreferredMaintenanceWindow(*cr.Spec.ForProvider.PreferredMaintenanceWindow) } if cr.Spec.ForProvider.ProcessorFeatures != nil { - f35 := []*svcsdk.ProcessorFeature{} - for _, f35iter := range cr.Spec.ForProvider.ProcessorFeatures { - f35elem := &svcsdk.ProcessorFeature{} - if f35iter.Name != nil { - f35elem.SetName(*f35iter.Name) + f41 := []*svcsdk.ProcessorFeature{} + for _, f41iter := range cr.Spec.ForProvider.ProcessorFeatures { + f41elem := &svcsdk.ProcessorFeature{} + if f41iter.Name != nil { + f41elem.SetName(*f41iter.Name) } - if f35iter.Value != nil { - f35elem.SetValue(*f35iter.Value) + if f41iter.Value != nil { + f41elem.SetValue(*f41iter.Value) } - f35 = append(f35, f35elem) + f41 = append(f41, f41elem) } - res.SetProcessorFeatures(f35) + res.SetProcessorFeatures(f41) } if cr.Spec.ForProvider.PromotionTier != nil { res.SetPromotionTier(*cr.Spec.ForProvider.PromotionTier) diff --git a/pkg/controller/rds/globalcluster/zz_controller.go b/pkg/controller/rds/globalcluster/zz_controller.go index f97f334144..c7e72c8f54 100644 --- a/pkg/controller/rds/globalcluster/zz_controller.go +++ b/pkg/controller/rds/globalcluster/zz_controller.go @@ -144,6 +144,9 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E if resp.GlobalCluster.FailoverState.FromDbClusterArn != nil { f4.FromDBClusterARN = resp.GlobalCluster.FailoverState.FromDbClusterArn } + if resp.GlobalCluster.FailoverState.IsDataLossAllowed != nil { + f4.IsDataLossAllowed = resp.GlobalCluster.FailoverState.IsDataLossAllowed + } if resp.GlobalCluster.FailoverState.Status != nil { f4.Status = resp.GlobalCluster.FailoverState.Status } @@ -186,6 +189,9 @@ func (e *external) Create(ctx context.Context, mg 
cpresource.Managed) (managed.E } f7elem.Readers = f7elemf3 } + if f7iter.SynchronizationStatus != nil { + f7elem.SynchronizationStatus = f7iter.SynchronizationStatus + } f7 = append(f7, f7elem) } cr.Status.AtProvider.GlobalClusterMembers = f7 diff --git a/pkg/controller/rds/globalcluster/zz_conversions.go b/pkg/controller/rds/globalcluster/zz_conversions.go index 4b8c01c1a3..a1952cec45 100644 --- a/pkg/controller/rds/globalcluster/zz_conversions.go +++ b/pkg/controller/rds/globalcluster/zz_conversions.go @@ -67,6 +67,9 @@ func GenerateGlobalCluster(resp *svcsdk.DescribeGlobalClustersOutput) *svcapityp if elem.FailoverState.FromDbClusterArn != nil { f4.FromDBClusterARN = elem.FailoverState.FromDbClusterArn } + if elem.FailoverState.IsDataLossAllowed != nil { + f4.IsDataLossAllowed = elem.FailoverState.IsDataLossAllowed + } if elem.FailoverState.Status != nil { f4.Status = elem.FailoverState.Status } @@ -109,6 +112,9 @@ func GenerateGlobalCluster(resp *svcsdk.DescribeGlobalClustersOutput) *svcapityp } f7elem.Readers = f7elemf3 } + if f7iter.SynchronizationStatus != nil { + f7elem.SynchronizationStatus = f7iter.SynchronizationStatus + } f7 = append(f7, f7elem) } cr.Status.AtProvider.GlobalClusterMembers = f7 diff --git a/pkg/controller/route53resolver/resolverendpoint/zz_controller.go b/pkg/controller/route53resolver/resolverendpoint/zz_controller.go index 0872a74121..b84e686cc2 100644 --- a/pkg/controller/route53resolver/resolverendpoint/zz_controller.go +++ b/pkg/controller/route53resolver/resolverendpoint/zz_controller.go @@ -160,14 +160,29 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E } else { cr.Spec.ForProvider.Name = nil } + if resp.ResolverEndpoint.OutpostArn != nil { + cr.Spec.ForProvider.OutpostARN = resp.ResolverEndpoint.OutpostArn + } else { + cr.Spec.ForProvider.OutpostARN = nil + } + if resp.ResolverEndpoint.PreferredInstanceType != nil { + cr.Spec.ForProvider.PreferredInstanceType = resp.ResolverEndpoint.PreferredInstanceType + } else { + cr.Spec.ForProvider.PreferredInstanceType = nil + } + if resp.ResolverEndpoint.ResolverEndpointType != nil { + cr.Spec.ForProvider.ResolverEndpointType = resp.ResolverEndpoint.ResolverEndpointType + } else { + cr.Spec.ForProvider.ResolverEndpointType = nil + } if resp.ResolverEndpoint.SecurityGroupIds != nil { - f9 := []*string{} - for _, f9iter := range resp.ResolverEndpoint.SecurityGroupIds { - var f9elem string - f9elem = *f9iter - f9 = append(f9, &f9elem) + f12 := []*string{} + for _, f12iter := range resp.ResolverEndpoint.SecurityGroupIds { + var f12elem string + f12elem = *f12iter + f12 = append(f12, &f12elem) } - cr.Status.AtProvider.SecurityGroupIDs = f9 + cr.Status.AtProvider.SecurityGroupIDs = f12 } else { cr.Status.AtProvider.SecurityGroupIDs = nil } diff --git a/pkg/controller/route53resolver/resolverendpoint/zz_conversions.go b/pkg/controller/route53resolver/resolverendpoint/zz_conversions.go index 9e9d6096b6..78a5d509f1 100644 --- a/pkg/controller/route53resolver/resolverendpoint/zz_conversions.go +++ b/pkg/controller/route53resolver/resolverendpoint/zz_conversions.go @@ -85,14 +85,29 @@ func GenerateResolverEndpoint(resp *svcsdk.GetResolverEndpointOutput) *svcapityp } else { cr.Spec.ForProvider.Name = nil } + if resp.ResolverEndpoint.OutpostArn != nil { + cr.Spec.ForProvider.OutpostARN = resp.ResolverEndpoint.OutpostArn + } else { + cr.Spec.ForProvider.OutpostARN = nil + } + if resp.ResolverEndpoint.PreferredInstanceType != nil { + cr.Spec.ForProvider.PreferredInstanceType = 
resp.ResolverEndpoint.PreferredInstanceType + } else { + cr.Spec.ForProvider.PreferredInstanceType = nil + } + if resp.ResolverEndpoint.ResolverEndpointType != nil { + cr.Spec.ForProvider.ResolverEndpointType = resp.ResolverEndpoint.ResolverEndpointType + } else { + cr.Spec.ForProvider.ResolverEndpointType = nil + } if resp.ResolverEndpoint.SecurityGroupIds != nil { - f9 := []*string{} - for _, f9iter := range resp.ResolverEndpoint.SecurityGroupIds { - var f9elem string - f9elem = *f9iter - f9 = append(f9, &f9elem) + f12 := []*string{} + for _, f12iter := range resp.ResolverEndpoint.SecurityGroupIds { + var f12elem string + f12elem = *f12iter + f12 = append(f12, &f12elem) } - cr.Status.AtProvider.SecurityGroupIDs = f9 + cr.Status.AtProvider.SecurityGroupIDs = f12 } else { cr.Status.AtProvider.SecurityGroupIDs = nil } @@ -120,19 +135,28 @@ func GenerateCreateResolverEndpointInput(cr *svcapitypes.ResolverEndpoint) *svcs if cr.Spec.ForProvider.Name != nil { res.SetName(*cr.Spec.ForProvider.Name) } + if cr.Spec.ForProvider.OutpostARN != nil { + res.SetOutpostArn(*cr.Spec.ForProvider.OutpostARN) + } + if cr.Spec.ForProvider.PreferredInstanceType != nil { + res.SetPreferredInstanceType(*cr.Spec.ForProvider.PreferredInstanceType) + } + if cr.Spec.ForProvider.ResolverEndpointType != nil { + res.SetResolverEndpointType(*cr.Spec.ForProvider.ResolverEndpointType) + } if cr.Spec.ForProvider.Tags != nil { - f2 := []*svcsdk.Tag{} - for _, f2iter := range cr.Spec.ForProvider.Tags { - f2elem := &svcsdk.Tag{} - if f2iter.Key != nil { - f2elem.SetKey(*f2iter.Key) + f5 := []*svcsdk.Tag{} + for _, f5iter := range cr.Spec.ForProvider.Tags { + f5elem := &svcsdk.Tag{} + if f5iter.Key != nil { + f5elem.SetKey(*f5iter.Key) } - if f2iter.Value != nil { - f2elem.SetValue(*f2iter.Value) + if f5iter.Value != nil { + f5elem.SetValue(*f5iter.Value) } - f2 = append(f2, f2elem) + f5 = append(f5, f5elem) } - res.SetTags(f2) + res.SetTags(f5) } return res @@ -145,6 +169,9 @@ func GenerateUpdateResolverEndpointInput(cr *svcapitypes.ResolverEndpoint) *svcs if cr.Spec.ForProvider.Name != nil { res.SetName(*cr.Spec.ForProvider.Name) } + if cr.Spec.ForProvider.ResolverEndpointType != nil { + res.SetResolverEndpointType(*cr.Spec.ForProvider.ResolverEndpointType) + } return res } diff --git a/pkg/controller/route53resolver/resolverrule/zz_controller.go b/pkg/controller/route53resolver/resolverrule/zz_controller.go index fc05680ee3..a175bd24fa 100644 --- a/pkg/controller/route53resolver/resolverrule/zz_controller.go +++ b/pkg/controller/route53resolver/resolverrule/zz_controller.go @@ -187,6 +187,9 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E if f13iter.Ip != nil { f13elem.IP = f13iter.Ip } + if f13iter.Ipv6 != nil { + f13elem.IPv6 = f13iter.Ipv6 + } if f13iter.Port != nil { f13elem.Port = f13iter.Port } diff --git a/pkg/controller/route53resolver/resolverrule/zz_conversions.go b/pkg/controller/route53resolver/resolverrule/zz_conversions.go index 6d0d1a7eea..f072754f2b 100644 --- a/pkg/controller/route53resolver/resolverrule/zz_conversions.go +++ b/pkg/controller/route53resolver/resolverrule/zz_conversions.go @@ -112,6 +112,9 @@ func GenerateResolverRule(resp *svcsdk.GetResolverRuleOutput) *svcapitypes.Resol if f13iter.Ip != nil { f13elem.IP = f13iter.Ip } + if f13iter.Ipv6 != nil { + f13elem.IPv6 = f13iter.Ipv6 + } if f13iter.Port != nil { f13elem.Port = f13iter.Port } @@ -162,6 +165,9 @@ func GenerateCreateResolverRuleInput(cr *svcapitypes.ResolverRule) *svcsdk.Creat if f5iter.IP != nil { 
f5elem.SetIp(*f5iter.IP) } + if f5iter.IPv6 != nil { + f5elem.SetIpv6(*f5iter.IPv6) + } if f5iter.Port != nil { f5elem.SetPort(*f5iter.Port) } diff --git a/pkg/controller/s3/bucket/policy_test.go b/pkg/controller/s3/bucket/policy_test.go index 9be3cea212..dd21dadbff 100644 --- a/pkg/controller/s3/bucket/policy_test.go +++ b/pkg/controller/s3/bucket/policy_test.go @@ -25,7 +25,7 @@ import ( "github.com/aws/smithy-go" "github.com/crossplane/crossplane-runtime/pkg/test" "github.com/google/go-cmp/cmp" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "github.com/crossplane-contrib/provider-aws/apis/s3/common" "github.com/crossplane-contrib/provider-aws/apis/s3/v1beta1" @@ -177,7 +177,7 @@ func TestPolicyObserve(t *testing.T) { Conditions: []common.ConditionPair{ { ConditionKey: "aws:SecureTransport", - ConditionBooleanValue: pointer.Bool(false), + ConditionBooleanValue: ptr.To(false), }, }, }, diff --git a/pkg/controller/sfn/statemachine/zz_controller.go b/pkg/controller/sfn/statemachine/zz_controller.go index b3ce873155..a09b177b0a 100644 --- a/pkg/controller/sfn/statemachine/zz_controller.go +++ b/pkg/controller/sfn/statemachine/zz_controller.go @@ -126,6 +126,11 @@ func (e *external) Create(ctx context.Context, mg cpresource.Managed) (managed.E } else { cr.Status.AtProvider.StateMachineARN = nil } + if resp.StateMachineVersionArn != nil { + cr.Status.AtProvider.StateMachineVersionARN = resp.StateMachineVersionArn + } else { + cr.Status.AtProvider.StateMachineVersionARN = nil + } return e.postCreate(ctx, cr, resp, managed.ExternalCreation{}, err) } diff --git a/pkg/controller/sfn/statemachine/zz_conversions.go b/pkg/controller/sfn/statemachine/zz_conversions.go index e9ab35d973..79f3473078 100644 --- a/pkg/controller/sfn/statemachine/zz_conversions.go +++ b/pkg/controller/sfn/statemachine/zz_conversions.go @@ -56,29 +56,29 @@ func GenerateStateMachine(resp *svcsdk.DescribeStateMachineOutput) *svcapitypes. 
cr.Spec.ForProvider.Definition = nil } if resp.LoggingConfiguration != nil { - f3 := &svcapitypes.LoggingConfiguration{} + f4 := &svcapitypes.LoggingConfiguration{} if resp.LoggingConfiguration.Destinations != nil { - f3f0 := []*svcapitypes.LogDestination{} - for _, f3f0iter := range resp.LoggingConfiguration.Destinations { - f3f0elem := &svcapitypes.LogDestination{} - if f3f0iter.CloudWatchLogsLogGroup != nil { - f3f0elemf0 := &svcapitypes.CloudWatchLogsLogGroup{} - if f3f0iter.CloudWatchLogsLogGroup.LogGroupArn != nil { - f3f0elemf0.LogGroupARN = f3f0iter.CloudWatchLogsLogGroup.LogGroupArn + f4f0 := []*svcapitypes.LogDestination{} + for _, f4f0iter := range resp.LoggingConfiguration.Destinations { + f4f0elem := &svcapitypes.LogDestination{} + if f4f0iter.CloudWatchLogsLogGroup != nil { + f4f0elemf0 := &svcapitypes.CloudWatchLogsLogGroup{} + if f4f0iter.CloudWatchLogsLogGroup.LogGroupArn != nil { + f4f0elemf0.LogGroupARN = f4f0iter.CloudWatchLogsLogGroup.LogGroupArn } - f3f0elem.CloudWatchLogsLogGroup = f3f0elemf0 + f4f0elem.CloudWatchLogsLogGroup = f4f0elemf0 } - f3f0 = append(f3f0, f3f0elem) + f4f0 = append(f4f0, f4f0elem) } - f3.Destinations = f3f0 + f4.Destinations = f4f0 } if resp.LoggingConfiguration.IncludeExecutionData != nil { - f3.IncludeExecutionData = resp.LoggingConfiguration.IncludeExecutionData + f4.IncludeExecutionData = resp.LoggingConfiguration.IncludeExecutionData } if resp.LoggingConfiguration.Level != nil { - f3.Level = resp.LoggingConfiguration.Level + f4.Level = resp.LoggingConfiguration.Level } - cr.Spec.ForProvider.LoggingConfiguration = f3 + cr.Spec.ForProvider.LoggingConfiguration = f4 } else { cr.Spec.ForProvider.LoggingConfiguration = nil } @@ -93,11 +93,11 @@ func GenerateStateMachine(resp *svcsdk.DescribeStateMachineOutput) *svcapitypes. 
cr.Status.AtProvider.StateMachineARN = nil } if resp.TracingConfiguration != nil { - f8 := &svcapitypes.TracingConfiguration{} + f10 := &svcapitypes.TracingConfiguration{} if resp.TracingConfiguration.Enabled != nil { - f8.Enabled = resp.TracingConfiguration.Enabled + f10.Enabled = resp.TracingConfiguration.Enabled } - cr.Spec.ForProvider.TracingConfiguration = f8 + cr.Spec.ForProvider.TracingConfiguration = f10 } else { cr.Spec.ForProvider.TracingConfiguration = nil } @@ -140,26 +140,32 @@ func GenerateCreateStateMachineInput(cr *svcapitypes.StateMachine) *svcsdk.Creat if cr.Spec.ForProvider.Name != nil { res.SetName(*cr.Spec.ForProvider.Name) } + if cr.Spec.ForProvider.Publish != nil { + res.SetPublish(*cr.Spec.ForProvider.Publish) + } if cr.Spec.ForProvider.Tags != nil { - f3 := []*svcsdk.Tag{} - for _, f3iter := range cr.Spec.ForProvider.Tags { - f3elem := &svcsdk.Tag{} - if f3iter.Key != nil { - f3elem.SetKey(*f3iter.Key) + f4 := []*svcsdk.Tag{} + for _, f4iter := range cr.Spec.ForProvider.Tags { + f4elem := &svcsdk.Tag{} + if f4iter.Key != nil { + f4elem.SetKey(*f4iter.Key) } - if f3iter.Value != nil { - f3elem.SetValue(*f3iter.Value) + if f4iter.Value != nil { + f4elem.SetValue(*f4iter.Value) } - f3 = append(f3, f3elem) + f4 = append(f4, f4elem) } - res.SetTags(f3) + res.SetTags(f4) } if cr.Spec.ForProvider.TracingConfiguration != nil { - f4 := &svcsdk.TracingConfiguration{} + f5 := &svcsdk.TracingConfiguration{} if cr.Spec.ForProvider.TracingConfiguration.Enabled != nil { - f4.SetEnabled(*cr.Spec.ForProvider.TracingConfiguration.Enabled) + f5.SetEnabled(*cr.Spec.ForProvider.TracingConfiguration.Enabled) } - res.SetTracingConfiguration(f4) + res.SetTracingConfiguration(f5) + } + if cr.Spec.ForProvider.VersionDescription != nil { + res.SetVersionDescription(*cr.Spec.ForProvider.VersionDescription) } return res @@ -197,15 +203,21 @@ func GenerateUpdateStateMachineInput(cr *svcapitypes.StateMachine) *svcsdk.Updat } res.SetLoggingConfiguration(f1) } + if cr.Spec.ForProvider.Publish != nil { + res.SetPublish(*cr.Spec.ForProvider.Publish) + } if cr.Status.AtProvider.StateMachineARN != nil { res.SetStateMachineArn(*cr.Status.AtProvider.StateMachineARN) } if cr.Spec.ForProvider.TracingConfiguration != nil { - f4 := &svcsdk.TracingConfiguration{} + f5 := &svcsdk.TracingConfiguration{} if cr.Spec.ForProvider.TracingConfiguration.Enabled != nil { - f4.SetEnabled(*cr.Spec.ForProvider.TracingConfiguration.Enabled) + f5.SetEnabled(*cr.Spec.ForProvider.TracingConfiguration.Enabled) } - res.SetTracingConfiguration(f4) + res.SetTracingConfiguration(f5) + } + if cr.Spec.ForProvider.VersionDescription != nil { + res.SetVersionDescription(*cr.Spec.ForProvider.VersionDescription) } return res diff --git a/pkg/controller/transfer/server/zz_conversions.go b/pkg/controller/transfer/server/zz_conversions.go index 0ff2f5dba1..234a6d81c8 100644 --- a/pkg/controller/transfer/server/zz_conversions.go +++ b/pkg/controller/transfer/server/zz_conversions.go @@ -65,6 +65,9 @@ func GenerateServer(resp *svcsdk.DescribeServerOutput) *svcapitypes.Server { if resp.Server.IdentityProviderDetails.InvocationRole != nil { f6.InvocationRole = resp.Server.IdentityProviderDetails.InvocationRole } + if resp.Server.IdentityProviderDetails.SftpAuthenticationMethods != nil { + f6.SftpAuthenticationMethods = resp.Server.IdentityProviderDetails.SftpAuthenticationMethods + } if resp.Server.IdentityProviderDetails.Url != nil { f6.URL = resp.Server.IdentityProviderDetails.Url } @@ -132,53 +135,64 @@ func GenerateServer(resp 
*svcsdk.DescribeServerOutput) *svcapitypes.Server { } else { cr.Status.AtProvider.ServerID = nil } + if resp.Server.StructuredLogDestinations != nil { + f16 := []*string{} + for _, f16iter := range resp.Server.StructuredLogDestinations { + var f16elem string + f16elem = *f16iter + f16 = append(f16, &f16elem) + } + cr.Spec.ForProvider.StructuredLogDestinations = f16 + } else { + cr.Spec.ForProvider.StructuredLogDestinations = nil + } if resp.Server.Tags != nil { - f16 := []*svcapitypes.Tag{} - for _, f16iter := range resp.Server.Tags { - f16elem := &svcapitypes.Tag{} - if f16iter.Key != nil { - f16elem.Key = f16iter.Key + f17 := []*svcapitypes.Tag{} + for _, f17iter := range resp.Server.Tags { + f17elem := &svcapitypes.Tag{} + if f17iter.Key != nil { + f17elem.Key = f17iter.Key } - if f16iter.Value != nil { - f16elem.Value = f16iter.Value + if f17iter.Value != nil { + f17elem.Value = f17iter.Value } - f16 = append(f16, f16elem) + f17 = append(f17, f17elem) } - cr.Spec.ForProvider.Tags = f16 + cr.Spec.ForProvider.Tags = f17 } else { cr.Spec.ForProvider.Tags = nil } if resp.Server.WorkflowDetails != nil { - f18 := &svcapitypes.WorkflowDetails{} + f19 := &svcapitypes.WorkflowDetails{} if resp.Server.WorkflowDetails.OnPartialUpload != nil { - f18f0 := []*svcapitypes.WorkflowDetail{} - for _, f18f0iter := range resp.Server.WorkflowDetails.OnPartialUpload { - f18f0elem := &svcapitypes.WorkflowDetail{} - if f18f0iter.ExecutionRole != nil { - f18f0elem.ExecutionRole = f18f0iter.ExecutionRole + f19f0 := []*svcapitypes.WorkflowDetail{} + for _, f19f0iter := range resp.Server.WorkflowDetails.OnPartialUpload { + f19f0elem := &svcapitypes.WorkflowDetail{} + if f19f0iter.ExecutionRole != nil { + f19f0elem.ExecutionRole = f19f0iter.ExecutionRole } - if f18f0iter.WorkflowId != nil { - f18f0elem.WorkflowID = f18f0iter.WorkflowId + if f19f0iter.WorkflowId != nil { + f19f0elem.WorkflowID = f19f0iter.WorkflowId } - f18f0 = append(f18f0, f18f0elem) + f19f0 = append(f19f0, f19f0elem) } - f18.OnPartialUpload = f18f0 + f19.OnPartialUpload = f19f0 } if resp.Server.WorkflowDetails.OnUpload != nil { - f18f1 := []*svcapitypes.WorkflowDetail{} - for _, f18f1iter := range resp.Server.WorkflowDetails.OnUpload { - f18f1elem := &svcapitypes.WorkflowDetail{} - if f18f1iter.ExecutionRole != nil { - f18f1elem.ExecutionRole = f18f1iter.ExecutionRole + f19f1 := []*svcapitypes.WorkflowDetail{} + for _, f19f1iter := range resp.Server.WorkflowDetails.OnUpload { + f19f1elem := &svcapitypes.WorkflowDetail{} + if f19f1iter.ExecutionRole != nil { + f19f1elem.ExecutionRole = f19f1iter.ExecutionRole } - if f18f1iter.WorkflowId != nil { - f18f1elem.WorkflowID = f18f1iter.WorkflowId + if f19f1iter.WorkflowId != nil { + f19f1elem.WorkflowID = f19f1iter.WorkflowId } - f18f1 = append(f18f1, f18f1elem) + f19f1 = append(f19f1, f19f1elem) } - f18.OnUpload = f18f1 + f19.OnUpload = f19f1 } - cr.Spec.ForProvider.WorkflowDetails = f18 + cr.Spec.ForProvider.WorkflowDetails = f19 } else { cr.Spec.ForProvider.WorkflowDetails = nil } @@ -210,6 +224,9 @@ func GenerateCreateServerInput(cr *svcapitypes.Server) *svcsdk.CreateServerInput if cr.Spec.ForProvider.IdentityProviderDetails.InvocationRole != nil { f3.SetInvocationRole(*cr.Spec.ForProvider.IdentityProviderDetails.InvocationRole) } + if cr.Spec.ForProvider.IdentityProviderDetails.SftpAuthenticationMethods != nil { + f3.SetSftpAuthenticationMethods(*cr.Spec.ForProvider.IdentityProviderDetails.SftpAuthenticationMethods) + } if cr.Spec.ForProvider.IdentityProviderDetails.URL != nil { 
f3.SetUrl(*cr.Spec.ForProvider.IdentityProviderDetails.URL) } @@ -258,51 +275,60 @@ func GenerateCreateServerInput(cr *svcapitypes.Server) *svcsdk.CreateServerInput if cr.Spec.ForProvider.SecurityPolicyName != nil { res.SetSecurityPolicyName(*cr.Spec.ForProvider.SecurityPolicyName) } + if cr.Spec.ForProvider.StructuredLogDestinations != nil { + f10 := []*string{} + for _, f10iter := range cr.Spec.ForProvider.StructuredLogDestinations { + var f10elem string + f10elem = *f10iter + f10 = append(f10, &f10elem) + } + res.SetStructuredLogDestinations(f10) + } if cr.Spec.ForProvider.Tags != nil { - f10 := []*svcsdk.Tag{} - for _, f10iter := range cr.Spec.ForProvider.Tags { - f10elem := &svcsdk.Tag{} - if f10iter.Key != nil { - f10elem.SetKey(*f10iter.Key) + f11 := []*svcsdk.Tag{} + for _, f11iter := range cr.Spec.ForProvider.Tags { + f11elem := &svcsdk.Tag{} + if f11iter.Key != nil { + f11elem.SetKey(*f11iter.Key) } - if f10iter.Value != nil { - f10elem.SetValue(*f10iter.Value) + if f11iter.Value != nil { + f11elem.SetValue(*f11iter.Value) } - f10 = append(f10, f10elem) + f11 = append(f11, f11elem) } - res.SetTags(f10) + res.SetTags(f11) } if cr.Spec.ForProvider.WorkflowDetails != nil { - f11 := &svcsdk.WorkflowDetails{} + f12 := &svcsdk.WorkflowDetails{} if cr.Spec.ForProvider.WorkflowDetails.OnPartialUpload != nil { - f11f0 := []*svcsdk.WorkflowDetail{} - for _, f11f0iter := range cr.Spec.ForProvider.WorkflowDetails.OnPartialUpload { - f11f0elem := &svcsdk.WorkflowDetail{} - if f11f0iter.ExecutionRole != nil { - f11f0elem.SetExecutionRole(*f11f0iter.ExecutionRole) + f12f0 := []*svcsdk.WorkflowDetail{} + for _, f12f0iter := range cr.Spec.ForProvider.WorkflowDetails.OnPartialUpload { + f12f0elem := &svcsdk.WorkflowDetail{} + if f12f0iter.ExecutionRole != nil { + f12f0elem.SetExecutionRole(*f12f0iter.ExecutionRole) } - if f11f0iter.WorkflowID != nil { - f11f0elem.SetWorkflowId(*f11f0iter.WorkflowID) + if f12f0iter.WorkflowID != nil { + f12f0elem.SetWorkflowId(*f12f0iter.WorkflowID) } - f11f0 = append(f11f0, f11f0elem) + f12f0 = append(f12f0, f12f0elem) } - f11.SetOnPartialUpload(f11f0) + f12.SetOnPartialUpload(f12f0) } if cr.Spec.ForProvider.WorkflowDetails.OnUpload != nil { - f11f1 := []*svcsdk.WorkflowDetail{} - for _, f11f1iter := range cr.Spec.ForProvider.WorkflowDetails.OnUpload { - f11f1elem := &svcsdk.WorkflowDetail{} - if f11f1iter.ExecutionRole != nil { - f11f1elem.SetExecutionRole(*f11f1iter.ExecutionRole) + f12f1 := []*svcsdk.WorkflowDetail{} + for _, f12f1iter := range cr.Spec.ForProvider.WorkflowDetails.OnUpload { + f12f1elem := &svcsdk.WorkflowDetail{} + if f12f1iter.ExecutionRole != nil { + f12f1elem.SetExecutionRole(*f12f1iter.ExecutionRole) } - if f11f1iter.WorkflowID != nil { - f11f1elem.SetWorkflowId(*f11f1iter.WorkflowID) + if f12f1iter.WorkflowID != nil { + f12f1elem.SetWorkflowId(*f12f1iter.WorkflowID) } - f11f1 = append(f11f1, f11f1elem) + f12f1 = append(f12f1, f12f1elem) } - f11.SetOnUpload(f11f1) + f12.SetOnUpload(f12f1) } - res.SetWorkflowDetails(f11) + res.SetWorkflowDetails(f12) } return res @@ -329,6 +355,9 @@ func GenerateUpdateServerInput(cr *svcapitypes.Server) *svcsdk.UpdateServerInput if cr.Spec.ForProvider.IdentityProviderDetails.InvocationRole != nil { f4.SetInvocationRole(*cr.Spec.ForProvider.IdentityProviderDetails.InvocationRole) } + if cr.Spec.ForProvider.IdentityProviderDetails.SftpAuthenticationMethods != nil { + f4.SetSftpAuthenticationMethods(*cr.Spec.ForProvider.IdentityProviderDetails.SftpAuthenticationMethods) + } if 
cr.Spec.ForProvider.IdentityProviderDetails.URL != nil { f4.SetUrl(*cr.Spec.ForProvider.IdentityProviderDetails.URL) } @@ -377,37 +406,46 @@ func GenerateUpdateServerInput(cr *svcapitypes.Server) *svcsdk.UpdateServerInput if cr.Status.AtProvider.ServerID != nil { res.SetServerId(*cr.Status.AtProvider.ServerID) } + if cr.Spec.ForProvider.StructuredLogDestinations != nil { + f12 := []*string{} + for _, f12iter := range cr.Spec.ForProvider.StructuredLogDestinations { + var f12elem string + f12elem = *f12iter + f12 = append(f12, &f12elem) + } + res.SetStructuredLogDestinations(f12) + } if cr.Spec.ForProvider.WorkflowDetails != nil { - f12 := &svcsdk.WorkflowDetails{} + f13 := &svcsdk.WorkflowDetails{} if cr.Spec.ForProvider.WorkflowDetails.OnPartialUpload != nil { - f12f0 := []*svcsdk.WorkflowDetail{} - for _, f12f0iter := range cr.Spec.ForProvider.WorkflowDetails.OnPartialUpload { - f12f0elem := &svcsdk.WorkflowDetail{} - if f12f0iter.ExecutionRole != nil { - f12f0elem.SetExecutionRole(*f12f0iter.ExecutionRole) + f13f0 := []*svcsdk.WorkflowDetail{} + for _, f13f0iter := range cr.Spec.ForProvider.WorkflowDetails.OnPartialUpload { + f13f0elem := &svcsdk.WorkflowDetail{} + if f13f0iter.ExecutionRole != nil { + f13f0elem.SetExecutionRole(*f13f0iter.ExecutionRole) } - if f12f0iter.WorkflowID != nil { - f12f0elem.SetWorkflowId(*f12f0iter.WorkflowID) + if f13f0iter.WorkflowID != nil { + f13f0elem.SetWorkflowId(*f13f0iter.WorkflowID) } - f12f0 = append(f12f0, f12f0elem) + f13f0 = append(f13f0, f13f0elem) } - f12.SetOnPartialUpload(f12f0) + f13.SetOnPartialUpload(f13f0) } if cr.Spec.ForProvider.WorkflowDetails.OnUpload != nil { - f12f1 := []*svcsdk.WorkflowDetail{} - for _, f12f1iter := range cr.Spec.ForProvider.WorkflowDetails.OnUpload { - f12f1elem := &svcsdk.WorkflowDetail{} - if f12f1iter.ExecutionRole != nil { - f12f1elem.SetExecutionRole(*f12f1iter.ExecutionRole) + f13f1 := []*svcsdk.WorkflowDetail{} + for _, f13f1iter := range cr.Spec.ForProvider.WorkflowDetails.OnUpload { + f13f1elem := &svcsdk.WorkflowDetail{} + if f13f1iter.ExecutionRole != nil { + f13f1elem.SetExecutionRole(*f13f1iter.ExecutionRole) } - if f12f1iter.WorkflowID != nil { - f12f1elem.SetWorkflowId(*f12f1iter.WorkflowID) + if f13f1iter.WorkflowID != nil { + f13f1elem.SetWorkflowId(*f13f1iter.WorkflowID) } - f12f1 = append(f12f1, f12f1elem) + f13f1 = append(f13f1, f13f1elem) } - f12.SetOnUpload(f12f1) + f13.SetOnUpload(f13f1) } - res.SetWorkflowDetails(f12) + res.SetWorkflowDetails(f13) } return res
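The renumbered fNN blocks throughout these hunks all expand the same nil-guarded deep copy for []*string fields (DomainDNSIPs, EnableCloudwatchLogsExports, StructuredLogDestinations, and so on). A minimal generic sketch of that pattern (a hand-written equivalent with a hypothetical helper and package name, not code the generator actually emits) is:

	package conversions // hypothetical package, for illustration only

	// copyStringPtrSlice mirrors the inline loops in the generated
	// conversions: nil input stays nil; otherwise each element is
	// dereferenced into a fresh string so the copy does not alias
	// the SDK response it was built from.
	func copyStringPtrSlice(in []*string) []*string {
		if in == nil {
			return nil
		}
		out := make([]*string, 0, len(in))
		for _, p := range in {
			v := *p
			out = append(out, &v)
		}
		return out
	}

With such a helper, a block like the StructuredLogDestinations copy above would reduce to a single assignment; the generator instead emits the loop inline at every site, which keeps each conversion file dependency-free at the cost of the repetitive fNN renumbering churn visible in this patch.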