diff --git a/apis/v1alpha1/ack-generate-metadata.yaml b/apis/v1alpha1/ack-generate-metadata.yaml index 0ad2391..587f400 100755 --- a/apis/v1alpha1/ack-generate-metadata.yaml +++ b/apis/v1alpha1/ack-generate-metadata.yaml @@ -1,13 +1,13 @@ ack_generate_info: - build_date: "2021-10-14T18:50:30Z" - build_hash: f4166ae9942034b0552f244685515eef5a92dc25 + build_date: "2021-10-27T19:39:24Z" + build_hash: 6cfd5d8443975a287535dc27424206ac133ebb24 go_version: go1.17.1 version: v0.15.1 -api_directory_checksum: e1236617364bb9947bcbfbeb21ce75841b5407f3 +api_directory_checksum: 5157c106fef6bc423adba5eaf17c55de26b36d1e api_version: v1alpha1 aws_sdk_go_version: v1.37.10 generator_config_info: - file_checksum: 45772c7b934f89394b89fe6214164cd8fe76a59e + file_checksum: 6a5e4faafc21f5c309ef5219f784fa8228d01744 original_file_name: generator.yaml last_modification: reason: API generation diff --git a/apis/v1alpha1/bucket.go b/apis/v1alpha1/bucket.go index d78e9af..bbfa334 100644 --- a/apis/v1alpha1/bucket.go +++ b/apis/v1alpha1/bucket.go @@ -29,6 +29,8 @@ type BucketSpec struct { ACL *string `json:"acl,omitempty"` // Container for setting the transfer acceleration state. Accelerate *AccelerateConfiguration `json:"accelerate,omitempty"` + + Analytics []*AnalyticsConfiguration `json:"analytics,omitempty"` // Describes the cross-origin access configuration for objects in an Amazon // S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon @@ -49,10 +51,16 @@ type BucketSpec struct { GrantWrite *string `json:"grantWrite,omitempty"` // Allows grantee to write the ACL for the applicable bucket. GrantWriteACP *string `json:"grantWriteACP,omitempty"` + + IntelligentTiering []*IntelligentTieringConfiguration `json:"intelligentTiering,omitempty"` + + Inventory []*InventoryConfiguration `json:"inventory,omitempty"` // Container for lifecycle rules. You can add as many as 1,000 rules. Lifecycle *BucketLifecycleConfiguration `json:"lifecycle,omitempty"` // Container for logging status information. Logging *BucketLoggingStatus `json:"logging,omitempty"` + + Metrics []*MetricsConfiguration `json:"metrics,omitempty"` // The name of the bucket to create. 
// +kubebuilder:validation:Required Name *string `json:"name"` diff --git a/apis/v1alpha1/generator.yaml b/apis/v1alpha1/generator.yaml index 50e82d2..5b170ac 100644 --- a/apis/v1alpha1/generator.yaml +++ b/apis/v1alpha1/generator.yaml @@ -21,10 +21,9 @@ resources: from: operation: PutBucketAccelerateConfiguration path: AccelerateConfiguration - # AnalyticsConfiguration: - # from: - # operation: PutBucketAnalyticsConfiguration - # path: AnalyticsConfiguration # Double check about ID + Analytics: + custom_field: + list_of: AnalyticsConfiguration CORS: from: operation: PutBucketCors @@ -33,14 +32,12 @@ resources: from: operation: PutBucketEncryption path: ServerSideEncryptionConfiguration - # IntelligentTieringConfiguration: - # from: - # operation: PutBucketIntelligentTieringConfiguration - # path: IntelligentTieringConfiguration # Double check about ID - # InventoryConfiguration: - # from: - # operation: PutBucketInventoryConfiguration - # path: InventoryConfiguration # Double check about ID + IntelligentTiering: + custom_field: + list_of: IntelligentTieringConfiguration + Inventory: + custom_field: + list_of: InventoryConfiguration Lifecycle: from: operation: PutBucketLifecycleConfiguration @@ -49,10 +46,9 @@ resources: from: operation: PutBucketLogging path: BucketLoggingStatus - # MetricsConfiguration: - # from: - # operation: PutBucketMetricsConfiguration - # path: MetricsConfiguration # Double check about ID + Metrics: + custom_field: + list_of: MetricsConfiguration Notification: from: operation: PutBucketNotificationConfiguration diff --git a/apis/v1alpha1/types.go b/apis/v1alpha1/types.go index 8524dc4..bd686fd 100644 --- a/apis/v1alpha1/types.go +++ b/apis/v1alpha1/types.go @@ -63,11 +63,35 @@ type AnalyticsAndOperator struct { Tags []*Tag `json:"tags,omitempty"` } +// Specifies the configuration and any analyses for the analytics filter of +// an Amazon S3 bucket. +type AnalyticsConfiguration struct { + // The filter used to describe a set of objects for analyses. A filter must + // have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). + // If no filter is provided, all objects will be considered in any analysis. + Filter *AnalyticsFilter `json:"filter,omitempty"` + ID *string `json:"id,omitempty"` + // Specifies data related to access patterns to be collected and made available + // to analyze the tradeoffs between different storage classes for an Amazon + // S3 bucket. + StorageClassAnalysis *StorageClassAnalysis `json:"storageClassAnalysis,omitempty"` +} + +// Where to publish the analytics results. +type AnalyticsExportDestination struct { + // Contains information about where to publish the analytics results. + S3BucketDestination *AnalyticsS3BucketDestination `json:"s3BucketDestination,omitempty"` +} + // The filter used to describe a set of objects for analyses. A filter must // have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). // If no filter is provided, all objects will be considered in any analysis. type AnalyticsFilter struct { - Prefix *string `json:"prefix,omitempty"` + // A conjunction (logical AND) of predicates, which is used in evaluating a + // metrics filter. The operator must have at least two predicates in any combination, + // and an object must match all of the predicates for the filter to apply. + And *AnalyticsAndOperator `json:"and,omitempty"` + Prefix *string `json:"prefix,omitempty"` // A container of a key value name pair. 
Tag *Tag `json:"tag,omitempty"` } @@ -76,6 +100,7 @@ type AnalyticsFilter struct { type AnalyticsS3BucketDestination struct { Bucket *string `json:"bucket,omitempty"` BucketAccountID *string `json:"bucketAccountID,omitempty"` + Format *string `json:"format,omitempty"` Prefix *string `json:"prefix,omitempty"` } @@ -277,14 +302,62 @@ type IntelligentTieringAndOperator struct { Tags []*Tag `json:"tags,omitempty"` } +// Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket. +// +// For information about the S3 Intelligent-Tiering storage class, see Storage +// class for automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +type IntelligentTieringConfiguration struct { + // The Filter is used to identify objects that the S3 Intelligent-Tiering configuration + // applies to. + Filter *IntelligentTieringFilter `json:"filter,omitempty"` + ID *string `json:"id,omitempty"` + Status *string `json:"status,omitempty"` + Tierings []*Tiering `json:"tierings,omitempty"` +} + // The Filter is used to identify objects that the S3 Intelligent-Tiering configuration // applies to. type IntelligentTieringFilter struct { - Prefix *string `json:"prefix,omitempty"` + // A container for specifying S3 Intelligent-Tiering filters. The filters determine + // the subset of objects to which the rule applies. + And *IntelligentTieringAndOperator `json:"and,omitempty"` + Prefix *string `json:"prefix,omitempty"` // A container of a key value name pair. Tag *Tag `json:"tag,omitempty"` } +// Specifies the inventory configuration for an Amazon S3 bucket. For more information, +// see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) +// in the Amazon Simple Storage Service API Reference. +type InventoryConfiguration struct { + // Specifies the inventory configuration for an Amazon S3 bucket. + Destination *InventoryDestination `json:"destination,omitempty"` + // Specifies an inventory filter. The inventory only includes objects that meet + // the filter's criteria. + Filter *InventoryFilter `json:"filter,omitempty"` + ID *string `json:"id,omitempty"` + IncludedObjectVersions *string `json:"includedObjectVersions,omitempty"` + IsEnabled *bool `json:"isEnabled,omitempty"` + OptionalFields []*string `json:"optionalFields,omitempty"` + // Specifies the schedule for generating inventory results. + Schedule *InventorySchedule `json:"schedule,omitempty"` +} + +// Specifies the inventory configuration for an Amazon S3 bucket. +type InventoryDestination struct { + // Contains the bucket name, file format, bucket owner (optional), and prefix + // (optional) where inventory results are published. + S3BucketDestination *InventoryS3BucketDestination `json:"s3BucketDestination,omitempty"` +} + +// Contains the type of server-side encryption used to encrypt the inventory +// results. +type InventoryEncryption struct { + // Specifies the use of SSE-KMS to encrypt delivered inventory reports. + SSEKMS *SSEKMS `json:"sseKMS,omitempty"` +} + // Specifies an inventory filter. The inventory only includes objects that meet // the filter's criteria. 
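// (Unlike AnalyticsFilter and MetricsFilter above, this filter exposes only
// a Prefix; aws-sdk-go v1.37.10 models no tag- or And-based predicates for
// inventory filters.)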
type InventoryFilter struct { @@ -296,7 +369,16 @@ type InventoryFilter struct { type InventoryS3BucketDestination struct { AccountID *string `json:"accountID,omitempty"` Bucket *string `json:"bucket,omitempty"` - Prefix *string `json:"prefix,omitempty"` + // Contains the type of server-side encryption used to encrypt the inventory + // results. + Encryption *InventoryEncryption `json:"encryption,omitempty"` + Format *string `json:"format,omitempty"` + Prefix *string `json:"prefix,omitempty"` +} + +// Specifies the schedule for generating inventory results. +type InventorySchedule struct { + Frequency *string `json:"frequency,omitempty"` } // A container for object key name prefix and suffix filtering rules. @@ -408,11 +490,30 @@ type MetricsAndOperator struct { Tags []*Tag `json:"tags,omitempty"` } +// Specifies a metrics configuration for the CloudWatch request metrics (specified +// by the metrics configuration ID) from an Amazon S3 bucket. If you're updating +// an existing metrics configuration, note that this is a full replacement of +// the existing metrics configuration. If you don't include the elements you +// want to keep, they are erased. For more information, see PUT Bucket metrics +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html) +// in the Amazon Simple Storage Service API Reference. +type MetricsConfiguration struct { + // Specifies a metrics configuration filter. The metrics configuration only + // includes objects that meet the filter's criteria. A filter must be a prefix, + // a tag, or a conjunction (MetricsAndOperator). + Filter *MetricsFilter `json:"filter,omitempty"` + ID *string `json:"id,omitempty"` +} + // Specifies a metrics configuration filter. The metrics configuration only // includes objects that meet the filter's criteria. A filter must be a prefix, // a tag, or a conjunction (MetricsAndOperator). type MetricsFilter struct { - Prefix *string `json:"prefix,omitempty"` + // A conjunction (logical AND) of predicates, which is used in evaluating a + // metrics filter. The operator must have at least two predicates, and an object + // must match all of the predicates in order for the filter to apply. + And *MetricsAndOperator `json:"and,omitempty"` + Prefix *string `json:"prefix,omitempty"` // A container of a key value name pair. Tag *Tag `json:"tag,omitempty"` } @@ -805,6 +906,23 @@ type SourceSelectionCriteria struct { SSEKMSEncryptedObjects *SSEKMSEncryptedObjects `json:"sseKMSEncryptedObjects,omitempty"` } +// Specifies data related to access patterns to be collected and made available +// to analyze the tradeoffs between different storage classes for an Amazon +// S3 bucket. +type StorageClassAnalysis struct { + // Container for data related to the storage class analysis for an Amazon S3 + // bucket for export. + DataExport *StorageClassAnalysisDataExport `json:"dataExport,omitempty"` +} + +// Container for data related to the storage class analysis for an Amazon S3 +// bucket for export. +type StorageClassAnalysisDataExport struct { + // Where to publish the analytics results. + Destination *AnalyticsExportDestination `json:"destination,omitempty"` + OutputSchemaVersion *string `json:"outputSchemaVersion,omitempty"` +} + // A container of a key value name pair. 
type Tag struct { Key *string `json:"key,omitempty"` @@ -823,6 +941,14 @@ type TargetGrant struct { Permission *string `json:"permission,omitempty"` } +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without additional operational overhead. +type Tiering struct { + AccessTier *string `json:"accessTier,omitempty"` + Days *int64 `json:"days,omitempty"` +} + // A container for specifying the configuration for publication of messages // to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 // detects specified events. diff --git a/apis/v1alpha1/zz_generated.deepcopy.go b/apis/v1alpha1/zz_generated.deepcopy.go index f9af655..936939a 100644 --- a/apis/v1alpha1/zz_generated.deepcopy.go +++ b/apis/v1alpha1/zz_generated.deepcopy.go @@ -136,9 +136,64 @@ func (in *AnalyticsAndOperator) DeepCopy() *AnalyticsAndOperator { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnalyticsConfiguration) DeepCopyInto(out *AnalyticsConfiguration) { + *out = *in + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(AnalyticsFilter) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.StorageClassAnalysis != nil { + in, out := &in.StorageClassAnalysis, &out.StorageClassAnalysis + *out = new(StorageClassAnalysis) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnalyticsConfiguration. +func (in *AnalyticsConfiguration) DeepCopy() *AnalyticsConfiguration { + if in == nil { + return nil + } + out := new(AnalyticsConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnalyticsExportDestination) DeepCopyInto(out *AnalyticsExportDestination) { + *out = *in + if in.S3BucketDestination != nil { + in, out := &in.S3BucketDestination, &out.S3BucketDestination + *out = new(AnalyticsS3BucketDestination) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnalyticsExportDestination. +func (in *AnalyticsExportDestination) DeepCopy() *AnalyticsExportDestination { + if in == nil { + return nil + } + out := new(AnalyticsExportDestination) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AnalyticsFilter) DeepCopyInto(out *AnalyticsFilter) { *out = *in + if in.And != nil { + in, out := &in.And, &out.And + *out = new(AnalyticsAndOperator) + (*in).DeepCopyInto(*out) + } if in.Prefix != nil { in, out := &in.Prefix, &out.Prefix *out = new(string) @@ -174,6 +229,11 @@ func (in *AnalyticsS3BucketDestination) DeepCopyInto(out *AnalyticsS3BucketDesti *out = new(string) **out = **in } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } if in.Prefix != nil { in, out := &in.Prefix, &out.Prefix *out = new(string) @@ -309,6 +369,17 @@ func (in *BucketSpec) DeepCopyInto(out *BucketSpec) { *out = new(AccelerateConfiguration) (*in).DeepCopyInto(*out) } + if in.Analytics != nil { + in, out := &in.Analytics, &out.Analytics + *out = make([]*AnalyticsConfiguration, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(AnalyticsConfiguration) + (*in).DeepCopyInto(*out) + } + } + } if in.CORS != nil { in, out := &in.CORS, &out.CORS *out = new(CORSConfiguration) @@ -349,6 +420,28 @@ func (in *BucketSpec) DeepCopyInto(out *BucketSpec) { *out = new(string) **out = **in } + if in.IntelligentTiering != nil { + in, out := &in.IntelligentTiering, &out.IntelligentTiering + *out = make([]*IntelligentTieringConfiguration, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(IntelligentTieringConfiguration) + (*in).DeepCopyInto(*out) + } + } + } + if in.Inventory != nil { + in, out := &in.Inventory, &out.Inventory + *out = make([]*InventoryConfiguration, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(InventoryConfiguration) + (*in).DeepCopyInto(*out) + } + } + } if in.Lifecycle != nil { in, out := &in.Lifecycle, &out.Lifecycle *out = new(BucketLifecycleConfiguration) @@ -359,6 +452,17 @@ func (in *BucketSpec) DeepCopyInto(out *BucketSpec) { *out = new(BucketLoggingStatus) (*in).DeepCopyInto(*out) } + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = make([]*MetricsConfiguration, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(MetricsConfiguration) + (*in).DeepCopyInto(*out) + } + } + } if in.Name != nil { in, out := &in.Name, &out.Name *out = new(string) @@ -1078,9 +1182,55 @@ func (in *IntelligentTieringAndOperator) DeepCopy() *IntelligentTieringAndOperat return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntelligentTieringConfiguration) DeepCopyInto(out *IntelligentTieringConfiguration) { + *out = *in + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(IntelligentTieringFilter) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tierings != nil { + in, out := &in.Tierings, &out.Tierings + *out = make([]*Tiering, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Tiering) + (*in).DeepCopyInto(*out) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntelligentTieringConfiguration. 
+func (in *IntelligentTieringConfiguration) DeepCopy() *IntelligentTieringConfiguration { + if in == nil { + return nil + } + out := new(IntelligentTieringConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IntelligentTieringFilter) DeepCopyInto(out *IntelligentTieringFilter) { *out = *in + if in.And != nil { + in, out := &in.And, &out.And + *out = new(IntelligentTieringAndOperator) + (*in).DeepCopyInto(*out) + } if in.Prefix != nil { in, out := &in.Prefix, &out.Prefix *out = new(string) @@ -1103,6 +1253,102 @@ func (in *IntelligentTieringFilter) DeepCopy() *IntelligentTieringFilter { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InventoryConfiguration) DeepCopyInto(out *InventoryConfiguration) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(InventoryDestination) + (*in).DeepCopyInto(*out) + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(InventoryFilter) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IncludedObjectVersions != nil { + in, out := &in.IncludedObjectVersions, &out.IncludedObjectVersions + *out = new(string) + **out = **in + } + if in.IsEnabled != nil { + in, out := &in.IsEnabled, &out.IsEnabled + *out = new(bool) + **out = **in + } + if in.OptionalFields != nil { + in, out := &in.OptionalFields, &out.OptionalFields + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(InventorySchedule) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventoryConfiguration. +func (in *InventoryConfiguration) DeepCopy() *InventoryConfiguration { + if in == nil { + return nil + } + out := new(InventoryConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InventoryDestination) DeepCopyInto(out *InventoryDestination) { + *out = *in + if in.S3BucketDestination != nil { + in, out := &in.S3BucketDestination, &out.S3BucketDestination + *out = new(InventoryS3BucketDestination) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventoryDestination. +func (in *InventoryDestination) DeepCopy() *InventoryDestination { + if in == nil { + return nil + } + out := new(InventoryDestination) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InventoryEncryption) DeepCopyInto(out *InventoryEncryption) { + *out = *in + if in.SSEKMS != nil { + in, out := &in.SSEKMS, &out.SSEKMS + *out = new(SSEKMS) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventoryEncryption. 
+func (in *InventoryEncryption) DeepCopy() *InventoryEncryption { + if in == nil { + return nil + } + out := new(InventoryEncryption) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *InventoryFilter) DeepCopyInto(out *InventoryFilter) { *out = *in @@ -1136,6 +1382,16 @@ func (in *InventoryS3BucketDestination) DeepCopyInto(out *InventoryS3BucketDesti *out = new(string) **out = **in } + if in.Encryption != nil { + in, out := &in.Encryption, &out.Encryption + *out = new(InventoryEncryption) + (*in).DeepCopyInto(*out) + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } if in.Prefix != nil { in, out := &in.Prefix, &out.Prefix *out = new(string) @@ -1153,6 +1409,26 @@ func (in *InventoryS3BucketDestination) DeepCopy() *InventoryS3BucketDestination return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InventorySchedule) DeepCopyInto(out *InventorySchedule) { + *out = *in + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventorySchedule. +func (in *InventorySchedule) DeepCopy() *InventorySchedule { + if in == nil { + return nil + } + out := new(InventorySchedule) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KeyFilter) DeepCopyInto(out *KeyFilter) { *out = *in @@ -1504,9 +1780,39 @@ func (in *MetricsAndOperator) DeepCopy() *MetricsAndOperator { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricsConfiguration) DeepCopyInto(out *MetricsConfiguration) { + *out = *in + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(MetricsFilter) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsConfiguration. +func (in *MetricsConfiguration) DeepCopy() *MetricsConfiguration { + if in == nil { + return nil + } + out := new(MetricsConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MetricsFilter) DeepCopyInto(out *MetricsFilter) { *out = *in + if in.And != nil { + in, out := &in.And, &out.And + *out = new(MetricsAndOperator) + (*in).DeepCopyInto(*out) + } if in.Prefix != nil { in, out := &in.Prefix, &out.Prefix *out = new(string) @@ -2512,6 +2818,51 @@ func (in *SourceSelectionCriteria) DeepCopy() *SourceSelectionCriteria { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageClassAnalysis) DeepCopyInto(out *StorageClassAnalysis) { + *out = *in + if in.DataExport != nil { + in, out := &in.DataExport, &out.DataExport + *out = new(StorageClassAnalysisDataExport) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClassAnalysis. 
+func (in *StorageClassAnalysis) DeepCopy() *StorageClassAnalysis { + if in == nil { + return nil + } + out := new(StorageClassAnalysis) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageClassAnalysisDataExport) DeepCopyInto(out *StorageClassAnalysisDataExport) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(AnalyticsExportDestination) + (*in).DeepCopyInto(*out) + } + if in.OutputSchemaVersion != nil { + in, out := &in.OutputSchemaVersion, &out.OutputSchemaVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClassAnalysisDataExport. +func (in *StorageClassAnalysisDataExport) DeepCopy() *StorageClassAnalysisDataExport { + if in == nil { + return nil + } + out := new(StorageClassAnalysisDataExport) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Tag) DeepCopyInto(out *Tag) { *out = *in @@ -2588,6 +2939,31 @@ func (in *TargetGrant) DeepCopy() *TargetGrant { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Tiering) DeepCopyInto(out *Tiering) { + *out = *in + if in.AccessTier != nil { + in, out := &in.AccessTier, &out.AccessTier + *out = new(string) + **out = **in + } + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tiering. +func (in *Tiering) DeepCopy() *Tiering { + if in == nil { + return nil + } + out := new(Tiering) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TopicConfiguration) DeepCopyInto(out *TopicConfiguration) { *out = *in diff --git a/config/crd/bases/s3.services.k8s.aws_buckets.yaml b/config/crd/bases/s3.services.k8s.aws_buckets.yaml index d020289..1ff079e 100644 --- a/config/crd/bases/s3.services.k8s.aws_buckets.yaml +++ b/config/crd/bases/s3.services.k8s.aws_buckets.yaml @@ -47,6 +47,82 @@ spec: acl: description: The canned ACL to apply to the bucket. type: string + analytics: + items: + description: Specifies the configuration and any analyses for the + analytics filter of an Amazon S3 bucket. + properties: + filter: + description: The filter used to describe a set of objects for + analyses. A filter must have exactly one prefix, one tag, + or one conjunction (AnalyticsAndOperator). If no filter is + provided, all objects will be considered in any analysis. + properties: + and: + description: A conjunction (logical AND) of predicates, + which is used in evaluating a metrics filter. The operator + must have at least two predicates in any combination, + and an object must match all of the predicates for the + filter to apply. + properties: + prefix: + type: string + tags: + items: + description: A container of a key value name pair. + properties: + key: + type: string + value: + type: string + type: object + type: array + type: object + prefix: + type: string + tag: + description: A container of a key value name pair. 
+ properties: + key: + type: string + value: + type: string + type: object + type: object + id: + type: string + storageClassAnalysis: + description: Specifies data related to access patterns to be + collected and made available to analyze the tradeoffs between + different storage classes for an Amazon S3 bucket. + properties: + dataExport: + description: Container for data related to the storage class + analysis for an Amazon S3 bucket for export. + properties: + destination: + description: Where to publish the analytics results. + properties: + s3BucketDestination: + description: Contains information about where to + publish the analytics results. + properties: + bucket: + type: string + bucketAccountID: + type: string + format: + type: string + prefix: + type: string + type: object + type: object + outputSchemaVersion: + type: string + type: object + type: object + type: object + type: array cors: description: Describes the cross-origin access configuration for objects in an Amazon S3 bucket. For more information, see Enabling Cross-Origin @@ -128,6 +204,129 @@ spec: grantWriteACP: description: Allows grantee to write the ACL for the applicable bucket. type: string + intelligentTiering: + items: + description: "Specifies the S3 Intelligent-Tiering configuration + for an Amazon S3 bucket. \n For information about the S3 Intelligent-Tiering + storage class, see Storage class for automatically optimizing + frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access)." + properties: + filter: + description: The Filter is used to identify objects that the + S3 Intelligent-Tiering configuration applies to. + properties: + and: + description: A container for specifying S3 Intelligent-Tiering + filters. The filters determine the subset of objects to + which the rule applies. + properties: + prefix: + type: string + tags: + items: + description: A container of a key value name pair. + properties: + key: + type: string + value: + type: string + type: object + type: array + type: object + prefix: + type: string + tag: + description: A container of a key value name pair. + properties: + key: + type: string + value: + type: string + type: object + type: object + id: + type: string + status: + type: string + tierings: + items: + description: The S3 Intelligent-Tiering storage class is designed + to optimize storage costs by automatically moving data to + the most cost-effective storage access tier, without additional + operational overhead. + properties: + accessTier: + type: string + days: + format: int64 + type: integer + type: object + type: array + type: object + type: array + inventory: + items: + description: Specifies the inventory configuration for an Amazon + S3 bucket. For more information, see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) + in the Amazon Simple Storage Service API Reference. + properties: + destination: + description: Specifies the inventory configuration for an Amazon + S3 bucket. + properties: + s3BucketDestination: + description: Contains the bucket name, file format, bucket + owner (optional), and prefix (optional) where inventory + results are published. + properties: + accountID: + type: string + bucket: + type: string + encryption: + description: Contains the type of server-side encryption + used to encrypt the inventory results. 
+ properties: + sseKMS: + description: Specifies the use of SSE-KMS to encrypt + delivered inventory reports. + properties: + keyID: + type: string + type: object + type: object + format: + type: string + prefix: + type: string + type: object + type: object + filter: + description: Specifies an inventory filter. The inventory only + includes objects that meet the filter's criteria. + properties: + prefix: + type: string + type: object + id: + type: string + includedObjectVersions: + type: string + isEnabled: + type: boolean + optionalFields: + items: + type: string + type: array + schedule: + description: Specifies the schedule for generating inventory + results. + properties: + frequency: + type: string + type: object + type: object + type: array lifecycle: description: Container for lifecycle rules. You can add as many as 1,000 rules. @@ -293,6 +492,57 @@ spec: type: string type: object type: object + metrics: + items: + description: Specifies a metrics configuration for the CloudWatch + request metrics (specified by the metrics configuration ID) from + an Amazon S3 bucket. If you're updating an existing metrics configuration, + note that this is a full replacement of the existing metrics configuration. + If you don't include the elements you want to keep, they are erased. + For more information, see PUT Bucket metrics (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html) + in the Amazon Simple Storage Service API Reference. + properties: + filter: + description: Specifies a metrics configuration filter. The metrics + configuration only includes objects that meet the filter's + criteria. A filter must be a prefix, a tag, or a conjunction + (MetricsAndOperator). + properties: + and: + description: A conjunction (logical AND) of predicates, + which is used in evaluating a metrics filter. The operator + must have at least two predicates, and an object must + match all of the predicates in order for the filter to + apply. + properties: + prefix: + type: string + tags: + items: + description: A container of a key value name pair. + properties: + key: + type: string + value: + type: string + type: object + type: array + type: object + prefix: + type: string + tag: + description: A container of a key value name pair. + properties: + key: + type: string + value: + type: string + type: object + type: object + id: + type: string + type: object + type: array name: description: The name of the bucket to create. 
type: string diff --git a/generator.yaml b/generator.yaml index 50e82d2..5b170ac 100644 --- a/generator.yaml +++ b/generator.yaml @@ -21,10 +21,9 @@ resources: from: operation: PutBucketAccelerateConfiguration path: AccelerateConfiguration - # AnalyticsConfiguration: - # from: - # operation: PutBucketAnalyticsConfiguration - # path: AnalyticsConfiguration # Double check about ID + Analytics: + custom_field: + list_of: AnalyticsConfiguration CORS: from: operation: PutBucketCors @@ -33,14 +32,12 @@ resources: from: operation: PutBucketEncryption path: ServerSideEncryptionConfiguration - # IntelligentTieringConfiguration: - # from: - # operation: PutBucketIntelligentTieringConfiguration - # path: IntelligentTieringConfiguration # Double check about ID - # InventoryConfiguration: - # from: - # operation: PutBucketInventoryConfiguration - # path: InventoryConfiguration # Double check about ID + IntelligentTiering: + custom_field: + list_of: IntelligentTieringConfiguration + Inventory: + custom_field: + list_of: InventoryConfiguration Lifecycle: from: operation: PutBucketLifecycleConfiguration @@ -49,10 +46,9 @@ resources: from: operation: PutBucketLogging path: BucketLoggingStatus - # MetricsConfiguration: - # from: - # operation: PutBucketMetricsConfiguration - # path: MetricsConfiguration # Double check about ID + Metrics: + custom_field: + list_of: MetricsConfiguration Notification: from: operation: PutBucketNotificationConfiguration diff --git a/helm/crds/s3.services.k8s.aws_buckets.yaml b/helm/crds/s3.services.k8s.aws_buckets.yaml index d020289..1ff079e 100644 --- a/helm/crds/s3.services.k8s.aws_buckets.yaml +++ b/helm/crds/s3.services.k8s.aws_buckets.yaml @@ -47,6 +47,82 @@ spec: acl: description: The canned ACL to apply to the bucket. type: string + analytics: + items: + description: Specifies the configuration and any analyses for the + analytics filter of an Amazon S3 bucket. + properties: + filter: + description: The filter used to describe a set of objects for + analyses. A filter must have exactly one prefix, one tag, + or one conjunction (AnalyticsAndOperator). If no filter is + provided, all objects will be considered in any analysis. + properties: + and: + description: A conjunction (logical AND) of predicates, + which is used in evaluating a metrics filter. The operator + must have at least two predicates in any combination, + and an object must match all of the predicates for the + filter to apply. + properties: + prefix: + type: string + tags: + items: + description: A container of a key value name pair. + properties: + key: + type: string + value: + type: string + type: object + type: array + type: object + prefix: + type: string + tag: + description: A container of a key value name pair. + properties: + key: + type: string + value: + type: string + type: object + type: object + id: + type: string + storageClassAnalysis: + description: Specifies data related to access patterns to be + collected and made available to analyze the tradeoffs between + different storage classes for an Amazon S3 bucket. + properties: + dataExport: + description: Container for data related to the storage class + analysis for an Amazon S3 bucket for export. + properties: + destination: + description: Where to publish the analytics results. + properties: + s3BucketDestination: + description: Contains information about where to + publish the analytics results. 
+ properties: + bucket: + type: string + bucketAccountID: + type: string + format: + type: string + prefix: + type: string + type: object + type: object + outputSchemaVersion: + type: string + type: object + type: object + type: object + type: array cors: description: Describes the cross-origin access configuration for objects in an Amazon S3 bucket. For more information, see Enabling Cross-Origin @@ -128,6 +204,129 @@ spec: grantWriteACP: description: Allows grantee to write the ACL for the applicable bucket. type: string + intelligentTiering: + items: + description: "Specifies the S3 Intelligent-Tiering configuration + for an Amazon S3 bucket. \n For information about the S3 Intelligent-Tiering + storage class, see Storage class for automatically optimizing + frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access)." + properties: + filter: + description: The Filter is used to identify objects that the + S3 Intelligent-Tiering configuration applies to. + properties: + and: + description: A container for specifying S3 Intelligent-Tiering + filters. The filters determine the subset of objects to + which the rule applies. + properties: + prefix: + type: string + tags: + items: + description: A container of a key value name pair. + properties: + key: + type: string + value: + type: string + type: object + type: array + type: object + prefix: + type: string + tag: + description: A container of a key value name pair. + properties: + key: + type: string + value: + type: string + type: object + type: object + id: + type: string + status: + type: string + tierings: + items: + description: The S3 Intelligent-Tiering storage class is designed + to optimize storage costs by automatically moving data to + the most cost-effective storage access tier, without additional + operational overhead. + properties: + accessTier: + type: string + days: + format: int64 + type: integer + type: object + type: array + type: object + type: array + inventory: + items: + description: Specifies the inventory configuration for an Amazon + S3 bucket. For more information, see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) + in the Amazon Simple Storage Service API Reference. + properties: + destination: + description: Specifies the inventory configuration for an Amazon + S3 bucket. + properties: + s3BucketDestination: + description: Contains the bucket name, file format, bucket + owner (optional), and prefix (optional) where inventory + results are published. + properties: + accountID: + type: string + bucket: + type: string + encryption: + description: Contains the type of server-side encryption + used to encrypt the inventory results. + properties: + sseKMS: + description: Specifies the use of SSE-KMS to encrypt + delivered inventory reports. + properties: + keyID: + type: string + type: object + type: object + format: + type: string + prefix: + type: string + type: object + type: object + filter: + description: Specifies an inventory filter. The inventory only + includes objects that meet the filter's criteria. + properties: + prefix: + type: string + type: object + id: + type: string + includedObjectVersions: + type: string + isEnabled: + type: boolean + optionalFields: + items: + type: string + type: array + schedule: + description: Specifies the schedule for generating inventory + results. 
+ properties: + frequency: + type: string + type: object + type: object + type: array lifecycle: description: Container for lifecycle rules. You can add as many as 1,000 rules. @@ -293,6 +492,57 @@ spec: type: string type: object type: object + metrics: + items: + description: Specifies a metrics configuration for the CloudWatch + request metrics (specified by the metrics configuration ID) from + an Amazon S3 bucket. If you're updating an existing metrics configuration, + note that this is a full replacement of the existing metrics configuration. + If you don't include the elements you want to keep, they are erased. + For more information, see PUT Bucket metrics (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html) + in the Amazon Simple Storage Service API Reference. + properties: + filter: + description: Specifies a metrics configuration filter. The metrics + configuration only includes objects that meet the filter's + criteria. A filter must be a prefix, a tag, or a conjunction + (MetricsAndOperator). + properties: + and: + description: A conjunction (logical AND) of predicates, + which is used in evaluating a metrics filter. The operator + must have at least two predicates, and an object must + match all of the predicates in order for the filter to + apply. + properties: + prefix: + type: string + tags: + items: + description: A container of a key value name pair. + properties: + key: + type: string + value: + type: string + type: object + type: array + type: object + prefix: + type: string + tag: + description: A container of a key value name pair. + properties: + key: + type: string + value: + type: string + type: object + type: object + id: + type: string + type: object + type: array name: description: The name of the bucket to create. 
type: string diff --git a/pkg/resource/bucket/delta.go b/pkg/resource/bucket/delta.go index c65d783..9589504 100644 --- a/pkg/resource/bucket/delta.go +++ b/pkg/resource/bucket/delta.go @@ -60,6 +60,9 @@ func newResourceDelta( } } } + if !reflect.DeepEqual(a.ko.Spec.Analytics, b.ko.Spec.Analytics) { + delta.Add("Spec.Analytics", a.ko.Spec.Analytics, b.ko.Spec.Analytics) + } if ackcompare.HasNilDifference(a.ko.Spec.CORS, b.ko.Spec.CORS) { delta.Add("Spec.CORS", a.ko.Spec.CORS, b.ko.Spec.CORS) } else if a.ko.Spec.CORS != nil && b.ko.Spec.CORS != nil { @@ -120,6 +123,12 @@ func newResourceDelta( delta.Add("Spec.GrantWriteACP", a.ko.Spec.GrantWriteACP, b.ko.Spec.GrantWriteACP) } } + if !reflect.DeepEqual(a.ko.Spec.IntelligentTiering, b.ko.Spec.IntelligentTiering) { + delta.Add("Spec.IntelligentTiering", a.ko.Spec.IntelligentTiering, b.ko.Spec.IntelligentTiering) + } + if !reflect.DeepEqual(a.ko.Spec.Inventory, b.ko.Spec.Inventory) { + delta.Add("Spec.Inventory", a.ko.Spec.Inventory, b.ko.Spec.Inventory) + } if ackcompare.HasNilDifference(a.ko.Spec.Lifecycle, b.ko.Spec.Lifecycle) { delta.Add("Spec.Lifecycle", a.ko.Spec.Lifecycle, b.ko.Spec.Lifecycle) } else if a.ko.Spec.Lifecycle != nil && b.ko.Spec.Lifecycle != nil { @@ -152,6 +161,9 @@ func newResourceDelta( } } } + if !reflect.DeepEqual(a.ko.Spec.Metrics, b.ko.Spec.Metrics) { + delta.Add("Spec.Metrics", a.ko.Spec.Metrics, b.ko.Spec.Metrics) + } if ackcompare.HasNilDifference(a.ko.Spec.Name, b.ko.Spec.Name) { delta.Add("Spec.Name", a.ko.Spec.Name, b.ko.Spec.Name) } else if a.ko.Spec.Name != nil && b.ko.Spec.Name != nil { diff --git a/pkg/resource/bucket/hook.go b/pkg/resource/bucket/hook.go index 19cf3f0..d57dc89 100644 --- a/pkg/resource/bucket/hook.go +++ b/pkg/resource/bucket/hook.go @@ -39,6 +39,17 @@ var ( CannedACLJoinDelimiter = "|" ) +// ConfigurationAction stores the possible actions that can be performed on +// any of the elements of a configuration list +type ConfigurationAction int + +const ( + ConfigurationActionNone ConfigurationAction = iota + ConfigurationActionPut + ConfigurationActionDelete + ConfigurationActionUpdate +) + func (rm *resourceManager) createPutFields( ctx context.Context, r *resource, @@ -56,6 +67,11 @@ func (rm *resourceManager) createPutFields( return err } } + if len(r.ko.Spec.Analytics) != 0 { + if err := rm.syncAnalytics(ctx, r, nil); err != nil { + return err + } + } if r.ko.Spec.CORS != nil { if err := rm.syncCORS(ctx, r); err != nil { return err @@ -66,6 +82,16 @@ func (rm *resourceManager) createPutFields( return err } } + if len(r.ko.Spec.IntelligentTiering) != 0 { + if err := rm.syncIntelligentTiering(ctx, r, nil); err != nil { + return err + } + } + if len(r.ko.Spec.Inventory) != 0 { + if err := rm.syncInventory(ctx, r, nil); err != nil { + return err + } + } if r.ko.Spec.Lifecycle != nil { if err := rm.syncLifecycle(ctx, r); err != nil { return err @@ -76,6 +102,11 @@ func (rm *resourceManager) createPutFields( return err } } + if len(r.ko.Spec.Metrics) != 0 { + if err := rm.syncMetrics(ctx, r, nil); err != nil { + return err + } + } if r.ko.Spec.Notification != nil { if err := rm.syncNotification(ctx, r); err != nil { return err @@ -142,6 +173,11 @@ func (rm *resourceManager) customUpdateBucket( return nil, err } } + if delta.DifferentAt("Spec.Analytics") { + if err := rm.syncAnalytics(ctx, desired, latest); err != nil { + return nil, err + } + } if delta.DifferentAt("Spec.ACL") || delta.DifferentAt("Spec.GrantFullControl") || delta.DifferentAt("Spec.GrantRead") || @@ -162,6 +198,16 @@ func (rm 
*resourceManager) customUpdateBucket( return nil, err } } + if delta.DifferentAt("Spec.IntelligentTiering") { + if err := rm.syncIntelligentTiering(ctx, desired, latest); err != nil { + return nil, err + } + } + if delta.DifferentAt("Spec.Inventory") { + if err := rm.syncInventory(ctx, desired, latest); err != nil { + return nil, err + } + } if delta.DifferentAt("Spec.Lifecycle") { if err := rm.syncLifecycle(ctx, desired); err != nil { return nil, err @@ -172,6 +218,11 @@ func (rm *resourceManager) customUpdateBucket( return nil, err } } + if delta.DifferentAt("Spec.Metrics") { + if err := rm.syncMetrics(ctx, desired, latest); err != nil { + return nil, err + } + } if delta.DifferentAt("Spec.Notification") { if err := rm.syncNotification(ctx, desired); err != nil { return nil, err @@ -245,6 +296,15 @@ func (rm *resourceManager) addPutFieldsToSpec( } ko.Spec.Accelerate = rm.setResourceAccelerate(r, getAccelerateResponse) + listAnalyticsResponse, err := rm.sdkapi.ListBucketAnalyticsConfigurationsWithContext(ctx, rm.newListBucketAnalyticsPayload(r)) + if err != nil { + return err + } + ko.Spec.Analytics = make([]*svcapitypes.AnalyticsConfiguration, len(listAnalyticsResponse.AnalyticsConfigurationList)) + for i, analyticsConfiguration := range listAnalyticsResponse.AnalyticsConfigurationList { + ko.Spec.Analytics[i] = rm.setResourceAnalyticsConfiguration(r, analyticsConfiguration) + } + getACLResponse, err := rm.sdkapi.GetBucketAclWithContext(ctx, rm.newGetBucketACLPayload(r)) if err != nil { return err @@ -273,6 +333,24 @@ func (rm *resourceManager) addPutFieldsToSpec( } ko.Spec.Encryption = rm.setResourceEncryption(r, getEncryptionResponse) + listIntelligentTieringResponse, err := rm.sdkapi.ListBucketIntelligentTieringConfigurationsWithContext(ctx, rm.newListBucketIntelligentTieringPayload(r)) + if err != nil { + return err + } + ko.Spec.IntelligentTiering = make([]*svcapitypes.IntelligentTieringConfiguration, len(listIntelligentTieringResponse.IntelligentTieringConfigurationList)) + for i, intelligentTieringConfiguration := range listIntelligentTieringResponse.IntelligentTieringConfigurationList { + ko.Spec.IntelligentTiering[i] = rm.setResourceIntelligentTieringConfiguration(r, intelligentTieringConfiguration) + } + + listInventoryResponse, err := rm.sdkapi.ListBucketInventoryConfigurationsWithContext(ctx, rm.newListBucketInventoryPayload(r)) + if err != nil { + return err + } + ko.Spec.Inventory = make([]*svcapitypes.InventoryConfiguration, len(listInventoryResponse.InventoryConfigurationList)) + for i, inventoryConfiguration := range listInventoryResponse.InventoryConfigurationList { + ko.Spec.Inventory[i] = rm.setResourceInventoryConfiguration(r, inventoryConfiguration) + } + getLifecycleResponse, err := rm.sdkapi.GetBucketLifecycleConfigurationWithContext(ctx, rm.newGetBucketLifecyclePayload(r)) if err != nil { if awsErr, ok := ackerr.AWSError(err); ok && awsErr.Code() == "NoSuchLifecycleConfiguration" { @@ -289,6 +367,15 @@ func (rm *resourceManager) addPutFieldsToSpec( } ko.Spec.Logging = rm.setResourceLogging(r, getLoggingResponse) + listMetricsResponse, err := rm.sdkapi.ListBucketMetricsConfigurationsWithContext(ctx, rm.newListBucketMetricsPayload(r)) + if err != nil { + return err + } + ko.Spec.Metrics = make([]*svcapitypes.MetricsConfiguration, len(listMetricsResponse.MetricsConfigurationList)) + for i, metricsConfiguration := range listMetricsResponse.MetricsConfigurationList { + ko.Spec.Metrics[i] = rm.setResourceMetricsConfiguration(r, metricsConfiguration) + } + 
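+	// Unlike the Get*-based fields above, each list-backed field (Analytics,
+	// IntelligentTiering, Inventory, Metrics) is read back by calling the
+	// corresponding List* API and converting every returned SDK configuration
+	// into its spec type with a setResource*Configuration helper.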
getNotificationResponse, err := rm.sdkapi.GetBucketNotificationConfigurationWithContext(ctx, rm.newGetBucketNotificationPayload(r)) if err != nil { return err } @@ -398,6 +485,9 @@ func customPreCompare( a.ko.Spec.Accelerate.Status = &DefaultAccelerationStatus } } + if a.ko.Spec.Analytics == nil && b.ko.Spec.Analytics != nil { + a.ko.Spec.Analytics = make([]*svcapitypes.AnalyticsConfiguration, 0) + } if a.ko.Spec.ACL != nil { // Don't diff grant headers if a canned ACL has been used b.ko.Spec.GrantFullControl = nil @@ -452,12 +542,21 @@ func customPreCompare( if a.ko.Spec.Encryption == nil && b.ko.Spec.Encryption != nil { a.ko.Spec.Encryption = &svcapitypes.ServerSideEncryptionConfiguration{} } + if a.ko.Spec.IntelligentTiering == nil && b.ko.Spec.IntelligentTiering != nil { + a.ko.Spec.IntelligentTiering = make([]*svcapitypes.IntelligentTieringConfiguration, 0) + } + if a.ko.Spec.Inventory == nil && b.ko.Spec.Inventory != nil { + a.ko.Spec.Inventory = make([]*svcapitypes.InventoryConfiguration, 0) + } if a.ko.Spec.Lifecycle == nil && b.ko.Spec.Lifecycle != nil { a.ko.Spec.Lifecycle = &svcapitypes.BucketLifecycleConfiguration{} } if a.ko.Spec.Logging == nil && b.ko.Spec.Logging != nil { a.ko.Spec.Logging = &svcapitypes.BucketLoggingStatus{} } + if a.ko.Spec.Metrics == nil && b.ko.Spec.Metrics != nil { + a.ko.Spec.Metrics = make([]*svcapitypes.MetricsConfiguration, 0) + } if a.ko.Spec.Notification == nil && b.ko.Spec.Notification != nil { a.ko.Spec.Notification = &svcapitypes.NotificationConfiguration{} } diff --git a/pkg/resource/bucket/sdk.go b/pkg/resource/bucket/sdk.go index 66ac6c4..770f088 100644 --- a/pkg/resource/bucket/sdk.go +++ b/pkg/resource/bucket/sdk.go @@ -17,6 +17,7 @@ package bucket import ( "context" + "reflect" "strings" ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" @@ -42,6 +43,7 @@ var ( _ = ackv1alpha1.AWSAccountID("") _ = &ackerr.NotFound _ = &ackcondition.NotManagedMessage + _ = &reflect.Value{} ) // sdkFind returns SDK-specific information about a supplied resource @@ -369,6 +371,7 @@ func (rm *resourceManager) setResourceAccelerate( resp *svcsdk.GetBucketAccelerateConfigurationOutput, ) *svcapitypes.AccelerateConfiguration { res := &svcapitypes.AccelerateConfiguration{} + if resp.Status != nil { res.Status = resp.Status } @@ -376,6 +379,416 @@ func (rm *resourceManager) setResourceAccelerate( return res } +// newAnalyticsConfiguration returns an AnalyticsConfiguration object +// with each field set from the corresponding configuration's fields.
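+// Sync code paths (see the syncAnalytics calls in hook.go; the sync
+// implementation itself is not included in this diff) are expected to use
+// the SDK object built here when putting analytics configurations to S3.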
+func (rm *resourceManager) newAnalyticsConfiguration( + c *svcapitypes.AnalyticsConfiguration, +) *svcsdk.AnalyticsConfiguration { + res := &svcsdk.AnalyticsConfiguration{} + + if c.Filter != nil { + resf0 := &svcsdk.AnalyticsFilter{} + if c.Filter.And != nil { + resf0f0 := &svcsdk.AnalyticsAndOperator{} + if c.Filter.And.Prefix != nil { + resf0f0.SetPrefix(*c.Filter.And.Prefix) + } + if c.Filter.And.Tags != nil { + resf0f0f1 := []*svcsdk.Tag{} + for _, resf0f0f1iter := range c.Filter.And.Tags { + resf0f0f1elem := &svcsdk.Tag{} + if resf0f0f1iter.Key != nil { + resf0f0f1elem.SetKey(*resf0f0f1iter.Key) + } + if resf0f0f1iter.Value != nil { + resf0f0f1elem.SetValue(*resf0f0f1iter.Value) + } + resf0f0f1 = append(resf0f0f1, resf0f0f1elem) + } + resf0f0.SetTags(resf0f0f1) + } + resf0.SetAnd(resf0f0) + } + if c.Filter.Prefix != nil { + resf0.SetPrefix(*c.Filter.Prefix) + } + if c.Filter.Tag != nil { + resf0f2 := &svcsdk.Tag{} + if c.Filter.Tag.Key != nil { + resf0f2.SetKey(*c.Filter.Tag.Key) + } + if c.Filter.Tag.Value != nil { + resf0f2.SetValue(*c.Filter.Tag.Value) + } + resf0.SetTag(resf0f2) + } + res.SetFilter(resf0) + } + if c.ID != nil { + res.SetId(*c.ID) + } + if c.StorageClassAnalysis != nil { + resf2 := &svcsdk.StorageClassAnalysis{} + if c.StorageClassAnalysis.DataExport != nil { + resf2f0 := &svcsdk.StorageClassAnalysisDataExport{} + if c.StorageClassAnalysis.DataExport.Destination != nil { + resf2f0f0 := &svcsdk.AnalyticsExportDestination{} + if c.StorageClassAnalysis.DataExport.Destination.S3BucketDestination != nil { + resf2f0f0f0 := &svcsdk.AnalyticsS3BucketDestination{} + if c.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Bucket != nil { + resf2f0f0f0.SetBucket(*c.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Bucket) + } + if c.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.BucketAccountID != nil { + resf2f0f0f0.SetBucketAccountId(*c.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.BucketAccountID) + } + if c.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Format != nil { + resf2f0f0f0.SetFormat(*c.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Format) + } + if c.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Prefix != nil { + resf2f0f0f0.SetPrefix(*c.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Prefix) + } + resf2f0f0.SetS3BucketDestination(resf2f0f0f0) + } + resf2f0.SetDestination(resf2f0f0) + } + if c.StorageClassAnalysis.DataExport.OutputSchemaVersion != nil { + resf2f0.SetOutputSchemaVersion(*c.StorageClassAnalysis.DataExport.OutputSchemaVersion) + } + resf2.SetDataExport(resf2f0) + } + res.SetStorageClassAnalysis(resf2) + } + + return res +} + +// setResourceAnalyticsConfiguration sets a resource AnalyticsConfiguration type +// given the SDK type.
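+// It is the inverse of newAnalyticsConfiguration: addPutFieldsToSpec in
+// hook.go uses it to convert each item returned by
+// ListBucketAnalyticsConfigurations back into the spec representation.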
+func (rm *resourceManager) setResourceAnalyticsConfiguration( + r *resource, + resp *svcsdk.AnalyticsConfiguration, +) *svcapitypes.AnalyticsConfiguration { + res := &svcapitypes.AnalyticsConfiguration{} + + if resp.Filter != nil { + resf0 := &svcapitypes.AnalyticsFilter{} + if resp.Filter.And != nil { + resf0f0 := &svcapitypes.AnalyticsAndOperator{} + if resp.Filter.And.Prefix != nil { + resf0f0.Prefix = resp.Filter.And.Prefix + } + if resp.Filter.And.Tags != nil { + resf0f0f1 := []*svcapitypes.Tag{} + for _, resf0f0f1iter := range resp.Filter.And.Tags { + resf0f0f1elem := &svcapitypes.Tag{} + if resf0f0f1iter.Key != nil { + resf0f0f1elem.Key = resf0f0f1iter.Key + } + if resf0f0f1iter.Value != nil { + resf0f0f1elem.Value = resf0f0f1iter.Value + } + resf0f0f1 = append(resf0f0f1, resf0f0f1elem) + } + resf0f0.Tags = resf0f0f1 + } + resf0.And = resf0f0 + } + if resp.Filter.Prefix != nil { + resf0.Prefix = resp.Filter.Prefix + } + if resp.Filter.Tag != nil { + resf0f2 := &svcapitypes.Tag{} + if resp.Filter.Tag.Key != nil { + resf0f2.Key = resp.Filter.Tag.Key + } + if resp.Filter.Tag.Value != nil { + resf0f2.Value = resp.Filter.Tag.Value + } + resf0.Tag = resf0f2 + } + res.Filter = resf0 + } + if resp.Id != nil { + res.ID = resp.Id + } + if resp.StorageClassAnalysis != nil { + resf2 := &svcapitypes.StorageClassAnalysis{} + if resp.StorageClassAnalysis.DataExport != nil { + resf2f0 := &svcapitypes.StorageClassAnalysisDataExport{} + if resp.StorageClassAnalysis.DataExport.Destination != nil { + resf2f0f0 := &svcapitypes.AnalyticsExportDestination{} + if resp.StorageClassAnalysis.DataExport.Destination.S3BucketDestination != nil { + resf2f0f0f0 := &svcapitypes.AnalyticsS3BucketDestination{} + if resp.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Bucket != nil { + resf2f0f0f0.Bucket = resp.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Bucket + } + if resp.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.BucketAccountId != nil { + resf2f0f0f0.BucketAccountID = resp.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.BucketAccountId + } + if resp.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Format != nil { + resf2f0f0f0.Format = resp.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Format + } + if resp.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Prefix != nil { + resf2f0f0f0.Prefix = resp.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Prefix + } + resf2f0f0.S3BucketDestination = resf2f0f0f0 + } + resf2f0.Destination = resf2f0f0 + } + if resp.StorageClassAnalysis.DataExport.OutputSchemaVersion != nil { + resf2f0.OutputSchemaVersion = resp.StorageClassAnalysis.DataExport.OutputSchemaVersion + } + resf2.DataExport = resf2f0 + } + res.StorageClassAnalysis = resf2 + } + + return res +} + +func compareAnalyticsConfiguration( + a *svcapitypes.AnalyticsConfiguration, + b *svcapitypes.AnalyticsConfiguration, +) *ackcompare.Delta { + delta := ackcompare.NewDelta() + if ackcompare.HasNilDifference(a.Filter, b.Filter) { + delta.Add("AnalyticsConfiguration.Filter", a.Filter, b.Filter) + } else if a.Filter != nil && b.Filter != nil { + if ackcompare.HasNilDifference(a.Filter.And, b.Filter.And) { + delta.Add("AnalyticsConfiguration.Filter.And", a.Filter.And, b.Filter.And) + } else if a.Filter.And != nil && b.Filter.And != nil { + if ackcompare.HasNilDifference(a.Filter.And.Prefix, b.Filter.And.Prefix) { + delta.Add("AnalyticsConfiguration.Filter.And.Prefix", a.Filter.And.Prefix, 
b.Filter.And.Prefix) + } else if a.Filter.And.Prefix != nil && b.Filter.And.Prefix != nil { + if *a.Filter.And.Prefix != *b.Filter.And.Prefix { + delta.Add("AnalyticsConfiguration.Filter.And.Prefix", a.Filter.And.Prefix, b.Filter.And.Prefix) + } + } + if !reflect.DeepEqual(a.Filter.And.Tags, b.Filter.And.Tags) { + delta.Add("AnalyticsConfiguration.Filter.And.Tags", a.Filter.And.Tags, b.Filter.And.Tags) + } + } + if ackcompare.HasNilDifference(a.Filter.Prefix, b.Filter.Prefix) { + delta.Add("AnalyticsConfiguration.Filter.Prefix", a.Filter.Prefix, b.Filter.Prefix) + } else if a.Filter.Prefix != nil && b.Filter.Prefix != nil { + if *a.Filter.Prefix != *b.Filter.Prefix { + delta.Add("AnalyticsConfiguration.Filter.Prefix", a.Filter.Prefix, b.Filter.Prefix) + } + } + if ackcompare.HasNilDifference(a.Filter.Tag, b.Filter.Tag) { + delta.Add("AnalyticsConfiguration.Filter.Tag", a.Filter.Tag, b.Filter.Tag) + } else if a.Filter.Tag != nil && b.Filter.Tag != nil { + if ackcompare.HasNilDifference(a.Filter.Tag.Key, b.Filter.Tag.Key) { + delta.Add("AnalyticsConfiguration.Filter.Tag.Key", a.Filter.Tag.Key, b.Filter.Tag.Key) + } else if a.Filter.Tag.Key != nil && b.Filter.Tag.Key != nil { + if *a.Filter.Tag.Key != *b.Filter.Tag.Key { + delta.Add("AnalyticsConfiguration.Filter.Tag.Key", a.Filter.Tag.Key, b.Filter.Tag.Key) + } + } + if ackcompare.HasNilDifference(a.Filter.Tag.Value, b.Filter.Tag.Value) { + delta.Add("AnalyticsConfiguration.Filter.Tag.Value", a.Filter.Tag.Value, b.Filter.Tag.Value) + } else if a.Filter.Tag.Value != nil && b.Filter.Tag.Value != nil { + if *a.Filter.Tag.Value != *b.Filter.Tag.Value { + delta.Add("AnalyticsConfiguration.Filter.Tag.Value", a.Filter.Tag.Value, b.Filter.Tag.Value) + } + } + } + } + if ackcompare.HasNilDifference(a.ID, b.ID) { + delta.Add("AnalyticsConfiguration.ID", a.ID, b.ID) + } else if a.ID != nil && b.ID != nil { + if *a.ID != *b.ID { + delta.Add("AnalyticsConfiguration.ID", a.ID, b.ID) + } + } + if ackcompare.HasNilDifference(a.StorageClassAnalysis, b.StorageClassAnalysis) { + delta.Add("AnalyticsConfiguration.StorageClassAnalysis", a.StorageClassAnalysis, b.StorageClassAnalysis) + } else if a.StorageClassAnalysis != nil && b.StorageClassAnalysis != nil { + if ackcompare.HasNilDifference(a.StorageClassAnalysis.DataExport, b.StorageClassAnalysis.DataExport) { + delta.Add("AnalyticsConfiguration.StorageClassAnalysis.DataExport", a.StorageClassAnalysis.DataExport, b.StorageClassAnalysis.DataExport) + } else if a.StorageClassAnalysis.DataExport != nil && b.StorageClassAnalysis.DataExport != nil { + if ackcompare.HasNilDifference(a.StorageClassAnalysis.DataExport.Destination, b.StorageClassAnalysis.DataExport.Destination) { + delta.Add("AnalyticsConfiguration.StorageClassAnalysis.DataExport.Destination", a.StorageClassAnalysis.DataExport.Destination, b.StorageClassAnalysis.DataExport.Destination) + } else if a.StorageClassAnalysis.DataExport.Destination != nil && b.StorageClassAnalysis.DataExport.Destination != nil { + if ackcompare.HasNilDifference(a.StorageClassAnalysis.DataExport.Destination.S3BucketDestination, b.StorageClassAnalysis.DataExport.Destination.S3BucketDestination) { + delta.Add("AnalyticsConfiguration.StorageClassAnalysis.DataExport.Destination.S3BucketDestination", a.StorageClassAnalysis.DataExport.Destination.S3BucketDestination, b.StorageClassAnalysis.DataExport.Destination.S3BucketDestination) + } else if a.StorageClassAnalysis.DataExport.Destination.S3BucketDestination != nil && 
b.StorageClassAnalysis.DataExport.Destination.S3BucketDestination != nil { + if ackcompare.HasNilDifference(a.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Bucket, b.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Bucket) { + delta.Add("AnalyticsConfiguration.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Bucket", a.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Bucket, b.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Bucket) + } else if a.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Bucket != nil && b.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Bucket != nil { + if *a.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Bucket != *b.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Bucket { + delta.Add("AnalyticsConfiguration.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Bucket", a.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Bucket, b.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Bucket) + } + } + if ackcompare.HasNilDifference(a.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.BucketAccountID, b.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.BucketAccountID) { + delta.Add("AnalyticsConfiguration.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.BucketAccountID", a.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.BucketAccountID, b.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.BucketAccountID) + } else if a.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.BucketAccountID != nil && b.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.BucketAccountID != nil { + if *a.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.BucketAccountID != *b.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.BucketAccountID { + delta.Add("AnalyticsConfiguration.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.BucketAccountID", a.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.BucketAccountID, b.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.BucketAccountID) + } + } + if ackcompare.HasNilDifference(a.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Format, b.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Format) { + delta.Add("AnalyticsConfiguration.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Format", a.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Format, b.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Format) + } else if a.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Format != nil && b.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Format != nil { + if *a.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Format != *b.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Format { + delta.Add("AnalyticsConfiguration.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Format", a.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Format, b.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Format) + } + } + if ackcompare.HasNilDifference(a.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Prefix, b.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Prefix) { + 
delta.Add("AnalyticsConfiguration.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Prefix", a.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Prefix, b.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Prefix) + } else if a.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Prefix != nil && b.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Prefix != nil { + if *a.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Prefix != *b.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Prefix { + delta.Add("AnalyticsConfiguration.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Prefix", a.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Prefix, b.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.Prefix) + } + } + } + } + if ackcompare.HasNilDifference(a.StorageClassAnalysis.DataExport.OutputSchemaVersion, b.StorageClassAnalysis.DataExport.OutputSchemaVersion) { + delta.Add("AnalyticsConfiguration.StorageClassAnalysis.DataExport.OutputSchemaVersion", a.StorageClassAnalysis.DataExport.OutputSchemaVersion, b.StorageClassAnalysis.DataExport.OutputSchemaVersion) + } else if a.StorageClassAnalysis.DataExport.OutputSchemaVersion != nil && b.StorageClassAnalysis.DataExport.OutputSchemaVersion != nil { + if *a.StorageClassAnalysis.DataExport.OutputSchemaVersion != *b.StorageClassAnalysis.DataExport.OutputSchemaVersion { + delta.Add("AnalyticsConfiguration.StorageClassAnalysis.DataExport.OutputSchemaVersion", a.StorageClassAnalysis.DataExport.OutputSchemaVersion, b.StorageClassAnalysis.DataExport.OutputSchemaVersion) + } + } + } + } + + return delta +} + +// getAnalyticsConfigurationAction returns the determined action for a given +// configuration object, depending on the desired and latest values +func getAnalyticsConfigurationAction( + c *svcapitypes.AnalyticsConfiguration, + latest *resource, +) ConfigurationAction { + action := ConfigurationActionPut + if latest != nil { + for _, l := range latest.ko.Spec.Analytics { + if *l.ID != *c.ID { + continue + } + + // Don't perform any action if they are identical + delta := compareAnalyticsConfiguration(l, c) + if len(delta.Differences) > 0 { + action = ConfigurationActionNone + } else { + action = ConfigurationActionUpdate + } + break + } + } + return action +} + +func (rm *resourceManager) newListBucketAnalyticsPayload( + r *resource, +) *svcsdk.ListBucketAnalyticsConfigurationsInput { + res := &svcsdk.ListBucketAnalyticsConfigurationsInput{} + res.SetBucket(*r.ko.Spec.Name) + return res +} + +func (rm *resourceManager) newPutBucketAnalyticsPayload( + r *resource, + c svcapitypes.AnalyticsConfiguration, +) *svcsdk.PutBucketAnalyticsConfigurationInput { + res := &svcsdk.PutBucketAnalyticsConfigurationInput{} + res.SetBucket(*r.ko.Spec.Name) + res.SetId(*c.ID) + res.SetAnalyticsConfiguration(rm.newAnalyticsConfiguration(&c)) + + return res +} + +func (rm *resourceManager) newDeleteBucketAnalyticsPayload( + r *resource, + c svcapitypes.AnalyticsConfiguration, +) *svcsdk.DeleteBucketAnalyticsConfigurationInput { + res := &svcsdk.DeleteBucketAnalyticsConfigurationInput{} + res.SetBucket(*r.ko.Spec.Name) + res.SetId(*c.ID) + + return res +} + +func (rm *resourceManager) deleteAnalyticsConfiguration( + ctx context.Context, + r *resource, + c svcapitypes.AnalyticsConfiguration, +) (err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.deleteAnalyticsConfiguration") + defer exit(err) + + input := 
rm.newDeleteBucketAnalyticsPayload(r, c) + _, err = rm.sdkapi.DeleteBucketAnalyticsConfigurationWithContext(ctx, input) + rm.metrics.RecordAPICall("UPDATE", "DeleteBucketAnalyticsConfiguration", err) + return err +} + +func (rm *resourceManager) putAnalyticsConfiguration( + ctx context.Context, + r *resource, + c svcapitypes.AnalyticsConfiguration, +) (err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.putAnalyticsConfiguration") + defer exit(err) + + input := rm.newPutBucketAnalyticsPayload(r, c) + _, err = rm.sdkapi.PutBucketAnalyticsConfigurationWithContext(ctx, input) + rm.metrics.RecordAPICall("UPDATE", "PutBucketAnalyticsConfiguration", err) + return err +} + +func (rm *resourceManager) syncAnalytics( + ctx context.Context, + desired *resource, + latest *resource, +) (err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.syncAnalytics") + defer exit(err) + + for _, c := range desired.ko.Spec.Analytics { + action := getAnalyticsConfigurationAction(c, latest) + + switch action { + case ConfigurationActionUpdate: + fallthrough + case ConfigurationActionPut: + if err = rm.putAnalyticsConfiguration(ctx, desired, *c); err != nil { + return err + } + default: + } + } + + if latest != nil { + // Find any configurations that are in the latest but not in desired + for _, l := range latest.ko.Spec.Analytics { + exists := false + for _, c := range desired.ko.Spec.Analytics { + if *c.ID != *l.ID { + continue + } + exists = true + break + } + + if !exists { + if err = rm.deleteAnalyticsConfiguration(ctx, desired, *l); err != nil { + return err + } + } + } + } + + return nil +} + // newCORSConfiguration returns a CORSConfiguration object // with each the field set by the resource's corresponding spec field. func (rm *resourceManager) newCORSConfiguration( @@ -441,6 +854,7 @@ func (rm *resourceManager) setResourceCORS( resp *svcsdk.GetBucketCorsOutput, ) *svcapitypes.CORSConfiguration { res := &svcapitypes.CORSConfiguration{} + if resp.CORSRules != nil { resf0 := []*svcapitypes.CORSRule{} for _, resf0iter := range resp.CORSRules { @@ -531,6 +945,7 @@ func (rm *resourceManager) setResourceEncryption( resp *svcsdk.GetBucketEncryptionOutput, ) *svcapitypes.ServerSideEncryptionConfiguration { res := &svcapitypes.ServerSideEncryptionConfiguration{} + if resp.ServerSideEncryptionConfiguration.Rules != nil { resf0 := []*svcapitypes.ServerSideEncryptionRule{} for _, resf0iter := range resp.ServerSideEncryptionConfiguration.Rules { @@ -556,6 +971,735 @@ func (rm *resourceManager) setResourceEncryption( return res } +// newIntelligentTieringConfiguration returns a IntelligentTieringConfiguration object +// with each the field set by the corresponding configuration's fields. 
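+//
+// A hedged sketch of the mapping (the ID and values below are illustrative
+// only, and aws.String/aws.Int64 are the usual aws-sdk-go helpers): a
+// configuration that moves objects under "archive/" into ARCHIVE_ACCESS
+// after 90 days converts one-to-one into the SDK shape:
+//
+//     c := &svcapitypes.IntelligentTieringConfiguration{
+//         ID:     aws.String("archive-tier"),
+//         Status: aws.String("Enabled"),
+//         Filter: &svcapitypes.IntelligentTieringFilter{Prefix: aws.String("archive/")},
+//         Tierings: []*svcapitypes.Tiering{
+//             {AccessTier: aws.String("ARCHIVE_ACCESS"), Days: aws.Int64(90)},
+//         },
+//     }
+//     sdkCfg := rm.newIntelligentTieringConfiguration(c)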
+func (rm *resourceManager) newIntelligentTieringConfiguration( + c *svcapitypes.IntelligentTieringConfiguration, +) *svcsdk.IntelligentTieringConfiguration { + res := &svcsdk.IntelligentTieringConfiguration{} + + if c.Filter != nil { + resf0 := &svcsdk.IntelligentTieringFilter{} + if c.Filter.And != nil { + resf0f0 := &svcsdk.IntelligentTieringAndOperator{} + if c.Filter.And.Prefix != nil { + resf0f0.SetPrefix(*c.Filter.And.Prefix) + } + if c.Filter.And.Tags != nil { + resf0f0f1 := []*svcsdk.Tag{} + for _, resf0f0f1iter := range c.Filter.And.Tags { + resf0f0f1elem := &svcsdk.Tag{} + if resf0f0f1iter.Key != nil { + resf0f0f1elem.SetKey(*resf0f0f1iter.Key) + } + if resf0f0f1iter.Value != nil { + resf0f0f1elem.SetValue(*resf0f0f1iter.Value) + } + resf0f0f1 = append(resf0f0f1, resf0f0f1elem) + } + resf0f0.SetTags(resf0f0f1) + } + resf0.SetAnd(resf0f0) + } + if c.Filter.Prefix != nil { + resf0.SetPrefix(*c.Filter.Prefix) + } + if c.Filter.Tag != nil { + resf0f2 := &svcsdk.Tag{} + if c.Filter.Tag.Key != nil { + resf0f2.SetKey(*c.Filter.Tag.Key) + } + if c.Filter.Tag.Value != nil { + resf0f2.SetValue(*c.Filter.Tag.Value) + } + resf0.SetTag(resf0f2) + } + res.SetFilter(resf0) + } + if c.ID != nil { + res.SetId(*c.ID) + } + if c.Status != nil { + res.SetStatus(*c.Status) + } + if c.Tierings != nil { + resf3 := []*svcsdk.Tiering{} + for _, resf3iter := range c.Tierings { + resf3elem := &svcsdk.Tiering{} + if resf3iter.AccessTier != nil { + resf3elem.SetAccessTier(*resf3iter.AccessTier) + } + if resf3iter.Days != nil { + resf3elem.SetDays(*resf3iter.Days) + } + resf3 = append(resf3, resf3elem) + } + res.SetTierings(resf3) + } + + return res +} + +// setIntelligentTieringConfiguration sets a resource IntelligentTieringConfiguration type +// given the SDK type. 
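+//
+// Only the Go field casing differs between the SDK and CRD types here (for
+// example the SDK's "Id" maps to the CRD's "ID"); every value is copied
+// through unchanged, so a round trip through new/setResource is lossless,
+// which the compare function below depends on.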
+func (rm *resourceManager) setResourceIntelligentTieringConfiguration( + r *resource, + resp *svcsdk.IntelligentTieringConfiguration, +) *svcapitypes.IntelligentTieringConfiguration { + res := &svcapitypes.IntelligentTieringConfiguration{} + + if resp.Filter != nil { + resf0 := &svcapitypes.IntelligentTieringFilter{} + if resp.Filter.And != nil { + resf0f0 := &svcapitypes.IntelligentTieringAndOperator{} + if resp.Filter.And.Prefix != nil { + resf0f0.Prefix = resp.Filter.And.Prefix + } + if resp.Filter.And.Tags != nil { + resf0f0f1 := []*svcapitypes.Tag{} + for _, resf0f0f1iter := range resp.Filter.And.Tags { + resf0f0f1elem := &svcapitypes.Tag{} + if resf0f0f1iter.Key != nil { + resf0f0f1elem.Key = resf0f0f1iter.Key + } + if resf0f0f1iter.Value != nil { + resf0f0f1elem.Value = resf0f0f1iter.Value + } + resf0f0f1 = append(resf0f0f1, resf0f0f1elem) + } + resf0f0.Tags = resf0f0f1 + } + resf0.And = resf0f0 + } + if resp.Filter.Prefix != nil { + resf0.Prefix = resp.Filter.Prefix + } + if resp.Filter.Tag != nil { + resf0f2 := &svcapitypes.Tag{} + if resp.Filter.Tag.Key != nil { + resf0f2.Key = resp.Filter.Tag.Key + } + if resp.Filter.Tag.Value != nil { + resf0f2.Value = resp.Filter.Tag.Value + } + resf0.Tag = resf0f2 + } + res.Filter = resf0 + } + if resp.Id != nil { + res.ID = resp.Id + } + if resp.Status != nil { + res.Status = resp.Status + } + if resp.Tierings != nil { + resf3 := []*svcapitypes.Tiering{} + for _, resf3iter := range resp.Tierings { + resf3elem := &svcapitypes.Tiering{} + if resf3iter.AccessTier != nil { + resf3elem.AccessTier = resf3iter.AccessTier + } + if resf3iter.Days != nil { + resf3elem.Days = resf3iter.Days + } + resf3 = append(resf3, resf3elem) + } + res.Tierings = resf3 + } + + return res +} + +func compareIntelligentTieringConfiguration( + a *svcapitypes.IntelligentTieringConfiguration, + b *svcapitypes.IntelligentTieringConfiguration, +) *ackcompare.Delta { + delta := ackcompare.NewDelta() + if ackcompare.HasNilDifference(a.Filter, b.Filter) { + delta.Add("IntelligentTieringConfiguration.Filter", a.Filter, b.Filter) + } else if a.Filter != nil && b.Filter != nil { + if ackcompare.HasNilDifference(a.Filter.And, b.Filter.And) { + delta.Add("IntelligentTieringConfiguration.Filter.And", a.Filter.And, b.Filter.And) + } else if a.Filter.And != nil && b.Filter.And != nil { + if ackcompare.HasNilDifference(a.Filter.And.Prefix, b.Filter.And.Prefix) { + delta.Add("IntelligentTieringConfiguration.Filter.And.Prefix", a.Filter.And.Prefix, b.Filter.And.Prefix) + } else if a.Filter.And.Prefix != nil && b.Filter.And.Prefix != nil { + if *a.Filter.And.Prefix != *b.Filter.And.Prefix { + delta.Add("IntelligentTieringConfiguration.Filter.And.Prefix", a.Filter.And.Prefix, b.Filter.And.Prefix) + } + } + if !reflect.DeepEqual(a.Filter.And.Tags, b.Filter.And.Tags) { + delta.Add("IntelligentTieringConfiguration.Filter.And.Tags", a.Filter.And.Tags, b.Filter.And.Tags) + } + } + if ackcompare.HasNilDifference(a.Filter.Prefix, b.Filter.Prefix) { + delta.Add("IntelligentTieringConfiguration.Filter.Prefix", a.Filter.Prefix, b.Filter.Prefix) + } else if a.Filter.Prefix != nil && b.Filter.Prefix != nil { + if *a.Filter.Prefix != *b.Filter.Prefix { + delta.Add("IntelligentTieringConfiguration.Filter.Prefix", a.Filter.Prefix, b.Filter.Prefix) + } + } + if ackcompare.HasNilDifference(a.Filter.Tag, b.Filter.Tag) { + delta.Add("IntelligentTieringConfiguration.Filter.Tag", a.Filter.Tag, b.Filter.Tag) + } else if a.Filter.Tag != nil && b.Filter.Tag != nil { + if 
ackcompare.HasNilDifference(a.Filter.Tag.Key, b.Filter.Tag.Key) {
+                delta.Add("IntelligentTieringConfiguration.Filter.Tag.Key", a.Filter.Tag.Key, b.Filter.Tag.Key)
+            } else if a.Filter.Tag.Key != nil && b.Filter.Tag.Key != nil {
+                if *a.Filter.Tag.Key != *b.Filter.Tag.Key {
+                    delta.Add("IntelligentTieringConfiguration.Filter.Tag.Key", a.Filter.Tag.Key, b.Filter.Tag.Key)
+                }
+            }
+            if ackcompare.HasNilDifference(a.Filter.Tag.Value, b.Filter.Tag.Value) {
+                delta.Add("IntelligentTieringConfiguration.Filter.Tag.Value", a.Filter.Tag.Value, b.Filter.Tag.Value)
+            } else if a.Filter.Tag.Value != nil && b.Filter.Tag.Value != nil {
+                if *a.Filter.Tag.Value != *b.Filter.Tag.Value {
+                    delta.Add("IntelligentTieringConfiguration.Filter.Tag.Value", a.Filter.Tag.Value, b.Filter.Tag.Value)
+                }
+            }
+        }
+    }
+    if ackcompare.HasNilDifference(a.ID, b.ID) {
+        delta.Add("IntelligentTieringConfiguration.ID", a.ID, b.ID)
+    } else if a.ID != nil && b.ID != nil {
+        if *a.ID != *b.ID {
+            delta.Add("IntelligentTieringConfiguration.ID", a.ID, b.ID)
+        }
+    }
+    if ackcompare.HasNilDifference(a.Status, b.Status) {
+        delta.Add("IntelligentTieringConfiguration.Status", a.Status, b.Status)
+    } else if a.Status != nil && b.Status != nil {
+        if *a.Status != *b.Status {
+            delta.Add("IntelligentTieringConfiguration.Status", a.Status, b.Status)
+        }
+    }
+    if !reflect.DeepEqual(a.Tierings, b.Tierings) {
+        delta.Add("IntelligentTieringConfiguration.Tierings", a.Tierings, b.Tierings)
+    }
+
+    return delta
+}
+
+// getIntelligentTieringConfigurationAction returns the determined action for a given
+// configuration object, depending on the desired and latest values
+func getIntelligentTieringConfigurationAction(
+    c *svcapitypes.IntelligentTieringConfiguration,
+    latest *resource,
+) ConfigurationAction {
+    action := ConfigurationActionPut
+    if latest != nil {
+        for _, l := range latest.ko.Spec.IntelligentTiering {
+            if *l.ID != *c.ID {
+                continue
+            }
+
+            // Don't perform any action if they are identical; update only
+            // when the stored configuration has drifted
+            delta := compareIntelligentTieringConfiguration(l, c)
+            if len(delta.Differences) > 0 {
+                action = ConfigurationActionUpdate
+            } else {
+                action = ConfigurationActionNone
+            }
+            break
+        }
+    }
+    return action
+}
+
+func (rm *resourceManager) newListBucketIntelligentTieringPayload(
+    r *resource,
+) *svcsdk.ListBucketIntelligentTieringConfigurationsInput {
+    res := &svcsdk.ListBucketIntelligentTieringConfigurationsInput{}
+    res.SetBucket(*r.ko.Spec.Name)
+    return res
+}
+
+func (rm *resourceManager) newPutBucketIntelligentTieringPayload(
+    r *resource,
+    c svcapitypes.IntelligentTieringConfiguration,
+) *svcsdk.PutBucketIntelligentTieringConfigurationInput {
+    res := &svcsdk.PutBucketIntelligentTieringConfigurationInput{}
+    res.SetBucket(*r.ko.Spec.Name)
+    res.SetId(*c.ID)
+    res.SetIntelligentTieringConfiguration(rm.newIntelligentTieringConfiguration(&c))
+
+    return res
+}
+
+func (rm *resourceManager) newDeleteBucketIntelligentTieringPayload(
+    r *resource,
+    c svcapitypes.IntelligentTieringConfiguration,
+) *svcsdk.DeleteBucketIntelligentTieringConfigurationInput {
+    res := &svcsdk.DeleteBucketIntelligentTieringConfigurationInput{}
+    res.SetBucket(*r.ko.Spec.Name)
+    res.SetId(*c.ID)
+
+    return res
+}
+
+func (rm *resourceManager) deleteIntelligentTieringConfiguration(
+    ctx context.Context,
+    r *resource,
+    c svcapitypes.IntelligentTieringConfiguration,
+) (err error) {
+    rlog := ackrtlog.FromContext(ctx)
+    exit := rlog.Trace("rm.deleteIntelligentTieringConfiguration")
+    defer exit(err)
+
+    input :=
rm.newDeleteBucketIntelligentTieringPayload(r, c) + _, err = rm.sdkapi.DeleteBucketIntelligentTieringConfigurationWithContext(ctx, input) + rm.metrics.RecordAPICall("UPDATE", "DeleteBucketIntelligentTieringConfiguration", err) + return err +} + +func (rm *resourceManager) putIntelligentTieringConfiguration( + ctx context.Context, + r *resource, + c svcapitypes.IntelligentTieringConfiguration, +) (err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.putIntelligentTieringConfiguration") + defer exit(err) + + input := rm.newPutBucketIntelligentTieringPayload(r, c) + _, err = rm.sdkapi.PutBucketIntelligentTieringConfigurationWithContext(ctx, input) + rm.metrics.RecordAPICall("UPDATE", "PutBucketIntelligentTieringConfiguration", err) + return err +} + +func (rm *resourceManager) syncIntelligentTiering( + ctx context.Context, + desired *resource, + latest *resource, +) (err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.syncIntelligentTiering") + defer exit(err) + + for _, c := range desired.ko.Spec.IntelligentTiering { + action := getIntelligentTieringConfigurationAction(c, latest) + + switch action { + case ConfigurationActionUpdate: + fallthrough + case ConfigurationActionPut: + if err = rm.putIntelligentTieringConfiguration(ctx, desired, *c); err != nil { + return err + } + default: + } + } + + if latest != nil { + // Find any configurations that are in the latest but not in desired + for _, l := range latest.ko.Spec.IntelligentTiering { + exists := false + for _, c := range desired.ko.Spec.IntelligentTiering { + if *c.ID != *l.ID { + continue + } + exists = true + break + } + + if !exists { + if err = rm.deleteIntelligentTieringConfiguration(ctx, desired, *l); err != nil { + return err + } + } + } + } + + return nil +} + +// newInventoryConfiguration returns a InventoryConfiguration object +// with each the field set by the corresponding configuration's fields. 
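+//
+// A hedged sketch (names and values are illustrative; aws.String and
+// aws.Bool are the usual aws-sdk-go helpers): a daily CSV inventory of
+// current object versions, delivered to a second bucket addressed by ARN,
+// converts as:
+//
+//     c := &svcapitypes.InventoryConfiguration{
+//         ID:                     aws.String("daily-inventory"),
+//         IsEnabled:              aws.Bool(true),
+//         IncludedObjectVersions: aws.String("Current"),
+//         Schedule:               &svcapitypes.InventorySchedule{Frequency: aws.String("Daily")},
+//         Destination: &svcapitypes.InventoryDestination{
+//             S3BucketDestination: &svcapitypes.InventoryS3BucketDestination{
+//                 Bucket: aws.String("arn:aws:s3:::example-inventory-bucket"),
+//                 Format: aws.String("CSV"),
+//             },
+//         },
+//     }
+//     sdkCfg := rm.newInventoryConfiguration(c)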
+func (rm *resourceManager) newInventoryConfiguration( + c *svcapitypes.InventoryConfiguration, +) *svcsdk.InventoryConfiguration { + res := &svcsdk.InventoryConfiguration{} + + if c.Destination != nil { + resf0 := &svcsdk.InventoryDestination{} + if c.Destination.S3BucketDestination != nil { + resf0f0 := &svcsdk.InventoryS3BucketDestination{} + if c.Destination.S3BucketDestination.AccountID != nil { + resf0f0.SetAccountId(*c.Destination.S3BucketDestination.AccountID) + } + if c.Destination.S3BucketDestination.Bucket != nil { + resf0f0.SetBucket(*c.Destination.S3BucketDestination.Bucket) + } + if c.Destination.S3BucketDestination.Encryption != nil { + resf0f0f2 := &svcsdk.InventoryEncryption{} + if c.Destination.S3BucketDestination.Encryption.SSEKMS != nil { + resf0f0f2f0 := &svcsdk.SSEKMS{} + if c.Destination.S3BucketDestination.Encryption.SSEKMS.KeyID != nil { + resf0f0f2f0.SetKeyId(*c.Destination.S3BucketDestination.Encryption.SSEKMS.KeyID) + } + resf0f0f2.SetSSEKMS(resf0f0f2f0) + } + resf0f0.SetEncryption(resf0f0f2) + } + if c.Destination.S3BucketDestination.Format != nil { + resf0f0.SetFormat(*c.Destination.S3BucketDestination.Format) + } + if c.Destination.S3BucketDestination.Prefix != nil { + resf0f0.SetPrefix(*c.Destination.S3BucketDestination.Prefix) + } + resf0.SetS3BucketDestination(resf0f0) + } + res.SetDestination(resf0) + } + if c.Filter != nil { + resf1 := &svcsdk.InventoryFilter{} + if c.Filter.Prefix != nil { + resf1.SetPrefix(*c.Filter.Prefix) + } + res.SetFilter(resf1) + } + if c.ID != nil { + res.SetId(*c.ID) + } + if c.IncludedObjectVersions != nil { + res.SetIncludedObjectVersions(*c.IncludedObjectVersions) + } + if c.IsEnabled != nil { + res.SetIsEnabled(*c.IsEnabled) + } + if c.OptionalFields != nil { + resf5 := []*string{} + for _, resf5iter := range c.OptionalFields { + var resf5elem string + resf5elem = *resf5iter + resf5 = append(resf5, &resf5elem) + } + res.SetOptionalFields(resf5) + } + if c.Schedule != nil { + resf6 := &svcsdk.InventorySchedule{} + if c.Schedule.Frequency != nil { + resf6.SetFrequency(*c.Schedule.Frequency) + } + res.SetSchedule(resf6) + } + + return res +} + +// setInventoryConfiguration sets a resource InventoryConfiguration type +// given the SDK type. 
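+//
+// Note that OptionalFields is rebuilt element by element below: each
+// iteration copies the string into a fresh local variable before appending
+// its address, so the resulting slice never aliases the SDK response's
+// backing storage.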
+func (rm *resourceManager) setResourceInventoryConfiguration( + r *resource, + resp *svcsdk.InventoryConfiguration, +) *svcapitypes.InventoryConfiguration { + res := &svcapitypes.InventoryConfiguration{} + + if resp.Destination != nil { + resf0 := &svcapitypes.InventoryDestination{} + if resp.Destination.S3BucketDestination != nil { + resf0f0 := &svcapitypes.InventoryS3BucketDestination{} + if resp.Destination.S3BucketDestination.AccountId != nil { + resf0f0.AccountID = resp.Destination.S3BucketDestination.AccountId + } + if resp.Destination.S3BucketDestination.Bucket != nil { + resf0f0.Bucket = resp.Destination.S3BucketDestination.Bucket + } + if resp.Destination.S3BucketDestination.Encryption != nil { + resf0f0f2 := &svcapitypes.InventoryEncryption{} + if resp.Destination.S3BucketDestination.Encryption.SSEKMS != nil { + resf0f0f2f0 := &svcapitypes.SSEKMS{} + if resp.Destination.S3BucketDestination.Encryption.SSEKMS.KeyId != nil { + resf0f0f2f0.KeyID = resp.Destination.S3BucketDestination.Encryption.SSEKMS.KeyId + } + resf0f0f2.SSEKMS = resf0f0f2f0 + } + resf0f0.Encryption = resf0f0f2 + } + if resp.Destination.S3BucketDestination.Format != nil { + resf0f0.Format = resp.Destination.S3BucketDestination.Format + } + if resp.Destination.S3BucketDestination.Prefix != nil { + resf0f0.Prefix = resp.Destination.S3BucketDestination.Prefix + } + resf0.S3BucketDestination = resf0f0 + } + res.Destination = resf0 + } + if resp.Filter != nil { + resf1 := &svcapitypes.InventoryFilter{} + if resp.Filter.Prefix != nil { + resf1.Prefix = resp.Filter.Prefix + } + res.Filter = resf1 + } + if resp.Id != nil { + res.ID = resp.Id + } + if resp.IncludedObjectVersions != nil { + res.IncludedObjectVersions = resp.IncludedObjectVersions + } + if resp.IsEnabled != nil { + res.IsEnabled = resp.IsEnabled + } + if resp.OptionalFields != nil { + resf5 := []*string{} + for _, resf5iter := range resp.OptionalFields { + var resf5elem string + resf5elem = *resf5iter + resf5 = append(resf5, &resf5elem) + } + res.OptionalFields = resf5 + } + if resp.Schedule != nil { + resf6 := &svcapitypes.InventorySchedule{} + if resp.Schedule.Frequency != nil { + resf6.Frequency = resp.Schedule.Frequency + } + res.Schedule = resf6 + } + + return res +} + +func compareInventoryConfiguration( + a *svcapitypes.InventoryConfiguration, + b *svcapitypes.InventoryConfiguration, +) *ackcompare.Delta { + delta := ackcompare.NewDelta() + if ackcompare.HasNilDifference(a.Destination, b.Destination) { + delta.Add("InventoryConfiguration.Destination", a.Destination, b.Destination) + } else if a.Destination != nil && b.Destination != nil { + if ackcompare.HasNilDifference(a.Destination.S3BucketDestination, b.Destination.S3BucketDestination) { + delta.Add("InventoryConfiguration.Destination.S3BucketDestination", a.Destination.S3BucketDestination, b.Destination.S3BucketDestination) + } else if a.Destination.S3BucketDestination != nil && b.Destination.S3BucketDestination != nil { + if ackcompare.HasNilDifference(a.Destination.S3BucketDestination.AccountID, b.Destination.S3BucketDestination.AccountID) { + delta.Add("InventoryConfiguration.Destination.S3BucketDestination.AccountID", a.Destination.S3BucketDestination.AccountID, b.Destination.S3BucketDestination.AccountID) + } else if a.Destination.S3BucketDestination.AccountID != nil && b.Destination.S3BucketDestination.AccountID != nil { + if *a.Destination.S3BucketDestination.AccountID != *b.Destination.S3BucketDestination.AccountID { + 
delta.Add("InventoryConfiguration.Destination.S3BucketDestination.AccountID", a.Destination.S3BucketDestination.AccountID, b.Destination.S3BucketDestination.AccountID) + } + } + if ackcompare.HasNilDifference(a.Destination.S3BucketDestination.Bucket, b.Destination.S3BucketDestination.Bucket) { + delta.Add("InventoryConfiguration.Destination.S3BucketDestination.Bucket", a.Destination.S3BucketDestination.Bucket, b.Destination.S3BucketDestination.Bucket) + } else if a.Destination.S3BucketDestination.Bucket != nil && b.Destination.S3BucketDestination.Bucket != nil { + if *a.Destination.S3BucketDestination.Bucket != *b.Destination.S3BucketDestination.Bucket { + delta.Add("InventoryConfiguration.Destination.S3BucketDestination.Bucket", a.Destination.S3BucketDestination.Bucket, b.Destination.S3BucketDestination.Bucket) + } + } + if ackcompare.HasNilDifference(a.Destination.S3BucketDestination.Encryption, b.Destination.S3BucketDestination.Encryption) { + delta.Add("InventoryConfiguration.Destination.S3BucketDestination.Encryption", a.Destination.S3BucketDestination.Encryption, b.Destination.S3BucketDestination.Encryption) + } else if a.Destination.S3BucketDestination.Encryption != nil && b.Destination.S3BucketDestination.Encryption != nil { + if ackcompare.HasNilDifference(a.Destination.S3BucketDestination.Encryption.SSEKMS, b.Destination.S3BucketDestination.Encryption.SSEKMS) { + delta.Add("InventoryConfiguration.Destination.S3BucketDestination.Encryption.SSEKMS", a.Destination.S3BucketDestination.Encryption.SSEKMS, b.Destination.S3BucketDestination.Encryption.SSEKMS) + } else if a.Destination.S3BucketDestination.Encryption.SSEKMS != nil && b.Destination.S3BucketDestination.Encryption.SSEKMS != nil { + if ackcompare.HasNilDifference(a.Destination.S3BucketDestination.Encryption.SSEKMS.KeyID, b.Destination.S3BucketDestination.Encryption.SSEKMS.KeyID) { + delta.Add("InventoryConfiguration.Destination.S3BucketDestination.Encryption.SSEKMS.KeyID", a.Destination.S3BucketDestination.Encryption.SSEKMS.KeyID, b.Destination.S3BucketDestination.Encryption.SSEKMS.KeyID) + } else if a.Destination.S3BucketDestination.Encryption.SSEKMS.KeyID != nil && b.Destination.S3BucketDestination.Encryption.SSEKMS.KeyID != nil { + if *a.Destination.S3BucketDestination.Encryption.SSEKMS.KeyID != *b.Destination.S3BucketDestination.Encryption.SSEKMS.KeyID { + delta.Add("InventoryConfiguration.Destination.S3BucketDestination.Encryption.SSEKMS.KeyID", a.Destination.S3BucketDestination.Encryption.SSEKMS.KeyID, b.Destination.S3BucketDestination.Encryption.SSEKMS.KeyID) + } + } + } + } + if ackcompare.HasNilDifference(a.Destination.S3BucketDestination.Format, b.Destination.S3BucketDestination.Format) { + delta.Add("InventoryConfiguration.Destination.S3BucketDestination.Format", a.Destination.S3BucketDestination.Format, b.Destination.S3BucketDestination.Format) + } else if a.Destination.S3BucketDestination.Format != nil && b.Destination.S3BucketDestination.Format != nil { + if *a.Destination.S3BucketDestination.Format != *b.Destination.S3BucketDestination.Format { + delta.Add("InventoryConfiguration.Destination.S3BucketDestination.Format", a.Destination.S3BucketDestination.Format, b.Destination.S3BucketDestination.Format) + } + } + if ackcompare.HasNilDifference(a.Destination.S3BucketDestination.Prefix, b.Destination.S3BucketDestination.Prefix) { + delta.Add("InventoryConfiguration.Destination.S3BucketDestination.Prefix", a.Destination.S3BucketDestination.Prefix, b.Destination.S3BucketDestination.Prefix) + } else if 
a.Destination.S3BucketDestination.Prefix != nil && b.Destination.S3BucketDestination.Prefix != nil {
+                if *a.Destination.S3BucketDestination.Prefix != *b.Destination.S3BucketDestination.Prefix {
+                    delta.Add("InventoryConfiguration.Destination.S3BucketDestination.Prefix", a.Destination.S3BucketDestination.Prefix, b.Destination.S3BucketDestination.Prefix)
+                }
+            }
+        }
+    }
+    if ackcompare.HasNilDifference(a.Filter, b.Filter) {
+        delta.Add("InventoryConfiguration.Filter", a.Filter, b.Filter)
+    } else if a.Filter != nil && b.Filter != nil {
+        if ackcompare.HasNilDifference(a.Filter.Prefix, b.Filter.Prefix) {
+            delta.Add("InventoryConfiguration.Filter.Prefix", a.Filter.Prefix, b.Filter.Prefix)
+        } else if a.Filter.Prefix != nil && b.Filter.Prefix != nil {
+            if *a.Filter.Prefix != *b.Filter.Prefix {
+                delta.Add("InventoryConfiguration.Filter.Prefix", a.Filter.Prefix, b.Filter.Prefix)
+            }
+        }
+    }
+    if ackcompare.HasNilDifference(a.ID, b.ID) {
+        delta.Add("InventoryConfiguration.ID", a.ID, b.ID)
+    } else if a.ID != nil && b.ID != nil {
+        if *a.ID != *b.ID {
+            delta.Add("InventoryConfiguration.ID", a.ID, b.ID)
+        }
+    }
+    if ackcompare.HasNilDifference(a.IncludedObjectVersions, b.IncludedObjectVersions) {
+        delta.Add("InventoryConfiguration.IncludedObjectVersions", a.IncludedObjectVersions, b.IncludedObjectVersions)
+    } else if a.IncludedObjectVersions != nil && b.IncludedObjectVersions != nil {
+        if *a.IncludedObjectVersions != *b.IncludedObjectVersions {
+            delta.Add("InventoryConfiguration.IncludedObjectVersions", a.IncludedObjectVersions, b.IncludedObjectVersions)
+        }
+    }
+    if ackcompare.HasNilDifference(a.IsEnabled, b.IsEnabled) {
+        delta.Add("InventoryConfiguration.IsEnabled", a.IsEnabled, b.IsEnabled)
+    } else if a.IsEnabled != nil && b.IsEnabled != nil {
+        if *a.IsEnabled != *b.IsEnabled {
+            delta.Add("InventoryConfiguration.IsEnabled", a.IsEnabled, b.IsEnabled)
+        }
+    }
+    if !ackcompare.SliceStringPEqual(a.OptionalFields, b.OptionalFields) {
+        delta.Add("InventoryConfiguration.OptionalFields", a.OptionalFields, b.OptionalFields)
+    }
+    if ackcompare.HasNilDifference(a.Schedule, b.Schedule) {
+        delta.Add("InventoryConfiguration.Schedule", a.Schedule, b.Schedule)
+    } else if a.Schedule != nil && b.Schedule != nil {
+        if ackcompare.HasNilDifference(a.Schedule.Frequency, b.Schedule.Frequency) {
+            delta.Add("InventoryConfiguration.Schedule.Frequency", a.Schedule.Frequency, b.Schedule.Frequency)
+        } else if a.Schedule.Frequency != nil && b.Schedule.Frequency != nil {
+            if *a.Schedule.Frequency != *b.Schedule.Frequency {
+                delta.Add("InventoryConfiguration.Schedule.Frequency", a.Schedule.Frequency, b.Schedule.Frequency)
+            }
+        }
+    }
+
+    return delta
+}
+
+// getInventoryConfigurationAction returns the determined action for a given
+// configuration object, depending on the desired and latest values
+func getInventoryConfigurationAction(
+    c *svcapitypes.InventoryConfiguration,
+    latest *resource,
+) ConfigurationAction {
+    action := ConfigurationActionPut
+    if latest != nil {
+        for _, l := range latest.ko.Spec.Inventory {
+            if *l.ID != *c.ID {
+                continue
+            }
+
+            // Don't perform any action if they are identical; update only
+            // when the stored configuration has drifted
+            delta := compareInventoryConfiguration(l, c)
+            if len(delta.Differences) > 0 {
+                action = ConfigurationActionUpdate
+            } else {
+                action = ConfigurationActionNone
+            }
+            break
+        }
+    }
+    return action
+}
+
+func (rm *resourceManager) newListBucketInventoryPayload(
+    r *resource,
+) *svcsdk.ListBucketInventoryConfigurationsInput {
+    res := &svcsdk.ListBucketInventoryConfigurationsInput{}
+
res.SetBucket(*r.ko.Spec.Name) + return res +} + +func (rm *resourceManager) newPutBucketInventoryPayload( + r *resource, + c svcapitypes.InventoryConfiguration, +) *svcsdk.PutBucketInventoryConfigurationInput { + res := &svcsdk.PutBucketInventoryConfigurationInput{} + res.SetBucket(*r.ko.Spec.Name) + res.SetId(*c.ID) + res.SetInventoryConfiguration(rm.newInventoryConfiguration(&c)) + + return res +} + +func (rm *resourceManager) newDeleteBucketInventoryPayload( + r *resource, + c svcapitypes.InventoryConfiguration, +) *svcsdk.DeleteBucketInventoryConfigurationInput { + res := &svcsdk.DeleteBucketInventoryConfigurationInput{} + res.SetBucket(*r.ko.Spec.Name) + res.SetId(*c.ID) + + return res +} + +func (rm *resourceManager) deleteInventoryConfiguration( + ctx context.Context, + r *resource, + c svcapitypes.InventoryConfiguration, +) (err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.deleteInventoryConfiguration") + defer exit(err) + + input := rm.newDeleteBucketInventoryPayload(r, c) + _, err = rm.sdkapi.DeleteBucketInventoryConfigurationWithContext(ctx, input) + rm.metrics.RecordAPICall("UPDATE", "DeleteBucketInventoryConfiguration", err) + return err +} + +func (rm *resourceManager) putInventoryConfiguration( + ctx context.Context, + r *resource, + c svcapitypes.InventoryConfiguration, +) (err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.putInventoryConfiguration") + defer exit(err) + + input := rm.newPutBucketInventoryPayload(r, c) + _, err = rm.sdkapi.PutBucketInventoryConfigurationWithContext(ctx, input) + rm.metrics.RecordAPICall("UPDATE", "PutBucketInventoryConfiguration", err) + return err +} + +func (rm *resourceManager) syncInventory( + ctx context.Context, + desired *resource, + latest *resource, +) (err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.syncInventory") + defer exit(err) + + for _, c := range desired.ko.Spec.Inventory { + action := getInventoryConfigurationAction(c, latest) + + switch action { + case ConfigurationActionUpdate: + fallthrough + case ConfigurationActionPut: + if err = rm.putInventoryConfiguration(ctx, desired, *c); err != nil { + return err + } + default: + } + } + + if latest != nil { + // Find any configurations that are in the latest but not in desired + for _, l := range latest.ko.Spec.Inventory { + exists := false + for _, c := range desired.ko.Spec.Inventory { + if *c.ID != *l.ID { + continue + } + exists = true + break + } + + if !exists { + if err = rm.deleteInventoryConfiguration(ctx, desired, *l); err != nil { + return err + } + } + } + } + + return nil +} + // newLifecycleConfiguration returns a LifecycleConfiguration object // with each the field set by the resource's corresponding spec field. 
func (rm *resourceManager) newLifecycleConfiguration( @@ -687,6 +1831,7 @@ func (rm *resourceManager) setResourceLifecycle( resp *svcsdk.GetBucketLifecycleConfigurationOutput, ) *svcapitypes.BucketLifecycleConfiguration { res := &svcapitypes.BucketLifecycleConfiguration{} + if resp.Rules != nil { resf0 := []*svcapitypes.LifecycleRule{} for _, resf0iter := range resp.Rules { @@ -862,6 +2007,7 @@ func (rm *resourceManager) setResourceLogging( resp *svcsdk.GetBucketLoggingOutput, ) *svcapitypes.BucketLoggingStatus { res := &svcapitypes.BucketLoggingStatus{} + if resp.LoggingEnabled != nil { resf0 := &svcapitypes.LoggingEnabled{} if resp.LoggingEnabled.TargetBucket != nil { @@ -906,6 +2052,303 @@ func (rm *resourceManager) setResourceLogging( return res } +// newMetricsConfiguration returns a MetricsConfiguration object +// with each the field set by the corresponding configuration's fields. +func (rm *resourceManager) newMetricsConfiguration( + c *svcapitypes.MetricsConfiguration, +) *svcsdk.MetricsConfiguration { + res := &svcsdk.MetricsConfiguration{} + + if c.Filter != nil { + resf0 := &svcsdk.MetricsFilter{} + if c.Filter.And != nil { + resf0f0 := &svcsdk.MetricsAndOperator{} + if c.Filter.And.Prefix != nil { + resf0f0.SetPrefix(*c.Filter.And.Prefix) + } + if c.Filter.And.Tags != nil { + resf0f0f1 := []*svcsdk.Tag{} + for _, resf0f0f1iter := range c.Filter.And.Tags { + resf0f0f1elem := &svcsdk.Tag{} + if resf0f0f1iter.Key != nil { + resf0f0f1elem.SetKey(*resf0f0f1iter.Key) + } + if resf0f0f1iter.Value != nil { + resf0f0f1elem.SetValue(*resf0f0f1iter.Value) + } + resf0f0f1 = append(resf0f0f1, resf0f0f1elem) + } + resf0f0.SetTags(resf0f0f1) + } + resf0.SetAnd(resf0f0) + } + if c.Filter.Prefix != nil { + resf0.SetPrefix(*c.Filter.Prefix) + } + if c.Filter.Tag != nil { + resf0f2 := &svcsdk.Tag{} + if c.Filter.Tag.Key != nil { + resf0f2.SetKey(*c.Filter.Tag.Key) + } + if c.Filter.Tag.Value != nil { + resf0f2.SetValue(*c.Filter.Tag.Value) + } + resf0.SetTag(resf0f2) + } + res.SetFilter(resf0) + } + if c.ID != nil { + res.SetId(*c.ID) + } + + return res +} + +// setMetricsConfiguration sets a resource MetricsConfiguration type +// given the SDK type. 
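+//
+// A hedged example (identifiers are illustrative): a request-metrics filter
+// scoped to the "images/" prefix,
+//
+//     c := &svcapitypes.MetricsConfiguration{
+//         ID:     aws.String("images-metrics"),
+//         Filter: &svcapitypes.MetricsFilter{Prefix: aws.String("images/")},
+//     }
+//
+// survives newMetricsConfiguration followed by setResourceMetricsConfiguration
+// unchanged, which is what compareMetricsConfiguration relies on when
+// getMetricsConfigurationAction decides between put, update, and no-op.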
+func (rm *resourceManager) setResourceMetricsConfiguration( + r *resource, + resp *svcsdk.MetricsConfiguration, +) *svcapitypes.MetricsConfiguration { + res := &svcapitypes.MetricsConfiguration{} + + if resp.Filter != nil { + resf0 := &svcapitypes.MetricsFilter{} + if resp.Filter.And != nil { + resf0f0 := &svcapitypes.MetricsAndOperator{} + if resp.Filter.And.Prefix != nil { + resf0f0.Prefix = resp.Filter.And.Prefix + } + if resp.Filter.And.Tags != nil { + resf0f0f1 := []*svcapitypes.Tag{} + for _, resf0f0f1iter := range resp.Filter.And.Tags { + resf0f0f1elem := &svcapitypes.Tag{} + if resf0f0f1iter.Key != nil { + resf0f0f1elem.Key = resf0f0f1iter.Key + } + if resf0f0f1iter.Value != nil { + resf0f0f1elem.Value = resf0f0f1iter.Value + } + resf0f0f1 = append(resf0f0f1, resf0f0f1elem) + } + resf0f0.Tags = resf0f0f1 + } + resf0.And = resf0f0 + } + if resp.Filter.Prefix != nil { + resf0.Prefix = resp.Filter.Prefix + } + if resp.Filter.Tag != nil { + resf0f2 := &svcapitypes.Tag{} + if resp.Filter.Tag.Key != nil { + resf0f2.Key = resp.Filter.Tag.Key + } + if resp.Filter.Tag.Value != nil { + resf0f2.Value = resp.Filter.Tag.Value + } + resf0.Tag = resf0f2 + } + res.Filter = resf0 + } + if resp.Id != nil { + res.ID = resp.Id + } + + return res +} + +func compareMetricsConfiguration( + a *svcapitypes.MetricsConfiguration, + b *svcapitypes.MetricsConfiguration, +) *ackcompare.Delta { + delta := ackcompare.NewDelta() + if ackcompare.HasNilDifference(a.Filter, b.Filter) { + delta.Add("MetricsConfiguration.Filter", a.Filter, b.Filter) + } else if a.Filter != nil && b.Filter != nil { + if ackcompare.HasNilDifference(a.Filter.And, b.Filter.And) { + delta.Add("MetricsConfiguration.Filter.And", a.Filter.And, b.Filter.And) + } else if a.Filter.And != nil && b.Filter.And != nil { + if ackcompare.HasNilDifference(a.Filter.And.Prefix, b.Filter.And.Prefix) { + delta.Add("MetricsConfiguration.Filter.And.Prefix", a.Filter.And.Prefix, b.Filter.And.Prefix) + } else if a.Filter.And.Prefix != nil && b.Filter.And.Prefix != nil { + if *a.Filter.And.Prefix != *b.Filter.And.Prefix { + delta.Add("MetricsConfiguration.Filter.And.Prefix", a.Filter.And.Prefix, b.Filter.And.Prefix) + } + } + if !reflect.DeepEqual(a.Filter.And.Tags, b.Filter.And.Tags) { + delta.Add("MetricsConfiguration.Filter.And.Tags", a.Filter.And.Tags, b.Filter.And.Tags) + } + } + if ackcompare.HasNilDifference(a.Filter.Prefix, b.Filter.Prefix) { + delta.Add("MetricsConfiguration.Filter.Prefix", a.Filter.Prefix, b.Filter.Prefix) + } else if a.Filter.Prefix != nil && b.Filter.Prefix != nil { + if *a.Filter.Prefix != *b.Filter.Prefix { + delta.Add("MetricsConfiguration.Filter.Prefix", a.Filter.Prefix, b.Filter.Prefix) + } + } + if ackcompare.HasNilDifference(a.Filter.Tag, b.Filter.Tag) { + delta.Add("MetricsConfiguration.Filter.Tag", a.Filter.Tag, b.Filter.Tag) + } else if a.Filter.Tag != nil && b.Filter.Tag != nil { + if ackcompare.HasNilDifference(a.Filter.Tag.Key, b.Filter.Tag.Key) { + delta.Add("MetricsConfiguration.Filter.Tag.Key", a.Filter.Tag.Key, b.Filter.Tag.Key) + } else if a.Filter.Tag.Key != nil && b.Filter.Tag.Key != nil { + if *a.Filter.Tag.Key != *b.Filter.Tag.Key { + delta.Add("MetricsConfiguration.Filter.Tag.Key", a.Filter.Tag.Key, b.Filter.Tag.Key) + } + } + if ackcompare.HasNilDifference(a.Filter.Tag.Value, b.Filter.Tag.Value) { + delta.Add("MetricsConfiguration.Filter.Tag.Value", a.Filter.Tag.Value, b.Filter.Tag.Value) + } else if a.Filter.Tag.Value != nil && b.Filter.Tag.Value != nil { + if *a.Filter.Tag.Value != *b.Filter.Tag.Value { + 
delta.Add("MetricsConfiguration.Filter.Tag.Value", a.Filter.Tag.Value, b.Filter.Tag.Value) + } + } + } + } + if ackcompare.HasNilDifference(a.ID, b.ID) { + delta.Add("MetricsConfiguration.ID", a.ID, b.ID) + } else if a.ID != nil && b.ID != nil { + if *a.ID != *b.ID { + delta.Add("MetricsConfiguration.ID", a.ID, b.ID) + } + } + + return delta +} + +// getMetricsConfigurationAction returns the determined action for a given +// configuration object, depending on the desired and latest values +func getMetricsConfigurationAction( + c *svcapitypes.MetricsConfiguration, + latest *resource, +) ConfigurationAction { + action := ConfigurationActionPut + if latest != nil { + for _, l := range latest.ko.Spec.Metrics { + if *l.ID != *c.ID { + continue + } + + // Don't perform any action if they are identical + delta := compareMetricsConfiguration(l, c) + if len(delta.Differences) > 0 { + action = ConfigurationActionNone + } else { + action = ConfigurationActionUpdate + } + break + } + } + return action +} + +func (rm *resourceManager) newListBucketMetricsPayload( + r *resource, +) *svcsdk.ListBucketMetricsConfigurationsInput { + res := &svcsdk.ListBucketMetricsConfigurationsInput{} + res.SetBucket(*r.ko.Spec.Name) + return res +} + +func (rm *resourceManager) newPutBucketMetricsPayload( + r *resource, + c svcapitypes.MetricsConfiguration, +) *svcsdk.PutBucketMetricsConfigurationInput { + res := &svcsdk.PutBucketMetricsConfigurationInput{} + res.SetBucket(*r.ko.Spec.Name) + res.SetId(*c.ID) + res.SetMetricsConfiguration(rm.newMetricsConfiguration(&c)) + + return res +} + +func (rm *resourceManager) newDeleteBucketMetricsPayload( + r *resource, + c svcapitypes.MetricsConfiguration, +) *svcsdk.DeleteBucketMetricsConfigurationInput { + res := &svcsdk.DeleteBucketMetricsConfigurationInput{} + res.SetBucket(*r.ko.Spec.Name) + res.SetId(*c.ID) + + return res +} + +func (rm *resourceManager) deleteMetricsConfiguration( + ctx context.Context, + r *resource, + c svcapitypes.MetricsConfiguration, +) (err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.deleteMetricsConfiguration") + defer exit(err) + + input := rm.newDeleteBucketMetricsPayload(r, c) + _, err = rm.sdkapi.DeleteBucketMetricsConfigurationWithContext(ctx, input) + rm.metrics.RecordAPICall("UPDATE", "DeleteBucketMetricsConfiguration", err) + return err +} + +func (rm *resourceManager) putMetricsConfiguration( + ctx context.Context, + r *resource, + c svcapitypes.MetricsConfiguration, +) (err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.putMetricsConfiguration") + defer exit(err) + + input := rm.newPutBucketMetricsPayload(r, c) + _, err = rm.sdkapi.PutBucketMetricsConfigurationWithContext(ctx, input) + rm.metrics.RecordAPICall("UPDATE", "PutBucketMetricsConfiguration", err) + return err +} + +func (rm *resourceManager) syncMetrics( + ctx context.Context, + desired *resource, + latest *resource, +) (err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.syncMetrics") + defer exit(err) + + for _, c := range desired.ko.Spec.Metrics { + action := getMetricsConfigurationAction(c, latest) + + switch action { + case ConfigurationActionUpdate: + fallthrough + case ConfigurationActionPut: + if err = rm.putMetricsConfiguration(ctx, desired, *c); err != nil { + return err + } + default: + } + } + + if latest != nil { + // Find any configurations that are in the latest but not in desired + for _, l := range latest.ko.Spec.Metrics { + exists := false + for _, c := range desired.ko.Spec.Metrics { + if 
*c.ID != *l.ID { + continue + } + exists = true + break + } + + if !exists { + if err = rm.deleteMetricsConfiguration(ctx, desired, *l); err != nil { + return err + } + } + } + } + + return nil +} + // newNotificationConfiguration returns a NotificationConfiguration object // with each the field set by the resource's corresponding spec field. func (rm *resourceManager) newNotificationConfiguration( @@ -1059,6 +2502,7 @@ func (rm *resourceManager) setResourceNotification( resp *svcsdk.NotificationConfiguration, ) *svcapitypes.NotificationConfiguration { res := &svcapitypes.NotificationConfiguration{} + if resp.LambdaFunctionConfigurations != nil { resf0 := []*svcapitypes.LambdaFunctionConfiguration{} for _, resf0iter := range resp.LambdaFunctionConfigurations { @@ -1227,6 +2671,7 @@ func (rm *resourceManager) setResourceOwnershipControls( resp *svcsdk.GetBucketOwnershipControlsOutput, ) *svcapitypes.OwnershipControls { res := &svcapitypes.OwnershipControls{} + if resp.OwnershipControls.Rules != nil { resf0 := []*svcapitypes.OwnershipControlsRule{} for _, resf0iter := range resp.OwnershipControls.Rules { @@ -1454,6 +2899,7 @@ func (rm *resourceManager) setResourceReplication( resp *svcsdk.GetBucketReplicationOutput, ) *svcapitypes.ReplicationConfiguration { res := &svcapitypes.ReplicationConfiguration{} + if resp.ReplicationConfiguration.Role != nil { res.Role = resp.ReplicationConfiguration.Role } @@ -1627,6 +3073,7 @@ func (rm *resourceManager) setResourceRequestPayment( resp *svcsdk.GetBucketRequestPaymentOutput, ) *svcapitypes.RequestPaymentConfiguration { res := &svcapitypes.RequestPaymentConfiguration{} + if resp.Payer != nil { res.Payer = resp.Payer } @@ -1666,6 +3113,7 @@ func (rm *resourceManager) setResourceTagging( resp *svcsdk.GetBucketTaggingOutput, ) *svcapitypes.Tagging { res := &svcapitypes.Tagging{} + if resp.TagSet != nil { resf0 := []*svcapitypes.Tag{} for _, resf0iter := range resp.TagSet { @@ -1705,6 +3153,7 @@ func (rm *resourceManager) setResourceVersioning( resp *svcsdk.GetBucketVersioningOutput, ) *svcapitypes.VersioningConfiguration { res := &svcapitypes.VersioningConfiguration{} + if resp.Status != nil { res.Status = resp.Status } @@ -1791,6 +3240,7 @@ func (rm *resourceManager) setResourceWebsite( resp *svcsdk.GetBucketWebsiteOutput, ) *svcapitypes.WebsiteConfiguration { res := &svcapitypes.WebsiteConfiguration{} + if resp.ErrorDocument != nil { resf0 := &svcapitypes.ErrorDocument{} if resp.ErrorDocument.Key != nil { diff --git a/templates/hooks/bucket/sdk_file_end.go.tpl b/templates/hooks/bucket/sdk_file_end.go.tpl index 0f9b09d..87b79b5 100644 --- a/templates/hooks/bucket/sdk_file_end.go.tpl +++ b/templates/hooks/bucket/sdk_file_end.go.tpl @@ -2,12 +2,17 @@ {{ $SDKAPI := .SDKAPI }} {{ range $specFieldName, $specField := $CRD.Config.Resources.Bucket.Fields -}} + +{{/* If the field comes from a single Put* operation */}} +{{- if $specField.From }} {{- $operationName := $specField.From.Operation }} {{- $path := $specField.From.Path }} +{{/* Only generate for Put* operation fields */}} {{- if (eq (slice $operationName 0 3) "Put") }} {{- $field := (index $CRD.SpecFields $specFieldName )}} {{- $operation := (index $SDKAPI.API.Operations $operationName) -}} +{{/* Find the structure field within the operation */}} {{- range $memberRefName, $memberRef := $operation.InputRef.Shape.MemberRefs -}} {{- if (eq $memberRef.Shape.Type "structure") }} @@ -23,6 +28,7 @@ func (rm *resourceManager) new{{ $memberRefName }}( return res } +{{/* Find the matching Get* operation */}} {{- 
$describeOperationName := (printf "Get%s" (slice $operationName 3))}}
 {{- $field := (index $CRD.SpecFields $specFieldName )}}
 {{- $operation := (index $SDKAPI.API.Operations $describeOperationName)}}
@@ -35,6 +41,7 @@ func (rm *resourceManager) setResource{{ $specFieldName }}(
 ) *svcapitypes.{{ $memberRef.ShapeName }} {
     res := &svcapitypes.{{ $memberRef.ShapeName }}{}
+{{/* Some operations have wrapping structures in their response */}}
 {{- if (eq $operationName "PutBucketEncryption") }}
 {{ GoCodeSetResourceForStruct $CRD "" "res" $memberRef "resp.ServerSideEncryptionConfiguration" $memberRef 1 }}
 {{- else if (eq $operationName "PutBucketOwnershipControls") }}
@@ -52,4 +59,187 @@ func (rm *resourceManager) setResource{{ $specFieldName }}(
 {{- end }}
 {{- end }}
 {{- end }}
+
+{{/* If the field is a custom shape */}}
+{{- else if $specField.CustomField }}
+
+{{- $memberRefName := $specField.CustomField.ListOf }}
+{{/* Iterate through the custom shapes to find the matching shape ref */}}
+{{- range $index, $customShape := $SDKAPI.CustomShapes }}
+{{- if (eq (Dereference $customShape.MemberShapeName) $memberRefName) }}
+
+{{- $memberRef := $customShape.Shape.MemberRef }}
+
+// new{{ $memberRefName }} returns a {{ $memberRefName }} object
+// with each field set by the corresponding configuration's fields.
+func (rm *resourceManager) new{{ $memberRefName }}(
+    c *svcapitypes.{{ $memberRefName }},
+) *svcsdk.{{ $memberRefName }} {
+    res := &svcsdk.{{ $memberRefName }}{}
+
+{{ GoCodeSetSDKForStruct $CRD "" "res" $memberRef "" "c" 1 }}
+
+    return res
+}
+
+// setResource{{ $memberRefName }} sets a resource {{ $memberRefName }} type
+// given the SDK type.
+func (rm *resourceManager) setResource{{ $memberRefName }}(
+    r *resource,
+    resp *svcsdk.{{ $memberRefName }},
+) *svcapitypes.{{ $memberRefName }} {
+    res := &svcapitypes.{{ $memberRefName }}{}
+
+{{ GoCodeSetResourceForStruct $CRD "" "res" $memberRef "resp" $memberRef 1 }}
+
+    return res
+}
+
+func compare{{ $memberRefName }}(
+    a *svcapitypes.{{ $memberRefName }},
+    b *svcapitypes.{{ $memberRefName }},
+) *ackcompare.Delta {
+    delta := ackcompare.NewDelta()
+{{ GoCodeCompareStruct $CRD $memberRef.Shape "delta" "a" "b" $memberRefName 1 }}
+    return delta
+}
+
+// get{{ $memberRefName }}Action returns the determined action for a given
+// configuration object, depending on the desired and latest values
+func get{{ $memberRefName }}Action(
+    c *svcapitypes.{{ $memberRefName }},
+    latest *resource,
+) ConfigurationAction {
+    action := ConfigurationActionPut
+    if latest != nil {
+        for _, l := range latest.ko.Spec.{{ $specFieldName }} {
+            if *l.ID != *c.ID {
+                continue
+            }
+
+            // Don't perform any action if they are identical; update only
+            // when the stored configuration has drifted
+            delta := compare{{ $memberRefName }}(l, c)
+            if len(delta.Differences) > 0 {
+                action = ConfigurationActionUpdate
+            } else {
+                action = ConfigurationActionNone
+            }
+            break
+        }
+    }
+    return action
+}
+
+func (rm *resourceManager) newListBucket{{ $specFieldName }}Payload(
+    r *resource,
+) *svcsdk.ListBucket{{ $memberRefName }}sInput {
+    res := &svcsdk.ListBucket{{ $memberRefName }}sInput{}
+    res.SetBucket(*r.ko.Spec.Name)
+    return res
+}
+
+func (rm *resourceManager) newPutBucket{{ $specFieldName }}Payload(
+    r *resource,
+    c svcapitypes.{{ $memberRefName }},
+) *svcsdk.PutBucket{{ $memberRefName }}Input {
+    res := &svcsdk.PutBucket{{ $memberRefName }}Input{}
+    res.SetBucket(*r.ko.Spec.Name)
+    res.SetId(*c.ID)
+    res.Set{{ $memberRefName }}(rm.new{{ $memberRefName }}(&c))
+
+    return res
+}
+
+func (rm *resourceManager) newDeleteBucket{{
$specFieldName }}Payload( + r *resource, + c svcapitypes.{{ $memberRefName }}, +) *svcsdk.DeleteBucket{{ $memberRefName }}Input { + res := &svcsdk.DeleteBucket{{ $memberRefName }}Input{} + res.SetBucket(*r.ko.Spec.Name) + res.SetId(*c.ID) + + return res +} + +func (rm *resourceManager) delete{{ $memberRefName }}( + ctx context.Context, + r *resource, + c svcapitypes.{{ $memberRefName }}, +) (err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.delete{{ $memberRefName }}") + defer exit(err) + + input := rm.newDeleteBucket{{ $specFieldName }}Payload(r, c) + _, err = rm.sdkapi.DeleteBucket{{ $memberRefName }}WithContext(ctx, input) + rm.metrics.RecordAPICall("UPDATE", "DeleteBucket{{ $memberRefName }}", err) + return err +} + +func (rm *resourceManager) put{{ $memberRefName }}( + ctx context.Context, + r *resource, + c svcapitypes.{{ $memberRefName }}, +) (err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.put{{ $memberRefName }}") + defer exit(err) + + input := rm.newPutBucket{{ $specFieldName }}Payload(r, c) + _, err = rm.sdkapi.PutBucket{{ $memberRefName }}WithContext(ctx, input) + rm.metrics.RecordAPICall("UPDATE", "PutBucket{{ $memberRefName }}", err) + return err +} + +func (rm *resourceManager) sync{{ $specFieldName }}( + ctx context.Context, + desired *resource, + latest *resource, +) (err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.sync{{ $specFieldName }}") + defer exit(err) + + for _, c := range desired.ko.Spec.{{ $specFieldName }} { + action := get{{ $memberRefName }}Action(c, latest) + + switch action { + case ConfigurationActionUpdate: + fallthrough + case ConfigurationActionPut: + if err = rm.put{{ $memberRefName }}(ctx, desired, *c); err != nil { + return err + } + default: + } + } + + if latest != nil { + // Find any configurations that are in the latest but not in desired + for _, l := range latest.ko.Spec.{{ $specFieldName }} { + exists := false + for _, c := range desired.ko.Spec.{{ $specFieldName }} { + if *c.ID != *l.ID { + continue + } + exists = true + break + } + + if !exists { + if err = rm.delete{{ $memberRefName }}(ctx, desired, *l); err != nil { + return err + } + } + } + } + + return nil +} + +{{- end }} +{{- end }} + +{{- end }} + {{- end }} \ No newline at end of file