diff --git a/apis/v1alpha1/ack-generate-metadata.yaml b/apis/v1alpha1/ack-generate-metadata.yaml index fb335f9..b35f521 100755 --- a/apis/v1alpha1/ack-generate-metadata.yaml +++ b/apis/v1alpha1/ack-generate-metadata.yaml @@ -1,13 +1,13 @@ ack_generate_info: - build_date: "2025-07-22T22:08:29Z" - build_hash: b2dc0f44e0b08f041de14c3944a5cc005ba97c8f + build_date: "2025-08-11T21:53:39Z" + build_hash: b4fbf4e427daaef74ed873aac01e4a9ca68fb479 go_version: go1.24.5 - version: v0.50.0 -api_directory_checksum: fdaeeb99359feab7411a0ea7a1546597978675dd + version: v0.50.0-3-gb4fbf4e +api_directory_checksum: 96da551bae0f7145461817315244bfb60d5fa4ab api_version: v1alpha1 aws_sdk_go_version: 1.32.6 generator_config_info: - file_checksum: 2440ba630b0f66b3e92fd740afcafbd79da6b0c4 + file_checksum: ddda15a3856a7fa63b1f7cf5c1c376189c02dcd7 original_file_name: generator.yaml last_modification: reason: API generation diff --git a/apis/v1alpha1/generator.yaml b/apis/v1alpha1/generator.yaml index a6ac8c2..2e18780 100644 --- a/apis/v1alpha1/generator.yaml +++ b/apis/v1alpha1/generator.yaml @@ -4,7 +4,7 @@ ignore: - CompositeAlarm - InsightRule - ManagedInsightRule - - MetricStream + # - MetricStream model_name: monitoring operations: DeleteAlarms: @@ -21,6 +21,11 @@ operations: - Create - Update resource_name: MetricAlarm + PutMetricStream: + operation_type: + - Create + - Update + resource_name: MetricStream resources: MetricAlarm: fields: @@ -36,4 +41,4 @@ resources: sdk_read_many_post_build_request: template_path: hooks/metricalarm/sdk_read_many_post_build_request.go.tpl sdk_delete_post_build_request: - template_path: hooks/metricalarm/sdk_delete_post_build_request.go.tpl + template_path: hooks/metricalarm/sdk_delete_post_build_request.go.tpl \ No newline at end of file diff --git a/apis/v1alpha1/metric_stream.go b/apis/v1alpha1/metric_stream.go new file mode 100644 index 0000000..ee03214 --- /dev/null +++ b/apis/v1alpha1/metric_stream.go @@ -0,0 +1,132 @@ +// Copyright Amazon.com Inc. 
or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package v1alpha1 + +import ( + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// MetricStreamSpec defines the desired state of MetricStream. +type MetricStreamSpec struct { + + // If you specify this parameter, the stream sends metrics from all metric namespaces + // except for the namespaces that you specify here. + // + // You cannot include ExcludeFilters and IncludeFilters in the same operation. + ExcludeFilters []*MetricStreamFilter `json:"excludeFilters,omitempty"` + // The ARN of the Amazon Kinesis Data Firehose delivery stream to use for this + // metric stream. This Amazon Kinesis Data Firehose delivery stream must already + // exist and must be in the same account as the metric stream. + // +kubebuilder:validation:Required + FirehoseARN *string `json:"firehoseARN"` + // If you specify this parameter, the stream sends only the metrics from the + // metric namespaces that you specify here. + // + // You cannot include IncludeFilters and ExcludeFilters in the same operation. + IncludeFilters []*MetricStreamFilter `json:"includeFilters,omitempty"` + // If you are creating a metric stream in a monitoring account, specify true + // to include metrics from source accounts in the metric stream. 
+ IncludeLinkedAccountsMetrics *bool `json:"includeLinkedAccountsMetrics,omitempty"` + // If you are creating a new metric stream, this is the name for the new stream. + // The name must be different than the names of other metric streams in this + // account and Region. + // + // If you are updating a metric stream, specify the name of that stream here. + // + // Valid characters are A-Z, a-z, 0-9, "-" and "_". + // +kubebuilder:validation:Required + Name *string `json:"name"` + // The output format for the stream. Valid values are json, opentelemetry1.0, + // and opentelemetry0.7. For more information about metric stream output formats, + // see Metric streams output formats (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-metric-streams-formats.html). + // +kubebuilder:validation:Required + OutputFormat *string `json:"outputFormat"` + // The ARN of an IAM role that this metric stream will use to access Amazon + // Kinesis Data Firehose resources. This IAM role must already exist and must + // be in the same account as the metric stream. This IAM role must include the + // following permissions: + // + // - firehose:PutRecord + // + // - firehose:PutRecordBatch + // + // +kubebuilder:validation:Required + RoleARN *string `json:"roleARN"` + // By default, a metric stream always sends the MAX, MIN, SUM, and SAMPLECOUNT + // statistics for each metric that is streamed. You can use this parameter to + // have the metric stream also send additional statistics in the stream. This + // array can have up to 100 members. + // + // For each entry in this array, you specify one or more metrics and the list + // of additional statistics to stream for those metrics. The additional statistics + // that you can stream depend on the stream's OutputFormat. 
If the OutputFormat + // is json, you can stream any additional statistic that is supported by CloudWatch, + // listed in CloudWatch statistics definitions (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Statistics-definitions.html.html). + // If the OutputFormat is opentelemetry1.0 or opentelemetry0.7, you can stream + // percentile statistics such as p95, p99.9, and so on. + StatisticsConfigurations []*MetricStreamStatisticsConfiguration `json:"statisticsConfigurations,omitempty"` + // A list of key-value pairs to associate with the metric stream. You can associate + // as many as 50 tags with a metric stream. + // + // Tags can help you organize and categorize your resources. You can also use + // them to scope user permissions by granting a user permission to access or + // change only resources with certain tag values. + // + // You can use this parameter only when you are creating a new metric stream. + // If you are using this operation to update an existing metric stream, any + // tags you specify in this parameter are ignored. To change the tags of an + // existing metric stream, use TagResource (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_TagResource.html) + // or UntagResource (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_UntagResource.html). 
+ Tags []*Tag `json:"tags,omitempty"` +} + +// MetricStreamStatus defines the observed state of MetricStream +type MetricStreamStatus struct { + // All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + // that is used to contain resource sync state, account ownership, + // constructed ARN for the resource + // +kubebuilder:validation:Optional + ACKResourceMetadata *ackv1alpha1.ResourceMetadata `json:"ackResourceMetadata"` + // All CRs managed by ACK have a common `Status.Conditions` member that + // contains a collection of `ackv1alpha1.Condition` objects that describe + // the various terminal states of the CR and its backend AWS service API + // resource + // +kubebuilder:validation:Optional + Conditions []*ackv1alpha1.Condition `json:"conditions"` +} + +// MetricStream is the Schema for the MetricStreams API +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +type MetricStream struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec MetricStreamSpec `json:"spec,omitempty"` + Status MetricStreamStatus `json:"status,omitempty"` +} + +// MetricStreamList contains a list of MetricStream +// +kubebuilder:object:root=true +type MetricStreamList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MetricStream `json:"items"` +} + +func init() { + SchemeBuilder.Register(&MetricStream{}, &MetricStreamList{}) +} diff --git a/apis/v1alpha1/types.go b/apis/v1alpha1/types.go index b735789..543e34d 100644 --- a/apis/v1alpha1/types.go +++ b/apis/v1alpha1/types.go @@ -115,7 +115,14 @@ type InsightRuleMetricDatapoint struct { // Contains the information that's required to enable a managed Contributor // Insights rule for an Amazon Web Services resource. 
type ManagedRule struct { - Tags []*Tag `json:"tags,omitempty"` + ResourceARN *string `json:"resourceARN,omitempty"` + Tags []*Tag `json:"tags,omitempty"` +} + +// Contains information about managed Contributor Insights rules, as returned +// by ListManagedInsightRules. +type ManagedRuleDescription struct { + ResourceARN *string `json:"resourceARN,omitempty"` } // Represents a specific metric. @@ -231,8 +238,13 @@ type MetricStat struct { // This structure contains the configuration information about one metric stream. type MetricStreamEntry struct { + ARN *string `json:"arn,omitempty"` CreationDate *metav1.Time `json:"creationDate,omitempty"` + FirehoseARN *string `json:"firehoseARN,omitempty"` LastUpdateDate *metav1.Time `json:"lastUpdateDate,omitempty"` + Name *string `json:"name,omitempty"` + OutputFormat *string `json:"outputFormat,omitempty"` + State *string `json:"state,omitempty"` } // This structure contains a metric namespace and optionally, a list of metric @@ -243,7 +255,18 @@ type MetricStreamEntry struct { // example, this could include 10 metric namespace filters with 99 metrics each, // or 20 namespace filters with 49 metrics specified in each filter. type MetricStreamFilter struct { - Namespace *string `json:"namespace,omitempty"` + MetricNames []*string `json:"metricNames,omitempty"` + Namespace *string `json:"namespace,omitempty"` +} + +// By default, a metric stream always sends the MAX, MIN, SUM, and SAMPLECOUNT +// statistics for each metric that is streamed. This structure contains information +// for one metric that includes additional statistics in the stream. For more +// information about statistics, see CloudWatch, listed in CloudWatch statistics +// definitions (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Statistics-definitions.html.html). 
+type MetricStreamStatisticsConfiguration struct { + AdditionalStatistics []*string `json:"additionalStatistics,omitempty"` + IncludeMetrics []*MetricStreamStatisticsMetric `json:"includeMetrics,omitempty"` } // This object contains the information for one metric that is to be streamed diff --git a/apis/v1alpha1/zz_generated.deepcopy.go b/apis/v1alpha1/zz_generated.deepcopy.go index aeefe00..adaa525 100644 --- a/apis/v1alpha1/zz_generated.deepcopy.go +++ b/apis/v1alpha1/zz_generated.deepcopy.go @@ -329,6 +329,11 @@ func (in *InsightRuleMetricDatapoint) DeepCopy() *InsightRuleMetricDatapoint { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ManagedRule) DeepCopyInto(out *ManagedRule) { *out = *in + if in.ResourceARN != nil { + in, out := &in.ResourceARN, &out.ResourceARN + *out = new(string) + **out = **in + } if in.Tags != nil { in, out := &in.Tags, &out.Tags *out = make([]*Tag, len(*in)) @@ -352,6 +357,26 @@ func (in *ManagedRule) DeepCopy() *ManagedRule { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedRuleDescription) DeepCopyInto(out *ManagedRuleDescription) { + *out = *in + if in.ResourceARN != nil { + in, out := &in.ResourceARN, &out.ResourceARN + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedRuleDescription. +func (in *ManagedRuleDescription) DeepCopy() *ManagedRuleDescription { + if in == nil { + return nil + } + out := new(ManagedRuleDescription) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Metric) DeepCopyInto(out *Metric) { *out = *in @@ -1002,17 +1027,69 @@ func (in *MetricStat) DeepCopy() *MetricStat { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricStream) DeepCopyInto(out *MetricStream) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStream. +func (in *MetricStream) DeepCopy() *MetricStream { + if in == nil { + return nil + } + out := new(MetricStream) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MetricStream) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *MetricStreamEntry) DeepCopyInto(out *MetricStreamEntry) { *out = *in + if in.ARN != nil { + in, out := &in.ARN, &out.ARN + *out = new(string) + **out = **in + } if in.CreationDate != nil { in, out := &in.CreationDate, &out.CreationDate *out = (*in).DeepCopy() } + if in.FirehoseARN != nil { + in, out := &in.FirehoseARN, &out.FirehoseARN + *out = new(string) + **out = **in + } if in.LastUpdateDate != nil { in, out := &in.LastUpdateDate, &out.LastUpdateDate *out = (*in).DeepCopy() } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OutputFormat != nil { + in, out := &in.OutputFormat, &out.OutputFormat + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStreamEntry. @@ -1028,6 +1105,17 @@ func (in *MetricStreamEntry) DeepCopy() *MetricStreamEntry { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MetricStreamFilter) DeepCopyInto(out *MetricStreamFilter) { *out = *in + if in.MetricNames != nil { + in, out := &in.MetricNames, &out.MetricNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.Namespace != nil { in, out := &in.Namespace, &out.Namespace *out = new(string) @@ -1045,6 +1133,159 @@ func (in *MetricStreamFilter) DeepCopy() *MetricStreamFilter { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricStreamList) DeepCopyInto(out *MetricStreamList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MetricStream, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStreamList. +func (in *MetricStreamList) DeepCopy() *MetricStreamList { + if in == nil { + return nil + } + out := new(MetricStreamList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MetricStreamList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricStreamSpec) DeepCopyInto(out *MetricStreamSpec) { + *out = *in + if in.ExcludeFilters != nil { + in, out := &in.ExcludeFilters, &out.ExcludeFilters + *out = make([]*MetricStreamFilter, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(MetricStreamFilter) + (*in).DeepCopyInto(*out) + } + } + } + if in.FirehoseARN != nil { + in, out := &in.FirehoseARN, &out.FirehoseARN + *out = new(string) + **out = **in + } + if in.IncludeFilters != nil { + in, out := &in.IncludeFilters, &out.IncludeFilters + *out = make([]*MetricStreamFilter, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(MetricStreamFilter) + (*in).DeepCopyInto(*out) + } + } + } + if in.IncludeLinkedAccountsMetrics != nil { + in, out := &in.IncludeLinkedAccountsMetrics, &out.IncludeLinkedAccountsMetrics + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OutputFormat 
!= nil { + in, out := &in.OutputFormat, &out.OutputFormat + *out = new(string) + **out = **in + } + if in.RoleARN != nil { + in, out := &in.RoleARN, &out.RoleARN + *out = new(string) + **out = **in + } + if in.StatisticsConfigurations != nil { + in, out := &in.StatisticsConfigurations, &out.StatisticsConfigurations + *out = make([]*MetricStreamStatisticsConfiguration, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(MetricStreamStatisticsConfiguration) + (*in).DeepCopyInto(*out) + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]*Tag, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Tag) + (*in).DeepCopyInto(*out) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStreamSpec. +func (in *MetricStreamSpec) DeepCopy() *MetricStreamSpec { + if in == nil { + return nil + } + out := new(MetricStreamSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricStreamStatisticsConfiguration) DeepCopyInto(out *MetricStreamStatisticsConfiguration) { + *out = *in + if in.AdditionalStatistics != nil { + in, out := &in.AdditionalStatistics, &out.AdditionalStatistics + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IncludeMetrics != nil { + in, out := &in.IncludeMetrics, &out.IncludeMetrics + *out = make([]*MetricStreamStatisticsMetric, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(MetricStreamStatisticsMetric) + (*in).DeepCopyInto(*out) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStreamStatisticsConfiguration. 
+func (in *MetricStreamStatisticsConfiguration) DeepCopy() *MetricStreamStatisticsConfiguration { + if in == nil { + return nil + } + out := new(MetricStreamStatisticsConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MetricStreamStatisticsMetric) DeepCopyInto(out *MetricStreamStatisticsMetric) { *out = *in @@ -1070,6 +1311,37 @@ func (in *MetricStreamStatisticsMetric) DeepCopy() *MetricStreamStatisticsMetric return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricStreamStatus) DeepCopyInto(out *MetricStreamStatus) { + *out = *in + if in.ACKResourceMetadata != nil { + in, out := &in.ACKResourceMetadata, &out.ACKResourceMetadata + *out = new(corev1alpha1.ResourceMetadata) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]*corev1alpha1.Condition, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(corev1alpha1.Condition) + (*in).DeepCopyInto(*out) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStreamStatus. +func (in *MetricStreamStatus) DeepCopy() *MetricStreamStatus { + if in == nil { + return nil + } + out := new(MetricStreamStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Range) DeepCopyInto(out *Range) { *out = *in diff --git a/cmd/controller/main.go b/cmd/controller/main.go index 94bf457..0ef5f8c 100644 --- a/cmd/controller/main.go +++ b/cmd/controller/main.go @@ -40,6 +40,7 @@ import ( svcresource "github.com/aws-controllers-k8s/cloudwatch-controller/pkg/resource" _ "github.com/aws-controllers-k8s/cloudwatch-controller/pkg/resource/metric_alarm" + _ "github.com/aws-controllers-k8s/cloudwatch-controller/pkg/resource/metric_stream" "github.com/aws-controllers-k8s/cloudwatch-controller/pkg/version" ) diff --git a/config/crd/bases/cloudwatch.services.k8s.aws_metricstreams.yaml b/config/crd/bases/cloudwatch.services.k8s.aws_metricstreams.yaml new file mode 100644 index 0000000..a155ee1 --- /dev/null +++ b/config/crd/bases/cloudwatch.services.k8s.aws_metricstreams.yaml @@ -0,0 +1,269 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.2 + name: metricstreams.cloudwatch.services.k8s.aws +spec: + group: cloudwatch.services.k8s.aws + names: + kind: MetricStream + listKind: MetricStreamList + plural: metricstreams + singular: metricstream + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: MetricStream is the Schema for the MetricStreams API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MetricStreamSpec defines the desired state of MetricStream. + properties: + excludeFilters: + description: |- + If you specify this parameter, the stream sends metrics from all metric namespaces + except for the namespaces that you specify here. + + You cannot include ExcludeFilters and IncludeFilters in the same operation. + items: + description: |- + This structure contains a metric namespace and optionally, a list of metric + names, to either include in a metric stream or exclude from a metric stream. + + A metric stream's filters can include up to 1000 total names. This limit + applies to the sum of namespace names and metric names in the filters. For + example, this could include 10 metric namespace filters with 99 metrics each, + or 20 namespace filters with 49 metrics specified in each filter. + properties: + metricNames: + items: + type: string + type: array + namespace: + type: string + type: object + type: array + firehoseARN: + description: |- + The ARN of the Amazon Kinesis Data Firehose delivery stream to use for this + metric stream. This Amazon Kinesis Data Firehose delivery stream must already + exist and must be in the same account as the metric stream. + type: string + includeFilters: + description: |- + If you specify this parameter, the stream sends only the metrics from the + metric namespaces that you specify here. + + You cannot include IncludeFilters and ExcludeFilters in the same operation. + items: + description: |- + This structure contains a metric namespace and optionally, a list of metric + names, to either include in a metric stream or exclude from a metric stream. + + A metric stream's filters can include up to 1000 total names. This limit + applies to the sum of namespace names and metric names in the filters. 
For + example, this could include 10 metric namespace filters with 99 metrics each, + or 20 namespace filters with 49 metrics specified in each filter. + properties: + metricNames: + items: + type: string + type: array + namespace: + type: string + type: object + type: array + includeLinkedAccountsMetrics: + description: |- + If you are creating a metric stream in a monitoring account, specify true + to include metrics from source accounts in the metric stream. + type: boolean + name: + description: |- + If you are creating a new metric stream, this is the name for the new stream. + The name must be different than the names of other metric streams in this + account and Region. + + If you are updating a metric stream, specify the name of that stream here. + + Valid characters are A-Z, a-z, 0-9, "-" and "_". + type: string + outputFormat: + description: |- + The output format for the stream. Valid values are json, opentelemetry1.0, + and opentelemetry0.7. For more information about metric stream output formats, + see Metric streams output formats (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-metric-streams-formats.html). + type: string + roleARN: + description: |- + The ARN of an IAM role that this metric stream will use to access Amazon + Kinesis Data Firehose resources. This IAM role must already exist and must + be in the same account as the metric stream. This IAM role must include the + following permissions: + + * firehose:PutRecord + + * firehose:PutRecordBatch + type: string + statisticsConfigurations: + description: |- + By default, a metric stream always sends the MAX, MIN, SUM, and SAMPLECOUNT + statistics for each metric that is streamed. You can use this parameter to + have the metric stream also send additional statistics in the stream. This + array can have up to 100 members. + + For each entry in this array, you specify one or more metrics and the list + of additional statistics to stream for those metrics. 
The additional statistics + that you can stream depend on the stream's OutputFormat. If the OutputFormat + is json, you can stream any additional statistic that is supported by CloudWatch, + listed in CloudWatch statistics definitions (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Statistics-definitions.html.html). + If the OutputFormat is opentelemetry1.0 or opentelemetry0.7, you can stream + percentile statistics such as p95, p99.9, and so on. + items: + description: |- + By default, a metric stream always sends the MAX, MIN, SUM, and SAMPLECOUNT + statistics for each metric that is streamed. This structure contains information + for one metric that includes additional statistics in the stream. For more + information about statistics, see CloudWatch, listed in CloudWatch statistics + definitions (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Statistics-definitions.html.html). + properties: + additionalStatistics: + items: + type: string + type: array + includeMetrics: + items: + description: |- + This object contains the information for one metric that is to be streamed + with additional statistics. + properties: + metricName: + type: string + namespace: + type: string + type: object + type: array + type: object + type: array + tags: + description: |- + A list of key-value pairs to associate with the metric stream. You can associate + as many as 50 tags with a metric stream. + + Tags can help you organize and categorize your resources. You can also use + them to scope user permissions by granting a user permission to access or + change only resources with certain tag values. + + You can use this parameter only when you are creating a new metric stream. + If you are using this operation to update an existing metric stream, any + tags you specify in this parameter are ignored. 
To change the tags of an + existing metric stream, use TagResource (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_TagResource.html) + or UntagResource (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_UntagResource.html). + items: + description: A key-value pair associated with a CloudWatch resource. + properties: + key: + type: string + value: + type: string + type: object + type: array + required: + - firehoseARN + - name + - outputFormat + - roleARN + type: object + status: + description: MetricStreamStatus defines the observed state of MetricStream + properties: + ackResourceMetadata: + description: |- + All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + that is used to contain resource sync state, account ownership, + constructed ARN for the resource + properties: + arn: + description: |- + ARN is the Amazon Resource Name for the resource. This is a + globally-unique identifier and is set only by the ACK service controller + once the controller has orchestrated the creation of the resource OR + when it has verified that an "adopted" resource (a resource where the + ARN annotation was set by the Kubernetes user on the CR) exists and + matches the supplied CR's Spec field values. + https://github.com/aws/aws-controllers-k8s/issues/270 + type: string + ownerAccountID: + description: |- + OwnerAccountID is the AWS Account ID of the account that owns the + backend AWS service API resource. + type: string + region: + description: Region is the AWS region in which the resource exists + or will exist. 
+ type: string + required: + - ownerAccountID + - region + type: object + conditions: + description: |- + All CRs managed by ACK have a common `Status.Conditions` member that + contains a collection of `ackv1alpha1.Condition` objects that describe + the various terminal states of the CR and its backend AWS service API + resource + items: + description: |- + Condition is the common struct used by all CRDs managed by ACK service + controllers to indicate terminal states of the CR and its backend AWS + service API resource + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type is the type of the Condition + type: string + required: + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 7787da9..b5289ac 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -3,3 +3,4 @@ kind: Kustomization resources: - common - bases/cloudwatch.services.k8s.aws_metricalarms.yaml + - bases/cloudwatch.services.k8s.aws_metricstreams.yaml diff --git a/config/rbac/cluster-role-controller.yaml b/config/rbac/cluster-role-controller.yaml index d65e21e..39a4e0d 100644 --- a/config/rbac/cluster-role-controller.yaml +++ b/config/rbac/cluster-role-controller.yaml @@ -26,6 +26,7 @@ rules: - cloudwatch.services.k8s.aws resources: - metricalarms + - metricstreams verbs: - create - delete @@ -38,6 +39,7 @@ rules: - cloudwatch.services.k8s.aws resources: - metricalarms/status + - metricstreams/status 
verbs: - get - patch diff --git a/config/rbac/role-reader.yaml b/config/rbac/role-reader.yaml index 224243a..d6a1634 100644 --- a/config/rbac/role-reader.yaml +++ b/config/rbac/role-reader.yaml @@ -10,6 +10,7 @@ rules: - cloudwatch.services.k8s.aws resources: - metricalarms + - metricstreams verbs: - get - list diff --git a/config/rbac/role-writer.yaml b/config/rbac/role-writer.yaml index 639f349..58888f8 100644 --- a/config/rbac/role-writer.yaml +++ b/config/rbac/role-writer.yaml @@ -10,6 +10,7 @@ rules: - cloudwatch.services.k8s.aws resources: - metricalarms + - metricstreams verbs: - create - delete @@ -22,6 +23,7 @@ rules: - cloudwatch.services.k8s.aws resources: - metricalarms + - metricstreams verbs: - get - patch diff --git a/generator.yaml b/generator.yaml index a6ac8c2..2e18780 100644 --- a/generator.yaml +++ b/generator.yaml @@ -4,7 +4,7 @@ ignore: - CompositeAlarm - InsightRule - ManagedInsightRule - - MetricStream + # - MetricStream model_name: monitoring operations: DeleteAlarms: @@ -21,6 +21,11 @@ operations: - Create - Update resource_name: MetricAlarm + PutMetricStream: + operation_type: + - Create + - Update + resource_name: MetricStream resources: MetricAlarm: fields: @@ -36,4 +41,4 @@ resources: sdk_read_many_post_build_request: template_path: hooks/metricalarm/sdk_read_many_post_build_request.go.tpl sdk_delete_post_build_request: - template_path: hooks/metricalarm/sdk_delete_post_build_request.go.tpl + template_path: hooks/metricalarm/sdk_delete_post_build_request.go.tpl \ No newline at end of file diff --git a/helm/crds/cloudwatch.services.k8s.aws_metricstreams.yaml b/helm/crds/cloudwatch.services.k8s.aws_metricstreams.yaml new file mode 100644 index 0000000..b02689e --- /dev/null +++ b/helm/crds/cloudwatch.services.k8s.aws_metricstreams.yaml @@ -0,0 +1,269 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.2 + name: 
metricstreams.cloudwatch.services.k8s.aws +spec: + group: cloudwatch.services.k8s.aws + names: + kind: MetricStream + listKind: MetricStreamList + plural: metricstreams + singular: metricstream + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: MetricStream is the Schema for the MetricStreams API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MetricStreamSpec defines the desired state of MetricStream. + properties: + excludeFilters: + description: |- + If you specify this parameter, the stream sends metrics from all metric namespaces + except for the namespaces that you specify here. + + You cannot include ExcludeFilters and IncludeFilters in the same operation. + items: + description: |- + This structure contains a metric namespace and optionally, a list of metric + names, to either include in a metric stream or exclude from a metric stream. + + A metric stream's filters can include up to 1000 total names. This limit + applies to the sum of namespace names and metric names in the filters. For + example, this could include 10 metric namespace filters with 99 metrics each, + or 20 namespace filters with 49 metrics specified in each filter. 
+ properties: + metricNames: + items: + type: string + type: array + namespace: + type: string + type: object + type: array + firehoseARN: + description: |- + The ARN of the Amazon Kinesis Data Firehose delivery stream to use for this + metric stream. This Amazon Kinesis Data Firehose delivery stream must already + exist and must be in the same account as the metric stream. + type: string + includeFilters: + description: |- + If you specify this parameter, the stream sends only the metrics from the + metric namespaces that you specify here. + + You cannot include IncludeFilters and ExcludeFilters in the same operation. + items: + description: |- + This structure contains a metric namespace and optionally, a list of metric + names, to either include in a metric stream or exclude from a metric stream. + + A metric stream's filters can include up to 1000 total names. This limit + applies to the sum of namespace names and metric names in the filters. For + example, this could include 10 metric namespace filters with 99 metrics each, + or 20 namespace filters with 49 metrics specified in each filter. + properties: + metricNames: + items: + type: string + type: array + namespace: + type: string + type: object + type: array + includeLinkedAccountsMetrics: + description: |- + If you are creating a metric stream in a monitoring account, specify true + to include metrics from source accounts in the metric stream. + type: boolean + name: + description: |- + If you are creating a new metric stream, this is the name for the new stream. + The name must be different than the names of other metric streams in this + account and Region. + + If you are updating a metric stream, specify the name of that stream here. + + Valid characters are A-Z, a-z, 0-9, "-" and "_". + type: string + outputFormat: + description: |- + The output format for the stream. Valid values are json, opentelemetry1.0, + and opentelemetry0.7. 
For more information about metric stream output formats, + see Metric streams output formats (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-metric-streams-formats.html). + type: string + roleARN: + description: |- + The ARN of an IAM role that this metric stream will use to access Amazon + Kinesis Data Firehose resources. This IAM role must already exist and must + be in the same account as the metric stream. This IAM role must include the + following permissions: + + - firehose:PutRecord + + - firehose:PutRecordBatch + type: string + statisticsConfigurations: + description: |- + By default, a metric stream always sends the MAX, MIN, SUM, and SAMPLECOUNT + statistics for each metric that is streamed. You can use this parameter to + have the metric stream also send additional statistics in the stream. This + array can have up to 100 members. + + For each entry in this array, you specify one or more metrics and the list + of additional statistics to stream for those metrics. The additional statistics + that you can stream depend on the stream's OutputFormat. If the OutputFormat + is json, you can stream any additional statistic that is supported by CloudWatch, + listed in CloudWatch statistics definitions (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Statistics-definitions.html.html). + If the OutputFormat is opentelemetry1.0 or opentelemetry0.7, you can stream + percentile statistics such as p95, p99.9, and so on. + items: + description: |- + By default, a metric stream always sends the MAX, MIN, SUM, and SAMPLECOUNT + statistics for each metric that is streamed. This structure contains information + for one metric that includes additional statistics in the stream. For more + information about statistics, see CloudWatch, listed in CloudWatch statistics + definitions (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Statistics-definitions.html.html). 
+ properties: + additionalStatistics: + items: + type: string + type: array + includeMetrics: + items: + description: |- + This object contains the information for one metric that is to be streamed + with additional statistics. + properties: + metricName: + type: string + namespace: + type: string + type: object + type: array + type: object + type: array + tags: + description: |- + A list of key-value pairs to associate with the metric stream. You can associate + as many as 50 tags with a metric stream. + + Tags can help you organize and categorize your resources. You can also use + them to scope user permissions by granting a user permission to access or + change only resources with certain tag values. + + You can use this parameter only when you are creating a new metric stream. + If you are using this operation to update an existing metric stream, any + tags you specify in this parameter are ignored. To change the tags of an + existing metric stream, use TagResource (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_TagResource.html) + or UntagResource (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_UntagResource.html). + items: + description: A key-value pair associated with a CloudWatch resource. + properties: + key: + type: string + value: + type: string + type: object + type: array + required: + - firehoseARN + - name + - outputFormat + - roleARN + type: object + status: + description: MetricStreamStatus defines the observed state of MetricStream + properties: + ackResourceMetadata: + description: |- + All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + that is used to contain resource sync state, account ownership, + constructed ARN for the resource + properties: + arn: + description: |- + ARN is the Amazon Resource Name for the resource. 
This is a + globally-unique identifier and is set only by the ACK service controller + once the controller has orchestrated the creation of the resource OR + when it has verified that an "adopted" resource (a resource where the + ARN annotation was set by the Kubernetes user on the CR) exists and + matches the supplied CR's Spec field values. + https://github.com/aws/aws-controllers-k8s/issues/270 + type: string + ownerAccountID: + description: |- + OwnerAccountID is the AWS Account ID of the account that owns the + backend AWS service API resource. + type: string + region: + description: Region is the AWS region in which the resource exists + or will exist. + type: string + required: + - ownerAccountID + - region + type: object + conditions: + description: |- + All CRs managed by ACK have a common `Status.Conditions` member that + contains a collection of `ackv1alpha1.Condition` objects that describe + the various terminal states of the CR and its backend AWS service API + resource + items: + description: |- + Condition is the common struct used by all CRDs managed by ACK service + controllers to indicate terminal states of the CR and its backend AWS + service API resource + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. 
+ type: string + type: + description: Type is the type of the Condition + type: string + required: + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/helm/templates/_helpers.tpl b/helm/templates/_helpers.tpl index 404fde6..d065145 100644 --- a/helm/templates/_helpers.tpl +++ b/helm/templates/_helpers.tpl @@ -73,6 +73,7 @@ rules: - cloudwatch.services.k8s.aws resources: - metricalarms + - metricstreams verbs: - create - delete @@ -85,6 +86,7 @@ rules: - cloudwatch.services.k8s.aws resources: - metricalarms/status + - metricstreams/status verbs: - get - patch diff --git a/helm/templates/caches-role-binding.yaml b/helm/templates/caches-role-binding.yaml index 49dd619..25a677d 100644 --- a/helm/templates/caches-role-binding.yaml +++ b/helm/templates/caches-role-binding.yaml @@ -1,7 +1,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: {{ include "ack-cloudwatch-controller.app.fullname" . }}-namespace-caches + name: {{ include "ack-cloudwatch-controller.app.fullname" . }}-namespaces-cache labels: app.kubernetes.io/name: {{ include "ack-cloudwatch-controller.app.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} @@ -12,7 +12,7 @@ metadata: roleRef: kind: ClusterRole apiGroup: rbac.authorization.k8s.io - name: {{ include "ack-cloudwatch-controller.app.fullname" . }}-namespace-caches + name: {{ include "ack-cloudwatch-controller.app.fullname" . }}-namespaces-cache subjects: - kind: ServiceAccount name: {{ include "ack-cloudwatch-controller.service-account.name" . 
}} diff --git a/helm/templates/deployment.yaml b/helm/templates/deployment.yaml index b6cc1f2..b40b137 100644 --- a/helm/templates/deployment.yaml +++ b/helm/templates/deployment.yaml @@ -205,7 +205,7 @@ spec: secretName: {{ .Values.aws.credentials.secretName }} {{- end }} {{- if .Values.deployment.extraVolumes }} - {{ toYaml .Values.deployment.extraVolumes | indent 8 }} + {{- toYaml .Values.deployment.extraVolumes | nindent 8 }} {{- end }} {{- end }} {{- with .Values.deployment.strategy }} diff --git a/helm/templates/role-reader.yaml b/helm/templates/role-reader.yaml index ec785dd..f2343e7 100644 --- a/helm/templates/role-reader.yaml +++ b/helm/templates/role-reader.yaml @@ -17,6 +17,7 @@ rules: - cloudwatch.services.k8s.aws resources: - metricalarms + - metricstreams verbs: - get - list diff --git a/helm/templates/role-writer.yaml b/helm/templates/role-writer.yaml index 4bf7ba2..77fe2be 100644 --- a/helm/templates/role-writer.yaml +++ b/helm/templates/role-writer.yaml @@ -17,6 +17,7 @@ rules: - cloudwatch.services.k8s.aws resources: - metricalarms + - metricstreams verbs: - create - delete @@ -29,6 +30,7 @@ rules: - cloudwatch.services.k8s.aws resources: - metricalarms + - metricstreams verbs: - get - patch diff --git a/helm/values.yaml b/helm/values.yaml index c680764..83f4b15 100644 --- a/helm/values.yaml +++ b/helm/values.yaml @@ -146,6 +146,7 @@ reconcile: # If specified, only the listed resource kinds will be reconciled. resources: - MetricAlarm + - MetricStream serviceAccount: # Specifies whether a service account should be created diff --git a/pkg/resource/metric_stream/delta.go b/pkg/resource/metric_stream/delta.go new file mode 100644 index 0000000..68c4781 --- /dev/null +++ b/pkg/resource/metric_stream/delta.go @@ -0,0 +1,109 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package metric_stream + +import ( + "bytes" + "reflect" + + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" +) + +// Hack to avoid import errors during build... +var ( + _ = &bytes.Buffer{} + _ = &reflect.Method{} + _ = &acktags.Tags{} +) + +// newResourceDelta returns a new `ackcompare.Delta` used to compare two +// resources +func newResourceDelta( + a *resource, + b *resource, +) *ackcompare.Delta { + delta := ackcompare.NewDelta() + if (a == nil && b != nil) || + (a != nil && b == nil) { + delta.Add("", a, b) + return delta + } + + if len(a.ko.Spec.ExcludeFilters) != len(b.ko.Spec.ExcludeFilters) { + delta.Add("Spec.ExcludeFilters", a.ko.Spec.ExcludeFilters, b.ko.Spec.ExcludeFilters) + } else if len(a.ko.Spec.ExcludeFilters) > 0 { + if !reflect.DeepEqual(a.ko.Spec.ExcludeFilters, b.ko.Spec.ExcludeFilters) { + delta.Add("Spec.ExcludeFilters", a.ko.Spec.ExcludeFilters, b.ko.Spec.ExcludeFilters) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.FirehoseARN, b.ko.Spec.FirehoseARN) { + delta.Add("Spec.FirehoseARN", a.ko.Spec.FirehoseARN, b.ko.Spec.FirehoseARN) + } else if a.ko.Spec.FirehoseARN != nil && b.ko.Spec.FirehoseARN != nil { + if *a.ko.Spec.FirehoseARN != *b.ko.Spec.FirehoseARN { + delta.Add("Spec.FirehoseARN", a.ko.Spec.FirehoseARN, b.ko.Spec.FirehoseARN) + } + } + if len(a.ko.Spec.IncludeFilters) != len(b.ko.Spec.IncludeFilters) { + delta.Add("Spec.IncludeFilters", a.ko.Spec.IncludeFilters, b.ko.Spec.IncludeFilters) + } else if len(a.ko.Spec.IncludeFilters) > 0 { + 
if !reflect.DeepEqual(a.ko.Spec.IncludeFilters, b.ko.Spec.IncludeFilters) { + delta.Add("Spec.IncludeFilters", a.ko.Spec.IncludeFilters, b.ko.Spec.IncludeFilters) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.IncludeLinkedAccountsMetrics, b.ko.Spec.IncludeLinkedAccountsMetrics) { + delta.Add("Spec.IncludeLinkedAccountsMetrics", a.ko.Spec.IncludeLinkedAccountsMetrics, b.ko.Spec.IncludeLinkedAccountsMetrics) + } else if a.ko.Spec.IncludeLinkedAccountsMetrics != nil && b.ko.Spec.IncludeLinkedAccountsMetrics != nil { + if *a.ko.Spec.IncludeLinkedAccountsMetrics != *b.ko.Spec.IncludeLinkedAccountsMetrics { + delta.Add("Spec.IncludeLinkedAccountsMetrics", a.ko.Spec.IncludeLinkedAccountsMetrics, b.ko.Spec.IncludeLinkedAccountsMetrics) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.Name, b.ko.Spec.Name) { + delta.Add("Spec.Name", a.ko.Spec.Name, b.ko.Spec.Name) + } else if a.ko.Spec.Name != nil && b.ko.Spec.Name != nil { + if *a.ko.Spec.Name != *b.ko.Spec.Name { + delta.Add("Spec.Name", a.ko.Spec.Name, b.ko.Spec.Name) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.OutputFormat, b.ko.Spec.OutputFormat) { + delta.Add("Spec.OutputFormat", a.ko.Spec.OutputFormat, b.ko.Spec.OutputFormat) + } else if a.ko.Spec.OutputFormat != nil && b.ko.Spec.OutputFormat != nil { + if *a.ko.Spec.OutputFormat != *b.ko.Spec.OutputFormat { + delta.Add("Spec.OutputFormat", a.ko.Spec.OutputFormat, b.ko.Spec.OutputFormat) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.RoleARN, b.ko.Spec.RoleARN) { + delta.Add("Spec.RoleARN", a.ko.Spec.RoleARN, b.ko.Spec.RoleARN) + } else if a.ko.Spec.RoleARN != nil && b.ko.Spec.RoleARN != nil { + if *a.ko.Spec.RoleARN != *b.ko.Spec.RoleARN { + delta.Add("Spec.RoleARN", a.ko.Spec.RoleARN, b.ko.Spec.RoleARN) + } + } + if len(a.ko.Spec.StatisticsConfigurations) != len(b.ko.Spec.StatisticsConfigurations) { + delta.Add("Spec.StatisticsConfigurations", a.ko.Spec.StatisticsConfigurations, b.ko.Spec.StatisticsConfigurations) + } else if 
len(a.ko.Spec.StatisticsConfigurations) > 0 { + if !reflect.DeepEqual(a.ko.Spec.StatisticsConfigurations, b.ko.Spec.StatisticsConfigurations) { + delta.Add("Spec.StatisticsConfigurations", a.ko.Spec.StatisticsConfigurations, b.ko.Spec.StatisticsConfigurations) + } + } + desiredACKTags, _ := convertToOrderedACKTags(a.ko.Spec.Tags) + latestACKTags, _ := convertToOrderedACKTags(b.ko.Spec.Tags) + if !ackcompare.MapStringStringEqual(desiredACKTags, latestACKTags) { + delta.Add("Spec.Tags", a.ko.Spec.Tags, b.ko.Spec.Tags) + } + + return delta +} diff --git a/pkg/resource/metric_stream/descriptor.go b/pkg/resource/metric_stream/descriptor.go new file mode 100644 index 0000000..e6ecada --- /dev/null +++ b/pkg/resource/metric_stream/descriptor.go @@ -0,0 +1,155 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. 
+ +package metric_stream + +import ( + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + rtclient "sigs.k8s.io/controller-runtime/pkg/client" + k8sctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + svcapitypes "github.com/aws-controllers-k8s/cloudwatch-controller/apis/v1alpha1" +) + +const ( + FinalizerString = "finalizers.cloudwatch.services.k8s.aws/MetricStream" +) + +var ( + GroupVersionResource = svcapitypes.GroupVersion.WithResource("metricstreams") + GroupKind = metav1.GroupKind{ + Group: "cloudwatch.services.k8s.aws", + Kind: "MetricStream", + } +) + +// resourceDescriptor implements the +// `aws-service-operator-k8s/pkg/types.AWSResourceDescriptor` interface +type resourceDescriptor struct { +} + +// GroupVersionKind returns a Kubernetes schema.GroupVersionKind struct that +// describes the API Group, Version and Kind of CRs described by the descriptor +func (d *resourceDescriptor) GroupVersionKind() schema.GroupVersionKind { + return svcapitypes.GroupVersion.WithKind(GroupKind.Kind) +} + +// EmptyRuntimeObject returns an empty object prototype that may be used in +// apimachinery and k8s client operations +func (d *resourceDescriptor) EmptyRuntimeObject() rtclient.Object { + return &svcapitypes.MetricStream{} +} + +// ResourceFromRuntimeObject returns an AWSResource that has been initialized +// with the supplied runtime.Object +func (d *resourceDescriptor) ResourceFromRuntimeObject( + obj rtclient.Object, +) acktypes.AWSResource { + return &resource{ + ko: obj.(*svcapitypes.MetricStream), + } +} + +// Delta returns an `ackcompare.Delta` object containing the difference between +// one `AWSResource` and another. 
+func (d *resourceDescriptor) Delta(a, b acktypes.AWSResource) *ackcompare.Delta { + return newResourceDelta(a.(*resource), b.(*resource)) +} + +// IsManaged returns true if the supplied AWSResource is under the management +// of an ACK service controller. What this means in practice is that the +// underlying custom resource (CR) in the AWSResource has had a +// resource-specific finalizer associated with it. +func (d *resourceDescriptor) IsManaged( + res acktypes.AWSResource, +) bool { + obj := res.RuntimeObject() + if obj == nil { + // Should not happen. If it does, there is a bug in the code + panic("nil RuntimeMetaObject in AWSResource") + } + // Remove use of custom code once + // https://github.com/kubernetes-sigs/controller-runtime/issues/994 is + // fixed. This should be able to be: + // + // return k8sctrlutil.ContainsFinalizer(obj, FinalizerString) + return containsFinalizer(obj, FinalizerString) +} + +// Remove once https://github.com/kubernetes-sigs/controller-runtime/issues/994 +// is fixed. +func containsFinalizer(obj rtclient.Object, finalizer string) bool { + f := obj.GetFinalizers() + for _, e := range f { + if e == finalizer { + return true + } + } + return false +} + +// MarkManaged places the supplied resource under the management of ACK. What +// this typically means is that the resource manager will decorate the +// underlying custom resource (CR) with a finalizer that indicates ACK is +// managing the resource and the underlying CR may not be deleted until ACK is +// finished cleaning up any backend AWS service resources associated with the +// CR. +func (d *resourceDescriptor) MarkManaged( + res acktypes.AWSResource, +) { + obj := res.RuntimeObject() + if obj == nil { + // Should not happen. If it does, there is a bug in the code + panic("nil RuntimeMetaObject in AWSResource") + } + k8sctrlutil.AddFinalizer(obj, FinalizerString) +} + +// MarkUnmanaged removes the supplied resource from management by ACK. 
What +// this typically means is that the resource manager will remove a finalizer +// underlying custom resource (CR) that indicates ACK is managing the resource. +// This will allow the Kubernetes API server to delete the underlying CR. +func (d *resourceDescriptor) MarkUnmanaged( + res acktypes.AWSResource, +) { + obj := res.RuntimeObject() + if obj == nil { + // Should not happen. If it does, there is a bug in the code + panic("nil RuntimeMetaObject in AWSResource") + } + k8sctrlutil.RemoveFinalizer(obj, FinalizerString) +} + +// MarkAdopted places descriptors on the custom resource that indicate the +// resource was not created from within ACK. +func (d *resourceDescriptor) MarkAdopted( + res acktypes.AWSResource, +) { + obj := res.RuntimeObject() + if obj == nil { + // Should not happen. If it does, there is a bug in the code + panic("nil RuntimeObject in AWSResource") + } + curr := obj.GetAnnotations() + if curr == nil { + curr = make(map[string]string) + } + curr[ackv1alpha1.AnnotationAdopted] = "true" + obj.SetAnnotations(curr) +} diff --git a/pkg/resource/metric_stream/identifiers.go b/pkg/resource/metric_stream/identifiers.go new file mode 100644 index 0000000..3fe9aaa --- /dev/null +++ b/pkg/resource/metric_stream/identifiers.go @@ -0,0 +1,55 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. 
+ +package metric_stream + +import ( + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" +) + +// resourceIdentifiers implements the +// `aws-service-operator-k8s/pkg/types.AWSResourceIdentifiers` interface +type resourceIdentifiers struct { + meta *ackv1alpha1.ResourceMetadata +} + +// ARN returns the AWS Resource Name for the backend AWS resource. If nil, +// this means the resource has not yet been created in the backend AWS +// service. +func (ri *resourceIdentifiers) ARN() *ackv1alpha1.AWSResourceName { + if ri.meta != nil { + return ri.meta.ARN + } + return nil +} + +// OwnerAccountID returns the AWS account identifier in which the +// backend AWS resource resides, or nil if this information is not known +// for the resource +func (ri *resourceIdentifiers) OwnerAccountID() *ackv1alpha1.AWSAccountID { + if ri.meta != nil { + return ri.meta.OwnerAccountID + } + return nil +} + +// Region returns the AWS region in which the resource exists, or +// nil if this information is not known. +func (ri *resourceIdentifiers) Region() *ackv1alpha1.AWSRegion { + if ri.meta != nil { + return ri.meta.Region + } + return nil +} diff --git a/pkg/resource/metric_stream/manager.go b/pkg/resource/metric_stream/manager.go new file mode 100644 index 0000000..6bece12 --- /dev/null +++ b/pkg/resource/metric_stream/manager.go @@ -0,0 +1,404 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. 
+ +package metric_stream + +import ( + "context" + "fmt" + "time" + + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + ackcondition "github.com/aws-controllers-k8s/runtime/pkg/condition" + ackcfg "github.com/aws-controllers-k8s/runtime/pkg/config" + ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" + ackmetrics "github.com/aws-controllers-k8s/runtime/pkg/metrics" + ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" + ackrt "github.com/aws-controllers-k8s/runtime/pkg/runtime" + ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" + acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" + acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" + ackutil "github.com/aws-controllers-k8s/runtime/pkg/util" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/cloudwatch" + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + + svcapitypes "github.com/aws-controllers-k8s/cloudwatch-controller/apis/v1alpha1" +) + +var ( + _ = ackutil.InStrings + _ = acktags.NewTags() + _ = ackrt.MissingImageTagValue + _ = svcapitypes.MetricStream{} +) + +// +kubebuilder:rbac:groups=cloudwatch.services.k8s.aws,resources=metricstreams,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=cloudwatch.services.k8s.aws,resources=metricstreams/status,verbs=get;update;patch + +var lateInitializeFieldNames = []string{} + +// resourceManager is responsible for providing a consistent way to perform +// CRUD operations in a backend AWS service API for Book custom resources. 
+type resourceManager struct { + // cfg is a copy of the ackcfg.Config object passed on start of the service + // controller + cfg ackcfg.Config + // clientcfg is a copy of the client configuration passed on start of the + // service controller + clientcfg aws.Config + // log refers to the logr.Logger object handling logging for the service + // controller + log logr.Logger + // metrics contains a collection of Prometheus metric objects that the + // service controller and its reconcilers track + metrics *ackmetrics.Metrics + // rr is the Reconciler which can be used for various utility + // functions such as querying for Secret values given a SecretReference + rr acktypes.Reconciler + // awsAccountID is the AWS account identifier that contains the resources + // managed by this resource manager + awsAccountID ackv1alpha1.AWSAccountID + // The AWS Region that this resource manager targets + awsRegion ackv1alpha1.AWSRegion + // sdk is a pointer to the AWS service API client exposed by the + // aws-sdk-go-v2/services/{alias} package. + sdkapi *svcsdk.Client +} + +// concreteResource returns a pointer to a resource from the supplied +// generic AWSResource interface +func (rm *resourceManager) concreteResource( + res acktypes.AWSResource, +) *resource { + // cast the generic interface into a pointer type specific to the concrete + // implementing resource type managed by this resource manager + return res.(*resource) +} + +// ReadOne returns the currently-observed state of the supplied AWSResource in +// the backend AWS service API. +func (rm *resourceManager) ReadOne( + ctx context.Context, + res acktypes.AWSResource, +) (acktypes.AWSResource, error) { + r := rm.concreteResource(res) + if r.ko == nil { + // Should never happen... if it does, it's buggy code. 
+ panic("resource manager's ReadOne() method received resource with nil CR object") + } + observed, err := rm.sdkFind(ctx, r) + mirrorAWSTags(r, observed) + if err != nil { + if observed != nil { + return rm.onError(observed, err) + } + return rm.onError(r, err) + } + return rm.onSuccess(observed) +} + +// Create attempts to create the supplied AWSResource in the backend AWS +// service API, returning an AWSResource representing the newly-created +// resource +func (rm *resourceManager) Create( + ctx context.Context, + res acktypes.AWSResource, +) (acktypes.AWSResource, error) { + r := rm.concreteResource(res) + if r.ko == nil { + // Should never happen... if it does, it's buggy code. + panic("resource manager's Create() method received resource with nil CR object") + } + created, err := rm.sdkCreate(ctx, r) + if err != nil { + if created != nil { + return rm.onError(created, err) + } + return rm.onError(r, err) + } + return rm.onSuccess(created) +} + +// Update attempts to mutate the supplied desired AWSResource in the backend AWS +// service API, returning an AWSResource representing the newly-mutated +// resource. +// Note for specialized logic implementers can check to see how the latest +// observed resource differs from the supplied desired state. The +// higher-level reonciler determines whether or not the desired differs +// from the latest observed and decides whether to call the resource +// manager's Update method +func (rm *resourceManager) Update( + ctx context.Context, + resDesired acktypes.AWSResource, + resLatest acktypes.AWSResource, + delta *ackcompare.Delta, +) (acktypes.AWSResource, error) { + desired := rm.concreteResource(resDesired) + latest := rm.concreteResource(resLatest) + if desired.ko == nil || latest.ko == nil { + // Should never happen... if it does, it's buggy code. 
+ panic("resource manager's Update() method received resource with nil CR object") + } + updated, err := rm.sdkUpdate(ctx, desired, latest, delta) + if err != nil { + if updated != nil { + return rm.onError(updated, err) + } + return rm.onError(latest, err) + } + return rm.onSuccess(updated) +} + +// Delete attempts to destroy the supplied AWSResource in the backend AWS +// service API, returning an AWSResource representing the +// resource being deleted (if delete is asynchronous and takes time) +func (rm *resourceManager) Delete( + ctx context.Context, + res acktypes.AWSResource, +) (acktypes.AWSResource, error) { + r := rm.concreteResource(res) + if r.ko == nil { + // Should never happen... if it does, it's buggy code. + panic("resource manager's Update() method received resource with nil CR object") + } + observed, err := rm.sdkDelete(ctx, r) + if err != nil { + if observed != nil { + return rm.onError(observed, err) + } + return rm.onError(r, err) + } + + return rm.onSuccess(observed) +} + +// ARNFromName returns an AWS Resource Name from a given string name. This +// is useful for constructing ARNs for APIs that require ARNs in their +// GetAttributes operations but all we have (for new CRs at least) is a +// name for the resource +func (rm *resourceManager) ARNFromName(name string) string { + return fmt.Sprintf( + "arn:aws:cloudwatch:%s:%s:%s", + rm.awsRegion, + rm.awsAccountID, + name, + ) +} + +// LateInitialize returns an acktypes.AWSResource after setting the late initialized +// fields from the readOne call. This method will initialize the optional fields +// which were not provided by the k8s user but were defaulted by the AWS service. +// If there are no such fields to be initialized, the returned object is similar to +// object passed in the parameter. 
+func (rm *resourceManager) LateInitialize( + ctx context.Context, + latest acktypes.AWSResource, +) (acktypes.AWSResource, error) { + rlog := ackrtlog.FromContext(ctx) + // If there are no fields to late initialize, do nothing + if len(lateInitializeFieldNames) == 0 { + rlog.Debug("no late initialization required.") + return latest, nil + } + latestCopy := latest.DeepCopy() + lateInitConditionReason := "" + lateInitConditionMessage := "" + observed, err := rm.ReadOne(ctx, latestCopy) + if err != nil { + lateInitConditionMessage = "Unable to complete Read operation required for late initialization" + lateInitConditionReason = "Late Initialization Failure" + ackcondition.SetLateInitialized(latestCopy, corev1.ConditionFalse, &lateInitConditionMessage, &lateInitConditionReason) + ackcondition.SetSynced(latestCopy, corev1.ConditionFalse, nil, nil) + return latestCopy, err + } + lateInitializedRes := rm.lateInitializeFromReadOneOutput(observed, latestCopy) + incompleteInitialization := rm.incompleteLateInitialization(lateInitializedRes) + if incompleteInitialization { + // Add the condition with LateInitialized=False + lateInitConditionMessage = "Late initialization did not complete, requeuing with delay of 5 seconds" + lateInitConditionReason = "Delayed Late Initialization" + ackcondition.SetLateInitialized(lateInitializedRes, corev1.ConditionFalse, &lateInitConditionMessage, &lateInitConditionReason) + ackcondition.SetSynced(lateInitializedRes, corev1.ConditionFalse, nil, nil) + return lateInitializedRes, ackrequeue.NeededAfter(nil, time.Duration(5)*time.Second) + } + // Set LateInitialized condition to True + lateInitConditionMessage = "Late initialization successful" + lateInitConditionReason = "Late initialization successful" + ackcondition.SetLateInitialized(lateInitializedRes, corev1.ConditionTrue, &lateInitConditionMessage, &lateInitConditionReason) + return lateInitializedRes, nil +} + +// incompleteLateInitialization return true if there are fields which were 
supposed to be +// late initialized but are not. If all the fields are late initialized, false is returned +func (rm *resourceManager) incompleteLateInitialization( + res acktypes.AWSResource, +) bool { + return false +} + +// lateInitializeFromReadOneOutput late initializes the 'latest' resource from the 'observed' +// resource and returns 'latest' resource +func (rm *resourceManager) lateInitializeFromReadOneOutput( + observed acktypes.AWSResource, + latest acktypes.AWSResource, +) acktypes.AWSResource { + return latest +} + +// IsSynced returns true if the resource is synced. +func (rm *resourceManager) IsSynced(ctx context.Context, res acktypes.AWSResource) (bool, error) { + r := rm.concreteResource(res) + if r.ko == nil { + // Should never happen... if it does, it's buggy code. + panic("resource manager's IsSynced() method received resource with nil CR object") + } + + return true, nil +} + +// EnsureTags ensures that tags are present inside the AWSResource. +// If the AWSResource does not have any existing resource tags, the 'tags' +// field is initialized and the controller tags are added. +// If the AWSResource has existing resource tags, then controller tags are +// added to the existing resource tags without overriding them. +// If the AWSResource does not support tags, only then the controller tags +// will not be added to the AWSResource. +func (rm *resourceManager) EnsureTags( + ctx context.Context, + res acktypes.AWSResource, + md acktypes.ServiceControllerMetadata, +) error { + r := rm.concreteResource(res) + if r.ko == nil { + // Should never happen... if it does, it's buggy code. 
+	panic("resource manager's EnsureTags method received resource with nil CR object")
+	}
+	defaultTags := ackrt.GetDefaultTags(&rm.cfg, r.ko, md)
+	var existingTags []*svcapitypes.Tag
+	existingTags = r.ko.Spec.Tags
+	resourceTags, keyOrder := convertToOrderedACKTags(existingTags)
+	tags := acktags.Merge(resourceTags, defaultTags)
+	r.ko.Spec.Tags = fromACKTags(tags, keyOrder)
+	return nil
+}
+
+// FilterSystemTags ignores tags that have keys that start with "aws:".
+// This is needed to ensure the controller does not attempt to remove
+// tags set by AWS. This function needs to be called after each Read
+// operation.
+// E.g. resources created with CloudFormation have tags that cannot be
+// removed by an ACK controller
+func (rm *resourceManager) FilterSystemTags(res acktypes.AWSResource) {
+	r := rm.concreteResource(res)
+	if r == nil || r.ko == nil {
+		return
+	}
+	var existingTags []*svcapitypes.Tag
+	existingTags = r.ko.Spec.Tags
+	resourceTags, tagKeyOrder := convertToOrderedACKTags(existingTags)
+	ignoreSystemTags(resourceTags)
+	r.ko.Spec.Tags = fromACKTags(resourceTags, tagKeyOrder)
+}
+
+// mirrorAWSTags ensures that AWS tags are included in the desired resource
+// if they are present in the latest resource. This will ensure that the
+// aws tags are not present in a diff. The logic of the controller will
+// ensure these tags aren't patched to the resource in the cluster, and
+// will only be present to make sure we don't try to remove these tags.
+//
+// Although there are a lot of similarities between this function and
+// EnsureTags, they are very much different.
+// While EnsureTags tries to make sure the resource contains the controller
+// tags, mirrorAWSTags tries to make sure tags injected by AWS are mirrored
+// from the latest resource to the desired resource.
+func mirrorAWSTags(a *resource, b *resource) { + if a == nil || a.ko == nil || b == nil || b.ko == nil { + return + } + var existingLatestTags []*svcapitypes.Tag + var existingDesiredTags []*svcapitypes.Tag + existingDesiredTags = a.ko.Spec.Tags + existingLatestTags = b.ko.Spec.Tags + desiredTags, desiredTagKeyOrder := convertToOrderedACKTags(existingDesiredTags) + latestTags, _ := convertToOrderedACKTags(existingLatestTags) + syncAWSTags(desiredTags, latestTags) + a.ko.Spec.Tags = fromACKTags(desiredTags, desiredTagKeyOrder) +} + +// newResourceManager returns a new struct implementing +// acktypes.AWSResourceManager +// This is for AWS-SDK-GO-V2 - Created newResourceManager With AWS sdk-Go-ClientV2 +func newResourceManager( + cfg ackcfg.Config, + clientcfg aws.Config, + log logr.Logger, + metrics *ackmetrics.Metrics, + rr acktypes.Reconciler, + id ackv1alpha1.AWSAccountID, + region ackv1alpha1.AWSRegion, +) (*resourceManager, error) { + return &resourceManager{ + cfg: cfg, + clientcfg: clientcfg, + log: log, + metrics: metrics, + rr: rr, + awsAccountID: id, + awsRegion: region, + sdkapi: svcsdk.NewFromConfig(clientcfg), + }, nil +} + +// onError updates resource conditions and returns updated resource +// it returns nil if no condition is updated. +func (rm *resourceManager) onError( + r *resource, + err error, +) (acktypes.AWSResource, error) { + if r == nil { + return nil, err + } + r1, updated := rm.updateConditions(r, false, err) + if !updated { + return r, err + } + for _, condition := range r1.Conditions() { + if condition.Type == ackv1alpha1.ConditionTypeTerminal && + condition.Status == corev1.ConditionTrue { + // resource is in Terminal condition + // return Terminal error + return r1, ackerr.Terminal + } + } + return r1, err +} + +// onSuccess updates resource conditions and returns updated resource +// it returns the supplied resource if no condition is updated. 
+func (rm *resourceManager) onSuccess( + r *resource, +) (acktypes.AWSResource, error) { + if r == nil { + return nil, nil + } + r1, updated := rm.updateConditions(r, true, nil) + if !updated { + return r, nil + } + return r1, nil +} diff --git a/pkg/resource/metric_stream/manager_factory.go b/pkg/resource/metric_stream/manager_factory.go new file mode 100644 index 0000000..d23687a --- /dev/null +++ b/pkg/resource/metric_stream/manager_factory.go @@ -0,0 +1,100 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package metric_stream + +import ( + "fmt" + "sync" + + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcfg "github.com/aws-controllers-k8s/runtime/pkg/config" + ackmetrics "github.com/aws-controllers-k8s/runtime/pkg/metrics" + acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/go-logr/logr" + + svcresource "github.com/aws-controllers-k8s/cloudwatch-controller/pkg/resource" +) + +// resourceManagerFactory produces resourceManager objects. It implements the +// `types.AWSResourceManagerFactory` interface. 
+type resourceManagerFactory struct { + sync.RWMutex + // rmCache contains resource managers for a particular AWS account ID + rmCache map[string]*resourceManager +} + +// ResourcePrototype returns an AWSResource that resource managers produced by +// this factory will handle +func (f *resourceManagerFactory) ResourceDescriptor() acktypes.AWSResourceDescriptor { + return &resourceDescriptor{} +} + +// ManagerFor returns a resource manager object that can manage resources for a +// supplied AWS account +func (f *resourceManagerFactory) ManagerFor( + cfg ackcfg.Config, + clientcfg aws.Config, + log logr.Logger, + metrics *ackmetrics.Metrics, + rr acktypes.Reconciler, + id ackv1alpha1.AWSAccountID, + region ackv1alpha1.AWSRegion, + roleARN ackv1alpha1.AWSResourceName, +) (acktypes.AWSResourceManager, error) { + // We use the account ID, region, and role ARN to uniquely identify a + // resource manager. This helps us to avoid creating multiple resource + // managers for the same account/region/roleARN combination. + rmId := fmt.Sprintf("%s/%s/%s", id, region, roleARN) + f.RLock() + rm, found := f.rmCache[rmId] + f.RUnlock() + + if found { + return rm, nil + } + + f.Lock() + defer f.Unlock() + + rm, err := newResourceManager(cfg, clientcfg, log, metrics, rr, id, region) + if err != nil { + return nil, err + } + f.rmCache[rmId] = rm + return rm, nil +} + +// IsAdoptable returns true if the resource is able to be adopted +func (f *resourceManagerFactory) IsAdoptable() bool { + return true +} + +// RequeueOnSuccessSeconds returns true if the resource should be requeued after specified seconds +// Default is false which means resource will not be requeued after success. 
+func (f *resourceManagerFactory) RequeueOnSuccessSeconds() int { + return 0 +} + +func newResourceManagerFactory() *resourceManagerFactory { + return &resourceManagerFactory{ + rmCache: map[string]*resourceManager{}, + } +} + +func init() { + svcresource.RegisterManagerFactory(newResourceManagerFactory()) +} diff --git a/pkg/resource/metric_stream/references.go b/pkg/resource/metric_stream/references.go new file mode 100644 index 0000000..240c7b8 --- /dev/null +++ b/pkg/resource/metric_stream/references.go @@ -0,0 +1,57 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package metric_stream + +import ( + "context" + + "sigs.k8s.io/controller-runtime/pkg/client" + + acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" + + svcapitypes "github.com/aws-controllers-k8s/cloudwatch-controller/apis/v1alpha1" +) + +// ClearResolvedReferences removes any reference values that were made +// concrete in the spec. It returns a copy of the input AWSResource which +// contains the original *Ref values, but none of their respective concrete +// values. 
+func (rm *resourceManager) ClearResolvedReferences(res acktypes.AWSResource) acktypes.AWSResource { + ko := rm.concreteResource(res).ko.DeepCopy() + + return &resource{ko} +} + +// ResolveReferences finds if there are any Reference field(s) present +// inside AWSResource passed in the parameter and attempts to resolve those +// reference field(s) into their respective target field(s). It returns a +// copy of the input AWSResource with resolved reference(s), a boolean which +// is set to true if the resource contains any references (regardless of if +// they are resolved successfully) and an error if the passed AWSResource's +// reference field(s) could not be resolved. +func (rm *resourceManager) ResolveReferences( + ctx context.Context, + apiReader client.Reader, + res acktypes.AWSResource, +) (acktypes.AWSResource, bool, error) { + return res, false, nil +} + +// validateReferenceFields validates the reference field and corresponding +// identifier field. +func validateReferenceFields(ko *svcapitypes.MetricStream) error { + return nil +} diff --git a/pkg/resource/metric_stream/resource.go b/pkg/resource/metric_stream/resource.go new file mode 100644 index 0000000..0c28ca4 --- /dev/null +++ b/pkg/resource/metric_stream/resource.go @@ -0,0 +1,113 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. 
+ +package metric_stream + +import ( + "fmt" + + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackerrors "github.com/aws-controllers-k8s/runtime/pkg/errors" + acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + rtclient "sigs.k8s.io/controller-runtime/pkg/client" + + svcapitypes "github.com/aws-controllers-k8s/cloudwatch-controller/apis/v1alpha1" +) + +// Hack to avoid import errors during build... +var ( + _ = &ackerrors.MissingNameIdentifier +) + +// resource implements the `aws-controller-k8s/runtime/pkg/types.AWSResource` +// interface +type resource struct { + // The Kubernetes-native CR representing the resource + ko *svcapitypes.MetricStream +} + +// Identifiers returns an AWSResourceIdentifiers object containing various +// identifying information, including the AWS account ID that owns the +// resource, the resource's AWS Resource Name (ARN) +func (r *resource) Identifiers() acktypes.AWSResourceIdentifiers { + return &resourceIdentifiers{r.ko.Status.ACKResourceMetadata} +} + +// IsBeingDeleted returns true if the Kubernetes resource has a non-zero +// deletion timestamp +func (r *resource) IsBeingDeleted() bool { + return !r.ko.DeletionTimestamp.IsZero() +} + +// RuntimeObject returns the Kubernetes apimachinery/runtime representation of +// the AWSResource +func (r *resource) RuntimeObject() rtclient.Object { + return r.ko +} + +// MetaObject returns the Kubernetes apimachinery/apis/meta/v1.Object +// representation of the AWSResource +func (r *resource) MetaObject() metav1.Object { + return r.ko.GetObjectMeta() +} + +// Conditions returns the ACK Conditions collection for the AWSResource +func (r *resource) Conditions() []*ackv1alpha1.Condition { + return r.ko.Status.Conditions +} + +// ReplaceConditions sets the Conditions status field for the resource +func (r *resource) ReplaceConditions(conditions []*ackv1alpha1.Condition) { + r.ko.Status.Conditions = conditions +} 
+ +// SetObjectMeta sets the ObjectMeta field for the resource +func (r *resource) SetObjectMeta(meta metav1.ObjectMeta) { + r.ko.ObjectMeta = meta +} + +// SetStatus will set the Status field for the resource +func (r *resource) SetStatus(desired acktypes.AWSResource) { + r.ko.Status = desired.(*resource).ko.Status +} + +// SetIdentifiers sets the Spec or Status field that is referenced as the unique +// resource identifier +func (r *resource) SetIdentifiers(identifier *ackv1alpha1.AWSIdentifiers) error { + if identifier.NameOrID == "" { + return ackerrors.MissingNameIdentifier + } + r.ko.Spec.Name = &identifier.NameOrID + + return nil +} + +// PopulateResourceFromAnnotation populates the fields passed from adoption annotation +func (r *resource) PopulateResourceFromAnnotation(fields map[string]string) error { + tmp, ok := fields["name"] + if !ok { + return ackerrors.NewTerminalError(fmt.Errorf("required field missing: name")) + } + r.ko.Spec.Name = &tmp + + return nil +} + +// DeepCopy will return a copy of the resource +func (r *resource) DeepCopy() acktypes.AWSResource { + koCopy := r.ko.DeepCopy() + return &resource{koCopy} +} diff --git a/pkg/resource/metric_stream/sdk.go b/pkg/resource/metric_stream/sdk.go new file mode 100644 index 0000000..ace961a --- /dev/null +++ b/pkg/resource/metric_stream/sdk.go @@ -0,0 +1,619 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. 
+ +package metric_stream + +import ( + "context" + "errors" + "fmt" + "reflect" + "strings" + + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + ackcondition "github.com/aws-controllers-k8s/runtime/pkg/condition" + ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" + ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" + ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/cloudwatch" + svcsdktypes "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" + smithy "github.com/aws/smithy-go" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + svcapitypes "github.com/aws-controllers-k8s/cloudwatch-controller/apis/v1alpha1" +) + +// Hack to avoid import errors during build... +var ( + _ = &metav1.Time{} + _ = strings.ToLower("") + _ = &svcsdk.Client{} + _ = &svcapitypes.MetricStream{} + _ = ackv1alpha1.AWSAccountID("") + _ = &ackerr.NotFound + _ = &ackcondition.NotManagedMessage + _ = &reflect.Value{} + _ = fmt.Sprintf("") + _ = &ackrequeue.NoRequeue{} + _ = &aws.Config{} +) + +// sdkFind returns SDK-specific information about a supplied resource +func (rm *resourceManager) sdkFind( + ctx context.Context, + r *resource, +) (latest *resource, err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.sdkFind") + defer func() { + exit(err) + }() + // If any required fields in the input shape are missing, AWS resource is + // not created yet. Return NotFound here to indicate to callers that the + // resource isn't yet created. 
+ if rm.requiredFieldsMissingFromReadOneInput(r) { + return nil, ackerr.NotFound + } + + input, err := rm.newDescribeRequestPayload(r) + if err != nil { + return nil, err + } + + var resp *svcsdk.GetMetricStreamOutput + resp, err = rm.sdkapi.GetMetricStream(ctx, input) + rm.metrics.RecordAPICall("READ_ONE", "GetMetricStream", err) + if err != nil { + var awsErr smithy.APIError + if errors.As(err, &awsErr) && awsErr.ErrorCode() == "ResourceNotFoundException" { + return nil, ackerr.NotFound + } + return nil, err + } + + // Merge in the information we read from the API call above to the copy of + // the original Kubernetes object we passed to the function + ko := r.ko.DeepCopy() + + if ko.Status.ACKResourceMetadata == nil { + ko.Status.ACKResourceMetadata = &ackv1alpha1.ResourceMetadata{} + } + if resp.Arn != nil { + arn := ackv1alpha1.AWSResourceName(*resp.Arn) + ko.Status.ACKResourceMetadata.ARN = &arn + } + if resp.ExcludeFilters != nil { + f2 := []*svcapitypes.MetricStreamFilter{} + for _, f2iter := range resp.ExcludeFilters { + f2elem := &svcapitypes.MetricStreamFilter{} + if f2iter.MetricNames != nil { + f2elem.MetricNames = aws.StringSlice(f2iter.MetricNames) + } + if f2iter.Namespace != nil { + f2elem.Namespace = f2iter.Namespace + } + f2 = append(f2, f2elem) + } + ko.Spec.ExcludeFilters = f2 + } else { + ko.Spec.ExcludeFilters = nil + } + if resp.FirehoseArn != nil { + ko.Spec.FirehoseARN = resp.FirehoseArn + } else { + ko.Spec.FirehoseARN = nil + } + if resp.IncludeFilters != nil { + f4 := []*svcapitypes.MetricStreamFilter{} + for _, f4iter := range resp.IncludeFilters { + f4elem := &svcapitypes.MetricStreamFilter{} + if f4iter.MetricNames != nil { + f4elem.MetricNames = aws.StringSlice(f4iter.MetricNames) + } + if f4iter.Namespace != nil { + f4elem.Namespace = f4iter.Namespace + } + f4 = append(f4, f4elem) + } + ko.Spec.IncludeFilters = f4 + } else { + ko.Spec.IncludeFilters = nil + } + if resp.IncludeLinkedAccountsMetrics != nil { + 
ko.Spec.IncludeLinkedAccountsMetrics = resp.IncludeLinkedAccountsMetrics + } else { + ko.Spec.IncludeLinkedAccountsMetrics = nil + } + if resp.Name != nil { + ko.Spec.Name = resp.Name + } else { + ko.Spec.Name = nil + } + if resp.OutputFormat != "" { + ko.Spec.OutputFormat = aws.String(string(resp.OutputFormat)) + } else { + ko.Spec.OutputFormat = nil + } + if resp.RoleArn != nil { + ko.Spec.RoleARN = resp.RoleArn + } else { + ko.Spec.RoleARN = nil + } + if resp.StatisticsConfigurations != nil { + f11 := []*svcapitypes.MetricStreamStatisticsConfiguration{} + for _, f11iter := range resp.StatisticsConfigurations { + f11elem := &svcapitypes.MetricStreamStatisticsConfiguration{} + if f11iter.AdditionalStatistics != nil { + f11elem.AdditionalStatistics = aws.StringSlice(f11iter.AdditionalStatistics) + } + if f11iter.IncludeMetrics != nil { + f11elemf1 := []*svcapitypes.MetricStreamStatisticsMetric{} + for _, f11elemf1iter := range f11iter.IncludeMetrics { + f11elemf1elem := &svcapitypes.MetricStreamStatisticsMetric{} + if f11elemf1iter.MetricName != nil { + f11elemf1elem.MetricName = f11elemf1iter.MetricName + } + if f11elemf1iter.Namespace != nil { + f11elemf1elem.Namespace = f11elemf1iter.Namespace + } + f11elemf1 = append(f11elemf1, f11elemf1elem) + } + f11elem.IncludeMetrics = f11elemf1 + } + f11 = append(f11, f11elem) + } + ko.Spec.StatisticsConfigurations = f11 + } else { + ko.Spec.StatisticsConfigurations = nil + } + + rm.setStatusDefaults(ko) + return &resource{ko}, nil +} + +// requiredFieldsMissingFromReadOneInput returns true if there are any fields +// for the ReadOne Input shape that are required but not present in the +// resource's Spec or Status +func (rm *resourceManager) requiredFieldsMissingFromReadOneInput( + r *resource, +) bool { + return r.ko.Spec.Name == nil + +} + +// newDescribeRequestPayload returns SDK-specific struct for the HTTP request +// payload of the Describe API call for the resource +func (rm *resourceManager) 
newDescribeRequestPayload( + r *resource, +) (*svcsdk.GetMetricStreamInput, error) { + res := &svcsdk.GetMetricStreamInput{} + + if r.ko.Spec.Name != nil { + res.Name = r.ko.Spec.Name + } + + return res, nil +} + +// sdkCreate creates the supplied resource in the backend AWS service API and +// returns a copy of the resource with resource fields (in both Spec and +// Status) filled in with values from the CREATE API operation's Output shape. +func (rm *resourceManager) sdkCreate( + ctx context.Context, + desired *resource, +) (created *resource, err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.sdkCreate") + defer func() { + exit(err) + }() + input, err := rm.newCreateRequestPayload(ctx, desired) + if err != nil { + return nil, err + } + + var resp *svcsdk.PutMetricStreamOutput + _ = resp + resp, err = rm.sdkapi.PutMetricStream(ctx, input) + rm.metrics.RecordAPICall("CREATE", "PutMetricStream", err) + if err != nil { + return nil, err + } + // Merge in the information we read from the API call above to the copy of + // the original Kubernetes object we passed to the function + ko := desired.ko.DeepCopy() + + if ko.Status.ACKResourceMetadata == nil { + ko.Status.ACKResourceMetadata = &ackv1alpha1.ResourceMetadata{} + } + if resp.Arn != nil { + arn := ackv1alpha1.AWSResourceName(*resp.Arn) + ko.Status.ACKResourceMetadata.ARN = &arn + } + + rm.setStatusDefaults(ko) + return &resource{ko}, nil +} + +// newCreateRequestPayload returns an SDK-specific struct for the HTTP request +// payload of the Create API call for the resource +func (rm *resourceManager) newCreateRequestPayload( + ctx context.Context, + r *resource, +) (*svcsdk.PutMetricStreamInput, error) { + res := &svcsdk.PutMetricStreamInput{} + + if r.ko.Spec.ExcludeFilters != nil { + f0 := []svcsdktypes.MetricStreamFilter{} + for _, f0iter := range r.ko.Spec.ExcludeFilters { + f0elem := &svcsdktypes.MetricStreamFilter{} + if f0iter.MetricNames != nil { + f0elem.MetricNames = 
aws.ToStringSlice(f0iter.MetricNames) + } + if f0iter.Namespace != nil { + f0elem.Namespace = f0iter.Namespace + } + f0 = append(f0, *f0elem) + } + res.ExcludeFilters = f0 + } + if r.ko.Spec.FirehoseARN != nil { + res.FirehoseArn = r.ko.Spec.FirehoseARN + } + if r.ko.Spec.IncludeFilters != nil { + f2 := []svcsdktypes.MetricStreamFilter{} + for _, f2iter := range r.ko.Spec.IncludeFilters { + f2elem := &svcsdktypes.MetricStreamFilter{} + if f2iter.MetricNames != nil { + f2elem.MetricNames = aws.ToStringSlice(f2iter.MetricNames) + } + if f2iter.Namespace != nil { + f2elem.Namespace = f2iter.Namespace + } + f2 = append(f2, *f2elem) + } + res.IncludeFilters = f2 + } + if r.ko.Spec.IncludeLinkedAccountsMetrics != nil { + res.IncludeLinkedAccountsMetrics = r.ko.Spec.IncludeLinkedAccountsMetrics + } + if r.ko.Spec.Name != nil { + res.Name = r.ko.Spec.Name + } + if r.ko.Spec.OutputFormat != nil { + res.OutputFormat = svcsdktypes.MetricStreamOutputFormat(*r.ko.Spec.OutputFormat) + } + if r.ko.Spec.RoleARN != nil { + res.RoleArn = r.ko.Spec.RoleARN + } + if r.ko.Spec.StatisticsConfigurations != nil { + f7 := []svcsdktypes.MetricStreamStatisticsConfiguration{} + for _, f7iter := range r.ko.Spec.StatisticsConfigurations { + f7elem := &svcsdktypes.MetricStreamStatisticsConfiguration{} + if f7iter.AdditionalStatistics != nil { + f7elem.AdditionalStatistics = aws.ToStringSlice(f7iter.AdditionalStatistics) + } + if f7iter.IncludeMetrics != nil { + f7elemf1 := []svcsdktypes.MetricStreamStatisticsMetric{} + for _, f7elemf1iter := range f7iter.IncludeMetrics { + f7elemf1elem := &svcsdktypes.MetricStreamStatisticsMetric{} + if f7elemf1iter.MetricName != nil { + f7elemf1elem.MetricName = f7elemf1iter.MetricName + } + if f7elemf1iter.Namespace != nil { + f7elemf1elem.Namespace = f7elemf1iter.Namespace + } + f7elemf1 = append(f7elemf1, *f7elemf1elem) + } + f7elem.IncludeMetrics = f7elemf1 + } + f7 = append(f7, *f7elem) + } + res.StatisticsConfigurations = f7 + } + if r.ko.Spec.Tags != nil 
{ + f8 := []svcsdktypes.Tag{} + for _, f8iter := range r.ko.Spec.Tags { + f8elem := &svcsdktypes.Tag{} + if f8iter.Key != nil { + f8elem.Key = f8iter.Key + } + if f8iter.Value != nil { + f8elem.Value = f8iter.Value + } + f8 = append(f8, *f8elem) + } + res.Tags = f8 + } + + return res, nil +} + +// sdkUpdate patches the supplied resource in the backend AWS service API and +// returns a new resource with updated fields. +func (rm *resourceManager) sdkUpdate( + ctx context.Context, + desired *resource, + latest *resource, + delta *ackcompare.Delta, +) (updated *resource, err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.sdkUpdate") + defer func() { + exit(err) + }() + input, err := rm.newUpdateRequestPayload(ctx, desired, delta) + if err != nil { + return nil, err + } + + var resp *svcsdk.PutMetricStreamOutput + _ = resp + resp, err = rm.sdkapi.PutMetricStream(ctx, input) + rm.metrics.RecordAPICall("UPDATE", "PutMetricStream", err) + if err != nil { + return nil, err + } + // Merge in the information we read from the API call above to the copy of + // the original Kubernetes object we passed to the function + ko := desired.ko.DeepCopy() + + if ko.Status.ACKResourceMetadata == nil { + ko.Status.ACKResourceMetadata = &ackv1alpha1.ResourceMetadata{} + } + if resp.Arn != nil { + arn := ackv1alpha1.AWSResourceName(*resp.Arn) + ko.Status.ACKResourceMetadata.ARN = &arn + } + + rm.setStatusDefaults(ko) + return &resource{ko}, nil +} + +// newUpdateRequestPayload returns an SDK-specific struct for the HTTP request +// payload of the Update API call for the resource +func (rm *resourceManager) newUpdateRequestPayload( + ctx context.Context, + r *resource, + delta *ackcompare.Delta, +) (*svcsdk.PutMetricStreamInput, error) { + res := &svcsdk.PutMetricStreamInput{} + + if r.ko.Spec.ExcludeFilters != nil { + f0 := []svcsdktypes.MetricStreamFilter{} + for _, f0iter := range r.ko.Spec.ExcludeFilters { + f0elem := &svcsdktypes.MetricStreamFilter{} + if 
f0iter.MetricNames != nil { + f0elem.MetricNames = aws.ToStringSlice(f0iter.MetricNames) + } + if f0iter.Namespace != nil { + f0elem.Namespace = f0iter.Namespace + } + f0 = append(f0, *f0elem) + } + res.ExcludeFilters = f0 + } + if r.ko.Spec.FirehoseARN != nil { + res.FirehoseArn = r.ko.Spec.FirehoseARN + } + if r.ko.Spec.IncludeFilters != nil { + f2 := []svcsdktypes.MetricStreamFilter{} + for _, f2iter := range r.ko.Spec.IncludeFilters { + f2elem := &svcsdktypes.MetricStreamFilter{} + if f2iter.MetricNames != nil { + f2elem.MetricNames = aws.ToStringSlice(f2iter.MetricNames) + } + if f2iter.Namespace != nil { + f2elem.Namespace = f2iter.Namespace + } + f2 = append(f2, *f2elem) + } + res.IncludeFilters = f2 + } + if r.ko.Spec.IncludeLinkedAccountsMetrics != nil { + res.IncludeLinkedAccountsMetrics = r.ko.Spec.IncludeLinkedAccountsMetrics + } + if r.ko.Spec.Name != nil { + res.Name = r.ko.Spec.Name + } + if r.ko.Spec.OutputFormat != nil { + res.OutputFormat = svcsdktypes.MetricStreamOutputFormat(*r.ko.Spec.OutputFormat) + } + if r.ko.Spec.RoleARN != nil { + res.RoleArn = r.ko.Spec.RoleARN + } + if r.ko.Spec.StatisticsConfigurations != nil { + f7 := []svcsdktypes.MetricStreamStatisticsConfiguration{} + for _, f7iter := range r.ko.Spec.StatisticsConfigurations { + f7elem := &svcsdktypes.MetricStreamStatisticsConfiguration{} + if f7iter.AdditionalStatistics != nil { + f7elem.AdditionalStatistics = aws.ToStringSlice(f7iter.AdditionalStatistics) + } + if f7iter.IncludeMetrics != nil { + f7elemf1 := []svcsdktypes.MetricStreamStatisticsMetric{} + for _, f7elemf1iter := range f7iter.IncludeMetrics { + f7elemf1elem := &svcsdktypes.MetricStreamStatisticsMetric{} + if f7elemf1iter.MetricName != nil { + f7elemf1elem.MetricName = f7elemf1iter.MetricName + } + if f7elemf1iter.Namespace != nil { + f7elemf1elem.Namespace = f7elemf1iter.Namespace + } + f7elemf1 = append(f7elemf1, *f7elemf1elem) + } + f7elem.IncludeMetrics = f7elemf1 + } + f7 = append(f7, *f7elem) + } + 
res.StatisticsConfigurations = f7 + } + if r.ko.Spec.Tags != nil { + f8 := []svcsdktypes.Tag{} + for _, f8iter := range r.ko.Spec.Tags { + f8elem := &svcsdktypes.Tag{} + if f8iter.Key != nil { + f8elem.Key = f8iter.Key + } + if f8iter.Value != nil { + f8elem.Value = f8iter.Value + } + f8 = append(f8, *f8elem) + } + res.Tags = f8 + } + + return res, nil +} + +// sdkDelete deletes the supplied resource in the backend AWS service API +func (rm *resourceManager) sdkDelete( + ctx context.Context, + r *resource, +) (latest *resource, err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.sdkDelete") + defer func() { + exit(err) + }() + input, err := rm.newDeleteRequestPayload(r) + if err != nil { + return nil, err + } + var resp *svcsdk.DeleteMetricStreamOutput + _ = resp + resp, err = rm.sdkapi.DeleteMetricStream(ctx, input) + rm.metrics.RecordAPICall("DELETE", "DeleteMetricStream", err) + return nil, err +} + +// newDeleteRequestPayload returns an SDK-specific struct for the HTTP request +// payload of the Delete API call for the resource +func (rm *resourceManager) newDeleteRequestPayload( + r *resource, +) (*svcsdk.DeleteMetricStreamInput, error) { + res := &svcsdk.DeleteMetricStreamInput{} + + if r.ko.Spec.Name != nil { + res.Name = r.ko.Spec.Name + } + + return res, nil +} + +// setStatusDefaults sets default properties into supplied custom resource +func (rm *resourceManager) setStatusDefaults( + ko *svcapitypes.MetricStream, +) { + if ko.Status.ACKResourceMetadata == nil { + ko.Status.ACKResourceMetadata = &ackv1alpha1.ResourceMetadata{} + } + if ko.Status.ACKResourceMetadata.Region == nil { + ko.Status.ACKResourceMetadata.Region = &rm.awsRegion + } + if ko.Status.ACKResourceMetadata.OwnerAccountID == nil { + ko.Status.ACKResourceMetadata.OwnerAccountID = &rm.awsAccountID + } + if ko.Status.Conditions == nil { + ko.Status.Conditions = []*ackv1alpha1.Condition{} + } +} + +// updateConditions returns updated resource, true; if conditions were 
updated +// else it returns nil, false +func (rm *resourceManager) updateConditions( + r *resource, + onSuccess bool, + err error, +) (*resource, bool) { + ko := r.ko.DeepCopy() + rm.setStatusDefaults(ko) + + // Terminal condition + var terminalCondition *ackv1alpha1.Condition = nil + var recoverableCondition *ackv1alpha1.Condition = nil + var syncCondition *ackv1alpha1.Condition = nil + for _, condition := range ko.Status.Conditions { + if condition.Type == ackv1alpha1.ConditionTypeTerminal { + terminalCondition = condition + } + if condition.Type == ackv1alpha1.ConditionTypeRecoverable { + recoverableCondition = condition + } + if condition.Type == ackv1alpha1.ConditionTypeResourceSynced { + syncCondition = condition + } + } + var termError *ackerr.TerminalError + if rm.terminalAWSError(err) || err == ackerr.SecretTypeNotSupported || err == ackerr.SecretNotFound || errors.As(err, &termError) { + if terminalCondition == nil { + terminalCondition = &ackv1alpha1.Condition{ + Type: ackv1alpha1.ConditionTypeTerminal, + } + ko.Status.Conditions = append(ko.Status.Conditions, terminalCondition) + } + var errorMessage = "" + if err == ackerr.SecretTypeNotSupported || err == ackerr.SecretNotFound || errors.As(err, &termError) { + errorMessage = err.Error() + } else { + awsErr, _ := ackerr.AWSError(err) + errorMessage = awsErr.Error() + } + terminalCondition.Status = corev1.ConditionTrue + terminalCondition.Message = &errorMessage + } else { + // Clear the terminal condition if no longer present + if terminalCondition != nil { + terminalCondition.Status = corev1.ConditionFalse + terminalCondition.Message = nil + } + // Handling Recoverable Conditions + if err != nil { + if recoverableCondition == nil { + // Add a new Condition containing a non-terminal error + recoverableCondition = &ackv1alpha1.Condition{ + Type: ackv1alpha1.ConditionTypeRecoverable, + } + ko.Status.Conditions = append(ko.Status.Conditions, recoverableCondition) + } + recoverableCondition.Status = 
corev1.ConditionTrue + awsErr, _ := ackerr.AWSError(err) + errorMessage := err.Error() + if awsErr != nil { + errorMessage = awsErr.Error() + } + recoverableCondition.Message = &errorMessage + } else if recoverableCondition != nil { + recoverableCondition.Status = corev1.ConditionFalse + recoverableCondition.Message = nil + } + } + // Required to avoid the "declared but not used" error in the default case + _ = syncCondition + if terminalCondition != nil || recoverableCondition != nil || syncCondition != nil { + return &resource{ko}, true // updated + } + return nil, false // not updated +} + +// terminalAWSError returns awserr, true; if the supplied error is an aws Error type +// and if the exception indicates that it is a Terminal exception +// 'Terminal' exception are specified in generator configuration +func (rm *resourceManager) terminalAWSError(err error) bool { + // No terminal_errors specified for this resource in generator config + return false +} diff --git a/pkg/resource/metric_stream/tags.go b/pkg/resource/metric_stream/tags.go new file mode 100644 index 0000000..5df8d86 --- /dev/null +++ b/pkg/resource/metric_stream/tags.go @@ -0,0 +1,119 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. 
+ +package metric_stream + +import ( + "slices" + "strings" + + acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" + + svcapitypes "github.com/aws-controllers-k8s/cloudwatch-controller/apis/v1alpha1" +) + +var ( + _ = svcapitypes.MetricStream{} + _ = acktags.NewTags() + ACKSystemTags = []string{"services.k8s.aws/namespace", "services.k8s.aws/controller-version"} +) + +// convertToOrderedACKTags converts the tags parameter into 'acktags.Tags' shape. +// This method helps in creating the hub(acktags.Tags) for merging +// default controller tags with existing resource tags. It also returns a slice +// of keys maintaining the original key Order when the tags are a list +func convertToOrderedACKTags(tags []*svcapitypes.Tag) (acktags.Tags, []string) { + result := acktags.NewTags() + keyOrder := []string{} + + if len(tags) == 0 { + return result, keyOrder + } + for _, t := range tags { + if t.Key != nil { + keyOrder = append(keyOrder, *t.Key) + if t.Value != nil { + result[*t.Key] = *t.Value + } else { + result[*t.Key] = "" + } + } + } + + return result, keyOrder +} + +// fromACKTags converts the tags parameter into []*svcapitypes.Tag shape. +// This method helps in setting the tags back inside AWSResource after merging +// default controller tags with existing resource tags. When a list, +// it maintains the order from original +func fromACKTags(tags acktags.Tags, keyOrder []string) []*svcapitypes.Tag { + result := []*svcapitypes.Tag{} + + for _, k := range keyOrder { + v, ok := tags[k] + if ok { + tag := svcapitypes.Tag{Key: &k, Value: &v} + result = append(result, &tag) + delete(tags, k) + } + } + for k, v := range tags { + tag := svcapitypes.Tag{Key: &k, Value: &v} + result = append(result, &tag) + } + + return result +} + +// ignoreSystemTags ignores tags that have keys that start with "aws:" +// and ACKSystemTags, to avoid patching them to the resourceSpec. +// Eg. 
resources created with cloudformation have tags that cannot be +// removed by an ACK controller +func ignoreSystemTags(tags acktags.Tags) { + for k := range tags { + if strings.HasPrefix(k, "aws:") || + slices.Contains(ACKSystemTags, k) { + delete(tags, k) + } + } +} + +// syncAWSTags ensures AWS-managed tags (prefixed with "aws:") from the latest resource state +// are preserved in the desired state. This prevents the controller from attempting to +// modify AWS-managed tags, which would result in an error. +// +// AWS-managed tags are automatically added by AWS services (e.g., CloudFormation, Service Catalog) +// and cannot be modified or deleted through normal tag operations. Common examples include: +// - aws:cloudformation:stack-name +// - aws:servicecatalog:productArn +// +// Parameters: +// - a: The target Tags map to be updated (typically desired state) +// - b: The source Tags map containing AWS-managed tags (typically latest state) +// +// Example: +// +// latest := Tags{"aws:cloudformation:stack-name": "my-stack", "environment": "prod"} +// desired := Tags{"environment": "dev"} +// SyncAWSTags(desired, latest) +// desired now contains {"aws:cloudformation:stack-name": "my-stack", "environment": "dev"} +func syncAWSTags(a acktags.Tags, b acktags.Tags) { + for k := range b { + if strings.HasPrefix(k, "aws:") { + a[k] = b[k] + } + } +} diff --git a/test/e2e/bootstrap_resources.py b/test/e2e/bootstrap_resources.py index 450a769..ebb9f97 100644 --- a/test/e2e/bootstrap_resources.py +++ b/test/e2e/bootstrap_resources.py @@ -17,11 +17,14 @@ from dataclasses import dataclass from acktest.bootstrapping import Resources +from acktest.bootstrapping.iam import Role +from acktest.bootstrapping.firehose import DeliveryStream from e2e import bootstrap_directory @dataclass class BootstrapResources(Resources): - pass + MetricStreamRole: Role + DeliveryStream: DeliveryStream _bootstrap_resources = None diff --git a/test/e2e/metric_stream.py b/test/e2e/metric_stream.py new 
file mode 100644 index 0000000..7618526 --- /dev/null +++ b/test/e2e/metric_stream.py @@ -0,0 +1,75 @@ +# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You may +# not use this file except in compliance with the License. A copy of the +# License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Utilities for working with Metric Stream resources""" + +import datetime +import time + +import boto3 +import pytest + +DEFAULT_WAIT_UNTIL_DELETED_TIMEOUT_SECONDS = 60*20 +DEFAULT_WAIT_UNTIL_DELETED_INTERVAL_SECONDS = 15 + + +def wait_until_deleted( + metric_stream_name: str, + timeout_seconds: int = DEFAULT_WAIT_UNTIL_DELETED_TIMEOUT_SECONDS, + interval_seconds: int = DEFAULT_WAIT_UNTIL_DELETED_INTERVAL_SECONDS, + ) -> None: + """Waits until a Metric Stream with a supplied name is no longer returned from + the CloudWatch API. + + Usage: + from e2e.metric_stream import wait_until_deleted + + wait_until_deleted(stream_name) + + Raises: + pytest.fail upon timing out while waiting for the Metric Stream to be + deleted in the CloudWatch API + """ + now = datetime.datetime.now() + timeout = now + datetime.timedelta(seconds=timeout_seconds) + + while True: + if datetime.datetime.now() >= timeout: + pytest.fail( + "Timed out waiting for Metric Stream to be " + "deleted in CloudWatch API" + ) + time.sleep(interval_seconds) + + latest = get(metric_stream_name) + if latest is None: + break + + +def exists(metric_stream_name): + """Returns True if the supplied Metric Stream exists, False otherwise. 
+ """ + return get(metric_stream_name) is not None + + +def get(metric_stream_name): + """Returns a dict containing the Metric Stream record from the CloudWatch API. + + If no such Metric Stream exists, returns None. + """ + c = boto3.client('cloudwatch') + try: + resp = c.get_metric_stream(Name=metric_stream_name) + return resp + except c.exceptions.ResourceNotFoundException: + return None \ No newline at end of file diff --git a/test/e2e/replacement_values.py b/test/e2e/replacement_values.py index a00c62a..d51c09b 100644 --- a/test/e2e/replacement_values.py +++ b/test/e2e/replacement_values.py @@ -14,5 +14,10 @@ CloudWatch-specific test variables. """ +from e2e.bootstrap_resources import get_bootstrap_resources + REPLACEMENT_VALUES = { + "ROLE_ARN": get_bootstrap_resources().MetricStreamRole.arn, + "FIREHOSE_ARN": get_bootstrap_resources().DeliveryStream.arn, + "S3_BUCKET_NAME": get_bootstrap_resources().DeliveryStream.s3_bucket.name, } diff --git a/test/e2e/requirements.txt b/test/e2e/requirements.txt index feedf7f..4e87452 100644 --- a/test/e2e/requirements.txt +++ b/test/e2e/requirements.txt @@ -1 +1 @@ -acktest @ git+https://github.com/aws-controllers-k8s/test-infra.git@371852014efcb8c26c454f861eb546c93a48f205 +acktest @ git+https://github.com/aws-controllers-k8s/test-infra.git@01a753c63b44af5f745c64d02d5f5b0ce7fef360 diff --git a/test/e2e/resources/metric_stream.yaml b/test/e2e/resources/metric_stream.yaml new file mode 100644 index 0000000..a8310dd --- /dev/null +++ b/test/e2e/resources/metric_stream.yaml @@ -0,0 +1,17 @@ +apiVersion: cloudwatch.services.k8s.aws/v1alpha1 +kind: MetricStream +metadata: + name: $METRIC_STREAM_NAME +spec: + name: $METRIC_STREAM_NAME + firehoseARN: $FIREHOSE_ARN + roleARN: $ROLE_ARN + outputFormat: json + includeFilters: + - namespace: AWS/EC2 + - namespace: AWS/RDS + tags: + - key: Environment + value: test + - key: Purpose + value: e2e-testing \ No newline at end of file diff --git a/test/e2e/service_bootstrap.py 
b/test/e2e/service_bootstrap.py index 1dd6244..792c871 100644 --- a/test/e2e/service_bootstrap.py +++ b/test/e2e/service_bootstrap.py @@ -13,17 +13,39 @@ """Bootstraps the resources required to run the CloudWatch integration tests. """ import logging +import json from acktest.bootstrapping import Resources, BootstrapFailureException - from e2e import bootstrap_directory from e2e.bootstrap_resources import BootstrapResources +from acktest.bootstrapping.iam import Role, UserPolicies +from acktest.bootstrapping.firehose import DeliveryStream def service_bootstrap() -> Resources: logging.getLogger().setLevel(logging.INFO) + metric_stream_policy_doc = { + "Version": "2012-10-17", + "Statement": [{ + "Effect": "Allow", + "Action": ["firehose:PutRecord", "firehose:PutRecordBatch"], + "Resource": "*" + }] + } resources = BootstrapResources( - # TODO: Add bootstrapping when you have defined the resources + MetricStreamRole=Role( + name_prefix="cloudwatch-metric-stream-role", + principal_service="streams.metrics.cloudwatch.amazonaws.com", + description="Role for CloudWatch Metric Stream", + user_policies=UserPolicies( + name_prefix="metric-stream-firehose-policy", + policy_documents=[json.dumps(metric_stream_policy_doc)] + ) + ), + DeliveryStream=DeliveryStream( + name_prefix="cloudwatch-metric-stream", + s3_bucket_prefix="ack-test-cw-metrics" + ) ) try: diff --git a/test/e2e/tests/test_metric_stream.py b/test/e2e/tests/test_metric_stream.py new file mode 100644 index 0000000..d9a95d6 --- /dev/null +++ b/test/e2e/tests/test_metric_stream.py @@ -0,0 +1,114 @@ +# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You may +# not use this file except in compliance with the License. A copy of the +# License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. 
This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Integration tests for the CloudWatch API MetricStream resource +""" + +import time + +import pytest + +from acktest.k8s import resource as k8s +from acktest.resources import random_suffix_name +from e2e import service_marker, CRD_GROUP, CRD_VERSION, load_cloudwatch_resource +from e2e.replacement_values import REPLACEMENT_VALUES +from e2e import condition +from e2e import metric_stream +from e2e.bootstrap_resources import get_bootstrap_resources + +RESOURCE_PLURAL = 'metricstreams' + +CHECK_STATUS_WAIT_SECONDS = 10 +MODIFY_WAIT_AFTER_SECONDS = 30 +DELETE_WAIT_AFTER_SECONDS = 5 + +@pytest.fixture +def _metric_stream(): + metric_stream_name = random_suffix_name("ack-test-metric-stream", 24) + + resources = get_bootstrap_resources() + + replacements = REPLACEMENT_VALUES.copy() + replacements["METRIC_STREAM_NAME"] = metric_stream_name + replacements["FIREHOSE_ARN"] = resources.DeliveryStream.arn + replacements["ROLE_ARN"] = resources.MetricStreamRole.arn + + resource_data = load_cloudwatch_resource( + "metric_stream", + additional_replacements=replacements, + ) + + # Create the k8s resource + ref = k8s.CustomResourceReference( + CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL, + metric_stream_name, namespace="default", + ) + k8s.create_custom_resource(ref, resource_data) + cr = k8s.wait_resource_consumed_by_controller(ref) + + assert cr is not None + assert k8s.get_resource_exists(ref) + + yield (ref, cr) + + # Try to delete, if doesn't already exist + _, deleted = k8s.delete_custom_resource( + ref, + period_length=DELETE_WAIT_AFTER_SECONDS, + ) + assert deleted + + metric_stream.wait_until_deleted(metric_stream_name) + + +@service_marker +@pytest.mark.canary +class TestMetricStream: + def test_crud(self, _metric_stream): + (ref, cr) = 
_metric_stream + metric_stream_name = ref.name + time.sleep(CHECK_STATUS_WAIT_SECONDS) + condition.assert_synced(ref) + + assert metric_stream.exists(metric_stream_name) + + initial_stream_data = metric_stream.get(metric_stream_name) + assert initial_stream_data is not None, "MetricStream not found in AWS API" + initial_filters = initial_stream_data.get('IncludeFilters', []) + assert len(initial_filters) == 2, f"Expected 2 initial filters, got {len(initial_filters)}: {initial_filters}" + + updates = { + "spec": { + "includeFilters": [ + {"namespace": "AWS/EC2"} + ] + } + } + + k8s.patch_custom_resource(ref, updates) + cr = k8s.wait_resource_consumed_by_controller(ref) + + assert cr is not None + assert k8s.get_resource_exists(ref) + + time.sleep(MODIFY_WAIT_AFTER_SECONDS) + condition.assert_synced(ref) + + assert metric_stream.exists(metric_stream_name) + + updated_stream_data = metric_stream.get(metric_stream_name) + assert updated_stream_data is not None, "MetricStream not found in AWS API after update" + + updated_filters = updated_stream_data.get('IncludeFilters', []) + assert len(updated_filters) == 1, f"Expected 1 filter after update, got {len(updated_filters)}: {updated_filters}" + assert updated_filters[0]['Namespace'] == 'AWS/EC2', f"Expected AWS/EC2 filter, got {updated_filters[0]}" \ No newline at end of file