From 6eefb634684b4591e0fe1dd1508266658af95cf0 Mon Sep 17 00:00:00 2001 From: AWS SDK for Go v2 automation user Date: Tue, 19 Mar 2024 18:13:25 +0000 Subject: [PATCH] Regenerated Clients --- .../13e544a2279b4763ad0bc97ea652c612.json | 8 + .../64d06f83fff3495b9f0c933c68f8070a.json | 8 + .../74b11f829fcc45238597306b65f52168.json | 8 + .../df9b1f28a5274bddaaeb7b984b3932da.json | 8 + .../e3cb6df6e8ef47628e4686ae7f1a7ae5.json | 8 + .../cloudwatchlogs/api_op_CreateDelivery.go | 6 +- .../api_op_DescribeDeliveries.go | 6 +- service/cloudwatchlogs/api_op_GetDelivery.go | 6 +- .../cloudwatchlogs/api_op_PutAccountPolicy.go | 25 +- .../api_op_PutDataProtectionPolicy.go | 3 +- .../api_op_PutDeliveryDestination.go | 7 +- .../api_op_PutDeliverySource.go | 15 +- .../api_op_PutSubscriptionFilter.go | 2 +- service/cloudwatchlogs/deserializers.go | 71 +++- service/cloudwatchlogs/types/types.go | 39 +- service/ec2/api_op_DescribeMacHosts.go | 243 ++++++++++++ service/ec2/deserializers.go | 349 ++++++++++++++++++ service/ec2/generated.json | 1 + service/ec2/serializers.go | 95 +++++ service/ec2/snapshot_test.go | 24 ++ service/ec2/types/types.go | 13 + service/finspace/api_op_CreateKxDataview.go | 29 +- .../finspace/api_op_CreateKxScalingGroup.go | 14 +- service/finspace/api_op_CreateKxVolume.go | 8 +- service/finspace/api_op_GetKxDataview.go | 9 +- service/finspace/api_op_GetKxScalingGroup.go | 14 +- service/finspace/api_op_GetKxVolume.go | 4 +- service/finspace/api_op_UpdateKxDataview.go | 9 +- service/finspace/api_op_UpdateKxVolume.go | 4 +- service/finspace/deserializers.go | 45 +++ service/finspace/serializers.go | 10 + service/finspace/types/types.go | 33 +- .../api_op_GetTokenBalance.go | 2 +- .../api_op_GetTransaction.go | 3 +- .../api_op_ListFilteredTransactionEvents.go | 271 ++++++++++++++ .../api_op_ListTransactionEvents.go | 20 +- .../api_op_ListTransactions.go | 8 +- .../managedblockchainquery/deserializers.go | 222 +++++++++++ service/managedblockchainquery/generated.json 
| 1 + service/managedblockchainquery/serializers.go | 200 ++++++++++ .../managedblockchainquery/snapshot_test.go | 24 ++ service/managedblockchainquery/types/enums.go | 18 + service/managedblockchainquery/types/types.go | 102 ++++- service/managedblockchainquery/validators.go | 89 ++++- 44 files changed, 1955 insertions(+), 129 deletions(-) create mode 100644 .changelog/13e544a2279b4763ad0bc97ea652c612.json create mode 100644 .changelog/64d06f83fff3495b9f0c933c68f8070a.json create mode 100644 .changelog/74b11f829fcc45238597306b65f52168.json create mode 100644 .changelog/df9b1f28a5274bddaaeb7b984b3932da.json create mode 100644 .changelog/e3cb6df6e8ef47628e4686ae7f1a7ae5.json create mode 100644 service/ec2/api_op_DescribeMacHosts.go create mode 100644 service/managedblockchainquery/api_op_ListFilteredTransactionEvents.go diff --git a/.changelog/13e544a2279b4763ad0bc97ea652c612.json b/.changelog/13e544a2279b4763ad0bc97ea652c612.json new file mode 100644 index 00000000000..29d20ac7f27 --- /dev/null +++ b/.changelog/13e544a2279b4763ad0bc97ea652c612.json @@ -0,0 +1,8 @@ +{ + "id": "13e544a2-279b-4763-ad0b-c97ea652c612", + "type": "feature", + "description": "This release adds the new DescribeMacHosts API operation for getting information about EC2 Mac Dedicated Hosts. 
Users can now see the latest macOS versions that their underlying Apple Mac can support without needing to be updated.", + "modules": [ + "service/ec2" + ] +} \ No newline at end of file diff --git a/.changelog/64d06f83fff3495b9f0c933c68f8070a.json b/.changelog/64d06f83fff3495b9f0c933c68f8070a.json new file mode 100644 index 00000000000..5bfda1f3b0e --- /dev/null +++ b/.changelog/64d06f83fff3495b9f0c933c68f8070a.json @@ -0,0 +1,8 @@ +{ + "id": "64d06f83-fff3-495b-9f0c-933c68f8070a", + "type": "feature", + "description": "Adding new attributes readWrite and onDemand to dataview models for Database Maintenance operations.", + "modules": [ + "service/finspace" + ] +} \ No newline at end of file diff --git a/.changelog/74b11f829fcc45238597306b65f52168.json b/.changelog/74b11f829fcc45238597306b65f52168.json new file mode 100644 index 00000000000..53488d6e95e --- /dev/null +++ b/.changelog/74b11f829fcc45238597306b65f52168.json @@ -0,0 +1,8 @@ +{ + "id": "74b11f82-9fcc-4523-8597-306b65f52168", + "type": "documentation", + "description": "Documentation update, March 2024. 
Corrects some formatting.", + "modules": [ + "service/cloudformation" + ] +} \ No newline at end of file diff --git a/.changelog/df9b1f28a5274bddaaeb7b984b3932da.json b/.changelog/df9b1f28a5274bddaaeb7b984b3932da.json new file mode 100644 index 00000000000..d822abc0e6d --- /dev/null +++ b/.changelog/df9b1f28a5274bddaaeb7b984b3932da.json @@ -0,0 +1,8 @@ +{ + "id": "df9b1f28-a527-4bdd-aaeb-7b984b3932da", + "type": "feature", + "description": "Update LogSamples field in Anomaly model to be a list of LogEvent", + "modules": [ + "service/cloudwatchlogs" + ] +} \ No newline at end of file diff --git a/.changelog/e3cb6df6e8ef47628e4686ae7f1a7ae5.json b/.changelog/e3cb6df6e8ef47628e4686ae7f1a7ae5.json new file mode 100644 index 00000000000..bb6c5d53f9f --- /dev/null +++ b/.changelog/e3cb6df6e8ef47628e4686ae7f1a7ae5.json @@ -0,0 +1,8 @@ +{ + "id": "e3cb6df6-e8ef-4762-8e46-86ae7f1a7ae5", + "type": "feature", + "description": "Introduces a new API for Amazon Managed Blockchain Query: ListFilteredTransactionEvents.", + "modules": [ + "service/managedblockchainquery" + ] +} \ No newline at end of file diff --git a/service/cloudwatchlogs/api_op_CreateDelivery.go b/service/cloudwatchlogs/api_op_CreateDelivery.go index 43f074ead5a..2216154163c 100644 --- a/service/cloudwatchlogs/api_op_CreateDelivery.go +++ b/service/cloudwatchlogs/api_op_CreateDelivery.go @@ -17,9 +17,9 @@ import ( // using this operation. These services are listed as Supported [V2 Permissions] in // the table at Enabling logging from Amazon Web Services services. (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html) // A delivery destination can represent a log group in CloudWatch Logs, an Amazon -// S3 bucket, or a delivery stream in Kinesis Data Firehose. To configure logs -// delivery between a supported Amazon Web Services service and a destination, you -// must do the following: +// S3 bucket, or a delivery stream in Firehose. 
To configure logs delivery between +// a supported Amazon Web Services service and a destination, you must do the +// following: // - Create a delivery source, which is a logical object that represents the // resource that is actually sending the logs. For more information, see // PutDeliverySource (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliverySource.html) diff --git a/service/cloudwatchlogs/api_op_DescribeDeliveries.go b/service/cloudwatchlogs/api_op_DescribeDeliveries.go index 4cee3a2ad4e..7f12033e05d 100644 --- a/service/cloudwatchlogs/api_op_DescribeDeliveries.go +++ b/service/cloudwatchlogs/api_op_DescribeDeliveries.go @@ -16,9 +16,9 @@ import ( // and a delivery destination (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestination.html) // . A delivery source represents an Amazon Web Services resource that sends logs // to an logs delivery destination. The destination can be CloudWatch Logs, Amazon -// S3, or Kinesis Data Firehose. Only some Amazon Web Services services support -// being configured as a delivery source. These services are listed in Enable -// logging from Amazon Web Services services. (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html) +// S3, or Firehose. Only some Amazon Web Services services support being configured +// as a delivery source. These services are listed in Enable logging from Amazon +// Web Services services. 
(https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html) func (c *Client) DescribeDeliveries(ctx context.Context, params *DescribeDeliveriesInput, optFns ...func(*Options)) (*DescribeDeliveriesOutput, error) { if params == nil { params = &DescribeDeliveriesInput{} diff --git a/service/cloudwatchlogs/api_op_GetDelivery.go b/service/cloudwatchlogs/api_op_GetDelivery.go index 4561cc281f2..8c6c5fa7620 100644 --- a/service/cloudwatchlogs/api_op_GetDelivery.go +++ b/service/cloudwatchlogs/api_op_GetDelivery.go @@ -16,9 +16,9 @@ import ( // and a delivery destination (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestination.html) // . A delivery source represents an Amazon Web Services resource that sends logs // to an logs delivery destination. The destination can be CloudWatch Logs, Amazon -// S3, or Kinesis Data Firehose. Only some Amazon Web Services services support -// being configured as a delivery source. These services are listed in Enable -// logging from Amazon Web Services services. (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html) +// S3, or Firehose. Only some Amazon Web Services services support being configured +// as a delivery source. These services are listed in Enable logging from Amazon +// Web Services services. (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html) // You need to specify the delivery id in this operation. You can find the IDs of // the deliveries in your account with the DescribeDeliveries (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeDeliveries.html) // operation. 
diff --git a/service/cloudwatchlogs/api_op_PutAccountPolicy.go b/service/cloudwatchlogs/api_op_PutAccountPolicy.go index 51fe37ce807..7a893f43bbc 100644 --- a/service/cloudwatchlogs/api_op_PutAccountPolicy.go +++ b/service/cloudwatchlogs/api_op_PutAccountPolicy.go @@ -44,18 +44,18 @@ import ( // CloudWatch Logs to other Amazon Web Services services. Account-level // subscription filter policies apply to both existing log groups and log groups // that are created later in this account. Supported destinations are Kinesis Data -// Streams, Kinesis Data Firehose, and Lambda. When log events are sent to the -// receiving service, they are Base64 encoded and compressed with the GZIP format. -// The following destinations are supported for subscription filters: +// Streams, Firehose, and Lambda. When log events are sent to the receiving +// service, they are Base64 encoded and compressed with the GZIP format. The +// following destinations are supported for subscription filters: // - An Kinesis Data Streams data stream in the same account as the subscription // policy, for same-account delivery. -// - An Kinesis Data Firehose data stream in the same account as the -// subscription policy, for same-account delivery. +// - An Firehose data stream in the same account as the subscription policy, for +// same-account delivery. // - A Lambda function in the same account as the subscription policy, for // same-account delivery. // - A logical destination in a different account created with PutDestination (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestination.html) -// , for cross-account delivery. Kinesis Data Streams and Kinesis Data Firehose are -// supported as logical destinations. +// , for cross-account delivery. Kinesis Data Streams and Firehose are supported as +// logical destinations. // // Each account can have one account-level subscription filter policy. 
If you are // updating an existing filter, you must specify the correct name in PolicyName . @@ -88,8 +88,7 @@ type PutAccountPolicyInput struct { // sensitive data terms. This Audit action must contain a FindingsDestination // object. You can optionally use that FindingsDestination object to list one or // more destinations to send audit findings to. If you specify destinations such as - // log groups, Kinesis Data Firehose streams, and S3 buckets, they must already - // exist. + // log groups, Firehose streams, and S3 buckets, they must already exist. // - The second block must include both a DataIdentifer array and an Operation // property with an Deidentify action. The DataIdentifer array must exactly match // the DataIdentifer array in the first block of the policy. The Operation @@ -108,13 +107,13 @@ type PutAccountPolicyInput struct { // Supported destinations are: // - An Kinesis Data Streams data stream in the same account as the subscription // policy, for same-account delivery. - // - An Kinesis Data Firehose data stream in the same account as the - // subscription policy, for same-account delivery. + // - An Firehose data stream in the same account as the subscription policy, for + // same-account delivery. // - A Lambda function in the same account as the subscription policy, for // same-account delivery. // - A logical destination in a different account created with PutDestination (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestination.html) - // , for cross-account delivery. Kinesis Data Streams and Kinesis Data Firehose are - // supported as logical destinations. + // , for cross-account delivery. Kinesis Data Streams and Firehose are supported as + // logical destinations. // - RoleArn The ARN of an IAM role that grants CloudWatch Logs permissions to // deliver ingested log events to the destination stream. 
You don't need to provide // the ARN when you are working with a logical destination for cross-account diff --git a/service/cloudwatchlogs/api_op_PutDataProtectionPolicy.go b/service/cloudwatchlogs/api_op_PutDataProtectionPolicy.go index 9a643010ac6..58ffd49a42f 100644 --- a/service/cloudwatchlogs/api_op_PutDataProtectionPolicy.go +++ b/service/cloudwatchlogs/api_op_PutDataProtectionPolicy.go @@ -63,8 +63,7 @@ type PutDataProtectionPolicyInput struct { // sensitive data terms. This Audit action must contain a FindingsDestination // object. You can optionally use that FindingsDestination object to list one or // more destinations to send audit findings to. If you specify destinations such as - // log groups, Kinesis Data Firehose streams, and S3 buckets, they must already - // exist. + // log groups, Firehose streams, and S3 buckets, they must already exist. // - The second block must include both a DataIdentifer array and an Operation // property with an Deidentify action. The DataIdentifer array must exactly match // the DataIdentifer array in the first block of the policy. The Operation diff --git a/service/cloudwatchlogs/api_op_PutDeliveryDestination.go b/service/cloudwatchlogs/api_op_PutDeliveryDestination.go index 7c2a8879a6a..ddf1a1dbf8e 100644 --- a/service/cloudwatchlogs/api_op_PutDeliveryDestination.go +++ b/service/cloudwatchlogs/api_op_PutDeliveryDestination.go @@ -13,10 +13,9 @@ import ( // Creates or updates a logical delivery destination. A delivery destination is an // Amazon Web Services resource that represents an Amazon Web Services service that -// logs can be sent to. CloudWatch Logs, Amazon S3, and Kinesis Data Firehose are -// supported as logs delivery destinations. To configure logs delivery between a -// supported Amazon Web Services service and a destination, you must do the -// following: +// logs can be sent to. CloudWatch Logs, Amazon S3, and Firehose are supported as +// logs delivery destinations. 
To configure logs delivery between a supported +// Amazon Web Services service and a destination, you must do the following: // - Create a delivery source, which is a logical object that represents the // resource that is actually sending the logs. For more information, see // PutDeliverySource (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliverySource.html) diff --git a/service/cloudwatchlogs/api_op_PutDeliverySource.go b/service/cloudwatchlogs/api_op_PutDeliverySource.go index 60937e057f9..ae6e6903f93 100644 --- a/service/cloudwatchlogs/api_op_PutDeliverySource.go +++ b/service/cloudwatchlogs/api_op_PutDeliverySource.go @@ -13,10 +13,9 @@ import ( // Creates or updates a logical delivery source. A delivery source represents an // Amazon Web Services resource that sends logs to an logs delivery destination. -// The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose. To -// configure logs delivery between a delivery destination and an Amazon Web -// Services service that is supported as a delivery source, you must do the -// following: +// The destination can be CloudWatch Logs, Amazon S3, or Firehose. To configure +// logs delivery between a delivery destination and an Amazon Web Services service +// that is supported as a delivery source, you must do the following: // - Use PutDeliverySource to create a delivery source, which is a logical object // that represents the resource that is actually sending the logs. // - Use PutDeliveryDestination to create a delivery destination, which is a @@ -57,8 +56,12 @@ func (c *Client) PutDeliverySource(ctx context.Context, params *PutDeliverySourc type PutDeliverySourceInput struct { - // Defines the type of log that the source is sending. For Amazon CodeWhisperer, - // the valid value is EVENT_LOGS . + // Defines the type of log that the source is sending. + // - For Amazon CodeWhisperer, the valid value is EVENT_LOGS . 
+ // - For IAM Identity Centerr, the valid value is ERROR_LOGS . + // - For Amazon WorkMail, the valid values are ACCESS_CONTROL_LOGS , + // AUTHENTICATION_LOGS , WORKMAIL_AVAILABILITY_PROVIDER_LOGS , and + // WORKMAIL_MAILBOX_ACCESS_LOGS . // // This member is required. LogType *string diff --git a/service/cloudwatchlogs/api_op_PutSubscriptionFilter.go b/service/cloudwatchlogs/api_op_PutSubscriptionFilter.go index 123ccb0085b..91e999f12de 100644 --- a/service/cloudwatchlogs/api_op_PutSubscriptionFilter.go +++ b/service/cloudwatchlogs/api_op_PutSubscriptionFilter.go @@ -21,7 +21,7 @@ import ( // subscription filter, for same-account delivery. // - A logical destination created with PutDestination (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestination.html) // that belongs to a different account, for cross-account delivery. We currently -// support Kinesis Data Streams and Kinesis Data Firehose as logical destinations. +// support Kinesis Data Streams and Firehose as logical destinations. // - An Amazon Kinesis Data Firehose delivery stream that belongs to the same // account as the subscription filter, for same-account delivery. 
// - An Lambda function that belongs to the same account as the subscription diff --git a/service/cloudwatchlogs/deserializers.go b/service/cloudwatchlogs/deserializers.go index ba2ec7a8359..2a38f1e6319 100644 --- a/service/cloudwatchlogs/deserializers.go +++ b/service/cloudwatchlogs/deserializers.go @@ -11117,6 +11117,59 @@ func awsAwsjson11_deserializeDocumentLimitExceededException(v **types.LimitExcee return nil } +func awsAwsjson11_deserializeDocumentLogEvent(v **types.LogEvent, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.LogEvent + if *v == nil { + sv = &types.LogEvent{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EventMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + case "timestamp": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Timestamp to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Timestamp = ptr.Int64(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeDocumentLogGroup(v **types.LogGroup, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -11488,7 +11541,7 @@ func awsAwsjson11_deserializeDocumentLogRecord(v *map[string]string, value inter return nil } -func awsAwsjson11_deserializeDocumentLogSamples(v *[]string, value interface{}) error { +func awsAwsjson11_deserializeDocumentLogSamples(v *[]types.LogEvent, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -11501,22 +11554,20 @@ func 
awsAwsjson11_deserializeDocumentLogSamples(v *[]string, value interface{}) return fmt.Errorf("unexpected JSON type %v", value) } - var cv []string + var cv []types.LogEvent if *v == nil { - cv = []string{} + cv = []types.LogEvent{} } else { cv = *v } for _, value := range shape { - var col string - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected LogEvent to be of type string, got %T instead", value) - } - col = jtv + var col types.LogEvent + destAddr := &col + if err := awsAwsjson11_deserializeDocumentLogEvent(&destAddr, value); err != nil { + return err } + col = *destAddr cv = append(cv, col) } diff --git a/service/cloudwatchlogs/types/types.go b/service/cloudwatchlogs/types/types.go index b08a4d29c6f..beaef415310 100644 --- a/service/cloudwatchlogs/types/types.go +++ b/service/cloudwatchlogs/types/types.go @@ -93,7 +93,7 @@ type Anomaly struct { // anomaly. // // This member is required. - LogSamples []string + LogSamples []LogEvent // The ID of the pattern used to help identify this anomaly. // @@ -206,7 +206,7 @@ type Delivery struct { DeliveryDestinationArn *string // Displays whether the delivery destination associated with this delivery is - // CloudWatch Logs, Amazon S3, or Kinesis Data Firehose. + // CloudWatch Logs, Amazon S3, or Firehose. DeliveryDestinationType DeliveryDestinationType // The name of the delivery source that is associated with this delivery. @@ -224,9 +224,9 @@ type Delivery struct { // This structure contains information about one delivery destination in your // account. A delivery destination is an Amazon Web Services resource that // represents an Amazon Web Services service that logs can be sent to. CloudWatch -// Logs, Amazon S3, are supported as Kinesis Data Firehose delivery destinations. -// To configure logs delivery between a supported Amazon Web Services service and a -// destination, you must do the following: +// Logs, Amazon S3, are supported as Firehose delivery destinations. 
To configure +// logs delivery between a supported Amazon Web Services service and a destination, +// you must do the following: // - Create a delivery source, which is a logical object that represents the // resource that is actually sending the logs. For more information, see // PutDeliverySource (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliverySource.html) @@ -256,7 +256,7 @@ type DeliveryDestination struct { DeliveryDestinationConfiguration *DeliveryDestinationConfiguration // Displays whether this delivery destination is CloudWatch Logs, Amazon S3, or - // Kinesis Data Firehose. + // Firehose. DeliveryDestinationType DeliveryDestinationType // The name of this delivery destination. @@ -276,8 +276,7 @@ type DeliveryDestinationConfiguration struct { // The ARN of the Amazon Web Services destination that this delivery destination // represents. That Amazon Web Services destination can be a log group in - // CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Kinesis Data - // Firehose. + // CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Firehose. // // This member is required. DestinationResourceArn *string @@ -288,10 +287,9 @@ type DeliveryDestinationConfiguration struct { // This structure contains information about one delivery source in your account. // A delivery source is an Amazon Web Services resource that sends logs to an // Amazon Web Services destination. The destination can be CloudWatch Logs, Amazon -// S3, or Kinesis Data Firehose. Only some Amazon Web Services services support -// being configured as a delivery source. These services are listed as Supported -// [V2 Permissions] in the table at Enabling logging from Amazon Web Services -// services. (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html) +// S3, or Firehose. Only some Amazon Web Services services support being configured +// as a delivery source. 
These services are listed as Supported [V2 Permissions] in +// the table at Enabling logging from Amazon Web Services services. (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html) // To configure logs delivery between a supported Amazon Web Services service and a // destination, you must do the following: // - Create a delivery source, which is a logical object that represents the @@ -559,6 +557,19 @@ type LiveTailSessionUpdate struct { noSmithyDocumentSerde } +// This structure contains the information for one sample log event that is +// associated with an anomaly found by a log anomaly detector. +type LogEvent struct { + + // The message content of the log event. + Message *string + + // The time stamp of the log event. + Timestamp *int64 + + noSmithyDocumentSerde +} + // Represents a log group. type LogGroup struct { @@ -922,10 +933,10 @@ type RejectedLogEventsInfo struct { // The expired log events. ExpiredLogEventEndIndex *int32 - // The log events that are too new. + // The index of the first log event that is too new. This field is inclusive. TooNewLogEventStartIndex *int32 - // The log events that are dated too far in the past. + // The index of the last log event that is too old. This field is exclusive. TooOldLogEventEndIndex *int32 noSmithyDocumentSerde diff --git a/service/ec2/api_op_DescribeMacHosts.go b/service/ec2/api_op_DescribeMacHosts.go new file mode 100644 index 00000000000..5c7166b6c92 --- /dev/null +++ b/service/ec2/api_op_DescribeMacHosts.go @@ -0,0 +1,243 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ec2 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Describes the specified EC2 Mac Dedicated Host or all of your EC2 Mac Dedicated +// Hosts. 
+func (c *Client) DescribeMacHosts(ctx context.Context, params *DescribeMacHostsInput, optFns ...func(*Options)) (*DescribeMacHostsOutput, error) { + if params == nil { + params = &DescribeMacHostsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeMacHosts", params, optFns, c.addOperationDescribeMacHostsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeMacHostsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeMacHostsInput struct { + + // The filters. + // - availability-zone - The Availability Zone of the EC2 Mac Dedicated Host. + // - instance-type - The instance type size that the EC2 Mac Dedicated Host is + // configured to support. + Filters []types.Filter + + // The IDs of the EC2 Mac Dedicated Hosts. + HostIds []string + + // The maximum number of results to return for the request in a single page. The + // remaining results can be seen by sending another request with the returned + // nextToken value. This value can be between 5 and 500. If maxResults is given a + // larger value than 500, you receive an error. + MaxResults *int32 + + // The token to use to retrieve the next page of results. + NextToken *string + + noSmithyDocumentSerde +} + +type DescribeMacHostsOutput struct { + + // Information about the EC2 Mac Dedicated Hosts. + MacHosts []types.MacHost + + // The token to use to retrieve the next page of results. + NextToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeMacHostsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsEc2query_serializeOpDescribeMacHosts{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsEc2query_deserializeOpDescribeMacHosts{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeMacHosts"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeMacHosts(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = 
addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +// DescribeMacHostsAPIClient is a client that implements the DescribeMacHosts +// operation. +type DescribeMacHostsAPIClient interface { + DescribeMacHosts(context.Context, *DescribeMacHostsInput, ...func(*Options)) (*DescribeMacHostsOutput, error) +} + +var _ DescribeMacHostsAPIClient = (*Client)(nil) + +// DescribeMacHostsPaginatorOptions is the paginator options for DescribeMacHosts +type DescribeMacHostsPaginatorOptions struct { + // The maximum number of results to return for the request in a single page. The + // remaining results can be seen by sending another request with the returned + // nextToken value. This value can be between 5 and 500. If maxResults is given a + // larger value than 500, you receive an error. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// DescribeMacHostsPaginator is a paginator for DescribeMacHosts +type DescribeMacHostsPaginator struct { + options DescribeMacHostsPaginatorOptions + client DescribeMacHostsAPIClient + params *DescribeMacHostsInput + nextToken *string + firstPage bool +} + +// NewDescribeMacHostsPaginator returns a new DescribeMacHostsPaginator +func NewDescribeMacHostsPaginator(client DescribeMacHostsAPIClient, params *DescribeMacHostsInput, optFns ...func(*DescribeMacHostsPaginatorOptions)) *DescribeMacHostsPaginator { + if params == nil { + params = &DescribeMacHostsInput{} + } + + options := DescribeMacHostsPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &DescribeMacHostsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *DescribeMacHostsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next DescribeMacHosts page. +func (p *DescribeMacHostsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribeMacHostsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + result, err := p.client.DescribeMacHosts(ctx, ¶ms, optFns...) 
+ if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opDescribeMacHosts(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DescribeMacHosts", + } +} diff --git a/service/ec2/deserializers.go b/service/ec2/deserializers.go index cdb4c28ebd3..2243e3975b9 100644 --- a/service/ec2/deserializers.go +++ b/service/ec2/deserializers.go @@ -25975,6 +25975,97 @@ func awsEc2query_deserializeOpErrorDescribeLockedSnapshots(response *smithyhttp. } } +type awsEc2query_deserializeOpDescribeMacHosts struct { +} + +func (*awsEc2query_deserializeOpDescribeMacHosts) ID() string { + return "OperationDeserializer" +} + +func (m *awsEc2query_deserializeOpDescribeMacHosts) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsEc2query_deserializeOpErrorDescribeMacHosts(response, &metadata) + } + output := &DescribeMacHostsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return 
out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentDescribeMacHostsOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsEc2query_deserializeOpErrorDescribeMacHosts(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) + if err != nil { + return err + } + awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + type awsEc2query_deserializeOpDescribeManagedPrefixLists struct { } @@ -99198,6 +99289,209 @@ func awsEc2query_deserializeDocumentLockedSnapshotsInfoListUnwrapped(v *[]types. 
*v = sv return nil } +func awsEc2query_deserializeDocumentMacHost(v **types.MacHost, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.MacHost + if *v == nil { + sv = &types.MacHost{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("hostId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.HostId = ptr.String(xtv) + } + + case strings.EqualFold("macOSLatestSupportedVersionSet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentMacOSVersionStringList(&sv.MacOSLatestSupportedVersions, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentMacHostList(v *[]types.MacHost, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.MacHost + if *v == nil { + sv = make([]types.MacHost, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("item", t.Name.Local): + var col types.MacHost + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsEc2query_deserializeDocumentMacHost(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder 
= originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentMacHostListUnwrapped(v *[]types.MacHost, decoder smithyxml.NodeDecoder) error { + var sv []types.MacHost + if *v == nil { + sv = make([]types.MacHost, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.MacHost + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsEc2query_deserializeDocumentMacHost(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsEc2query_deserializeDocumentMacOSVersionStringList(v *[]string, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []string + if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + decoder = memberDecoder + switch { + case strings.EqualFold("item", t.Name.Local): + var col string + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + col = xtv + } + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentMacOSVersionStringListUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error { + var sv []string + if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + switch { + default: + var mv string + t := decoder.StartEl + _ = t + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = xtv + } + sv = append(sv, mv) + } + *v = sv + return nil +} func awsEc2query_deserializeDocumentMaintenanceDetails(v 
**types.MaintenanceDetails, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -152817,6 +153111,61 @@ func awsEc2query_deserializeOpDocumentDescribeLockedSnapshotsOutput(v **Describe return nil } +func awsEc2query_deserializeOpDocumentDescribeMacHostsOutput(v **DescribeMacHostsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *DescribeMacHostsOutput + if *v == nil { + sv = &DescribeMacHostsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("macHostSet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentMacHostList(&sv.MacHosts, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("nextToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextToken = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsEc2query_deserializeOpDocumentDescribeManagedPrefixListsOutput(v **DescribeManagedPrefixListsOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/service/ec2/generated.json b/service/ec2/generated.json index b7049e9b1f8..f61543b9741 100644 --- a/service/ec2/generated.json +++ b/service/ec2/generated.json @@ -304,6 +304,7 @@ "api_op_DescribeLocalGatewayVirtualInterfaces.go", "api_op_DescribeLocalGateways.go", "api_op_DescribeLockedSnapshots.go", + "api_op_DescribeMacHosts.go", 
"api_op_DescribeManagedPrefixLists.go", "api_op_DescribeMovingAddresses.go", "api_op_DescribeNatGateways.go", diff --git a/service/ec2/serializers.go b/service/ec2/serializers.go index 5b9988fdc3c..982a2091ba0 100644 --- a/service/ec2/serializers.go +++ b/service/ec2/serializers.go @@ -18705,6 +18705,70 @@ func (m *awsEc2query_serializeOpDescribeLockedSnapshots) HandleSerialize(ctx con return next.HandleSerialize(ctx, in) } +type awsEc2query_serializeOpDescribeMacHosts struct { +} + +func (*awsEc2query_serializeOpDescribeMacHosts) ID() string { + return "OperationSerializer" +} + +func (m *awsEc2query_serializeOpDescribeMacHosts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeMacHostsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + 
body.Key("Action").String("DescribeMacHosts") + body.Key("Version").String("2016-11-15") + + if err := awsEc2query_serializeOpDocumentDescribeMacHostsInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + type awsEc2query_serializeOpDescribeManagedPrefixLists struct { } @@ -57877,6 +57941,37 @@ func awsEc2query_serializeOpDocumentDescribeLockedSnapshotsInput(v *DescribeLock return nil } +func awsEc2query_serializeOpDocumentDescribeMacHostsInput(v *DescribeMacHostsInput, value query.Value) error { + object := value.Object() + _ = object + + if v.Filters != nil { + objectKey := object.FlatKey("Filter") + if err := awsEc2query_serializeDocumentFilterList(v.Filters, objectKey); err != nil { + return err + } + } + + if v.HostIds != nil { + objectKey := object.FlatKey("HostId") + if err := awsEc2query_serializeDocumentRequestHostIdList(v.HostIds, objectKey); err != nil { + return err + } + } + + if v.MaxResults != nil { + objectKey := object.Key("MaxResults") + objectKey.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + objectKey := object.Key("NextToken") + objectKey.String(*v.NextToken) + } + + return nil +} + func awsEc2query_serializeOpDocumentDescribeManagedPrefixListsInput(v *DescribeManagedPrefixListsInput, value query.Value) error { object := value.Object() _ = object diff --git a/service/ec2/snapshot_test.go b/service/ec2/snapshot_test.go index 164019d7d7c..70ccc8c9028 100644 --- a/service/ec2/snapshot_test.go +++ 
b/service/ec2/snapshot_test.go @@ -3566,6 +3566,18 @@ func TestCheckSnapshot_DescribeLockedSnapshots(t *testing.T) { } } +func TestCheckSnapshot_DescribeMacHosts(t *testing.T) { + svc := New(Options{}) + _, err := svc.DescribeMacHosts(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "DescribeMacHosts") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestCheckSnapshot_DescribeManagedPrefixLists(t *testing.T) { svc := New(Options{}) _, err := svc.DescribeManagedPrefixLists(context.Background(), nil, func(o *Options) { @@ -10945,6 +10957,18 @@ func TestUpdateSnapshot_DescribeLockedSnapshots(t *testing.T) { } } +func TestUpdateSnapshot_DescribeMacHosts(t *testing.T) { + svc := New(Options{}) + _, err := svc.DescribeMacHosts(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "DescribeMacHosts") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestUpdateSnapshot_DescribeManagedPrefixLists(t *testing.T) { svc := New(Options{}) _, err := svc.DescribeManagedPrefixLists(context.Background(), nil, func(o *Options) { diff --git a/service/ec2/types/types.go b/service/ec2/types/types.go index f2aba2a5c5d..0d40ec10706 100644 --- a/service/ec2/types/types.go +++ b/service/ec2/types/types.go @@ -10034,6 +10034,19 @@ type LockedSnapshotsInfo struct { noSmithyDocumentSerde } +// Information about the EC2 Mac Dedicated Host. +type MacHost struct { + + // The EC2 Mac Dedicated Host ID. + HostId *string + + // The latest macOS versions that the EC2 Mac Dedicated Host can launch without + // being upgraded. + MacOSLatestSupportedVersions []string + + noSmithyDocumentSerde +} + // Details for Site-to-Site VPN tunnel endpoint maintenance events. 
type MaintenanceDetails struct { diff --git a/service/finspace/api_op_CreateKxDataview.go b/service/finspace/api_op_CreateKxDataview.go index 04560720c53..f84dbb26835 100644 --- a/service/finspace/api_op_CreateKxDataview.go +++ b/service/finspace/api_op_CreateKxDataview.go @@ -33,10 +33,8 @@ func (c *Client) CreateKxDataview(ctx context.Context, params *CreateKxDataviewI type CreateKxDataviewInput struct { - // The number of availability zones you want to assign per cluster. This can be - // one of the following - // - SINGLE – Assigns one availability zone per cluster. - // - MULTI – Assigns all the availability zones per cluster. + // The number of availability zones you want to assign per volume. Currently, + // FinSpace only supports SINGLE for volumes. This places dataview in a single AZ. // // This member is required. AzMode types.KxAzMode @@ -76,6 +74,20 @@ type CreateKxDataviewInput struct { // A description of the dataview. Description *string + // The option to specify whether you want to make the dataview writable to perform + // database maintenance. The following are some considerations related to writable + // dataviews. + // - You cannot create partial writable dataviews. When you create writeable + // dataviews you must provide the entire database path. + // - You cannot perform updates on a writeable dataview. Hence, autoUpdate must + // be set as False if readWrite is True for a dataview. + // - You must also use a unique volume for creating a writeable dataview. So, if + // you choose a volume that is already in use by another dataview, the dataview + // creation fails. + // - Once you create a dataview as writeable, you cannot change it to read-only. + // So, you cannot update the readWrite parameter later. + ReadWrite bool + // The configuration that contains the database path of the data that you want to // place on each selected volume. Each segment must have a unique database path for // each volume. 
If you do not explicitly specify any database path for a volume, @@ -100,10 +112,8 @@ type CreateKxDataviewOutput struct { // The identifier of the availability zones. AvailabilityZoneId *string - // The number of availability zones you want to assign per cluster. This can be - // one of the following - // - SINGLE – Assigns one availability zone per cluster. - // - MULTI – Assigns all the availability zones per cluster. + // The number of availability zones you want to assign per volume. Currently, + // FinSpace only supports SINGLE for volumes. This places dataview in a single AZ. AzMode types.KxAzMode // A unique identifier for the changeset. @@ -132,6 +142,9 @@ type CreateKxDataviewOutput struct { // November 1, 2021 12:00:00 PM UTC is specified as 1635768000000. LastModifiedTimestamp *time.Time + // Returns True if the dataview is created as writeable and False otherwise. + ReadWrite bool + // The configuration that contains the database path of the data that you want to // place on each selected volume. Each segment must have a unique database path for // each volume. If you do not explicitly specify any database path for a volume, diff --git a/service/finspace/api_op_CreateKxScalingGroup.go b/service/finspace/api_op_CreateKxScalingGroup.go index 99267533921..20dc7d926c0 100644 --- a/service/finspace/api_op_CreateKxScalingGroup.go +++ b/service/finspace/api_op_CreateKxScalingGroup.go @@ -47,7 +47,19 @@ type CreateKxScalingGroupInput struct { EnvironmentId *string // The memory and CPU capabilities of the scaling group host on which FinSpace - // Managed kdb clusters will be placed. + // Managed kdb clusters will be placed. You can add one of the following values: + // - kx.sg.4xlarge – The host type with a configuration of 108 GiB memory and 16 + // vCPUs. + // - kx.sg.8xlarge – The host type with a configuration of 216 GiB memory and 32 + // vCPUs. + // - kx.sg.16xlarge – The host type with a configuration of 432 GiB memory and 64 + // vCPUs. 
+ // - kx.sg.32xlarge – The host type with a configuration of 864 GiB memory and + // 128 vCPUs. + // - kx.sg1.16xlarge – The host type with a configuration of 1949 GiB memory and + // 64 vCPUs. + // - kx.sg1.24xlarge – The host type with a configuration of 2948 GiB memory and + // 96 vCPUs. // // This member is required. HostType *string diff --git a/service/finspace/api_op_CreateKxVolume.go b/service/finspace/api_op_CreateKxVolume.go index 5f73ac4c95f..843486bd163 100644 --- a/service/finspace/api_op_CreateKxVolume.go +++ b/service/finspace/api_op_CreateKxVolume.go @@ -35,8 +35,8 @@ type CreateKxVolumeInput struct { // This member is required. AvailabilityZoneIds []string - // The number of availability zones you want to assign per cluster. Currently, - // FinSpace only support SINGLE for volumes. + // The number of availability zones you want to assign per volume. Currently, + // FinSpace only supports SINGLE for volumes. This places dataview in a single AZ. // // This member is required. AzMode types.KxAzMode @@ -81,8 +81,8 @@ type CreateKxVolumeOutput struct { // The identifier of the availability zones. AvailabilityZoneIds []string - // The number of availability zones you want to assign per cluster. Currently, - // FinSpace only support SINGLE for volumes. + // The number of availability zones you want to assign per volume. Currently, + // FinSpace only supports SINGLE for volumes. This places dataview in a single AZ. AzMode types.KxAzMode // The timestamp at which the volume was created in FinSpace. The value is diff --git a/service/finspace/api_op_GetKxDataview.go b/service/finspace/api_op_GetKxDataview.go index c9f6dbd73ab..f1553d98373 100644 --- a/service/finspace/api_op_GetKxDataview.go +++ b/service/finspace/api_op_GetKxDataview.go @@ -62,10 +62,8 @@ type GetKxDataviewOutput struct { // The identifier of the availability zones. AvailabilityZoneId *string - // The number of availability zones you want to assign per cluster. 
This can be - // one of the following - // - SINGLE – Assigns one availability zone per cluster. - // - MULTI – Assigns all the availability zones per cluster. + // The number of availability zones you want to assign per volume. Currently, + // FinSpace only supports SINGLE for volumes. This places dataview in a single AZ. AzMode types.KxAzMode // A unique identifier of the changeset that you want to use to ingest data. @@ -94,6 +92,9 @@ type GetKxDataviewOutput struct { // November 1, 2021 12:00:00 PM UTC is specified as 1635768000000. LastModifiedTimestamp *time.Time + // Returns True if the dataview is created as writeable and False otherwise. + ReadWrite bool + // The configuration that contains the database path of the data that you want to // place on each selected volume. Each segment must have a unique database path for // each volume. If you do not explicitly specify any database path for a volume, diff --git a/service/finspace/api_op_GetKxScalingGroup.go b/service/finspace/api_op_GetKxScalingGroup.go index a43af3207b2..ecfbd833ed9 100644 --- a/service/finspace/api_op_GetKxScalingGroup.go +++ b/service/finspace/api_op_GetKxScalingGroup.go @@ -58,7 +58,19 @@ type GetKxScalingGroupOutput struct { CreatedTimestamp *time.Time // The memory and CPU capabilities of the scaling group host on which FinSpace - // Managed kdb clusters will be placed. + // Managed kdb clusters will be placed. It can have one of the following values: + // - kx.sg.4xlarge – The host type with a configuration of 108 GiB memory and 16 + // vCPUs. + // - kx.sg.8xlarge – The host type with a configuration of 216 GiB memory and 32 + // vCPUs. + // - kx.sg.16xlarge – The host type with a configuration of 432 GiB memory and 64 + // vCPUs. + // - kx.sg.32xlarge – The host type with a configuration of 864 GiB memory and + // 128 vCPUs. + // - kx.sg1.16xlarge – The host type with a configuration of 1949 GiB memory and + // 64 vCPUs. 
+ // - kx.sg1.24xlarge – The host type with a configuration of 2948 GiB memory and + // 96 vCPUs. HostType *string // The last time that the scaling group was updated in FinSpace. The value is diff --git a/service/finspace/api_op_GetKxVolume.go b/service/finspace/api_op_GetKxVolume.go index e7c6e911f1e..547e1b252c2 100644 --- a/service/finspace/api_op_GetKxVolume.go +++ b/service/finspace/api_op_GetKxVolume.go @@ -52,8 +52,8 @@ type GetKxVolumeOutput struct { // The identifier of the availability zones. AvailabilityZoneIds []string - // The number of availability zones you want to assign per cluster. Currently, - // FinSpace only support SINGLE for volumes. + // The number of availability zones you want to assign per volume. Currently, + // FinSpace only supports SINGLE for volumes. This places dataview in a single AZ. AzMode types.KxAzMode // The timestamp at which the volume was created in FinSpace. The value is diff --git a/service/finspace/api_op_UpdateKxDataview.go b/service/finspace/api_op_UpdateKxDataview.go index 28a25fe8197..28ceca95833 100644 --- a/service/finspace/api_op_UpdateKxDataview.go +++ b/service/finspace/api_op_UpdateKxDataview.go @@ -82,10 +82,8 @@ type UpdateKxDataviewOutput struct { // The identifier of the availability zones. AvailabilityZoneId *string - // The number of availability zones you want to assign per cluster. This can be - // one of the following - // - SINGLE – Assigns one availability zone per cluster. - // - MULTI – Assigns all the availability zones per cluster. + // The number of availability zones you want to assign per volume. Currently, + // FinSpace only supports SINGLE for volumes. This places dataview in a single AZ. AzMode types.KxAzMode // A unique identifier for the changeset. @@ -114,6 +112,9 @@ type UpdateKxDataviewOutput struct { // November 1, 2021 12:00:00 PM UTC is specified as 1635768000000. LastModifiedTimestamp *time.Time + // Returns True if the dataview is created as writeable and False otherwise. 
+ ReadWrite bool + // The configuration that contains the database path of the data that you want to // place on each selected volume. Each segment must have a unique database path for // each volume. If you do not explicitly specify any database path for a volume, diff --git a/service/finspace/api_op_UpdateKxVolume.go b/service/finspace/api_op_UpdateKxVolume.go index be3edeaca29..bca5bc8a898 100644 --- a/service/finspace/api_op_UpdateKxVolume.go +++ b/service/finspace/api_op_UpdateKxVolume.go @@ -64,8 +64,8 @@ type UpdateKxVolumeOutput struct { // The identifier of the availability zones. AvailabilityZoneIds []string - // The number of availability zones you want to assign per cluster. Currently, - // FinSpace only support SINGLE for volumes. + // The number of availability zones you want to assign per volume. Currently, + // FinSpace only supports SINGLE for volumes. This places dataview in a single AZ. AzMode types.KxAzMode // The timestamp at which the volume was created in FinSpace. 
The value is diff --git a/service/finspace/deserializers.go b/service/finspace/deserializers.go index 94172dd8fef..a73ec472c52 100644 --- a/service/finspace/deserializers.go +++ b/service/finspace/deserializers.go @@ -1277,6 +1277,15 @@ func awsRestjson1_deserializeOpDocumentCreateKxDataviewOutput(v **CreateKxDatavi } } + case "readWrite": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected booleanValue to be of type *bool, got %T instead", value) + } + sv.ReadWrite = jtv + } + case "segmentConfigurations": if err := awsRestjson1_deserializeDocumentKxDataviewSegmentConfigurationList(&sv.SegmentConfigurations, value); err != nil { return err @@ -4445,6 +4454,15 @@ func awsRestjson1_deserializeOpDocumentGetKxDataviewOutput(v **GetKxDataviewOutp } } + case "readWrite": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected booleanValue to be of type *bool, got %T instead", value) + } + sv.ReadWrite = jtv + } + case "segmentConfigurations": if err := awsRestjson1_deserializeDocumentKxDataviewSegmentConfigurationList(&sv.SegmentConfigurations, value); err != nil { return err @@ -8347,6 +8365,15 @@ func awsRestjson1_deserializeOpDocumentUpdateKxDataviewOutput(v **UpdateKxDatavi } } + case "readWrite": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected booleanValue to be of type *bool, got %T instead", value) + } + sv.ReadWrite = jtv + } + case "segmentConfigurations": if err := awsRestjson1_deserializeDocumentKxDataviewSegmentConfigurationList(&sv.SegmentConfigurations, value); err != nil { return err @@ -12154,6 +12181,15 @@ func awsRestjson1_deserializeDocumentKxDataviewListEntry(v **types.KxDataviewLis } } + case "readWrite": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected booleanValue to be of type *bool, got %T instead", value) + } + sv.ReadWrite = jtv + } + case "segmentConfigurations": if err := 
awsRestjson1_deserializeDocumentKxDataviewSegmentConfigurationList(&sv.SegmentConfigurations, value); err != nil { return err @@ -12247,6 +12283,15 @@ func awsRestjson1_deserializeDocumentKxDataviewSegmentConfiguration(v **types.Kx return err } + case "onDemand": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected booleanValue to be of type *bool, got %T instead", value) + } + sv.OnDemand = jtv + } + case "volumeName": if value != nil { jtv, ok := value.(string) diff --git a/service/finspace/serializers.go b/service/finspace/serializers.go index 83607d2e918..2dd4b18d63d 100644 --- a/service/finspace/serializers.go +++ b/service/finspace/serializers.go @@ -671,6 +671,11 @@ func awsRestjson1_serializeOpDocumentCreateKxDataviewInput(v *CreateKxDataviewIn ok.String(*v.Description) } + if v.ReadWrite { + ok := object.Key("readWrite") + ok.Boolean(v.ReadWrite) + } + if v.SegmentConfigurations != nil { ok := object.Key("segmentConfigurations") if err := awsRestjson1_serializeDocumentKxDataviewSegmentConfigurationList(v.SegmentConfigurations, ok); err != nil { @@ -4905,6 +4910,11 @@ func awsRestjson1_serializeDocumentKxDataviewSegmentConfiguration(v *types.KxDat } } + if v.OnDemand { + ok := object.Key("onDemand") + ok.Boolean(v.OnDemand) + } + if v.VolumeName != nil { ok := object.Key("volumeName") ok.String(*v.VolumeName) diff --git a/service/finspace/types/types.go b/service/finspace/types/types.go index 88ca17a4f71..dff00582294 100644 --- a/service/finspace/types/types.go +++ b/service/finspace/types/types.go @@ -553,10 +553,8 @@ type KxDataviewListEntry struct { // The identifier of the availability zones. AvailabilityZoneId *string - // The number of availability zones you want to assign per cluster. This can be - // one of the following - // - SINGLE – Assigns one availability zone per cluster. - // - MULTI – Assigns all the availability zones per cluster. + // The number of availability zones you want to assign per volume. 
Currently, + // FinSpace only supports SINGLE for volumes. This places dataview in a single AZ. AzMode KxAzMode // A unique identifier for the changeset. @@ -584,6 +582,9 @@ type KxDataviewListEntry struct { // November 1, 2021 12:00:00 PM UTC is specified as 1635768000000. LastModifiedTimestamp *time.Time + // Returns True if the dataview is created as writeable and False otherwise. + ReadWrite bool + // The configuration that contains the database path of the data that you want to // place on each selected volume. Each segment must have a unique database path for // each volume. If you do not explicitly specify any database path for a volume, @@ -618,6 +619,12 @@ type KxDataviewSegmentConfiguration struct { // This member is required. VolumeName *string + // Enables on-demand caching on the selected database path when a particular file + // or a column of the database is accessed. When on demand caching is True, + // dataviews perform minimal loading of files on the filesystem as needed. When it + // is set to False, everything is cached. The default value is False. + OnDemand bool + noSmithyDocumentSerde } @@ -782,7 +789,19 @@ type KxScalingGroup struct { CreatedTimestamp *time.Time // The memory and CPU capabilities of the scaling group host on which FinSpace - // Managed kdb clusters will be placed. + // Managed kdb clusters will be placed. You can add one of the following values: + // - kx.sg.4xlarge – The host type with a configuration of 108 GiB memory and 16 + // vCPUs. + // - kx.sg.8xlarge – The host type with a configuration of 216 GiB memory and 32 + // vCPUs. + // - kx.sg.16xlarge – The host type with a configuration of 432 GiB memory and 64 + // vCPUs. + // - kx.sg.32xlarge – The host type with a configuration of 864 GiB memory and + // 128 vCPUs. + // - kx.sg1.16xlarge – The host type with a configuration of 1949 GiB memory and + // 64 vCPUs. + // - kx.sg1.24xlarge – The host type with a configuration of 2948 GiB memory and + // 96 vCPUs. 
HostType *string // The last time that the scaling group was updated in FinSpace. The value is @@ -860,8 +879,8 @@ type KxVolume struct { // The identifier of the availability zones. AvailabilityZoneIds []string - // The number of availability zones assigned to the volume. Currently, only SINGLE - // is supported. + // The number of availability zones you want to assign per volume. Currently, + // FinSpace only supports SINGLE for volumes. This places dataview in a single AZ. AzMode KxAzMode // The timestamp at which the volume was created in FinSpace. The value is diff --git a/service/managedblockchainquery/api_op_GetTokenBalance.go b/service/managedblockchainquery/api_op_GetTokenBalance.go index b37478044f9..dae5ef05ca5 100644 --- a/service/managedblockchainquery/api_op_GetTokenBalance.go +++ b/service/managedblockchainquery/api_op_GetTokenBalance.go @@ -65,7 +65,7 @@ type GetTokenBalanceOutput struct { // The container for time. LastUpdatedTime *types.BlockchainInstant - // The container for the identifier of the owner. + // The container for the owner identifier. OwnerIdentifier *types.OwnerIdentifier // The container for the identifier for the token including the unique token ID diff --git a/service/managedblockchainquery/api_op_GetTransaction.go b/service/managedblockchainquery/api_op_GetTransaction.go index 320768436ea..08695252bea 100644 --- a/service/managedblockchainquery/api_op_GetTransaction.go +++ b/service/managedblockchainquery/api_op_GetTransaction.go @@ -37,8 +37,7 @@ type GetTransactionInput struct { // This member is required. Network types.QueryNetwork - // The hash of the transaction. It is generated whenever a transaction is verified - // and added to the blockchain. + // The hash of a transaction. It is generated when a transaction is created. // // This member is required. 
TransactionHash *string diff --git a/service/managedblockchainquery/api_op_ListFilteredTransactionEvents.go b/service/managedblockchainquery/api_op_ListFilteredTransactionEvents.go new file mode 100644 index 00000000000..b86bc99a732 --- /dev/null +++ b/service/managedblockchainquery/api_op_ListFilteredTransactionEvents.go @@ -0,0 +1,271 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package managedblockchainquery + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/managedblockchainquery/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists all the transaction events for an address on the blockchain. This +// operation is only supported on the Bitcoin networks. +func (c *Client) ListFilteredTransactionEvents(ctx context.Context, params *ListFilteredTransactionEventsInput, optFns ...func(*Options)) (*ListFilteredTransactionEventsOutput, error) { + if params == nil { + params = &ListFilteredTransactionEventsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListFilteredTransactionEvents", params, optFns, c.addOperationListFilteredTransactionEventsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListFilteredTransactionEventsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListFilteredTransactionEventsInput struct { + + // This is the unique public address on the blockchain for which the transaction + // events are being requested. + // + // This member is required. + AddressIdentifierFilter *types.AddressIdentifierFilter + + // The blockchain network where the transaction occurred. Valid Values: + // BITCOIN_MAINNET | BITCOIN_TESTNET + // + // This member is required. 
+ Network *string + + // The container for the ConfirmationStatusFilter that filters for the finality (https://docs.aws.amazon.com/managed-blockchain/latest/ambq-dg/key-concepts.html#finality) + // of the results. + ConfirmationStatusFilter *types.ConfirmationStatusFilter + + // The maximum number of transaction events to list. Default: 100 Even if + // additional results can be retrieved, the request can return less results than + // maxResults or an empty array of results. To retrieve the next set of results, + // make another request with the returned nextToken value. The value of nextToken + // is null when there are no more results to return + MaxResults *int32 + + // The pagination token that indicates the next set of results to retrieve. + NextToken *string + + // The order by which the results will be sorted. + Sort *types.ListFilteredTransactionEventsSort + + // This container specifies the time frame for the transaction events returned in + // the response. + TimeFilter *types.TimeFilter + + // This container specifies filtering attributes related to BITCOIN_VOUT event + // types + VoutFilter *types.VoutFilter + + noSmithyDocumentSerde +} + +type ListFilteredTransactionEventsOutput struct { + + // The transaction events returned by the request. + // + // This member is required. + Events []types.TransactionEvent + + // The pagination token that indicates the next set of results to retrieve. + NextToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListFilteredTransactionEventsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpListFilteredTransactionEvents{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListFilteredTransactionEvents{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListFilteredTransactionEvents"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addOpListFilteredTransactionEventsValidationMiddleware(stack); err != nil { + return err + } + if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opListFilteredTransactionEvents(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +// ListFilteredTransactionEventsAPIClient is a client that implements the +// ListFilteredTransactionEvents operation. +type ListFilteredTransactionEventsAPIClient interface { + ListFilteredTransactionEvents(context.Context, *ListFilteredTransactionEventsInput, ...func(*Options)) (*ListFilteredTransactionEventsOutput, error) +} + +var _ ListFilteredTransactionEventsAPIClient = (*Client)(nil) + +// ListFilteredTransactionEventsPaginatorOptions is the paginator options for +// ListFilteredTransactionEvents +type ListFilteredTransactionEventsPaginatorOptions struct { + // The maximum number of transaction events to list. Default: 100 Even if + // additional results can be retrieved, the request can return less results than + // maxResults or an empty array of results. To retrieve the next set of results, + // make another request with the returned nextToken value. The value of nextToken + // is null when there are no more results to return + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListFilteredTransactionEventsPaginator is a paginator for +// ListFilteredTransactionEvents +type ListFilteredTransactionEventsPaginator struct { + options ListFilteredTransactionEventsPaginatorOptions + client ListFilteredTransactionEventsAPIClient + params *ListFilteredTransactionEventsInput + nextToken *string + firstPage bool +} + +// NewListFilteredTransactionEventsPaginator returns a new +// ListFilteredTransactionEventsPaginator +func NewListFilteredTransactionEventsPaginator(client ListFilteredTransactionEventsAPIClient, params *ListFilteredTransactionEventsInput, optFns ...func(*ListFilteredTransactionEventsPaginatorOptions)) *ListFilteredTransactionEventsPaginator { + if params == nil { + params = &ListFilteredTransactionEventsInput{} + } + + options := ListFilteredTransactionEventsPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListFilteredTransactionEventsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListFilteredTransactionEventsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListFilteredTransactionEvents page. +func (p *ListFilteredTransactionEventsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListFilteredTransactionEventsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + result, err := p.client.ListFilteredTransactionEvents(ctx, ¶ms, optFns...) 
+ if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListFilteredTransactionEvents(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListFilteredTransactionEvents", + } +} diff --git a/service/managedblockchainquery/api_op_ListTransactionEvents.go b/service/managedblockchainquery/api_op_ListTransactionEvents.go index 3cb0a298970..6ff5cad4eac 100644 --- a/service/managedblockchainquery/api_op_ListTransactionEvents.go +++ b/service/managedblockchainquery/api_op_ListTransactionEvents.go @@ -11,10 +11,9 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// An array of TransactionEvent objects. Each object contains details about the -// transaction event. This action will return transaction details for all -// transactions that are confirmed on the blockchain, even if they have not reached -// finality (https://docs.aws.amazon.com/managed-blockchain/latest/ambq-dg/key-concepts.html#finality) +// Lists all the transaction events for a transaction This action will return +// transaction details for all transactions that are confirmed on the blockchain, +// even if they have not reached finality (https://docs.aws.amazon.com/managed-blockchain/latest/ambq-dg/key-concepts.html#finality) // . func (c *Client) ListTransactionEvents(ctx context.Context, params *ListTransactionEventsInput, optFns ...func(*Options)) (*ListTransactionEventsOutput, error) { if params == nil { @@ -38,12 +37,6 @@ type ListTransactionEventsInput struct { // This member is required. Network types.QueryNetwork - // The hash of the transaction. 
It is generated whenever a transaction is verified
-// and added to the blockchain.
-	//
-	// This member is required.
-	TransactionHash *string
-
 	// The maximum number of transaction events to list. Default: 100 Even if
 	// additional results can be retrieved, the request can return less results than
 	// maxResults or an empty array of results. To retrieve the next set of results,
@@ -54,6 +47,13 @@ type ListTransactionEventsInput struct {
 	// The pagination token that indicates the next set of results to retrieve.
 	NextToken *string
 
+	// The hash of a transaction. It is generated when a transaction is created.
+	TransactionHash *string
+
+	// The identifier of a Bitcoin transaction. It is generated when a transaction is
+	// created. transactionId is only supported on the Bitcoin networks.
+	TransactionId *string
+
 	noSmithyDocumentSerde
}

diff --git a/service/managedblockchainquery/api_op_ListTransactions.go b/service/managedblockchainquery/api_op_ListTransactions.go
index 6a9c617a091..3093a0aa983 100644
--- a/service/managedblockchainquery/api_op_ListTransactions.go
+++ b/service/managedblockchainquery/api_op_ListTransactions.go
@@ -11,8 +11,7 @@ import (
 	smithyhttp "github.com/aws/smithy-go/transport/http"
 )
 
-// Lists all of the transactions on a given wallet address or to a specific
-// contract.
+// Lists all the transactions on a given wallet address or to a specific contract.
 func (c *Client) ListTransactions(ctx context.Context, params *ListTransactionsInput, optFns ...func(*Options)) (*ListTransactionsOutput, error) {
 	if params == nil {
 		params = &ListTransactionsInput{}
@@ -43,7 +42,7 @@ type ListTransactionsInput struct {
 
 	// This filter is used to include transactions in the response that haven't
 	// reached finality (https://docs.aws.amazon.com/managed-blockchain/latest/ambq-dg/key-concepts.html#finality)
-	// . Transactions that have reached finiality are always part of the response.
+	// . Transactions that have reached finality are always part of the response.
ConfirmationStatusFilter *types.ConfirmationStatusFilter // The container for time. @@ -59,8 +58,7 @@ type ListTransactionsInput struct { // The pagination token that indicates the next set of results to retrieve. NextToken *string - // The order by which the results will be sorted. If ASCENNDING is selected, the - // results will be ordered by fromTime . + // The order by which the results will be sorted. Sort *types.ListTransactionsSort // The container for time. diff --git a/service/managedblockchainquery/deserializers.go b/service/managedblockchainquery/deserializers.go index b5fcb74251b..bc8f82f7657 100644 --- a/service/managedblockchainquery/deserializers.go +++ b/service/managedblockchainquery/deserializers.go @@ -888,6 +888,174 @@ func awsRestjson1_deserializeOpDocumentListAssetContractsOutput(v **ListAssetCon return nil } +type awsRestjson1_deserializeOpListFilteredTransactionEvents struct { +} + +func (*awsRestjson1_deserializeOpListFilteredTransactionEvents) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListFilteredTransactionEvents) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorListFilteredTransactionEvents(response, &metadata) + } + output := &ListFilteredTransactionEventsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + 
decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListFilteredTransactionEventsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorListFilteredTransactionEvents(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case 
strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ServiceQuotaExceededException", errorCode): + return awsRestjson1_deserializeErrorServiceQuotaExceededException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentListFilteredTransactionEventsOutput(v **ListFilteredTransactionEventsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListFilteredTransactionEventsOutput + if *v == nil { + sv = &ListFilteredTransactionEventsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "events": + if err := awsRestjson1_deserializeDocumentTransactionEventList(&sv.Events, value); err != nil { + return err + } + + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + type awsRestjson1_deserializeOpListTokenBalances struct { } @@ -2837,6 +3005,20 @@ func awsRestjson1_deserializeDocumentTransactionEvent(v **types.TransactionEvent 
for key, value := range shape { switch key { + case "blockchainInstant": + if err := awsRestjson1_deserializeDocumentBlockchainInstant(&sv.BlockchainInstant, value); err != nil { + return err + } + + case "confirmationStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ConfirmationStatus to be of type string, got %T instead", value) + } + sv.ConfirmationStatus = types.ConfirmationStatus(jtv) + } + case "contractAddress": if value != nil { jtv, ok := value.(string) @@ -2873,6 +3055,37 @@ func awsRestjson1_deserializeDocumentTransactionEvent(v **types.TransactionEvent sv.Network = types.QueryNetwork(jtv) } + case "spentVoutIndex": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.SpentVoutIndex = ptr.Int32(int32(i64)) + } + + case "spentVoutTransactionHash": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.SpentVoutTransactionHash = ptr.String(jtv) + } + + case "spentVoutTransactionId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.SpentVoutTransactionId = ptr.String(jtv) + } + case "to": if value != nil { jtv, ok := value.(string) @@ -2931,6 +3144,15 @@ func awsRestjson1_deserializeDocumentTransactionEvent(v **types.TransactionEvent sv.VoutIndex = ptr.Int32(int32(i64)) } + case "voutSpent": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.VoutSpent = ptr.Bool(jtv) + } + default: _, _ = key, value diff --git a/service/managedblockchainquery/generated.json b/service/managedblockchainquery/generated.json index ff19d7a9246..65f9857181a 100644 --- 
a/service/managedblockchainquery/generated.json +++ b/service/managedblockchainquery/generated.json @@ -13,6 +13,7 @@ "api_op_GetTokenBalance.go", "api_op_GetTransaction.go", "api_op_ListAssetContracts.go", + "api_op_ListFilteredTransactionEvents.go", "api_op_ListTokenBalances.go", "api_op_ListTransactionEvents.go", "api_op_ListTransactions.go", diff --git a/service/managedblockchainquery/serializers.go b/service/managedblockchainquery/serializers.go index 1ba9d4bc850..607655c050e 100644 --- a/service/managedblockchainquery/serializers.go +++ b/service/managedblockchainquery/serializers.go @@ -427,6 +427,126 @@ func awsRestjson1_serializeOpDocumentListAssetContractsInput(v *ListAssetContrac return nil } +type awsRestjson1_serializeOpListFilteredTransactionEvents struct { +} + +func (*awsRestjson1_serializeOpListFilteredTransactionEvents) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListFilteredTransactionEvents) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListFilteredTransactionEventsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/list-filtered-transaction-events") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = 
smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentListFilteredTransactionEventsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsListFilteredTransactionEventsInput(v *ListFilteredTransactionEventsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentListFilteredTransactionEventsInput(v *ListFilteredTransactionEventsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AddressIdentifierFilter != nil { + ok := object.Key("addressIdentifierFilter") + if err := awsRestjson1_serializeDocumentAddressIdentifierFilter(v.AddressIdentifierFilter, ok); err != nil { + return err + } + } + + if v.ConfirmationStatusFilter != nil { + ok := object.Key("confirmationStatusFilter") + if err := awsRestjson1_serializeDocumentConfirmationStatusFilter(v.ConfirmationStatusFilter, ok); err != nil { + return err + } + } + + if v.MaxResults != nil { + ok := object.Key("maxResults") + ok.Integer(*v.MaxResults) + } + + if v.Network != nil { + ok := object.Key("network") + 
ok.String(*v.Network) + } + + if v.NextToken != nil { + ok := object.Key("nextToken") + ok.String(*v.NextToken) + } + + if v.Sort != nil { + ok := object.Key("sort") + if err := awsRestjson1_serializeDocumentListFilteredTransactionEventsSort(v.Sort, ok); err != nil { + return err + } + } + + if v.TimeFilter != nil { + ok := object.Key("timeFilter") + if err := awsRestjson1_serializeDocumentTimeFilter(v.TimeFilter, ok); err != nil { + return err + } + } + + if v.VoutFilter != nil { + ok := object.Key("voutFilter") + if err := awsRestjson1_serializeDocumentVoutFilter(v.VoutFilter, ok); err != nil { + return err + } + } + + return nil +} + type awsRestjson1_serializeOpListTokenBalances struct { } @@ -608,6 +728,11 @@ func awsRestjson1_serializeOpDocumentListTransactionEventsInput(v *ListTransacti ok.String(*v.TransactionHash) } + if v.TransactionId != nil { + ok := object.Key("transactionId") + ok.String(*v.TransactionId) + } + return nil } @@ -729,6 +854,20 @@ func awsRestjson1_serializeOpDocumentListTransactionsInput(v *ListTransactionsIn return nil } +func awsRestjson1_serializeDocumentAddressIdentifierFilter(v *types.AddressIdentifierFilter, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.TransactionEventToAddress != nil { + ok := object.Key("transactionEventToAddress") + if err := awsRestjson1_serializeDocumentChainAddresses(v.TransactionEventToAddress, ok); err != nil { + return err + } + } + + return nil +} + func awsRestjson1_serializeDocumentBatchGetTokenBalanceInputItem(v *types.BatchGetTokenBalanceInputItem, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -769,6 +908,17 @@ func awsRestjson1_serializeDocumentBlockchainInstant(v *types.BlockchainInstant, return nil } +func awsRestjson1_serializeDocumentChainAddresses(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil 
+} + func awsRestjson1_serializeDocumentConfirmationStatusFilter(v *types.ConfirmationStatusFilter, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -846,6 +996,23 @@ func awsRestjson1_serializeDocumentGetTokenBalanceInputList(v []types.BatchGetTo return nil } +func awsRestjson1_serializeDocumentListFilteredTransactionEventsSort(v *types.ListFilteredTransactionEventsSort, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.SortBy) > 0 { + ok := object.Key("sortBy") + ok.String(string(v.SortBy)) + } + + if len(v.SortOrder) > 0 { + ok := object.Key("sortOrder") + ok.String(string(v.SortOrder)) + } + + return nil +} + func awsRestjson1_serializeDocumentListTransactionsSort(v *types.ListTransactionsSort, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -887,6 +1054,27 @@ func awsRestjson1_serializeDocumentOwnerIdentifier(v *types.OwnerIdentifier, val return nil } +func awsRestjson1_serializeDocumentTimeFilter(v *types.TimeFilter, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.From != nil { + ok := object.Key("from") + if err := awsRestjson1_serializeDocumentBlockchainInstant(v.From, ok); err != nil { + return err + } + } + + if v.To != nil { + ok := object.Key("to") + if err := awsRestjson1_serializeDocumentBlockchainInstant(v.To, ok); err != nil { + return err + } + } + + return nil +} + func awsRestjson1_serializeDocumentTokenFilter(v *types.TokenFilter, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -930,3 +1118,15 @@ func awsRestjson1_serializeDocumentTokenIdentifier(v *types.TokenIdentifier, val return nil } + +func awsRestjson1_serializeDocumentVoutFilter(v *types.VoutFilter, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.VoutSpent != nil { + ok := object.Key("voutSpent") + ok.Boolean(*v.VoutSpent) + } + + return nil +} diff --git 
a/service/managedblockchainquery/snapshot_test.go b/service/managedblockchainquery/snapshot_test.go index 67f9a7ea1fc..10ab05f624a 100644 --- a/service/managedblockchainquery/snapshot_test.go +++ b/service/managedblockchainquery/snapshot_test.go @@ -122,6 +122,18 @@ func TestCheckSnapshot_ListAssetContracts(t *testing.T) { } } +func TestCheckSnapshot_ListFilteredTransactionEvents(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListFilteredTransactionEvents(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "ListFilteredTransactionEvents") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestCheckSnapshot_ListTokenBalances(t *testing.T) { svc := New(Options{}) _, err := svc.ListTokenBalances(context.Background(), nil, func(o *Options) { @@ -217,6 +229,18 @@ func TestUpdateSnapshot_ListAssetContracts(t *testing.T) { } } +func TestUpdateSnapshot_ListFilteredTransactionEvents(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListFilteredTransactionEvents(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "ListFilteredTransactionEvents") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestUpdateSnapshot_ListTokenBalances(t *testing.T) { svc := New(Options{}) _, err := svc.ListTokenBalances(context.Background(), nil, func(o *Options) { diff --git a/service/managedblockchainquery/types/enums.go b/service/managedblockchainquery/types/enums.go index 6cfa4f3e915..d1f80fe3bdd 100644 --- a/service/managedblockchainquery/types/enums.go +++ b/service/managedblockchainquery/types/enums.go @@ -58,6 +58,24 @@ func (ExecutionStatus) Values() []ExecutionStatus { } } +type ListFilteredTransactionEventsSortBy string + +// Enum values for ListFilteredTransactionEventsSortBy +const ( + // 
Timestamp of a transaction + ListFilteredTransactionEventsSortByBlockchainInstant ListFilteredTransactionEventsSortBy = "blockchainInstant" +) + +// Values returns all known values for ListFilteredTransactionEventsSortBy. Note +// that this can be expanded in the future, and so it is only as up to date as the +// client. The ordering of this slice is not guaranteed to be stable across +// updates. +func (ListFilteredTransactionEventsSortBy) Values() []ListFilteredTransactionEventsSortBy { + return []ListFilteredTransactionEventsSortBy{ + "blockchainInstant", + } +} + type ListTransactionsSortBy string // Enum values for ListTransactionsSortBy diff --git a/service/managedblockchainquery/types/types.go b/service/managedblockchainquery/types/types.go index af3572ec00f..ffcebcf91e0 100644 --- a/service/managedblockchainquery/types/types.go +++ b/service/managedblockchainquery/types/types.go @@ -7,6 +7,17 @@ import ( "time" ) +// This is the container for the unique public address on the blockchain. +type AddressIdentifierFilter struct { + + // The container for the recipient address of the transaction. + // + // This member is required. + TransactionEventToAddress []string + + noSmithyDocumentSerde +} + // This container contains information about an contract. type AssetContract struct { @@ -50,7 +61,7 @@ type BatchGetTokenBalanceErrorItem struct { // The container for time. AtBlockchainInstant *BlockchainInstant - // The container for the identifier of the owner. + // The container for the owner identifier. OwnerIdentifier *OwnerIdentifier // The container for the identifier for the token including the unique token ID @@ -64,7 +75,7 @@ type BatchGetTokenBalanceErrorItem struct { // The container for the input for getting a token balance. type BatchGetTokenBalanceInputItem struct { - // The container for the identifier of the owner. + // The container for the owner identifier. // // This member is required. 
OwnerIdentifier *OwnerIdentifier @@ -98,7 +109,7 @@ type BatchGetTokenBalanceOutputItem struct { // The container for time. LastUpdatedTime *BlockchainInstant - // The container for the identifier of the owner. + // The container for the owner identifier. OwnerIdentifier *OwnerIdentifier // The container for the identifier for the token including the unique token ID @@ -185,6 +196,21 @@ type ContractMetadata struct { noSmithyDocumentSerde } +// Lists all the transaction events for an address on the blockchain. This +// operation is only supported on the Bitcoin blockchain networks. +type ListFilteredTransactionEventsSort struct { + + // Container on how the results will be sorted by? + SortBy ListFilteredTransactionEventsSortBy + + // The container for the sort order for ListFilteredTransactionEvents . The + // SortOrder field only accepts the values ASCENDING and DESCENDING . Not providing + // SortOrder will default to ASCENDING . + SortOrder SortOrder + + noSmithyDocumentSerde +} + // The container for determining how the list transaction result will be sorted. type ListTransactionsSort struct { @@ -210,7 +236,7 @@ type OwnerFilter struct { noSmithyDocumentSerde } -// The container for the identifier of the owner. +// The container for the owner identifier. type OwnerIdentifier struct { // The contract or wallet address for the owner. @@ -221,6 +247,18 @@ type OwnerIdentifier struct { noSmithyDocumentSerde } +// This container is used to specify a time frame. +type TimeFilter struct { + + // The container for time. + From *BlockchainInstant + + // The container for time. + To *BlockchainInstant + + noSmithyDocumentSerde +} + // The balance of the token. type TokenBalance struct { @@ -316,8 +354,7 @@ type Transaction struct { // This member is required. To *string - // The hash of the transaction. It is generated whenever a transaction is verified - // and added to the blockchain. + // The hash of a transaction. It is generated when a transaction is created. 
// // This member is required. TransactionHash *string @@ -374,8 +411,8 @@ type Transaction struct { // The transaction fee. TransactionFee *string - // The unique identifier of the transaction. It is generated whenever a - // transaction is verified and added to the blockchain. + // The identifier of a Bitcoin transaction. It is generated when a transaction is + // created. TransactionId *string noSmithyDocumentSerde @@ -394,19 +431,36 @@ type TransactionEvent struct { // This member is required. Network QueryNetwork - // The hash of the transaction. It is generated whenever a transaction is verified - // and added to the blockchain. + // The hash of a transaction. It is generated when a transaction is created. // // This member is required. TransactionHash *string - // The blockchain address. for the contract + // The container for time. + BlockchainInstant *BlockchainInstant + + // This container specifies whether the transaction has reached Finality. + ConfirmationStatus ConfirmationStatus + + // The blockchain address for the contract ContractAddress *string // The wallet address initiating the transaction. It can either be a public key or // a contract. From *string + // The position of the spent transaction output in the output list of the creating + // transaction. This is only returned for BITCOIN_VIN event types. + SpentVoutIndex *int32 + + // The transactionHash that created the spent transaction output. This is only + // returned for BITCOIN_VIN event types. + SpentVoutTransactionHash *string + + // The transactionId that created the spent transaction output. This is only + // returned for BITCOIN_VIN event types. + SpentVoutTransactionId *string + // The wallet address receiving the transaction. It can either be a public key or // a contract. To *string @@ -414,16 +468,21 @@ type TransactionEvent struct { // The unique identifier for the token involved in the transaction. TokenId *string - // The unique identifier of the transaction. 
It is generated whenever a - // transaction is verified and added to the blockchain. + // The identifier of a Bitcoin transaction. It is generated when a transaction is + // created. TransactionId *string // The value that was transacted. Value *string - // The position of the vout in the transaction output list. + // The position of the transaction output in the transaction output list. VoutIndex *int32 + // Specifies if the transaction output is spent or unspent. This is only returned + // for BITCOIN_VOUT event types. This is only returned for BITCOIN_VOUT event + // types. + VoutSpent *bool + noSmithyDocumentSerde } @@ -435,8 +494,7 @@ type TransactionOutputItem struct { // This member is required. Network QueryNetwork - // The hash of the transaction. It is generated whenever a transaction is verified - // and added to the blockchain. + // The hash of a transaction. It is generated when a transaction is created. // // This member is required. TransactionHash *string @@ -468,4 +526,16 @@ type ValidationExceptionField struct { noSmithyDocumentSerde } +// This container specifies filtering attributes related to BITCOIN_VOUT event +// types +type VoutFilter struct { + + // Specifies if the transaction output is spent or unspent. + // + // This member is required. 
+ VoutSpent *bool + + noSmithyDocumentSerde +} + type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/service/managedblockchainquery/validators.go b/service/managedblockchainquery/validators.go index 89eacddda63..adc1dd6d17a 100644 --- a/service/managedblockchainquery/validators.go +++ b/service/managedblockchainquery/validators.go @@ -110,6 +110,26 @@ func (m *validateOpListAssetContracts) HandleInitialize(ctx context.Context, in return next.HandleInitialize(ctx, in) } +type validateOpListFilteredTransactionEvents struct { +} + +func (*validateOpListFilteredTransactionEvents) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListFilteredTransactionEvents) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListFilteredTransactionEventsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListFilteredTransactionEventsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpListTokenBalances struct { } @@ -190,6 +210,10 @@ func addOpListAssetContractsValidationMiddleware(stack *middleware.Stack) error return stack.Initialize.Add(&validateOpListAssetContracts{}, middleware.After) } +func addOpListFilteredTransactionEventsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListFilteredTransactionEvents{}, middleware.After) +} + func addOpListTokenBalancesValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpListTokenBalances{}, middleware.After) } @@ -202,6 +226,21 @@ func addOpListTransactionsValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpListTransactions{}, middleware.After) } +func 
validateAddressIdentifierFilter(v *types.AddressIdentifierFilter) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AddressIdentifierFilter"} + if v.TransactionEventToAddress == nil { + invalidParams.Add(smithy.NewErrParamRequired("TransactionEventToAddress")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateBatchGetTokenBalanceInputItem(v *types.BatchGetTokenBalanceInputItem) error { if v == nil { return nil @@ -359,6 +398,21 @@ func validateTokenIdentifier(v *types.TokenIdentifier) error { } } +func validateVoutFilter(v *types.VoutFilter) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "VoutFilter"} + if v.VoutSpent == nil { + invalidParams.Add(smithy.NewErrParamRequired("VoutSpent")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpBatchGetTokenBalanceInput(v *BatchGetTokenBalanceInput) error { if v == nil { return nil @@ -458,6 +512,38 @@ func validateOpListAssetContractsInput(v *ListAssetContractsInput) error { } } +func validateOpListFilteredTransactionEventsInput(v *ListFilteredTransactionEventsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListFilteredTransactionEventsInput"} + if v.Network == nil { + invalidParams.Add(smithy.NewErrParamRequired("Network")) + } + if v.AddressIdentifierFilter == nil { + invalidParams.Add(smithy.NewErrParamRequired("AddressIdentifierFilter")) + } else if v.AddressIdentifierFilter != nil { + if err := validateAddressIdentifierFilter(v.AddressIdentifierFilter); err != nil { + invalidParams.AddNested("AddressIdentifierFilter", err.(smithy.InvalidParamsError)) + } + } + if v.VoutFilter != nil { + if err := validateVoutFilter(v.VoutFilter); err != nil { + invalidParams.AddNested("VoutFilter", err.(smithy.InvalidParamsError)) + } + } + if v.ConfirmationStatusFilter != nil { + 
if err := validateConfirmationStatusFilter(v.ConfirmationStatusFilter); err != nil { + invalidParams.AddNested("ConfirmationStatusFilter", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpListTokenBalancesInput(v *ListTokenBalancesInput) error { if v == nil { return nil @@ -487,9 +573,6 @@ func validateOpListTransactionEventsInput(v *ListTransactionEventsInput) error { return nil } invalidParams := smithy.InvalidParamsError{Context: "ListTransactionEventsInput"} - if v.TransactionHash == nil { - invalidParams.Add(smithy.NewErrParamRequired("TransactionHash")) - } if len(v.Network) == 0 { invalidParams.Add(smithy.NewErrParamRequired("Network")) }