From a93c2ba5f1405442702bcbe7a8bb5fd6163bad16 Mon Sep 17 00:00:00 2001 From: AWS SDK for Go v2 automation user Date: Wed, 27 Apr 2022 18:14:59 +0000 Subject: [PATCH] Regenerated Clients --- .../1a9d13566c5243d5a5d0caf1cfca931f.json | 8 + .../34ed04ccb3424879922e5a64d78d9627.json | 8 + .../41575353444b40ffbf474f4155544f00.json | 8 + .../6757f1a5aaeb4b4c9729f2629d986fe5.json | 8 + .../753e75fc65704ca3b64002fba4388d62.json | 8 + .../8dae6f4eef844a3d96ffff97862cc66e.json | 8 + .../bda271bc22c644bcbfc8f33dc3c4ec8a.json | 8 + .../f4572723830440f382adfda74ac87485.json | 8 + service/amplify/api_op_CreateApp.go | 27 +- service/amplify/api_op_UpdateApp.go | 26 +- service/amplify/types/types.go | 8 +- service/chimesdkmediapipelines/LICENSE.txt | 202 ++ service/chimesdkmediapipelines/api_client.go | 454 ++++ .../chimesdkmediapipelines/api_client_test.go | 123 + .../api_op_CreateMediaCapturePipeline.go | 186 ++ .../api_op_DeleteMediaCapturePipeline.go | 116 + .../api_op_GetMediaCapturePipeline.go | 121 + .../api_op_ListMediaCapturePipelines.go | 215 ++ .../api_op_ListTagsForResource.go | 121 + .../api_op_TagResource.go | 123 + .../api_op_UntagResource.go | 122 + .../chimesdkmediapipelines/deserializers.go | 2416 +++++++++++++++++ service/chimesdkmediapipelines/doc.go | 11 + service/chimesdkmediapipelines/endpoints.go | 200 ++ service/chimesdkmediapipelines/generated.json | 34 + service/chimesdkmediapipelines/go.mod | 16 + service/chimesdkmediapipelines/go.sum | 13 + .../go_module_metadata.go | 6 + .../internal/endpoints/endpoints.go | 250 ++ .../internal/endpoints/endpoints_test.go | 11 + .../chimesdkmediapipelines/protocol_test.go | 3 + service/chimesdkmediapipelines/serializers.go | 686 +++++ service/chimesdkmediapipelines/types/enums.go | 155 ++ .../chimesdkmediapipelines/types/errors.go | 184 ++ service/chimesdkmediapipelines/types/types.go | 173 ++ service/chimesdkmediapipelines/validators.go | 404 +++ service/cloudtrail/api_op_AddTags.go | 21 +- 
service/cloudtrail/api_op_ListTags.go | 9 +- service/cloudtrail/api_op_RemoveTags.go | 11 +- service/cloudtrail/types/errors.go | 31 +- service/cloudtrail/types/types.go | 45 +- ...i_op_CreateNetworkAnalyzerConfiguration.go | 185 ++ ...i_op_DeleteNetworkAnalyzerConfiguration.go | 116 + .../api_op_DeleteQueuedMessages.go | 11 +- ...op_GetEventConfigurationByResourceTypes.go | 121 + .../api_op_GetLogLevelsByResourceTypes.go | 4 +- .../api_op_GetNetworkAnalyzerConfiguration.go | 21 +- .../api_op_GetResourceEventConfiguration.go | 6 + .../iotwireless/api_op_GetResourceLogLevel.go | 4 +- .../iotwireless/api_op_GetServiceEndpoint.go | 2 +- .../api_op_ListEventConfigurations.go | 132 + ...pi_op_ListNetworkAnalyzerConfigurations.go | 214 ++ .../iotwireless/api_op_ListQueuedMessages.go | 9 +- .../iotwireless/api_op_PutResourceLogLevel.go | 4 +- ...UpdateEventConfigurationByResourceTypes.go | 125 + .../api_op_UpdateLogLevelsByResourceTypes.go | 4 +- ...i_op_UpdateNetworkAnalyzerConfiguration.go | 21 +- ...api_op_UpdateResourceEventConfiguration.go | 6 + service/iotwireless/deserializers.go | 2107 ++++++++++++-- service/iotwireless/generated.json | 6 + service/iotwireless/serializers.go | 677 ++++- service/iotwireless/types/enums.go | 31 +- service/iotwireless/types/types.go | 244 +- service/iotwireless/validators.go | 122 + .../lookoutequipment/api_op_CreateDataset.go | 2 - .../api_op_DescribeDataIngestionJob.go | 27 +- .../api_op_DescribeDataset.go | 26 +- .../api_op_ListSensorStatistics.go | 234 ++ service/lookoutequipment/deserializers.go | 1516 ++++++++++- service/lookoutequipment/generated.json | 1 + service/lookoutequipment/serializers.go | 87 + service/lookoutequipment/types/enums.go | 38 + service/lookoutequipment/types/types.go | 306 ++- service/lookoutequipment/validators.go | 42 +- .../rekognition/api_op_CreateCollection.go | 5 +- .../api_op_CreateStreamProcessor.go | 97 +- .../rekognition/api_op_DeleteCollection.go | 6 +- 
.../rekognition/api_op_DescribeCollection.go | 2 +- .../api_op_DescribeStreamProcessor.go | 31 +- .../rekognition/api_op_DetectCustomLabels.go | 4 +- service/rekognition/api_op_DetectLabels.go | 2 +- service/rekognition/api_op_DetectText.go | 4 +- .../rekognition/api_op_GetCelebrityInfo.go | 2 +- .../api_op_GetContentModeration.go | 2 +- .../rekognition/api_op_GetSegmentDetection.go | 2 +- service/rekognition/api_op_IndexFaces.go | 23 +- service/rekognition/api_op_ListCollections.go | 11 +- service/rekognition/api_op_ListFaces.go | 5 +- .../api_op_RecognizeCelebrities.go | 6 +- service/rekognition/api_op_SearchFaces.go | 9 +- .../rekognition/api_op_SearchFacesByImage.go | 5 +- .../api_op_StartCelebrityRecognition.go | 2 +- .../api_op_StartContentModeration.go | 2 +- .../rekognition/api_op_StartFaceDetection.go | 2 +- service/rekognition/api_op_StartFaceSearch.go | 3 +- .../api_op_StartSegmentDetection.go | 2 +- .../api_op_StartStreamProcessor.go | 23 +- .../rekognition/api_op_StartTextDetection.go | 9 +- .../api_op_UpdateStreamProcessor.go | 135 + service/rekognition/deserializers.go | 510 ++++ service/rekognition/doc.go | 20 +- service/rekognition/generated.json | 1 + service/rekognition/serializers.go | 396 +++ service/rekognition/types/enums.go | 21 + service/rekognition/types/errors.go | 7 +- service/rekognition/types/types.go | 269 +- service/rekognition/validators.go | 117 + service/sagemaker/api_op_AddTags.go | 8 +- service/sagemaker/api_op_CreateAlgorithm.go | 11 +- .../sagemaker/api_op_CreateCodeRepository.go | 10 +- service/sagemaker/api_op_CreateEndpoint.go | 36 +- .../sagemaker/api_op_CreateEndpointConfig.go | 35 +- .../api_op_CreateHyperParameterTuningJob.go | 4 +- service/sagemaker/api_op_CreateImage.go | 6 +- .../sagemaker/api_op_CreateImageVersion.go | 8 +- service/sagemaker/api_op_CreateModel.go | 52 +- .../sagemaker/api_op_CreateModelPackage.go | 20 +- .../api_op_CreateNotebookInstance.go | 84 +- ...i_op_CreatePresignedNotebookInstanceUrl.go | 26 
+- service/sagemaker/api_op_CreateTrainingJob.go | 105 +- .../sagemaker/api_op_CreateTransformJob.go | 11 +- service/sagemaker/api_op_DeleteEndpoint.go | 14 +- service/sagemaker/api_op_DeleteModel.go | 4 +- .../sagemaker/api_op_DeleteModelPackage.go | 7 +- .../api_op_DeleteNotebookInstance.go | 10 +- service/sagemaker/api_op_DeleteTags.go | 14 +- service/sagemaker/api_op_DescribeAlgorithm.go | 4 +- .../api_op_DescribeEndpointConfig.go | 2 +- .../sagemaker/api_op_DescribeLabelingJob.go | 4 +- service/sagemaker/api_op_DescribeModel.go | 2 +- .../sagemaker/api_op_DescribeModelPackage.go | 2 +- .../api_op_DescribeModelPackageGroup.go | 2 +- .../api_op_DescribeNotebookInstance.go | 23 +- .../sagemaker/api_op_DescribeTrainingJob.go | 47 +- service/sagemaker/api_op_ListAlgorithms.go | 4 +- .../sagemaker/api_op_ListEndpointConfigs.go | 4 +- service/sagemaker/api_op_ListEndpoints.go | 4 +- service/sagemaker/api_op_ListLabelingJobs.go | 4 +- .../api_op_ListLabelingJobsForWorkteam.go | 4 +- service/sagemaker/api_op_ListModelPackages.go | 4 +- service/sagemaker/api_op_ListModels.go | 4 +- ...op_ListNotebookInstanceLifecycleConfigs.go | 4 +- .../sagemaker/api_op_ListNotebookInstances.go | 8 +- service/sagemaker/api_op_ListTags.go | 11 +- service/sagemaker/api_op_ListTrainingJobs.go | 4 +- service/sagemaker/api_op_QueryLineage.go | 10 +- .../sagemaker/api_op_StartNotebookInstance.go | 2 +- .../sagemaker/api_op_StopNotebookInstance.go | 15 +- service/sagemaker/api_op_StopTrainingJob.go | 12 +- service/sagemaker/api_op_UpdateEndpoint.go | 10 +- ...i_op_UpdateEndpointWeightsAndCapacities.go | 8 +- .../api_op_UpdateNotebookInstance.go | 24 +- service/sagemaker/deserializers.go | 79 + service/sagemaker/doc.go | 6 +- service/sagemaker/serializers.go | 37 + service/sagemaker/types/enums.go | 18 + service/sagemaker/types/errors.go | 4 +- service/sagemaker/types/types.go | 437 +-- 158 files changed, 14825 insertions(+), 1216 deletions(-) create mode 100644 
.changelog/1a9d13566c5243d5a5d0caf1cfca931f.json create mode 100644 .changelog/34ed04ccb3424879922e5a64d78d9627.json create mode 100644 .changelog/41575353444b40ffbf474f4155544f00.json create mode 100644 .changelog/6757f1a5aaeb4b4c9729f2629d986fe5.json create mode 100644 .changelog/753e75fc65704ca3b64002fba4388d62.json create mode 100644 .changelog/8dae6f4eef844a3d96ffff97862cc66e.json create mode 100644 .changelog/bda271bc22c644bcbfc8f33dc3c4ec8a.json create mode 100644 .changelog/f4572723830440f382adfda74ac87485.json create mode 100644 service/chimesdkmediapipelines/LICENSE.txt create mode 100644 service/chimesdkmediapipelines/api_client.go create mode 100644 service/chimesdkmediapipelines/api_client_test.go create mode 100644 service/chimesdkmediapipelines/api_op_CreateMediaCapturePipeline.go create mode 100644 service/chimesdkmediapipelines/api_op_DeleteMediaCapturePipeline.go create mode 100644 service/chimesdkmediapipelines/api_op_GetMediaCapturePipeline.go create mode 100644 service/chimesdkmediapipelines/api_op_ListMediaCapturePipelines.go create mode 100644 service/chimesdkmediapipelines/api_op_ListTagsForResource.go create mode 100644 service/chimesdkmediapipelines/api_op_TagResource.go create mode 100644 service/chimesdkmediapipelines/api_op_UntagResource.go create mode 100644 service/chimesdkmediapipelines/deserializers.go create mode 100644 service/chimesdkmediapipelines/doc.go create mode 100644 service/chimesdkmediapipelines/endpoints.go create mode 100644 service/chimesdkmediapipelines/generated.json create mode 100644 service/chimesdkmediapipelines/go.mod create mode 100644 service/chimesdkmediapipelines/go.sum create mode 100644 service/chimesdkmediapipelines/go_module_metadata.go create mode 100644 service/chimesdkmediapipelines/internal/endpoints/endpoints.go create mode 100644 service/chimesdkmediapipelines/internal/endpoints/endpoints_test.go create mode 100644 service/chimesdkmediapipelines/protocol_test.go create mode 100644 
service/chimesdkmediapipelines/serializers.go create mode 100644 service/chimesdkmediapipelines/types/enums.go create mode 100644 service/chimesdkmediapipelines/types/errors.go create mode 100644 service/chimesdkmediapipelines/types/types.go create mode 100644 service/chimesdkmediapipelines/validators.go create mode 100644 service/iotwireless/api_op_CreateNetworkAnalyzerConfiguration.go create mode 100644 service/iotwireless/api_op_DeleteNetworkAnalyzerConfiguration.go create mode 100644 service/iotwireless/api_op_GetEventConfigurationByResourceTypes.go create mode 100644 service/iotwireless/api_op_ListEventConfigurations.go create mode 100644 service/iotwireless/api_op_ListNetworkAnalyzerConfigurations.go create mode 100644 service/iotwireless/api_op_UpdateEventConfigurationByResourceTypes.go create mode 100644 service/lookoutequipment/api_op_ListSensorStatistics.go create mode 100644 service/rekognition/api_op_UpdateStreamProcessor.go diff --git a/.changelog/1a9d13566c5243d5a5d0caf1cfca931f.json b/.changelog/1a9d13566c5243d5a5d0caf1cfca931f.json new file mode 100644 index 00000000000..d8f15fa0080 --- /dev/null +++ b/.changelog/1a9d13566c5243d5a5d0caf1cfca931f.json @@ -0,0 +1,8 @@ +{ + "id": "1a9d1356-6c52-43d5-a5d0-caf1cfca931f", + "type": "feature", + "description": "This release adds support to configure stream-processor resources for label detections on streaming-videos. 
UpateStreamProcessor API is also launched with this release, which could be used to update an existing stream-processor.", + "modules": [ + "service/rekognition" + ] +} \ No newline at end of file diff --git a/.changelog/34ed04ccb3424879922e5a64d78d9627.json b/.changelog/34ed04ccb3424879922e5a64d78d9627.json new file mode 100644 index 00000000000..656777c6e06 --- /dev/null +++ b/.changelog/34ed04ccb3424879922e5a64d78d9627.json @@ -0,0 +1,8 @@ +{ + "id": "34ed04cc-b342-4879-922e-5a64d78d9627", + "type": "feature", + "description": "This release adds the following new features: 1) Introduces an option for automatic schema creation 2) Now allows for Ingestion of data containing most common errors and allows automatic data cleaning 3) Introduces new API ListSensorStatistics that gives further information about the ingested data", + "modules": [ + "service/lookoutequipment" + ] +} \ No newline at end of file diff --git a/.changelog/41575353444b40ffbf474f4155544f00.json b/.changelog/41575353444b40ffbf474f4155544f00.json new file mode 100644 index 00000000000..a2d571a0ac6 --- /dev/null +++ b/.changelog/41575353444b40ffbf474f4155544f00.json @@ -0,0 +1,8 @@ +{ + "id": "41575353-444b-40ff-bf47-4f4155544f00", + "type": "release", + "description": "New AWS service client module", + "modules": [ + "service/chimesdkmediapipelines" + ] +} \ No newline at end of file diff --git a/.changelog/6757f1a5aaeb4b4c9729f2629d986fe5.json b/.changelog/6757f1a5aaeb4b4c9729f2629d986fe5.json new file mode 100644 index 00000000000..d1558628ae1 --- /dev/null +++ b/.changelog/6757f1a5aaeb4b4c9729f2629d986fe5.json @@ -0,0 +1,8 @@ +{ + "id": "6757f1a5-aaeb-4b4c-9729-f2629d986fe5", + "type": "feature", + "description": "Amazon SageMaker Autopilot adds support for custom validation dataset and validation ratio through the CreateAutoMLJob and DescribeAutoMLJob APIs.", + "modules": [ + "service/sagemaker" + ] +} \ No newline at end of file diff --git a/.changelog/753e75fc65704ca3b64002fba4388d62.json 
b/.changelog/753e75fc65704ca3b64002fba4388d62.json new file mode 100644 index 00000000000..0e7cc899968 --- /dev/null +++ b/.changelog/753e75fc65704ca3b64002fba4388d62.json @@ -0,0 +1,8 @@ +{ + "id": "753e75fc-6570-4ca3-b640-02fba4388d62", + "type": "feature", + "description": "For Amazon Chime SDK meetings, the Amazon Chime Media Pipelines SDK allows builders to capture audio, video, and content share streams. You can also capture meeting events, live transcripts, and data messages. The pipelines save the artifacts to an Amazon S3 bucket that you designate.", + "modules": [ + "service/chimesdkmediapipelines" + ] +} \ No newline at end of file diff --git a/.changelog/8dae6f4eef844a3d96ffff97862cc66e.json b/.changelog/8dae6f4eef844a3d96ffff97862cc66e.json new file mode 100644 index 00000000000..758b94244ea --- /dev/null +++ b/.changelog/8dae6f4eef844a3d96ffff97862cc66e.json @@ -0,0 +1,8 @@ +{ + "id": "8dae6f4e-ef84-4a3d-96ff-ff97862cc66e", + "type": "feature", + "description": "Increases the retention period maximum to 2557 days. Deprecates unused fields of the ListEventDataStores API response. 
Updates documentation.", + "modules": [ + "service/cloudtrail" + ] +} \ No newline at end of file diff --git a/.changelog/bda271bc22c644bcbfc8f33dc3c4ec8a.json b/.changelog/bda271bc22c644bcbfc8f33dc3c4ec8a.json new file mode 100644 index 00000000000..a004c7c05f4 --- /dev/null +++ b/.changelog/bda271bc22c644bcbfc8f33dc3c4ec8a.json @@ -0,0 +1,8 @@ +{ + "id": "bda271bc-22c6-44bc-bfc8-f33dc3c4ec8a", + "type": "documentation", + "description": "Documentation only update to support the Amplify GitHub App feature launch", + "modules": [ + "service/amplify" + ] +} \ No newline at end of file diff --git a/.changelog/f4572723830440f382adfda74ac87485.json b/.changelog/f4572723830440f382adfda74ac87485.json new file mode 100644 index 00000000000..e21b0d223bb --- /dev/null +++ b/.changelog/f4572723830440f382adfda74ac87485.json @@ -0,0 +1,8 @@ +{ + "id": "f4572723-8304-40f3-82ad-fda74ac87485", + "type": "feature", + "description": "Add list support for event configurations, allow to get and update event configurations by resource type, support LoRaWAN events; Make NetworkAnalyzerConfiguration as a resource, add List, Create, Delete API support; Add FCntStart attribute support for ABP WirelessDevice.", + "modules": [ + "service/iotwireless" + ] +} \ No newline at end of file diff --git a/service/amplify/api_op_CreateApp.go b/service/amplify/api_op_CreateApp.go index 1c98c02476d..ead9e1ce896 100644 --- a/service/amplify/api_op_CreateApp.go +++ b/service/amplify/api_op_CreateApp.go @@ -35,9 +35,17 @@ type CreateAppInput struct { // This member is required. Name *string - // The personal access token for a third-party source control system for an Amplify - // app. The personal access token is used to create a webhook and a read-only - // deploy key. The token is not stored. + // The personal access token for a GitHub repository for an Amplify app. The + // personal access token is used to authorize access to a GitHub repository using + // the Amplify GitHub App. 
The token is not stored. Use accessToken for GitHub + // repositories only. To authorize access to a repository provider such as + // Bitbucket or CodeCommit, use oauthToken. You must specify either accessToken or + // oauthToken when you create a new app. Existing Amplify apps deployed from a + // GitHub repository using OAuth continue to work with CI/CD. However, we strongly + // recommend that you migrate these apps to use the GitHub App. For more + // information, see Migrating an existing OAuth app to the Amplify GitHub App + // (https://docs.aws.amazon.com/amplify/latest/UserGuide/setting-up-GitHub-access.html#migrating-to-github-app-auth) + // in the Amplify User Guide . AccessToken *string // The automated branch creation configuration for an Amplify app. @@ -84,8 +92,17 @@ type CreateAppInput struct { IamServiceRoleArn *string // The OAuth token for a third-party source control system for an Amplify app. The - // OAuth token is used to create a webhook and a read-only deploy key. The OAuth - // token is not stored. + // OAuth token is used to create a webhook and a read-only deploy key using SSH + // cloning. The OAuth token is not stored. Use oauthToken for repository providers + // other than GitHub, such as Bitbucket or CodeCommit. To authorize access to + // GitHub as your repository provider, use accessToken. You must specify either + // oauthToken or accessToken when you create a new app. Existing Amplify apps + // deployed from a GitHub repository using OAuth continue to work with CI/CD. + // However, we strongly recommend that you migrate these apps to use the GitHub + // App. For more information, see Migrating an existing OAuth app to the Amplify + // GitHub App + // (https://docs.aws.amazon.com/amplify/latest/UserGuide/setting-up-GitHub-access.html#migrating-to-github-app-auth) + // in the Amplify User Guide . OauthToken *string // The platform or framework for an Amplify app. 
diff --git a/service/amplify/api_op_UpdateApp.go b/service/amplify/api_op_UpdateApp.go index 88aa7e793ca..e548b1038f8 100644 --- a/service/amplify/api_op_UpdateApp.go +++ b/service/amplify/api_op_UpdateApp.go @@ -35,9 +35,17 @@ type UpdateAppInput struct { // This member is required. AppId *string - // The personal access token for a third-party source control system for an Amplify - // app. The token is used to create webhook and a read-only deploy key. The token - // is not stored. + // The personal access token for a GitHub repository for an Amplify app. The + // personal access token is used to authorize access to a GitHub repository using + // the Amplify GitHub App. The token is not stored. Use accessToken for GitHub + // repositories only. To authorize access to a repository provider such as + // Bitbucket or CodeCommit, use oauthToken. You must specify either accessToken or + // oauthToken when you update an app. Existing Amplify apps deployed from a GitHub + // repository using OAuth continue to work with CI/CD. However, we strongly + // recommend that you migrate these apps to use the GitHub App. For more + // information, see Migrating an existing OAuth app to the Amplify GitHub App + // (https://docs.aws.amazon.com/amplify/latest/UserGuide/setting-up-GitHub-access.html#migrating-to-github-app-auth) + // in the Amplify User Guide . AccessToken *string // The automated branch creation configuration for an Amplify app. @@ -85,8 +93,16 @@ type UpdateAppInput struct { Name *string // The OAuth token for a third-party source control system for an Amplify app. The - // token is used to create a webhook and a read-only deploy key. The OAuth token is - // not stored. + // OAuth token is used to create a webhook and a read-only deploy key using SSH + // cloning. The OAuth token is not stored. Use oauthToken for repository providers + // other than GitHub, such as Bitbucket or CodeCommit. 
To authorize access to + // GitHub as your repository provider, use accessToken. You must specify either + // oauthToken or accessToken when you update an app. Existing Amplify apps deployed + // from a GitHub repository using OAuth continue to work with CI/CD. However, we + // strongly recommend that you migrate these apps to use the GitHub App. For more + // information, see Migrating an existing OAuth app to the Amplify GitHub App + // (https://docs.aws.amazon.com/amplify/latest/UserGuide/setting-up-GitHub-access.html#migrating-to-github-app-auth) + // in the Amplify User Guide . OauthToken *string // The platform for an Amplify app. diff --git a/service/amplify/types/types.go b/service/amplify/types/types.go index 29d59d2389e..1edaca26f89 100644 --- a/service/amplify/types/types.go +++ b/service/amplify/types/types.go @@ -106,10 +106,10 @@ type App struct { // Describes the information about a production branch of the Amplify app. ProductionBranch *ProductionBranch - // The authentication protocol to use to access the Git repository for an Amplify - // app. For a GitHub repository, specify TOKEN. For an Amazon Web Services - // CodeCommit repository, specify SIGV4. For GitLab and Bitbucket repositories, - // specify SSH. + // This is for internal use. The Amplify service uses this parameter to specify the + // authentication protocol to use to access the Git repository for an Amplify app. + // Amplify specifies TOKEN for a GitHub repository, SIGV4 for an Amazon Web + // Services CodeCommit repository, and SSH for GitLab and Bitbucket repositories. RepositoryCloneMethod RepositoryCloneMethod // The tag for the Amplify app. 
diff --git a/service/chimesdkmediapipelines/LICENSE.txt b/service/chimesdkmediapipelines/LICENSE.txt new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/service/chimesdkmediapipelines/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/service/chimesdkmediapipelines/api_client.go b/service/chimesdkmediapipelines/api_client.go new file mode 100644 index 00000000000..2d31b7453e2 --- /dev/null +++ b/service/chimesdkmediapipelines/api_client.go @@ -0,0 +1,454 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package chimesdkmediapipelines + +import ( + "context" + cryptorand "crypto/rand" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/defaults" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + smithy "github.com/aws/smithy-go" + smithydocument "github.com/aws/smithy-go/document" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyrand "github.com/aws/smithy-go/rand" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net" + "net/http" + "time" +) + +const ServiceID = "Chime SDK Media Pipelines" +const ServiceAPIVersion = "2021-07-15" + +// Client provides the API client to make operations call for Amazon Chime SDK +// Media Pipelines. +type Client struct { + options Options +} + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. +func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + resolveDefaultLogger(&options) + + setResolvedDefaultsMode(&options) + + resolveRetryer(&options) + + resolveHTTPClient(&options) + + resolveHTTPSignerV4(&options) + + resolveDefaultEndpointConfiguration(&options) + + resolveIdempotencyTokenProvider(&options) + + for _, fn := range optFns { + fn(&options) + } + + client := &Client{ + options: options, + } + + return client +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. 
+ APIOptions []func(*middleware.Stack) error + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // The configuration DefaultsMode that the SDK should use when constructing the + // clients initial default settings. + DefaultsMode aws.DefaultsMode + + // The endpoint options to be used when attempting to resolve an endpoint. + EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. + EndpointResolver EndpointResolver + + // Signature Version 4 (SigV4) Signer + HTTPSignerV4 HTTPSignerV4 + + // Provides idempotency tokens values that will be automatically populated into + // idempotent API operations. + IdempotencyTokenProvider IdempotencyTokenProvider + + // The logger writer interface to write logging messages to. + Logger logging.Logger + + // The region to send requests to. (Required) + Region string + + // RetryMaxAttempts specifies the maximum number attempts an API client will call + // an operation that fails with a retryable error. A value of 0 is ignored, and + // will not be used to configure the API client created default retryer, or modify + // per operation call's retry max attempts. When creating a new API Clients this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. If specified in an operation call's functional + // options with a value that is different than the constructed client's Options, + // the Client's Retryer will be wrapped to use the operation's specific + // RetryMaxAttempts value. + RetryMaxAttempts int + + // RetryMode specifies the retry mode the API client will be created with, if + // Retryer option is not also specified. When creating a new API Clients this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. 
Currently does not support per operation call + // overrides, may in the future. + RetryMode aws.RetryMode + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil the API client will use a default retryer. The kind of + // default retry created by the API client can be changed with the RetryMode + // option. + Retryer aws.Retryer + + // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You + // should not populate this structure programmatically, or rely on the values here + // within your applications. + RuntimeEnvironment aws.RuntimeEnvironment + + // The initial DefaultsMode used when the client options were constructed. If the + // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved + // value was at that point in time. Currently does not support per operation call + // overrides, may in the future. + resolvedDefaultsMode aws.DefaultsMode + + // The HTTP client to invoke API calls with. Defaults to client's default HTTP + // implementation if nil. + HTTPClient HTTPClient +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// WithEndpointResolver returns a functional option for setting the Client's +// EndpointResolver option. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// Copy creates a clone where the APIOptions list is deep copied. 
+func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} +func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { + ctx = middleware.ClearStackValues(ctx) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + for _, fn := range optFns { + fn(&options) + } + + finalizeRetryMaxAttemptOptions(&options, *c) + + finalizeClientEndpointResolverOptions(&options) + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) + result, metadata, err = handler.Handle(ctx, params) + if err != nil { + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + return result, metadata, err +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +func setResolvedDefaultsMode(o *Options) { + if len(o.resolvedDefaultsMode) > 0 { + return + } + + var mode aws.DefaultsMode + mode.SetFromString(string(o.DefaultsMode)) + + if mode == aws.DefaultsModeAuto { + mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) + } + + o.resolvedDefaultsMode = mode +} + +// NewFromConfig returns a new client from the provided config. 
+func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSRetryMaxAttempts(cfg, &opts) + resolveAWSRetryMode(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + resolveUseDualStackEndpoint(cfg, &opts) + resolveUseFIPSEndpoint(cfg, &opts) + return New(opts, optFns...) +} + +func resolveHTTPClient(o *Options) { + var buildable *awshttp.BuildableClient + + if o.HTTPClient != nil { + var ok bool + buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return + } + } else { + buildable = awshttp.NewBuildableClient() + } + + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { + if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { + dialer.Timeout = dialerTimeout + } + }) + + buildable = buildable.WithTransportOptions(func(transport *http.Transport) { + if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { + transport.TLSHandshakeTimeout = tlsHandshakeTimeout + } + }) + } + + o.HTTPClient = buildable +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + + if len(o.RetryMode) == 0 { + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + o.RetryMode = modeConfig.RetryMode + } + } + if len(o.RetryMode) == 0 { + o.RetryMode = aws.RetryModeStandard + } + + var standardOptions []func(*retry.StandardOptions) + if v := o.RetryMaxAttempts; v != 0 { + standardOptions = append(standardOptions, func(so *retry.StandardOptions) { + so.MaxAttempts = v + }) + } + + switch o.RetryMode { + case aws.RetryModeAdaptive: + var adaptiveOptions 
[]func(*retry.AdaptiveModeOptions) + if len(standardOptions) != 0 { + adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { + ao.StandardOptions = append(ao.StandardOptions, standardOptions...) + }) + } + o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) + + default: + o.Retryer = retry.NewStandard(standardOptions...) + } +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSRetryMode(cfg aws.Config, o *Options) { + if len(cfg.RetryMode) == 0 { + return + } + o.RetryMode = cfg.RetryMode +} +func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { + if cfg.RetryMaxAttempts == 0 { + return + } + o.RetryMaxAttempts = cfg.RetryMaxAttempts +} + +func finalizeRetryMaxAttemptOptions(o *Options, client Client) { + if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions, NewDefaultEndpointResolver()) +} + +func addClientUserAgent(stack *middleware.Stack) error { + return awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "chimesdkmediapipelines", goModuleVersion)(stack) +} + +func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error { + mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ + CredentialsProvider: o.Credentials, + Signer: o.HTTPSignerV4, + LogSigning: o.ClientLogMode.IsSigning(), + }) + return stack.Finalize.Add(mw, middleware.After) +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns 
...func(*v4.SignerOptions)) error +} + +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func newDefaultV4Signer(o Options) *v4.Signer { + return v4.NewSigner(func(so *v4.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + }) +} + +func resolveIdempotencyTokenProvider(o *Options) { + if o.IdempotencyTokenProvider != nil { + return + } + o.IdempotencyTokenProvider = smithyrand.NewUUIDIdempotencyToken(cryptorand.Reader) +} + +func addRetryMiddlewares(stack *middleware.Stack, o Options) error { + mo := retry.AddRetryMiddlewaresOptions{ + Retryer: o.Retryer, + LogRetryAttempts: o.ClientLogMode.IsRetries(), + } + return retry.AddRetryMiddlewares(stack, mo) +} + +// resolves dual-stack endpoint configuration +func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseDualStackEndpoint = value + } + return nil +} + +// resolves FIPS endpoint configuration +func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseFIPSEndpoint = value + } + return nil +} + +// IdempotencyTokenProvider interface for providing idempotency token +type IdempotencyTokenProvider interface { + GetIdempotencyToken() (string, error) +} + +func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + return awsmiddleware.AddRequestIDRetrieverMiddleware(stack) +} + +func addResponseErrorMiddleware(stack *middleware.Stack) error { + return awshttp.AddResponseErrorMiddleware(stack) +} + +func 
addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} diff --git a/service/chimesdkmediapipelines/api_client_test.go b/service/chimesdkmediapipelines/api_client_test.go new file mode 100644 index 00000000000..60ce162f109 --- /dev/null +++ b/service/chimesdkmediapipelines/api_client_test.go @@ -0,0 +1,123 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package chimesdkmediapipelines + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io/ioutil" + "net/http" + "strings" + "testing" +) + +func TestClient_resolveRetryOptions(t *testing.T) { + nopClient := smithyhttp.ClientDoFunc(func(_ *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: 200, + Header: http.Header{}, + Body: ioutil.NopCloser(strings.NewReader("")), + }, nil + }) + + cases := map[string]struct { + defaultsMode aws.DefaultsMode + retryer aws.Retryer + retryMaxAttempts int + opRetryMaxAttempts *int + retryMode aws.RetryMode + expectClientRetryMode aws.RetryMode + expectClientMaxAttempts int + expectOpMaxAttempts int + }{ + "defaults": { + defaultsMode: aws.DefaultsModeStandard, + expectClientRetryMode: aws.RetryModeStandard, + expectClientMaxAttempts: 3, + expectOpMaxAttempts: 3, + }, + "custom default retry": { + retryMode: aws.RetryModeAdaptive, + retryMaxAttempts: 10, + expectClientRetryMode: aws.RetryModeAdaptive, + expectClientMaxAttempts: 10, + expectOpMaxAttempts: 10, + }, + "custom op max attempts": { + retryMode: aws.RetryModeAdaptive, + retryMaxAttempts: 10, + opRetryMaxAttempts: aws.Int(2), + expectClientRetryMode: aws.RetryModeAdaptive, + 
expectClientMaxAttempts: 10, + expectOpMaxAttempts: 2, + }, + "custom op no change max attempts": { + retryMode: aws.RetryModeAdaptive, + retryMaxAttempts: 10, + opRetryMaxAttempts: aws.Int(10), + expectClientRetryMode: aws.RetryModeAdaptive, + expectClientMaxAttempts: 10, + expectOpMaxAttempts: 10, + }, + "custom op 0 max attempts": { + retryMode: aws.RetryModeAdaptive, + retryMaxAttempts: 10, + opRetryMaxAttempts: aws.Int(0), + expectClientRetryMode: aws.RetryModeAdaptive, + expectClientMaxAttempts: 10, + expectOpMaxAttempts: 10, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + client := NewFromConfig(aws.Config{ + DefaultsMode: c.defaultsMode, + Retryer: func() func() aws.Retryer { + if c.retryer == nil { + return nil + } + + return func() aws.Retryer { return c.retryer } + }(), + HTTPClient: nopClient, + RetryMaxAttempts: c.retryMaxAttempts, + RetryMode: c.retryMode, + }) + + if e, a := c.expectClientRetryMode, client.options.RetryMode; e != a { + t.Errorf("expect %v retry mode, got %v", e, a) + } + if e, a := c.expectClientMaxAttempts, client.options.Retryer.MaxAttempts(); e != a { + t.Errorf("expect %v max attempts, got %v", e, a) + } + + _, _, err := client.invokeOperation(context.Background(), "mockOperation", struct{}{}, + []func(*Options){ + func(o *Options) { + if c.opRetryMaxAttempts == nil { + return + } + o.RetryMaxAttempts = *c.opRetryMaxAttempts + }, + }, + func(s *middleware.Stack, o Options) error { + s.Initialize.Clear() + s.Serialize.Clear() + s.Build.Clear() + s.Finalize.Clear() + s.Deserialize.Clear() + + if e, a := c.expectOpMaxAttempts, o.Retryer.MaxAttempts(); e != a { + t.Errorf("expect %v op max attempts, got %v", e, a) + } + return nil + }) + if err != nil { + t.Fatalf("expect no operation error, got %v", err) + } + }) + } +} diff --git a/service/chimesdkmediapipelines/api_op_CreateMediaCapturePipeline.go b/service/chimesdkmediapipelines/api_op_CreateMediaCapturePipeline.go new file mode 100644 index 
00000000000..ffad5e64781 --- /dev/null +++ b/service/chimesdkmediapipelines/api_op_CreateMediaCapturePipeline.go @@ -0,0 +1,186 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package chimesdkmediapipelines + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a media capture pipeline. +func (c *Client) CreateMediaCapturePipeline(ctx context.Context, params *CreateMediaCapturePipelineInput, optFns ...func(*Options)) (*CreateMediaCapturePipelineOutput, error) { + if params == nil { + params = &CreateMediaCapturePipelineInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateMediaCapturePipeline", params, optFns, c.addOperationCreateMediaCapturePipelineMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateMediaCapturePipelineOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateMediaCapturePipelineInput struct { + + // The ARN of the sink type. + // + // This member is required. + SinkArn *string + + // Destination type to which the media artifacts are saved. You must use an S3 + // bucket. + // + // This member is required. + SinkType types.MediaPipelineSinkType + + // ARN of the source from which the media artifacts are captured. + // + // This member is required. + SourceArn *string + + // Source type from which the media artifacts are captured. A Chime SDK Meeting is + // the only supported source. + // + // This member is required. + SourceType types.MediaPipelineSourceType + + // The configuration for a specified media capture pipeline. SourceType must be + // ChimeSdkMeeting. + ChimeSdkMeetingConfiguration *types.ChimeSdkMeetingConfiguration + + // The token assigned to the client making the pipeline request. 
+ ClientRequestToken *string + + // The list of tags. + Tags []types.Tag + + noSmithyDocumentSerde +} + +type CreateMediaCapturePipelineOutput struct { + + // A media capture pipeline object, the ID, source type, source ARN, sink type, and + // sink ARN of a media capture pipeline object. + MediaCapturePipeline *types.MediaCapturePipeline + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateMediaCapturePipelineMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateMediaCapturePipeline{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateMediaCapturePipeline{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addIdempotencyToken_opCreateMediaCapturePipelineMiddleware(stack, options); err != nil { + 
return err + } + if err = addOpCreateMediaCapturePipelineValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateMediaCapturePipeline(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +type idempotencyToken_initializeOpCreateMediaCapturePipeline struct { + tokenProvider IdempotencyTokenProvider +} + +func (*idempotencyToken_initializeOpCreateMediaCapturePipeline) ID() string { + return "OperationIdempotencyTokenAutoFill" +} + +func (m *idempotencyToken_initializeOpCreateMediaCapturePipeline) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.tokenProvider == nil { + return next.HandleInitialize(ctx, in) + } + + input, ok := in.Parameters.(*CreateMediaCapturePipelineInput) + if !ok { + return out, metadata, fmt.Errorf("expected middleware input to be of type *CreateMediaCapturePipelineInput ") + } + + if input.ClientRequestToken == nil { + t, err := m.tokenProvider.GetIdempotencyToken() + if err != nil { + return out, metadata, err + } + input.ClientRequestToken = &t + } + return next.HandleInitialize(ctx, in) +} +func addIdempotencyToken_opCreateMediaCapturePipelineMiddleware(stack *middleware.Stack, cfg Options) error { + return stack.Initialize.Add(&idempotencyToken_initializeOpCreateMediaCapturePipeline{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before) +} + +func newServiceMetadataMiddleware_opCreateMediaCapturePipeline(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + 
SigningName: "chime", + OperationName: "CreateMediaCapturePipeline", + } +} diff --git a/service/chimesdkmediapipelines/api_op_DeleteMediaCapturePipeline.go b/service/chimesdkmediapipelines/api_op_DeleteMediaCapturePipeline.go new file mode 100644 index 00000000000..dab67d4083a --- /dev/null +++ b/service/chimesdkmediapipelines/api_op_DeleteMediaCapturePipeline.go @@ -0,0 +1,116 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package chimesdkmediapipelines + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the media capture pipeline. +func (c *Client) DeleteMediaCapturePipeline(ctx context.Context, params *DeleteMediaCapturePipelineInput, optFns ...func(*Options)) (*DeleteMediaCapturePipelineOutput, error) { + if params == nil { + params = &DeleteMediaCapturePipelineInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteMediaCapturePipeline", params, optFns, c.addOperationDeleteMediaCapturePipelineMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteMediaCapturePipelineOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteMediaCapturePipelineInput struct { + + // The ID of the media capture pipeline being deleted. + // + // This member is required. + MediaPipelineId *string + + noSmithyDocumentSerde +} + +type DeleteMediaCapturePipelineOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteMediaCapturePipelineMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpDeleteMediaCapturePipeline{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpDeleteMediaCapturePipeline{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteMediaCapturePipelineValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteMediaCapturePipeline(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } 
+ return nil +} + +func newServiceMetadataMiddleware_opDeleteMediaCapturePipeline(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "chime", + OperationName: "DeleteMediaCapturePipeline", + } +} diff --git a/service/chimesdkmediapipelines/api_op_GetMediaCapturePipeline.go b/service/chimesdkmediapipelines/api_op_GetMediaCapturePipeline.go new file mode 100644 index 00000000000..c11ac9447bc --- /dev/null +++ b/service/chimesdkmediapipelines/api_op_GetMediaCapturePipeline.go @@ -0,0 +1,121 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package chimesdkmediapipelines + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets an existing media capture pipeline. +func (c *Client) GetMediaCapturePipeline(ctx context.Context, params *GetMediaCapturePipelineInput, optFns ...func(*Options)) (*GetMediaCapturePipelineOutput, error) { + if params == nil { + params = &GetMediaCapturePipelineInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetMediaCapturePipeline", params, optFns, c.addOperationGetMediaCapturePipelineMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetMediaCapturePipelineOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetMediaCapturePipelineInput struct { + + // The ID of the pipeline that you want to get. + // + // This member is required. + MediaPipelineId *string + + noSmithyDocumentSerde +} + +type GetMediaCapturePipelineOutput struct { + + // The media capture pipeline object. + MediaCapturePipeline *types.MediaCapturePipeline + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetMediaCapturePipelineMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpGetMediaCapturePipeline{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpGetMediaCapturePipeline{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetMediaCapturePipelineValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetMediaCapturePipeline(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} 
+ +func newServiceMetadataMiddleware_opGetMediaCapturePipeline(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "chime", + OperationName: "GetMediaCapturePipeline", + } +} diff --git a/service/chimesdkmediapipelines/api_op_ListMediaCapturePipelines.go b/service/chimesdkmediapipelines/api_op_ListMediaCapturePipelines.go new file mode 100644 index 00000000000..a1392924f4e --- /dev/null +++ b/service/chimesdkmediapipelines/api_op_ListMediaCapturePipelines.go @@ -0,0 +1,215 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package chimesdkmediapipelines + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a list of media capture pipelines. +func (c *Client) ListMediaCapturePipelines(ctx context.Context, params *ListMediaCapturePipelinesInput, optFns ...func(*Options)) (*ListMediaCapturePipelinesOutput, error) { + if params == nil { + params = &ListMediaCapturePipelinesInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListMediaCapturePipelines", params, optFns, c.addOperationListMediaCapturePipelinesMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListMediaCapturePipelinesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListMediaCapturePipelinesInput struct { + + // The maximum number of results to return in a single call. Valid Range: 1 - 99. + MaxResults *int32 + + // The token used to retrieve the next page of results. + NextToken *string + + noSmithyDocumentSerde +} + +type ListMediaCapturePipelinesOutput struct { + + // The media capture pipeline objects in the list. 
+ MediaCapturePipelines []types.MediaCapturePipelineSummary + + // The token used to retrieve the next page of results. + NextToken *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListMediaCapturePipelinesMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpListMediaCapturePipelines{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListMediaCapturePipelines{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListMediaCapturePipelines(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err 
+ } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListMediaCapturePipelinesAPIClient is a client that implements the +// ListMediaCapturePipelines operation. +type ListMediaCapturePipelinesAPIClient interface { + ListMediaCapturePipelines(context.Context, *ListMediaCapturePipelinesInput, ...func(*Options)) (*ListMediaCapturePipelinesOutput, error) +} + +var _ ListMediaCapturePipelinesAPIClient = (*Client)(nil) + +// ListMediaCapturePipelinesPaginatorOptions is the paginator options for +// ListMediaCapturePipelines +type ListMediaCapturePipelinesPaginatorOptions struct { + // The maximum number of results to return in a single call. Valid Range: 1 - 99. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListMediaCapturePipelinesPaginator is a paginator for ListMediaCapturePipelines +type ListMediaCapturePipelinesPaginator struct { + options ListMediaCapturePipelinesPaginatorOptions + client ListMediaCapturePipelinesAPIClient + params *ListMediaCapturePipelinesInput + nextToken *string + firstPage bool +} + +// NewListMediaCapturePipelinesPaginator returns a new +// ListMediaCapturePipelinesPaginator +func NewListMediaCapturePipelinesPaginator(client ListMediaCapturePipelinesAPIClient, params *ListMediaCapturePipelinesInput, optFns ...func(*ListMediaCapturePipelinesPaginatorOptions)) *ListMediaCapturePipelinesPaginator { + if params == nil { + params = &ListMediaCapturePipelinesInput{} + } + + options := ListMediaCapturePipelinesPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListMediaCapturePipelinesPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a 
boolean indicating whether more pages are available +func (p *ListMediaCapturePipelinesPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListMediaCapturePipelines page. +func (p *ListMediaCapturePipelinesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListMediaCapturePipelinesOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + result, err := p.client.ListMediaCapturePipelines(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListMediaCapturePipelines(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "chime", + OperationName: "ListMediaCapturePipelines", + } +} diff --git a/service/chimesdkmediapipelines/api_op_ListTagsForResource.go b/service/chimesdkmediapipelines/api_op_ListTagsForResource.go new file mode 100644 index 00000000000..97763b42143 --- /dev/null +++ b/service/chimesdkmediapipelines/api_op_ListTagsForResource.go @@ -0,0 +1,121 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package chimesdkmediapipelines + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists the tags applied to an Amazon Chime SDK media capture pipeline. +func (c *Client) ListTagsForResource(ctx context.Context, params *ListTagsForResourceInput, optFns ...func(*Options)) (*ListTagsForResourceOutput, error) { + if params == nil { + params = &ListTagsForResourceInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListTagsForResource", params, optFns, c.addOperationListTagsForResourceMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListTagsForResourceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListTagsForResourceInput struct { + + // The resource ARN. + // + // This member is required. + ResourceARN *string + + noSmithyDocumentSerde +} + +type ListTagsForResourceOutput struct { + + // The tag key-value pairs. + Tags []types.Tag + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListTagsForResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpListTagsForResource{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListTagsForResource{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpListTagsForResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListTagsForResource(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func 
newServiceMetadataMiddleware_opListTagsForResource(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "chime", + OperationName: "ListTagsForResource", + } +} diff --git a/service/chimesdkmediapipelines/api_op_TagResource.go b/service/chimesdkmediapipelines/api_op_TagResource.go new file mode 100644 index 00000000000..15cc757b087 --- /dev/null +++ b/service/chimesdkmediapipelines/api_op_TagResource.go @@ -0,0 +1,123 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package chimesdkmediapipelines + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Applies the specified tags to the specified Amazon Chime SDK media capture +// pipeline. +func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optFns ...func(*Options)) (*TagResourceOutput, error) { + if params == nil { + params = &TagResourceInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "TagResource", params, optFns, c.addOperationTagResourceMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*TagResourceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type TagResourceInput struct { + + // The resource ARN. + // + // This member is required. + ResourceARN *string + + // The tag key-value pairs. + // + // This member is required. + Tags []types.Tag + + noSmithyDocumentSerde +} + +type TagResourceOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpTagResource{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpTagResource{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpTagResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opTagResource(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opTagResource(region 
string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "chime", + OperationName: "TagResource", + } +} diff --git a/service/chimesdkmediapipelines/api_op_UntagResource.go b/service/chimesdkmediapipelines/api_op_UntagResource.go new file mode 100644 index 00000000000..60c2bfad8ea --- /dev/null +++ b/service/chimesdkmediapipelines/api_op_UntagResource.go @@ -0,0 +1,122 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package chimesdkmediapipelines + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Removes the specified tags from the specified Amazon Chime SDK media capture +// pipeline. +func (c *Client) UntagResource(ctx context.Context, params *UntagResourceInput, optFns ...func(*Options)) (*UntagResourceOutput, error) { + if params == nil { + params = &UntagResourceInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UntagResource", params, optFns, c.addOperationUntagResourceMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UntagResourceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UntagResourceInput struct { + + // The resource ARN. + // + // This member is required. + ResourceARN *string + + // The tag keys. + // + // This member is required. + TagKeys []string + + noSmithyDocumentSerde +} + +type UntagResourceOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpUntagResource{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpUntagResource{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUntagResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUntagResource(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func 
newServiceMetadataMiddleware_opUntagResource(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "chime", + OperationName: "UntagResource", + } +} diff --git a/service/chimesdkmediapipelines/deserializers.go b/service/chimesdkmediapipelines/deserializers.go new file mode 100644 index 00000000000..369c4aae49b --- /dev/null +++ b/service/chimesdkmediapipelines/deserializers.go @@ -0,0 +1,2416 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package chimesdkmediapipelines + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson" + "github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines/types" + smithy "github.com/aws/smithy-go" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "io/ioutil" + "strings" +) + +type awsRestjson1_deserializeOpCreateMediaCapturePipeline struct { +} + +func (*awsRestjson1_deserializeOpCreateMediaCapturePipeline) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpCreateMediaCapturePipeline) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorCreateMediaCapturePipeline(response, &metadata) + } + output := 
&CreateMediaCapturePipelineOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentCreateMediaCapturePipelineOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorCreateMediaCapturePipeline(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, 
io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("BadRequestException", errorCode): + return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) + + case strings.EqualFold("ForbiddenException", errorCode): + return awsRestjson1_deserializeErrorForbiddenException(response, errorBody) + + case strings.EqualFold("ResourceLimitExceededException", errorCode): + return awsRestjson1_deserializeErrorResourceLimitExceededException(response, errorBody) + + case strings.EqualFold("ServiceFailureException", errorCode): + return awsRestjson1_deserializeErrorServiceFailureException(response, errorBody) + + case strings.EqualFold("ServiceUnavailableException", errorCode): + return awsRestjson1_deserializeErrorServiceUnavailableException(response, errorBody) + + case strings.EqualFold("ThrottledClientException", errorCode): + return awsRestjson1_deserializeErrorThrottledClientException(response, errorBody) + + case strings.EqualFold("UnauthorizedClientException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentCreateMediaCapturePipelineOutput(v **CreateMediaCapturePipelineOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateMediaCapturePipelineOutput + if *v == nil { + sv = &CreateMediaCapturePipelineOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "MediaCapturePipeline": + if err := 
awsRestjson1_deserializeDocumentMediaCapturePipeline(&sv.MediaCapturePipeline, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpDeleteMediaCapturePipeline struct { +} + +func (*awsRestjson1_deserializeOpDeleteMediaCapturePipeline) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpDeleteMediaCapturePipeline) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorDeleteMediaCapturePipeline(response, &metadata) + } + output := &DeleteMediaCapturePipelineOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorDeleteMediaCapturePipeline(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + 
ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("BadRequestException", errorCode): + return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) + + case strings.EqualFold("ForbiddenException", errorCode): + return awsRestjson1_deserializeErrorForbiddenException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsRestjson1_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("ServiceFailureException", errorCode): + return awsRestjson1_deserializeErrorServiceFailureException(response, errorBody) + + case strings.EqualFold("ServiceUnavailableException", errorCode): + return awsRestjson1_deserializeErrorServiceUnavailableException(response, errorBody) + + case strings.EqualFold("ThrottledClientException", errorCode): + return awsRestjson1_deserializeErrorThrottledClientException(response, errorBody) + + case strings.EqualFold("UnauthorizedClientException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestjson1_deserializeOpGetMediaCapturePipeline struct { +} + +func (*awsRestjson1_deserializeOpGetMediaCapturePipeline) ID() string { + return "OperationDeserializer" +} + +func (m 
*awsRestjson1_deserializeOpGetMediaCapturePipeline) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorGetMediaCapturePipeline(response, &metadata) + } + output := &GetMediaCapturePipelineOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentGetMediaCapturePipelineOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorGetMediaCapturePipeline(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := 
bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("BadRequestException", errorCode): + return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) + + case strings.EqualFold("ForbiddenException", errorCode): + return awsRestjson1_deserializeErrorForbiddenException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsRestjson1_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("ServiceFailureException", errorCode): + return awsRestjson1_deserializeErrorServiceFailureException(response, errorBody) + + case strings.EqualFold("ServiceUnavailableException", errorCode): + return awsRestjson1_deserializeErrorServiceUnavailableException(response, errorBody) + + case strings.EqualFold("ThrottledClientException", errorCode): + return awsRestjson1_deserializeErrorThrottledClientException(response, errorBody) + + case strings.EqualFold("UnauthorizedClientException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + 
return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentGetMediaCapturePipelineOutput(v **GetMediaCapturePipelineOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetMediaCapturePipelineOutput + if *v == nil { + sv = &GetMediaCapturePipelineOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "MediaCapturePipeline": + if err := awsRestjson1_deserializeDocumentMediaCapturePipeline(&sv.MediaCapturePipeline, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpListMediaCapturePipelines struct { +} + +func (*awsRestjson1_deserializeOpListMediaCapturePipelines) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListMediaCapturePipelines) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorListMediaCapturePipelines(response, &metadata) + } + output := &ListMediaCapturePipelinesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := 
decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListMediaCapturePipelinesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorListMediaCapturePipelines(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("BadRequestException", errorCode): + return awsRestjson1_deserializeErrorBadRequestException(response, 
errorBody) + + case strings.EqualFold("ForbiddenException", errorCode): + return awsRestjson1_deserializeErrorForbiddenException(response, errorBody) + + case strings.EqualFold("ResourceLimitExceededException", errorCode): + return awsRestjson1_deserializeErrorResourceLimitExceededException(response, errorBody) + + case strings.EqualFold("ServiceFailureException", errorCode): + return awsRestjson1_deserializeErrorServiceFailureException(response, errorBody) + + case strings.EqualFold("ServiceUnavailableException", errorCode): + return awsRestjson1_deserializeErrorServiceUnavailableException(response, errorBody) + + case strings.EqualFold("ThrottledClientException", errorCode): + return awsRestjson1_deserializeErrorThrottledClientException(response, errorBody) + + case strings.EqualFold("UnauthorizedClientException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentListMediaCapturePipelinesOutput(v **ListMediaCapturePipelinesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListMediaCapturePipelinesOutput + if *v == nil { + sv = &ListMediaCapturePipelinesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "MediaCapturePipelines": + if err := awsRestjson1_deserializeDocumentMediaCapturePipelineSummaryList(&sv.MediaCapturePipelines, value); err != nil { + return err + } + + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + 
_, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpListTagsForResource struct { +} + +func (*awsRestjson1_deserializeOpListTagsForResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListTagsForResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorListTagsForResource(response, &metadata) + } + output := &ListTagsForResourceOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListTagsForResourceOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorListTagsForResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { 
+ var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("BadRequestException", errorCode): + return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) + + case strings.EqualFold("ForbiddenException", errorCode): + return awsRestjson1_deserializeErrorForbiddenException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsRestjson1_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("ServiceFailureException", errorCode): + return awsRestjson1_deserializeErrorServiceFailureException(response, errorBody) + + case strings.EqualFold("ServiceUnavailableException", errorCode): + return awsRestjson1_deserializeErrorServiceUnavailableException(response, errorBody) + + case strings.EqualFold("ThrottledClientException", errorCode): + return awsRestjson1_deserializeErrorThrottledClientException(response, errorBody) + + case 
strings.EqualFold("UnauthorizedClientException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentListTagsForResourceOutput(v **ListTagsForResourceOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListTagsForResourceOutput + if *v == nil { + sv = &ListTagsForResourceOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Tags": + if err := awsRestjson1_deserializeDocumentTagList(&sv.Tags, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpTagResource struct { +} + +func (*awsRestjson1_deserializeOpTagResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpTagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorTagResource(response, &metadata) + } + output := &TagResourceOutput{} + out.Result = output + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorTagResource(response 
*smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("BadRequestException", errorCode): + return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) + + case strings.EqualFold("ForbiddenException", errorCode): + return awsRestjson1_deserializeErrorForbiddenException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsRestjson1_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("ServiceFailureException", errorCode): + return awsRestjson1_deserializeErrorServiceFailureException(response, errorBody) + + case strings.EqualFold("ServiceUnavailableException", errorCode): + return awsRestjson1_deserializeErrorServiceUnavailableException(response, errorBody) + + case strings.EqualFold("ThrottledClientException", errorCode): + return awsRestjson1_deserializeErrorThrottledClientException(response, 
errorBody) + + case strings.EqualFold("UnauthorizedClientException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestjson1_deserializeOpUntagResource struct { +} + +func (*awsRestjson1_deserializeOpUntagResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpUntagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorUntagResource(response, &metadata) + } + output := &UntagResourceOutput{} + out.Result = output + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorUntagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, 
message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("BadRequestException", errorCode): + return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) + + case strings.EqualFold("ForbiddenException", errorCode): + return awsRestjson1_deserializeErrorForbiddenException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsRestjson1_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("ServiceFailureException", errorCode): + return awsRestjson1_deserializeErrorServiceFailureException(response, errorBody) + + case strings.EqualFold("ServiceUnavailableException", errorCode): + return awsRestjson1_deserializeErrorServiceUnavailableException(response, errorBody) + + case strings.EqualFold("ThrottledClientException", errorCode): + return awsRestjson1_deserializeErrorThrottledClientException(response, errorBody) + + case strings.EqualFold("UnauthorizedClientException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeErrorBadRequestException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.BadRequestException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := 
decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentBadRequestException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorForbiddenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ForbiddenException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentForbiddenException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.NotFoundException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if 
err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorResourceLimitExceededException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ResourceLimitExceededException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentResourceLimitExceededException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorServiceFailureException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ServiceFailureException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := 
json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentServiceFailureException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorServiceUnavailableException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ServiceUnavailableException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentServiceUnavailableException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorThrottledClientException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ThrottledClientException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body 
:= io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentThrottledClientException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorUnauthorizedClientException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.UnauthorizedClientException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentUnauthorizedClientException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeDocumentArtifactsConfiguration(v **types.ArtifactsConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + 
if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ArtifactsConfiguration + if *v == nil { + sv = &types.ArtifactsConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Audio": + if err := awsRestjson1_deserializeDocumentAudioArtifactsConfiguration(&sv.Audio, value); err != nil { + return err + } + + case "Content": + if err := awsRestjson1_deserializeDocumentContentArtifactsConfiguration(&sv.Content, value); err != nil { + return err + } + + case "Video": + if err := awsRestjson1_deserializeDocumentVideoArtifactsConfiguration(&sv.Video, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentAttendeeIdList(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GuidString to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentAudioArtifactsConfiguration(v **types.AudioArtifactsConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AudioArtifactsConfiguration + if *v == nil { + sv = &types.AudioArtifactsConfiguration{} + } else { + sv = *v + } + + for key, value := range 
shape { + switch key { + case "MuxType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AudioMuxType to be of type string, got %T instead", value) + } + sv.MuxType = types.AudioMuxType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentBadRequestException(v **types.BadRequestException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.BadRequestException + if *v == nil { + sv = &types.BadRequestException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Code": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorCode to be of type string, got %T instead", value) + } + sv.Code = types.ErrorCode(jtv) + } + + case "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + case "RequestId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.RequestId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentChimeSdkMeetingConfiguration(v **types.ChimeSdkMeetingConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ChimeSdkMeetingConfiguration + if *v == nil { + sv = &types.ChimeSdkMeetingConfiguration{} + } else { + sv = *v + } + + for 
key, value := range shape { + switch key { + case "ArtifactsConfiguration": + if err := awsRestjson1_deserializeDocumentArtifactsConfiguration(&sv.ArtifactsConfiguration, value); err != nil { + return err + } + + case "SourceConfiguration": + if err := awsRestjson1_deserializeDocumentSourceConfiguration(&sv.SourceConfiguration, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentContentArtifactsConfiguration(v **types.ContentArtifactsConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ContentArtifactsConfiguration + if *v == nil { + sv = &types.ContentArtifactsConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "MuxType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ContentMuxType to be of type string, got %T instead", value) + } + sv.MuxType = types.ContentMuxType(jtv) + } + + case "State": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ArtifactsState to be of type string, got %T instead", value) + } + sv.State = types.ArtifactsState(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentExternalUserIdList(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + 
return fmt.Errorf("expected ExternalUserIdType to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentForbiddenException(v **types.ForbiddenException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ForbiddenException + if *v == nil { + sv = &types.ForbiddenException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Code": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorCode to be of type string, got %T instead", value) + } + sv.Code = types.ErrorCode(jtv) + } + + case "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + case "RequestId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.RequestId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentMediaCapturePipeline(v **types.MediaCapturePipeline, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.MediaCapturePipeline + if *v == nil { + sv = &types.MediaCapturePipeline{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ChimeSdkMeetingConfiguration": + if err := 
awsRestjson1_deserializeDocumentChimeSdkMeetingConfiguration(&sv.ChimeSdkMeetingConfiguration, value); err != nil { + return err + } + + case "CreatedTimestamp": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Iso8601Timestamp to be of type string, got %T instead", value) + } + t, err := smithytime.ParseDateTime(jtv) + if err != nil { + return err + } + sv.CreatedTimestamp = ptr.Time(t) + } + + case "MediaPipelineArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AmazonResourceName to be of type string, got %T instead", value) + } + sv.MediaPipelineArn = ptr.String(jtv) + } + + case "MediaPipelineId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GuidString to be of type string, got %T instead", value) + } + sv.MediaPipelineId = ptr.String(jtv) + } + + case "SinkArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Arn to be of type string, got %T instead", value) + } + sv.SinkArn = ptr.String(jtv) + } + + case "SinkType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MediaPipelineSinkType to be of type string, got %T instead", value) + } + sv.SinkType = types.MediaPipelineSinkType(jtv) + } + + case "SourceArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Arn to be of type string, got %T instead", value) + } + sv.SourceArn = ptr.String(jtv) + } + + case "SourceType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MediaPipelineSourceType to be of type string, got %T instead", value) + } + sv.SourceType = types.MediaPipelineSourceType(jtv) + } + + case "Status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MediaPipelineStatus to be of type string, got %T instead", value) + } + sv.Status = types.MediaPipelineStatus(jtv) + } + + 
case "UpdatedTimestamp": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Iso8601Timestamp to be of type string, got %T instead", value) + } + t, err := smithytime.ParseDateTime(jtv) + if err != nil { + return err + } + sv.UpdatedTimestamp = ptr.Time(t) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentMediaCapturePipelineSummary(v **types.MediaCapturePipelineSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.MediaCapturePipelineSummary + if *v == nil { + sv = &types.MediaCapturePipelineSummary{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "MediaPipelineArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AmazonResourceName to be of type string, got %T instead", value) + } + sv.MediaPipelineArn = ptr.String(jtv) + } + + case "MediaPipelineId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GuidString to be of type string, got %T instead", value) + } + sv.MediaPipelineId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentMediaCapturePipelineSummaryList(v *[]types.MediaCapturePipelineSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.MediaCapturePipelineSummary + if *v == nil { + cv = []types.MediaCapturePipelineSummary{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.MediaCapturePipelineSummary + 
destAddr := &col + if err := awsRestjson1_deserializeDocumentMediaCapturePipelineSummary(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentNotFoundException(v **types.NotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.NotFoundException + if *v == nil { + sv = &types.NotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Code": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorCode to be of type string, got %T instead", value) + } + sv.Code = types.ErrorCode(jtv) + } + + case "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + case "RequestId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.RequestId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentResourceLimitExceededException(v **types.ResourceLimitExceededException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ResourceLimitExceededException + if *v == nil { + sv = &types.ResourceLimitExceededException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Code": + if value != nil { + jtv, ok := 
value.(string) + if !ok { + return fmt.Errorf("expected ErrorCode to be of type string, got %T instead", value) + } + sv.Code = types.ErrorCode(jtv) + } + + case "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + case "RequestId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.RequestId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentSelectedVideoStreams(v **types.SelectedVideoStreams, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SelectedVideoStreams + if *v == nil { + sv = &types.SelectedVideoStreams{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "AttendeeIds": + if err := awsRestjson1_deserializeDocumentAttendeeIdList(&sv.AttendeeIds, value); err != nil { + return err + } + + case "ExternalUserIds": + if err := awsRestjson1_deserializeDocumentExternalUserIdList(&sv.ExternalUserIds, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentServiceFailureException(v **types.ServiceFailureException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ServiceFailureException + if *v == nil { + sv = &types.ServiceFailureException{} + } else { + sv = *v + } + + for key, value := 
range shape { + switch key { + case "Code": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorCode to be of type string, got %T instead", value) + } + sv.Code = types.ErrorCode(jtv) + } + + case "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + case "RequestId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.RequestId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentServiceUnavailableException(v **types.ServiceUnavailableException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ServiceUnavailableException + if *v == nil { + sv = &types.ServiceUnavailableException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Code": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorCode to be of type string, got %T instead", value) + } + sv.Code = types.ErrorCode(jtv) + } + + case "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + case "RequestId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.RequestId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentSourceConfiguration(v 
**types.SourceConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SourceConfiguration + if *v == nil { + sv = &types.SourceConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "SelectedVideoStreams": + if err := awsRestjson1_deserializeDocumentSelectedVideoStreams(&sv.SelectedVideoStreams, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentTag(v **types.Tag, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Tag + if *v == nil { + sv = &types.Tag{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Key": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TagKey to be of type string, got %T instead", value) + } + sv.Key = ptr.String(jtv) + } + + case "Value": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TagValue to be of type string, got %T instead", value) + } + sv.Value = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentTagList(v *[]types.Tag, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Tag + if *v == nil { + cv = []types.Tag{} + } else { + cv = *v + } + + for _, value := 
range shape { + var col types.Tag + destAddr := &col + if err := awsRestjson1_deserializeDocumentTag(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentThrottledClientException(v **types.ThrottledClientException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ThrottledClientException + if *v == nil { + sv = &types.ThrottledClientException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Code": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorCode to be of type string, got %T instead", value) + } + sv.Code = types.ErrorCode(jtv) + } + + case "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + case "RequestId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.RequestId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentUnauthorizedClientException(v **types.UnauthorizedClientException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.UnauthorizedClientException + if *v == nil { + sv = &types.UnauthorizedClientException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Code": + if value 
!= nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorCode to be of type string, got %T instead", value) + } + sv.Code = types.ErrorCode(jtv) + } + + case "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + case "RequestId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.RequestId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentVideoArtifactsConfiguration(v **types.VideoArtifactsConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.VideoArtifactsConfiguration + if *v == nil { + sv = &types.VideoArtifactsConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "MuxType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected VideoMuxType to be of type string, got %T instead", value) + } + sv.MuxType = types.VideoMuxType(jtv) + } + + case "State": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ArtifactsState to be of type string, got %T instead", value) + } + sv.State = types.ArtifactsState(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} diff --git a/service/chimesdkmediapipelines/doc.go b/service/chimesdkmediapipelines/doc.go new file mode 100644 index 00000000000..ef6fe7e77ae --- /dev/null +++ b/service/chimesdkmediapipelines/doc.go @@ -0,0 +1,11 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +// Package chimesdkmediapipelines provides the API client, operations, and +// parameter types for Amazon Chime SDK Media Pipelines. +// +// The Amazon Chime SDK media pipeline APIs in this section allow software +// developers to create Amazon Chime SDK media pipelines and capture audio, video, +// events, and data messages from Amazon Chime SDK meetings. For more information +// about media pipelines, see Amazon Chime SDK media pipelines +// (https://docs.aws.amazon.com/chime/latest/APIReference/API_Operations_Amazon_Chime_SDK_Media_Pipelines.html). +package chimesdkmediapipelines diff --git a/service/chimesdkmediapipelines/endpoints.go b/service/chimesdkmediapipelines/endpoints.go new file mode 100644 index 00000000000..246bcf7d462 --- /dev/null +++ b/service/chimesdkmediapipelines/endpoints.go @@ -0,0 +1,200 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package chimesdkmediapipelines + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalendpoints "github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines/internal/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/url" + "strings" +) + +// EndpointResolverOptions is the service endpoint resolver options +type EndpointResolverOptions = internalendpoints.Options + +// EndpointResolver interface for resolving service endpoints. +type EndpointResolver interface { + ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) +} + +var _ EndpointResolver = &internalendpoints.Resolver{} + +// NewDefaultEndpointResolver constructs a new service endpoint resolver +func NewDefaultEndpointResolver() *internalendpoints.Resolver { + return internalendpoints.New() +} + +// EndpointResolverFunc is a helper utility that wraps a function so it satisfies +// the EndpointResolver interface. 
This is useful when you want to add additional +// endpoint resolving logic, or stub out specific endpoints with custom values. +type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) + +func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return fn(region, options) +} + +func resolveDefaultEndpointConfiguration(o *Options) { + if o.EndpointResolver != nil { + return + } + o.EndpointResolver = NewDefaultEndpointResolver() +} + +// EndpointResolverFromURL returns an EndpointResolver configured using the +// provided endpoint url. By default, the resolved endpoint resolver uses the +// client region as signing region, and the endpoint source is set to +// EndpointSourceCustom.You can provide functional options to configure endpoint +// values for the resolved endpoint. +func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { + e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} + for _, fn := range optFns { + fn(&e) + } + + return EndpointResolverFunc( + func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { + if len(e.SigningRegion) == 0 { + e.SigningRegion = region + } + return e, nil + }, + ) +} + +type ResolveEndpoint struct { + Resolver EndpointResolver + Options EndpointResolverOptions +} + +func (*ResolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.Resolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + eo := m.Options + eo.Logger = middleware.GetLogger(ctx) + + 
var endpoint aws.Endpoint + endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(awsmiddleware.GetSigningName(ctx)) == 0 { + signingName := endpoint.SigningName + if len(signingName) == 0 { + signingName = "chime" + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + } + ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) + ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) + return next.HandleSerialize(ctx, in) +} +func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&ResolveEndpoint{ + Resolver: o.EndpointResolver, + Options: o.EndpointOptions, + }, "OperationSerializer", middleware.Before) +} + +func removeResolveEndpointMiddleware(stack *middleware.Stack) error { + _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) + return err +} + +type wrappedEndpointResolver struct { + awsResolver aws.EndpointResolverWithOptions + resolver EndpointResolver +} + +func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + if w.awsResolver == nil { + goto fallback + } + endpoint, err = w.awsResolver.ResolveEndpoint(ServiceID, region, options) + if err == nil { + return endpoint, nil + } + + if nf := (&aws.EndpointNotFoundError{}); !errors.As(err, &nf) { + return endpoint, err + } + +fallback: + if w.resolver == nil { + return endpoint, fmt.Errorf("default endpoint resolver provided was nil") + } + return w.resolver.ResolveEndpoint(region, options) +} + +type awsEndpointResolverAdaptor 
func(service, region string) (aws.Endpoint, error) + +func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { + return a(service, region) +} + +var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) + +// withEndpointResolver returns an EndpointResolver that first delegates endpoint resolution to the awsResolver. +// If awsResolver returns aws.EndpointNotFoundError error, the resolver will use the provided +// fallbackResolver for resolution. +// +// fallbackResolver must not be nil +func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions, fallbackResolver EndpointResolver) EndpointResolver { + var resolver aws.EndpointResolverWithOptions + + if awsResolverWithOptions != nil { + resolver = awsResolverWithOptions + } else if awsResolver != nil { + resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) + } + + return &wrappedEndpointResolver{ + awsResolver: resolver, + resolver: fallbackResolver, + } +} + +func finalizeClientEndpointResolverOptions(options *Options) { + options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() + + if len(options.EndpointOptions.ResolvedRegion) == 0 { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(options.Region, fipsInfix) || + strings.Contains(options.Region, fipsPrefix) || + strings.Contains(options.Region, fipsSuffix) { + options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled + } + } + +} diff --git a/service/chimesdkmediapipelines/generated.json b/service/chimesdkmediapipelines/generated.json new file mode 100644 index 00000000000..b0fbae641fa --- /dev/null +++ 
b/service/chimesdkmediapipelines/generated.json @@ -0,0 +1,34 @@ +{ + "dependencies": { + "github.com/aws/aws-sdk-go-v2": "v1.4.0", + "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", + "github.com/aws/smithy-go": "v1.4.0" + }, + "files": [ + "api_client.go", + "api_client_test.go", + "api_op_CreateMediaCapturePipeline.go", + "api_op_DeleteMediaCapturePipeline.go", + "api_op_GetMediaCapturePipeline.go", + "api_op_ListMediaCapturePipelines.go", + "api_op_ListTagsForResource.go", + "api_op_TagResource.go", + "api_op_UntagResource.go", + "deserializers.go", + "doc.go", + "endpoints.go", + "generated.json", + "internal/endpoints/endpoints.go", + "internal/endpoints/endpoints_test.go", + "protocol_test.go", + "serializers.go", + "types/enums.go", + "types/errors.go", + "types/types.go", + "validators.go" + ], + "go": "1.15", + "module": "github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines", + "unstable": false +} diff --git a/service/chimesdkmediapipelines/go.mod b/service/chimesdkmediapipelines/go.mod new file mode 100644 index 00000000000..ef5f8531713 --- /dev/null +++ b/service/chimesdkmediapipelines/go.mod @@ -0,0 +1,16 @@ +module github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines + +go 1.15 + +require ( + github.com/aws/aws-sdk-go-v2 v1.16.3 + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.10 + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.4 + github.com/aws/smithy-go v1.11.2 +) + +replace github.com/aws/aws-sdk-go-v2 => ../../ + +replace github.com/aws/aws-sdk-go-v2/internal/configsources => ../../internal/configsources/ + +replace github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 => ../../internal/endpoints/v2/ diff --git a/service/chimesdkmediapipelines/go.sum b/service/chimesdkmediapipelines/go.sum new file mode 100644 index 00000000000..d50e4572cb0 --- /dev/null +++ 
b/service/chimesdkmediapipelines/go.sum @@ -0,0 +1,13 @@ +github.com/aws/smithy-go v1.11.2 h1:eG/N+CcUMAvsdffgMvjMKwfyDzIkjM6pfxMJ8Mzc6mE= +github.com/aws/smithy-go v1.11.2/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/service/chimesdkmediapipelines/go_module_metadata.go b/service/chimesdkmediapipelines/go_module_metadata.go new file mode 100644 index 00000000000..b2a2fc4a655 --- /dev/null +++ b/service/chimesdkmediapipelines/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. 
+ +package chimesdkmediapipelines + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "tip" diff --git a/service/chimesdkmediapipelines/internal/endpoints/endpoints.go b/service/chimesdkmediapipelines/internal/endpoints/endpoints.go new file mode 100644 index 00000000000..5df5225495e --- /dev/null +++ b/service/chimesdkmediapipelines/internal/endpoints/endpoints.go @@ -0,0 +1,250 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package endpoints + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" + "github.com/aws/smithy-go/logging" + "regexp" +) + +// Options is the endpoint resolver configuration options +type Options struct { + // Logger is a logging implementation that log events should be sent to. + Logger logging.Logger + + // LogDeprecated indicates that deprecated endpoints should be logged to the + // provided logger. + LogDeprecated bool + + // ResolvedRegion is used to override the region to be resolved, rather then the + // using the value passed to the ResolveEndpoint method. This value is used by the + // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative + // name. You must not set this value directly in your application. + ResolvedRegion string + + // DisableHTTPS informs the resolver to return an endpoint that does not use the + // HTTPS scheme. + DisableHTTPS bool + + // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. + UseDualStackEndpoint aws.DualStackEndpointState + + // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. 
+ UseFIPSEndpoint aws.FIPSEndpointState +} + +func (o Options) GetResolvedRegion() string { + return o.ResolvedRegion +} + +func (o Options) GetDisableHTTPS() bool { + return o.DisableHTTPS +} + +func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { + return o.UseDualStackEndpoint +} + +func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { + return o.UseFIPSEndpoint +} + +func transformToSharedOptions(options Options) endpoints.Options { + return endpoints.Options{ + Logger: options.Logger, + LogDeprecated: options.LogDeprecated, + ResolvedRegion: options.ResolvedRegion, + DisableHTTPS: options.DisableHTTPS, + UseDualStackEndpoint: options.UseDualStackEndpoint, + UseFIPSEndpoint: options.UseFIPSEndpoint, + } +} + +// Resolver Chime SDK Media Pipelines endpoint resolver +type Resolver struct { + partitions endpoints.Partitions +} + +// ResolveEndpoint resolves the service endpoint for the given region and options +func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { + if len(region) == 0 { + return endpoint, &aws.MissingRegionError{} + } + + opt := transformToSharedOptions(options) + return r.partitions.ResolveEndpoint(region, opt) +} + +// New returns a new Resolver +func New() *Resolver { + return &Resolver{ + partitions: defaultPartitions, + } +} + +var partitionRegexp = struct { + Aws *regexp.Regexp + AwsCn *regexp.Regexp + AwsIso *regexp.Regexp + AwsIsoB *regexp.Regexp + AwsUsGov *regexp.Regexp +}{ + + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$"), + AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), + AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), +} + +var defaultPartitions = endpoints.Partitions{ + { + ID: "aws", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: 
"media-pipelines-chime.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "media-pipelines-chime-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "media-pipelines-chime-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "media-pipelines-chime.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.Aws, + IsRegionalized: true, + }, + { + ID: "aws-cn", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "media-pipelines-chime.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "media-pipelines-chime-fips.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "media-pipelines-chime-fips.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "media-pipelines-chime.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + }, + { + ID: "aws-iso", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "media-pipelines-chime-fips.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: 
"media-pipelines-chime.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + }, + { + ID: "aws-iso-b", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "media-pipelines-chime-fips.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "media-pipelines-chime.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + }, + { + ID: "aws-us-gov", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "media-pipelines-chime.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "media-pipelines-chime-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "media-pipelines-chime-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "media-pipelines-chime.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsUsGov, + IsRegionalized: true, + }, +} diff --git a/service/chimesdkmediapipelines/internal/endpoints/endpoints_test.go b/service/chimesdkmediapipelines/internal/endpoints/endpoints_test.go new file mode 100644 index 00000000000..08e5da2d833 --- /dev/null +++ b/service/chimesdkmediapipelines/internal/endpoints/endpoints_test.go @@ -0,0 +1,11 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package endpoints + +import ( + "testing" +) + +func TestRegexCompile(t *testing.T) { + _ = defaultPartitions +} diff --git a/service/chimesdkmediapipelines/protocol_test.go b/service/chimesdkmediapipelines/protocol_test.go new file mode 100644 index 00000000000..1d82210743c --- /dev/null +++ b/service/chimesdkmediapipelines/protocol_test.go @@ -0,0 +1,3 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package chimesdkmediapipelines diff --git a/service/chimesdkmediapipelines/serializers.go b/service/chimesdkmediapipelines/serializers.go new file mode 100644 index 00000000000..49f0e2b0624 --- /dev/null +++ b/service/chimesdkmediapipelines/serializers.go @@ -0,0 +1,686 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package chimesdkmediapipelines + +import ( + "bytes" + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + smithyjson "github.com/aws/smithy-go/encoding/json" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +type awsRestjson1_serializeOpCreateMediaCapturePipeline struct { +} + +func (*awsRestjson1_serializeOpCreateMediaCapturePipeline) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpCreateMediaCapturePipeline) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateMediaCapturePipelineInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := 
httpbinding.SplitURI("/sdk-media-capture-pipelines") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentCreateMediaCapturePipelineInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsCreateMediaCapturePipelineInput(v *CreateMediaCapturePipelineInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentCreateMediaCapturePipelineInput(v *CreateMediaCapturePipelineInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ChimeSdkMeetingConfiguration != nil { + ok := object.Key("ChimeSdkMeetingConfiguration") + if err := awsRestjson1_serializeDocumentChimeSdkMeetingConfiguration(v.ChimeSdkMeetingConfiguration, ok); err != nil { + return err + } + } + + if v.ClientRequestToken != nil { + ok := object.Key("ClientRequestToken") + ok.String(*v.ClientRequestToken) + } + + if v.SinkArn != nil { + ok := object.Key("SinkArn") + ok.String(*v.SinkArn) + } + + if len(v.SinkType) > 0 { + ok := 
object.Key("SinkType") + ok.String(string(v.SinkType)) + } + + if v.SourceArn != nil { + ok := object.Key("SourceArn") + ok.String(*v.SourceArn) + } + + if len(v.SourceType) > 0 { + ok := object.Key("SourceType") + ok.String(string(v.SourceType)) + } + + if v.Tags != nil { + ok := object.Key("Tags") + if err := awsRestjson1_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpDeleteMediaCapturePipeline struct { +} + +func (*awsRestjson1_serializeOpDeleteMediaCapturePipeline) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpDeleteMediaCapturePipeline) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteMediaCapturePipelineInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/sdk-media-capture-pipelines/{MediaPipelineId}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsDeleteMediaCapturePipelineInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: 
err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsDeleteMediaCapturePipelineInput(v *DeleteMediaCapturePipelineInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.MediaPipelineId == nil || len(*v.MediaPipelineId) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member MediaPipelineId must not be empty")} + } + if v.MediaPipelineId != nil { + if err := encoder.SetURI("MediaPipelineId").String(*v.MediaPipelineId); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpGetMediaCapturePipeline struct { +} + +func (*awsRestjson1_serializeOpGetMediaCapturePipeline) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpGetMediaCapturePipeline) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetMediaCapturePipelineInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/sdk-media-capture-pipelines/{MediaPipelineId}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsGetMediaCapturePipelineInput(input, restEncoder); err != nil 
{ + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsGetMediaCapturePipelineInput(v *GetMediaCapturePipelineInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.MediaPipelineId == nil || len(*v.MediaPipelineId) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member MediaPipelineId must not be empty")} + } + if v.MediaPipelineId != nil { + if err := encoder.SetURI("MediaPipelineId").String(*v.MediaPipelineId); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpListMediaCapturePipelines struct { +} + +func (*awsRestjson1_serializeOpListMediaCapturePipelines) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListMediaCapturePipelines) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListMediaCapturePipelinesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/sdk-media-capture-pipelines") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if 
err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsListMediaCapturePipelinesInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsListMediaCapturePipelinesInput(v *ListMediaCapturePipelinesInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.MaxResults != nil { + encoder.SetQuery("max-results").Integer(*v.MaxResults) + } + + if v.NextToken != nil { + encoder.SetQuery("next-token").String(*v.NextToken) + } + + return nil +} + +type awsRestjson1_serializeOpListTagsForResource struct { +} + +func (*awsRestjson1_serializeOpListTagsForResource) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListTagsForResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListTagsForResourceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/tags") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, 
request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsListTagsForResourceInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsListTagsForResourceInput(v *ListTagsForResourceInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ResourceARN != nil { + encoder.SetQuery("arn").String(*v.ResourceARN) + } + + return nil +} + +type awsRestjson1_serializeOpTagResource struct { +} + +func (*awsRestjson1_serializeOpTagResource) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpTagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*TagResourceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/tags?operation=tag-resource") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + 
restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentTagResourceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsTagResourceInput(v *TagResourceInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentTagResourceInput(v *TagResourceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ResourceARN != nil { + ok := object.Key("ResourceARN") + ok.String(*v.ResourceARN) + } + + if v.Tags != nil { + ok := object.Key("Tags") + if err := awsRestjson1_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpUntagResource struct { +} + +func (*awsRestjson1_serializeOpUntagResource) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpUntagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UntagResourceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown 
input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/tags?operation=untag-resource") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentUntagResourceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsUntagResourceInput(v *UntagResourceInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentUntagResourceInput(v *UntagResourceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ResourceARN != nil { + ok := object.Key("ResourceARN") + ok.String(*v.ResourceARN) + } + + if v.TagKeys != nil { + ok := object.Key("TagKeys") + if err := awsRestjson1_serializeDocumentTagKeyList(v.TagKeys, ok); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeDocumentArtifactsConfiguration(v *types.ArtifactsConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Audio != nil { + ok := 
object.Key("Audio") + if err := awsRestjson1_serializeDocumentAudioArtifactsConfiguration(v.Audio, ok); err != nil { + return err + } + } + + if v.Content != nil { + ok := object.Key("Content") + if err := awsRestjson1_serializeDocumentContentArtifactsConfiguration(v.Content, ok); err != nil { + return err + } + } + + if v.Video != nil { + ok := object.Key("Video") + if err := awsRestjson1_serializeDocumentVideoArtifactsConfiguration(v.Video, ok); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeDocumentAttendeeIdList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsRestjson1_serializeDocumentAudioArtifactsConfiguration(v *types.AudioArtifactsConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.MuxType) > 0 { + ok := object.Key("MuxType") + ok.String(string(v.MuxType)) + } + + return nil +} + +func awsRestjson1_serializeDocumentChimeSdkMeetingConfiguration(v *types.ChimeSdkMeetingConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ArtifactsConfiguration != nil { + ok := object.Key("ArtifactsConfiguration") + if err := awsRestjson1_serializeDocumentArtifactsConfiguration(v.ArtifactsConfiguration, ok); err != nil { + return err + } + } + + if v.SourceConfiguration != nil { + ok := object.Key("SourceConfiguration") + if err := awsRestjson1_serializeDocumentSourceConfiguration(v.SourceConfiguration, ok); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeDocumentContentArtifactsConfiguration(v *types.ContentArtifactsConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.MuxType) > 0 { + ok := object.Key("MuxType") + ok.String(string(v.MuxType)) + } + + if len(v.State) > 0 { + ok := object.Key("State") + 
ok.String(string(v.State)) + } + + return nil +} + +func awsRestjson1_serializeDocumentExternalUserIdList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsRestjson1_serializeDocumentSelectedVideoStreams(v *types.SelectedVideoStreams, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AttendeeIds != nil { + ok := object.Key("AttendeeIds") + if err := awsRestjson1_serializeDocumentAttendeeIdList(v.AttendeeIds, ok); err != nil { + return err + } + } + + if v.ExternalUserIds != nil { + ok := object.Key("ExternalUserIds") + if err := awsRestjson1_serializeDocumentExternalUserIdList(v.ExternalUserIds, ok); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeDocumentSourceConfiguration(v *types.SourceConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.SelectedVideoStreams != nil { + ok := object.Key("SelectedVideoStreams") + if err := awsRestjson1_serializeDocumentSelectedVideoStreams(v.SelectedVideoStreams, ok); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeDocumentTag(v *types.Tag, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Key != nil { + ok := object.Key("Key") + ok.String(*v.Key) + } + + if v.Value != nil { + ok := object.Key("Value") + ok.String(*v.Value) + } + + return nil +} + +func awsRestjson1_serializeDocumentTagKeyList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsRestjson1_serializeDocumentTagList(v []types.Tag, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsRestjson1_serializeDocumentTag(&v[i], 
av); err != nil { + return err + } + } + return nil +} + +func awsRestjson1_serializeDocumentVideoArtifactsConfiguration(v *types.VideoArtifactsConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.MuxType) > 0 { + ok := object.Key("MuxType") + ok.String(string(v.MuxType)) + } + + if len(v.State) > 0 { + ok := object.Key("State") + ok.String(string(v.State)) + } + + return nil +} diff --git a/service/chimesdkmediapipelines/types/enums.go b/service/chimesdkmediapipelines/types/enums.go new file mode 100644 index 00000000000..384576fc32d --- /dev/null +++ b/service/chimesdkmediapipelines/types/enums.go @@ -0,0 +1,155 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +type ArtifactsState string + +// Enum values for ArtifactsState +const ( + ArtifactsStateEnabled ArtifactsState = "Enabled" + ArtifactsStateDisabled ArtifactsState = "Disabled" +) + +// Values returns all known values for ArtifactsState. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ArtifactsState) Values() []ArtifactsState { + return []ArtifactsState{ + "Enabled", + "Disabled", + } +} + +type AudioMuxType string + +// Enum values for AudioMuxType +const ( + AudioMuxTypeAudioOnly AudioMuxType = "AudioOnly" + AudioMuxTypeAudioWithActiveSpeakerVideo AudioMuxType = "AudioWithActiveSpeakerVideo" +) + +// Values returns all known values for AudioMuxType. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. 
// ArtifactsState indicates whether an artifact type is enabled or disabled.
type ArtifactsState string

// Enum values for ArtifactsState
const (
	ArtifactsStateEnabled  ArtifactsState = "Enabled"
	ArtifactsStateDisabled ArtifactsState = "Disabled"
)

// Values returns all known values for ArtifactsState. Note that this can be
// expanded in the future, and so it is only as up to date as the client. The
// ordering of this slice is not guaranteed to be stable across updates.
func (ArtifactsState) Values() []ArtifactsState {
	return []ArtifactsState{
		ArtifactsStateEnabled,
		ArtifactsStateDisabled,
	}
}

// AudioMuxType selects how audio is muxed into the capture artifacts.
type AudioMuxType string

// Enum values for AudioMuxType
const (
	AudioMuxTypeAudioOnly                   AudioMuxType = "AudioOnly"
	AudioMuxTypeAudioWithActiveSpeakerVideo AudioMuxType = "AudioWithActiveSpeakerVideo"
)

// Values returns all known values for AudioMuxType. Note that this can be
// expanded in the future, and so it is only as up to date as the client. The
// ordering of this slice is not guaranteed to be stable across updates.
func (AudioMuxType) Values() []AudioMuxType {
	return []AudioMuxType{
		AudioMuxTypeAudioOnly,
		AudioMuxTypeAudioWithActiveSpeakerVideo,
	}
}

// ContentMuxType selects how content (screen share) is muxed.
type ContentMuxType string

// Enum values for ContentMuxType
const (
	ContentMuxTypeContentOnly ContentMuxType = "ContentOnly"
)

// Values returns all known values for ContentMuxType. Note that this can be
// expanded in the future, and so it is only as up to date as the client. The
// ordering of this slice is not guaranteed to be stable across updates.
func (ContentMuxType) Values() []ContentMuxType {
	return []ContentMuxType{
		ContentMuxTypeContentOnly,
	}
}

// ErrorCode enumerates the service-level error codes carried on exceptions.
type ErrorCode string

// Enum values for ErrorCode
const (
	ErrorCodeBadRequest            ErrorCode = "BadRequest"
	ErrorCodeForbidden             ErrorCode = "Forbidden"
	ErrorCodeNotFound              ErrorCode = "NotFound"
	ErrorCodeResourceLimitExceeded ErrorCode = "ResourceLimitExceeded"
	ErrorCodeServiceFailure        ErrorCode = "ServiceFailure"
	ErrorCodeServiceUnavailable    ErrorCode = "ServiceUnavailable"
	ErrorCodeThrottling            ErrorCode = "Throttling"
)

// Values returns all known values for ErrorCode. Note that this can be
// expanded in the future, and so it is only as up to date as the client. The
// ordering of this slice is not guaranteed to be stable across updates.
func (ErrorCode) Values() []ErrorCode {
	return []ErrorCode{
		ErrorCodeBadRequest,
		ErrorCodeForbidden,
		ErrorCodeNotFound,
		ErrorCodeResourceLimitExceeded,
		ErrorCodeServiceFailure,
		ErrorCodeServiceUnavailable,
		ErrorCodeThrottling,
	}
}

// MediaPipelineSinkType identifies the destination kind for saved artifacts.
type MediaPipelineSinkType string

// Enum values for MediaPipelineSinkType
const (
	MediaPipelineSinkTypeS3Bucket MediaPipelineSinkType = "S3Bucket"
)

// Values returns all known values for MediaPipelineSinkType. Note that this
// can be expanded in the future, and so it is only as up to date as the
// client. The ordering of this slice is not guaranteed to be stable across
// updates.
func (MediaPipelineSinkType) Values() []MediaPipelineSinkType {
	return []MediaPipelineSinkType{
		MediaPipelineSinkTypeS3Bucket,
	}
}

// MediaPipelineSourceType identifies the source kind for captured media.
type MediaPipelineSourceType string

// Enum values for MediaPipelineSourceType
const (
	MediaPipelineSourceTypeChimeSdkMeeting MediaPipelineSourceType = "ChimeSdkMeeting"
)

// Values returns all known values for MediaPipelineSourceType. Note that this
// can be expanded in the future, and so it is only as up to date as the
// client. The ordering of this slice is not guaranteed to be stable across
// updates.
func (MediaPipelineSourceType) Values() []MediaPipelineSourceType {
	return []MediaPipelineSourceType{
		MediaPipelineSourceTypeChimeSdkMeeting,
	}
}

// MediaPipelineStatus reports the lifecycle state of a capture pipeline.
type MediaPipelineStatus string

// Enum values for MediaPipelineStatus
const (
	MediaPipelineStatusInitializing MediaPipelineStatus = "Initializing"
	MediaPipelineStatusInProgress   MediaPipelineStatus = "InProgress"
	MediaPipelineStatusFailed       MediaPipelineStatus = "Failed"
	MediaPipelineStatusStopping     MediaPipelineStatus = "Stopping"
	MediaPipelineStatusStopped      MediaPipelineStatus = "Stopped"
)

// Values returns all known values for MediaPipelineStatus. Note that this can
// be expanded in the future, and so it is only as up to date as the client.
// The ordering of this slice is not guaranteed to be stable across updates.
func (MediaPipelineStatus) Values() []MediaPipelineStatus {
	return []MediaPipelineStatus{
		MediaPipelineStatusInitializing,
		MediaPipelineStatusInProgress,
		MediaPipelineStatusFailed,
		MediaPipelineStatusStopping,
		MediaPipelineStatusStopped,
	}
}

// VideoMuxType selects how video is muxed into the capture artifacts.
type VideoMuxType string

// Enum values for VideoMuxType
const (
	VideoMuxTypeVideoOnly VideoMuxType = "VideoOnly"
)

// Values returns all known values for VideoMuxType. Note that this can be
// expanded in the future, and so it is only as up to date as the client. The
// ordering of this slice is not guaranteed to be stable across updates.
func (VideoMuxType) Values() []VideoMuxType {
	return []VideoMuxType{
		VideoMuxTypeVideoOnly,
	}
}
+func (VideoMuxType) Values() []VideoMuxType { + return []VideoMuxType{ + "VideoOnly", + } +} diff --git a/service/chimesdkmediapipelines/types/errors.go b/service/chimesdkmediapipelines/types/errors.go new file mode 100644 index 00000000000..1bc5d78f3e2 --- /dev/null +++ b/service/chimesdkmediapipelines/types/errors.go @@ -0,0 +1,184 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// The input parameters don't match the service's restrictions. +type BadRequestException struct { + Message *string + + Code ErrorCode + RequestId *string + + noSmithyDocumentSerde +} + +func (e *BadRequestException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *BadRequestException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *BadRequestException) ErrorCode() string { return "BadRequestException" } +func (e *BadRequestException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The client is permanently forbidden from making the request. +type ForbiddenException struct { + Message *string + + Code ErrorCode + RequestId *string + + noSmithyDocumentSerde +} + +func (e *ForbiddenException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ForbiddenException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ForbiddenException) ErrorCode() string { return "ForbiddenException" } +func (e *ForbiddenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// One or more of the resources in the request does not exist in the system. 
+type NotFoundException struct { + Message *string + + Code ErrorCode + RequestId *string + + noSmithyDocumentSerde +} + +func (e *NotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *NotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *NotFoundException) ErrorCode() string { return "NotFoundException" } +func (e *NotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request exceeds the resource limit. +type ResourceLimitExceededException struct { + Message *string + + Code ErrorCode + RequestId *string + + noSmithyDocumentSerde +} + +func (e *ResourceLimitExceededException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ResourceLimitExceededException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ResourceLimitExceededException) ErrorCode() string { return "ResourceLimitExceededException" } +func (e *ResourceLimitExceededException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The service encountered an unexpected error. +type ServiceFailureException struct { + Message *string + + Code ErrorCode + RequestId *string + + noSmithyDocumentSerde +} + +func (e *ServiceFailureException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ServiceFailureException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ServiceFailureException) ErrorCode() string { return "ServiceFailureException" } +func (e *ServiceFailureException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } + +// The service is currently unavailable. 
+type ServiceUnavailableException struct { + Message *string + + Code ErrorCode + RequestId *string + + noSmithyDocumentSerde +} + +func (e *ServiceUnavailableException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ServiceUnavailableException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ServiceUnavailableException) ErrorCode() string { return "ServiceUnavailableException" } +func (e *ServiceUnavailableException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } + +// The client exceeded its request rate limit. +type ThrottledClientException struct { + Message *string + + Code ErrorCode + RequestId *string + + noSmithyDocumentSerde +} + +func (e *ThrottledClientException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ThrottledClientException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ThrottledClientException) ErrorCode() string { return "ThrottledClientException" } +func (e *ThrottledClientException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The client is not currently authorized to make the request. 
+type UnauthorizedClientException struct { + Message *string + + Code ErrorCode + RequestId *string + + noSmithyDocumentSerde +} + +func (e *UnauthorizedClientException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *UnauthorizedClientException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *UnauthorizedClientException) ErrorCode() string { return "UnauthorizedClientException" } +func (e *UnauthorizedClientException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/service/chimesdkmediapipelines/types/types.go b/service/chimesdkmediapipelines/types/types.go new file mode 100644 index 00000000000..9d58cc9f6f2 --- /dev/null +++ b/service/chimesdkmediapipelines/types/types.go @@ -0,0 +1,173 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" + "time" +) + +// The configuration for the artifacts. +type ArtifactsConfiguration struct { + + // The configuration for the audio artifacts. + // + // This member is required. + Audio *AudioArtifactsConfiguration + + // The configuration for the content artifacts. + // + // This member is required. + Content *ContentArtifactsConfiguration + + // The configuration for the video artifacts. + // + // This member is required. + Video *VideoArtifactsConfiguration + + noSmithyDocumentSerde +} + +// The audio artifact configuration object. +type AudioArtifactsConfiguration struct { + + // The MUX type of the audio artifact configuration object. + // + // This member is required. + MuxType AudioMuxType + + noSmithyDocumentSerde +} + +// The configuration object of the Amazon Chime SDK meeting for a specified media +// capture pipeline. SourceType must be ChimeSdkMeeting. +type ChimeSdkMeetingConfiguration struct { + + // The configuration for the artifacts in an Amazon Chime SDK meeting. 
+ ArtifactsConfiguration *ArtifactsConfiguration + + // The source configuration for a specified media capture pipline. + SourceConfiguration *SourceConfiguration + + noSmithyDocumentSerde +} + +// The content artifact object. +type ContentArtifactsConfiguration struct { + + // Indicates whether the content artifact is enabled or disabled. + // + // This member is required. + State ArtifactsState + + // The MUX type of the artifact configuration. + MuxType ContentMuxType + + noSmithyDocumentSerde +} + +// A media capture pipeline object consisting of an ID, source type, source ARN, a +// sink type, a sink ARN, and a configuration object. +type MediaCapturePipeline struct { + + // The configuration for a specified media capture pipeline. SourceType must be + // ChimeSdkMeeting. + ChimeSdkMeetingConfiguration *ChimeSdkMeetingConfiguration + + // The time at which the capture pipeline was created, in ISO 8601 format. + CreatedTimestamp *time.Time + + // The ARN of a media capture pipeline. + MediaPipelineArn *string + + // The ID of a media capture pipeline. + MediaPipelineId *string + + // ARN of the destination to which the media artifacts are saved. + SinkArn *string + + // Destination type to which the media artifacts are saved. You must use an S3 + // Bucket. + SinkType MediaPipelineSinkType + + // ARN of the source from which the media artifacts are saved. + SourceArn *string + + // Source type from which media artifacts are saved. You must use ChimeMeeting. + SourceType MediaPipelineSourceType + + // The status of the media capture pipeline. + Status MediaPipelineStatus + + // The time at which the capture pipeline was updated, in ISO 8601 format. + UpdatedTimestamp *time.Time + + noSmithyDocumentSerde +} + +// A summary of a media capture pipeline. +type MediaCapturePipelineSummary struct { + + // The ARN of a media capture pipeline. + MediaPipelineArn *string + + // The ID of a media capture pipeline. 
+ MediaPipelineId *string + + noSmithyDocumentSerde +} + +// The video streams to capture for a specified media capture pipeline. The total +// number of video streams can't exceed 25. +type SelectedVideoStreams struct { + + // The attendee IDs of the streams selected for a media capture pipeline. + AttendeeIds []string + + // The external user IDs of the streams selected for a media capture pipeline. + ExternalUserIds []string + + noSmithyDocumentSerde +} + +// Source configuration for a specified media capture pipeline. +type SourceConfiguration struct { + + // The selected video streams to capture for a specified media capture pipeline. + // The number of video streams can't exceed 25. + SelectedVideoStreams *SelectedVideoStreams + + noSmithyDocumentSerde +} + +// Describes a tag applied to a resource. +type Tag struct { + + // The key of the tag. + // + // This member is required. + Key *string + + // The value of the tag. + // + // This member is required. + Value *string + + noSmithyDocumentSerde +} + +// The video artifact configuration object. +type VideoArtifactsConfiguration struct { + + // Indicates whether the video artifact is enabled or disabled. + // + // This member is required. + State ArtifactsState + + // The MUX type of the video artifact configuration object. + MuxType VideoMuxType + + noSmithyDocumentSerde +} + +type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/service/chimesdkmediapipelines/validators.go b/service/chimesdkmediapipelines/validators.go new file mode 100644 index 00000000000..25b6ded8e33 --- /dev/null +++ b/service/chimesdkmediapipelines/validators.go @@ -0,0 +1,404 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package chimesdkmediapipelines + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +type validateOpCreateMediaCapturePipeline struct { +} + +func (*validateOpCreateMediaCapturePipeline) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateMediaCapturePipeline) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateMediaCapturePipelineInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateMediaCapturePipelineInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteMediaCapturePipeline struct { +} + +func (*validateOpDeleteMediaCapturePipeline) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteMediaCapturePipeline) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteMediaCapturePipelineInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteMediaCapturePipelineInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetMediaCapturePipeline struct { +} + +func (*validateOpGetMediaCapturePipeline) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetMediaCapturePipeline) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata 
middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetMediaCapturePipelineInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetMediaCapturePipelineInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListTagsForResource struct { +} + +func (*validateOpListTagsForResource) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListTagsForResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListTagsForResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListTagsForResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpTagResource struct { +} + +func (*validateOpTagResource) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpTagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*TagResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpTagResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUntagResource struct { +} + +func (*validateOpUntagResource) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUntagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err 
error, +) { + input, ok := in.Parameters.(*UntagResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUntagResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +func addOpCreateMediaCapturePipelineValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateMediaCapturePipeline{}, middleware.After) +} + +func addOpDeleteMediaCapturePipelineValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteMediaCapturePipeline{}, middleware.After) +} + +func addOpGetMediaCapturePipelineValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetMediaCapturePipeline{}, middleware.After) +} + +func addOpListTagsForResourceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListTagsForResource{}, middleware.After) +} + +func addOpTagResourceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpTagResource{}, middleware.After) +} + +func addOpUntagResourceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUntagResource{}, middleware.After) +} + +func validateArtifactsConfiguration(v *types.ArtifactsConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ArtifactsConfiguration"} + if v.Audio == nil { + invalidParams.Add(smithy.NewErrParamRequired("Audio")) + } else if v.Audio != nil { + if err := validateAudioArtifactsConfiguration(v.Audio); err != nil { + invalidParams.AddNested("Audio", err.(smithy.InvalidParamsError)) + } + } + if v.Video == nil { + invalidParams.Add(smithy.NewErrParamRequired("Video")) + } else if v.Video != nil { + if err := validateVideoArtifactsConfiguration(v.Video); err != nil { + invalidParams.AddNested("Video", 
err.(smithy.InvalidParamsError)) + } + } + if v.Content == nil { + invalidParams.Add(smithy.NewErrParamRequired("Content")) + } else if v.Content != nil { + if err := validateContentArtifactsConfiguration(v.Content); err != nil { + invalidParams.AddNested("Content", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateAudioArtifactsConfiguration(v *types.AudioArtifactsConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AudioArtifactsConfiguration"} + if len(v.MuxType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("MuxType")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateChimeSdkMeetingConfiguration(v *types.ChimeSdkMeetingConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ChimeSdkMeetingConfiguration"} + if v.ArtifactsConfiguration != nil { + if err := validateArtifactsConfiguration(v.ArtifactsConfiguration); err != nil { + invalidParams.AddNested("ArtifactsConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateContentArtifactsConfiguration(v *types.ContentArtifactsConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ContentArtifactsConfiguration"} + if len(v.State) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("State")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTag(v *types.Tag) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Tag"} + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.Value == nil { + invalidParams.Add(smithy.NewErrParamRequired("Value")) + } + if invalidParams.Len() > 0 
{ + return invalidParams + } else { + return nil + } +} + +func validateTagList(v []types.Tag) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TagList"} + for i := range v { + if err := validateTag(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateVideoArtifactsConfiguration(v *types.VideoArtifactsConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "VideoArtifactsConfiguration"} + if len(v.State) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("State")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateMediaCapturePipelineInput(v *CreateMediaCapturePipelineInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateMediaCapturePipelineInput"} + if len(v.SourceType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("SourceType")) + } + if v.SourceArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("SourceArn")) + } + if len(v.SinkType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("SinkType")) + } + if v.SinkArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("SinkArn")) + } + if v.ChimeSdkMeetingConfiguration != nil { + if err := validateChimeSdkMeetingConfiguration(v.ChimeSdkMeetingConfiguration); err != nil { + invalidParams.AddNested("ChimeSdkMeetingConfiguration", err.(smithy.InvalidParamsError)) + } + } + if v.Tags != nil { + if err := validateTagList(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteMediaCapturePipelineInput(v *DeleteMediaCapturePipelineInput) error { + if v == nil { + return nil + 
} + invalidParams := smithy.InvalidParamsError{Context: "DeleteMediaCapturePipelineInput"} + if v.MediaPipelineId == nil { + invalidParams.Add(smithy.NewErrParamRequired("MediaPipelineId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetMediaCapturePipelineInput(v *GetMediaCapturePipelineInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetMediaCapturePipelineInput"} + if v.MediaPipelineId == nil { + invalidParams.Add(smithy.NewErrParamRequired("MediaPipelineId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListTagsForResourceInput(v *ListTagsForResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListTagsForResourceInput"} + if v.ResourceARN == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceARN")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpTagResourceInput(v *TagResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TagResourceInput"} + if v.ResourceARN == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceARN")) + } + if v.Tags == nil { + invalidParams.Add(smithy.NewErrParamRequired("Tags")) + } else if v.Tags != nil { + if err := validateTagList(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUntagResourceInput(v *UntagResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UntagResourceInput"} + if v.ResourceARN == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceARN")) + } + if v.TagKeys == nil { + invalidParams.Add(smithy.NewErrParamRequired("TagKeys")) + } + if 
invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/service/cloudtrail/api_op_AddTags.go b/service/cloudtrail/api_op_AddTags.go index 165437be567..dbd77bc6988 100644 --- a/service/cloudtrail/api_op_AddTags.go +++ b/service/cloudtrail/api_op_AddTags.go @@ -11,13 +11,14 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Adds one or more tags to a trail, up to a limit of 50. Overwrites an existing -// tag's value when a new value is specified for an existing tag key. Tag key names -// must be unique for a trail; you cannot have two keys with the same name but -// different values. If you specify a key without a value, the tag will be created -// with the specified key and a value of null. You can tag a trail that applies to -// all Amazon Web Services Regions only from the Region in which the trail was -// created (also known as its home region). +// Adds one or more tags to a trail or event data store, up to a limit of 50. +// Overwrites an existing tag's value when a new value is specified for an existing +// tag key. Tag key names must be unique for a trail; you cannot have two keys with +// the same name but different values. If you specify a key without a value, the +// tag will be created with the specified key and a value of null. You can tag a +// trail or event data store that applies to all Amazon Web Services Regions only +// from the Region in which the trail or event data store was created (also known +// as its home region). func (c *Client) AddTags(ctx context.Context, params *AddTagsInput, optFns ...func(*Options)) (*AddTagsOutput, error) { if params == nil { params = &AddTagsInput{} @@ -33,11 +34,11 @@ func (c *Client) AddTags(ctx context.Context, params *AddTagsInput, optFns ...fu return out, nil } -// Specifies the tags to add to a trail. +// Specifies the tags to add to a trail or event data store. 
type AddTagsInput struct { - // Specifies the ARN of the trail to which one or more tags will be added. The - // format of a trail ARN is: + // Specifies the ARN of the trail or event data store to which one or more tags + // will be added. The format of a trail ARN is: // arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail // // This member is required. diff --git a/service/cloudtrail/api_op_ListTags.go b/service/cloudtrail/api_op_ListTags.go index 75362dfb944..0e0ab6a6847 100644 --- a/service/cloudtrail/api_op_ListTags.go +++ b/service/cloudtrail/api_op_ListTags.go @@ -12,7 +12,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Lists the tags for the trail in the current region. +// Lists the tags for the trail or event data store in the current region. func (c *Client) ListTags(ctx context.Context, params *ListTagsInput, optFns ...func(*Options)) (*ListTagsOutput, error) { if params == nil { params = &ListTagsInput{} @@ -28,12 +28,11 @@ func (c *Client) ListTags(ctx context.Context, params *ListTagsInput, optFns ... return out, nil } -// Specifies a list of trail tags to return. +// Specifies a list of tags to return. type ListTagsInput struct { - // Specifies a list of trail ARNs whose tags will be listed. The list has a limit - // of 20 ARNs. The following is the format of a trail ARN. - // arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail + // Specifies a list of trail and event data store ARNs whose tags will be listed. + // The list has a limit of 20 ARNs. // // This member is required. ResourceIdList []string diff --git a/service/cloudtrail/api_op_RemoveTags.go b/service/cloudtrail/api_op_RemoveTags.go index 0c8debea77b..3bd9b89adfa 100644 --- a/service/cloudtrail/api_op_RemoveTags.go +++ b/service/cloudtrail/api_op_RemoveTags.go @@ -11,7 +11,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Removes the specified tags from a trail. +// Removes the specified tags from a trail or event data store. 
func (c *Client) RemoveTags(ctx context.Context, params *RemoveTagsInput, optFns ...func(*Options)) (*RemoveTagsOutput, error) { if params == nil { params = &RemoveTagsInput{} @@ -27,11 +27,14 @@ func (c *Client) RemoveTags(ctx context.Context, params *RemoveTagsInput, optFns return out, nil } -// Specifies the tags to remove from a trail. +// Specifies the tags to remove from a trail or event data store. type RemoveTagsInput struct { - // Specifies the ARN of the trail from which tags should be removed. The format of - // a trail ARN is: arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail + // Specifies the ARN of the trail or event data store from which tags should be + // removed. Example trail ARN format: + // arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail Example event data store + // ARN format: + // arn:aws:cloudtrail:us-east-2:12345678910:eventdatastore/EXAMPLE-f852-4e8f-8bd1-bcf6cEXAMPLE // // This member is required. ResourceId *string diff --git a/service/cloudtrail/types/errors.go b/service/cloudtrail/types/errors.go index 5ebb6435ced..10f975ef479 100644 --- a/service/cloudtrail/types/errors.go +++ b/service/cloudtrail/types/errors.go @@ -106,8 +106,8 @@ func (e *CloudWatchLogsDeliveryUnavailableException) ErrorFault() smithy.ErrorFa } // This exception is thrown when the specified resource is not ready for an -// operation. This can occur when you try to run an operation on a trail before -// CloudTrail has time to fully load the trail. If this exception occurs, wait a +// operation. This can occur when you try to run an operation on a resource before +// CloudTrail has time to fully load the resource. If this exception occurs, wait a // few minutes, and then try the operation again. type ConflictException struct { Message *string @@ -240,7 +240,7 @@ func (e *EventDataStoreTerminationProtectedException) ErrorFault() smithy.ErrorF return smithy.FaultClient } -// The event data store against which you ran your query is inactive. 
+// The event data store is inactive. type InactiveEventDataStoreException struct { Message *string @@ -302,10 +302,8 @@ func (e *InsightNotEnabledException) ErrorCode() string { return "In func (e *InsightNotEnabledException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // This exception is thrown when the IAM user or role that is used to create the -// organization trail is lacking one or more required permissions for creating an -// organization trail in a required service. For more information, see Prepare For -// Creating a Trail For Your Organization -// (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/creating-an-organizational-trail-prepare.html). +// organization resource lacks one or more required permissions for creating an +// organization resource in a required service. type InsufficientDependencyServiceAccessPermissionException struct { Message *string @@ -446,8 +444,9 @@ func (e *InvalidCloudWatchLogsRoleArnException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// A date range for the query was specified that is not valid. For more information -// about writing a query, see Create or edit a query +// A date range for the query was specified that is not valid. Be sure that the +// start time is chronologically before the end time. For more information about +// writing a query, see Create or edit a query // (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/query-create-edit-query.html) // in the CloudTrail User Guide. type InvalidDateRangeException struct { @@ -1016,10 +1015,12 @@ func (e *MaximumNumberOfTrailsExceededException) ErrorFault() smithy.ErrorFault } // This exception is thrown when the Amazon Web Services account making the request -// to create or update an organization trail is not the management account for an -// organization in Organizations. 
For more information, see Prepare For Creating a -// Trail For Your Organization -// (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/creating-an-organizational-trail-prepare.html). +// to create or update an organization trail or event data store is not the +// management account for an organization in Organizations. For more information, +// see Prepare For Creating a Trail For Your Organization +// (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/creating-an-organizational-trail-prepare.html) +// or Create an event data store +// (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/query-event-data-store.html). type NotOrganizationMasterAccountException struct { Message *string @@ -1063,9 +1064,7 @@ func (e *OperationNotPermittedException) ErrorFault() smithy.ErrorFault { return // This exception is thrown when Organizations is not configured to support all // features. All features must be enabled in Organizations to support creating an -// organization trail. For more information, see Prepare For Creating a Trail For -// Your Organization -// (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/creating-an-organizational-trail-prepare.html). +// organization trail or event data store. type OrganizationNotInAllFeaturesModeException struct { Message *string diff --git a/service/cloudtrail/types/types.go b/service/cloudtrail/types/types.go index 99e1de7f683..9fc20b6a22e 100644 --- a/service/cloudtrail/types/types.go +++ b/service/cloudtrail/types/types.go @@ -372,38 +372,59 @@ type Event struct { // (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#creating-data-event-selectors-advanced). type EventDataStore struct { - // The advanced event selectors that were used to select events for the data store. + // This field is being deprecated. The advanced event selectors that were used to + // select events for the data store. 
+ // + // Deprecated: AdvancedEventSelectors is no longer returned by ListEventDataStores AdvancedEventSelectors []AdvancedEventSelector - // The timestamp of the event data store's creation. + // This field is being deprecated. The timestamp of the event data store's + // creation. + // + // Deprecated: CreatedTimestamp is no longer returned by ListEventDataStores CreatedTimestamp *time.Time // The ARN of the event data store. EventDataStoreArn *string - // Indicates whether the event data store includes events from all regions, or only - // from the region in which it was created. + // This field is being deprecated. Indicates whether the event data store includes + // events from all regions, or only from the region in which it was created. + // + // Deprecated: MultiRegionEnabled is no longer returned by ListEventDataStores MultiRegionEnabled *bool // The name of the event data store. Name *string - // Indicates that an event data store is collecting logged events for an - // organization. + // This field is being deprecated. Indicates that an event data store is collecting + // logged events for an organization. + // + // Deprecated: OrganizationEnabled is no longer returned by ListEventDataStores OrganizationEnabled *bool - // The retention period, in days. + // This field is being deprecated. The retention period, in days. + // + // Deprecated: RetentionPeriod is no longer returned by ListEventDataStores RetentionPeriod *int32 - // The status of an event data store. Values are ENABLED and PENDING_DELETION. + // This field is being deprecated. The status of an event data store. Values are + // ENABLED and PENDING_DELETION. + // + // Deprecated: Status is no longer returned by ListEventDataStores Status EventDataStoreStatus - // Indicates whether the event data store is protected from termination. + // This field is being deprecated. Indicates whether the event data store is + // protected from termination. 
+ // + // Deprecated: TerminationProtectionEnabled is no longer returned by + // ListEventDataStores TerminationProtectionEnabled *bool - // The timestamp showing when an event data store was updated, if applicable. - // UpdatedTimestamp is always either the same or newer than the time shown in - // CreatedTimestamp. + // This field is being deprecated. The timestamp showing when an event data store + // was updated, if applicable. UpdatedTimestamp is always either the same or newer + // than the time shown in CreatedTimestamp. + // + // Deprecated: UpdatedTimestamp is no longer returned by ListEventDataStores UpdatedTimestamp *time.Time noSmithyDocumentSerde diff --git a/service/iotwireless/api_op_CreateNetworkAnalyzerConfiguration.go b/service/iotwireless/api_op_CreateNetworkAnalyzerConfiguration.go new file mode 100644 index 00000000000..bcab1527ed1 --- /dev/null +++ b/service/iotwireless/api_op_CreateNetworkAnalyzerConfiguration.go @@ -0,0 +1,185 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package iotwireless + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/iotwireless/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a new network analyzer configuration. 
+func (c *Client) CreateNetworkAnalyzerConfiguration(ctx context.Context, params *CreateNetworkAnalyzerConfigurationInput, optFns ...func(*Options)) (*CreateNetworkAnalyzerConfigurationOutput, error) { + if params == nil { + params = &CreateNetworkAnalyzerConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateNetworkAnalyzerConfiguration", params, optFns, c.addOperationCreateNetworkAnalyzerConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateNetworkAnalyzerConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateNetworkAnalyzerConfigurationInput struct { + + // Name of the network analyzer configuration. + // + // This member is required. + Name *string + + // Each resource must have a unique client request token. If you try to create a + // new resource with the same token as a resource that already exists, an exception + // occurs. If you omit this value, AWS SDKs will automatically generate a unique + // client request. + ClientRequestToken *string + + // The description of the new resource. + Description *string + + // The tag to attach to the specified resource. Tags are metadata that you can use + // to manage a resource. + Tags []types.Tag + + // Trace content for your wireless gateway and wireless device resources. + TraceContent *types.TraceContent + + // Wireless device resources to add to the network analyzer configuration. Provide + // the WirelessDeviceId of the resource to add in the input array. + WirelessDevices []string + + // Wireless gateway resources to add to the network analyzer configuration. Provide + // the WirelessGatewayId of the resource to add in the input array. + WirelessGateways []string + + noSmithyDocumentSerde +} + +type CreateNetworkAnalyzerConfigurationOutput struct { + + // The Amazon Resource Name of the new resource. + Arn *string + + // Name of the network analyzer configuration. 
+ Name *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateNetworkAnalyzerConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateNetworkAnalyzerConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateNetworkAnalyzerConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addIdempotencyToken_opCreateNetworkAnalyzerConfigurationMiddleware(stack, options); err != nil { + return err + } + if err = addOpCreateNetworkAnalyzerConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateNetworkAnalyzerConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if 
err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +type idempotencyToken_initializeOpCreateNetworkAnalyzerConfiguration struct { + tokenProvider IdempotencyTokenProvider +} + +func (*idempotencyToken_initializeOpCreateNetworkAnalyzerConfiguration) ID() string { + return "OperationIdempotencyTokenAutoFill" +} + +func (m *idempotencyToken_initializeOpCreateNetworkAnalyzerConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.tokenProvider == nil { + return next.HandleInitialize(ctx, in) + } + + input, ok := in.Parameters.(*CreateNetworkAnalyzerConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("expected middleware input to be of type *CreateNetworkAnalyzerConfigurationInput ") + } + + if input.ClientRequestToken == nil { + t, err := m.tokenProvider.GetIdempotencyToken() + if err != nil { + return out, metadata, err + } + input.ClientRequestToken = &t + } + return next.HandleInitialize(ctx, in) +} +func addIdempotencyToken_opCreateNetworkAnalyzerConfigurationMiddleware(stack *middleware.Stack, cfg Options) error { + return stack.Initialize.Add(&idempotencyToken_initializeOpCreateNetworkAnalyzerConfiguration{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before) +} + +func newServiceMetadataMiddleware_opCreateNetworkAnalyzerConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "iotwireless", + OperationName: "CreateNetworkAnalyzerConfiguration", + } +} diff --git a/service/iotwireless/api_op_DeleteNetworkAnalyzerConfiguration.go 
b/service/iotwireless/api_op_DeleteNetworkAnalyzerConfiguration.go new file mode 100644 index 00000000000..ddae731f82c --- /dev/null +++ b/service/iotwireless/api_op_DeleteNetworkAnalyzerConfiguration.go @@ -0,0 +1,116 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package iotwireless + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes a network analyzer configuration. +func (c *Client) DeleteNetworkAnalyzerConfiguration(ctx context.Context, params *DeleteNetworkAnalyzerConfigurationInput, optFns ...func(*Options)) (*DeleteNetworkAnalyzerConfigurationOutput, error) { + if params == nil { + params = &DeleteNetworkAnalyzerConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteNetworkAnalyzerConfiguration", params, optFns, c.addOperationDeleteNetworkAnalyzerConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteNetworkAnalyzerConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteNetworkAnalyzerConfigurationInput struct { + + // Name of the network analyzer configuration. + // + // This member is required. + ConfigurationName *string + + noSmithyDocumentSerde +} + +type DeleteNetworkAnalyzerConfigurationOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteNetworkAnalyzerConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpDeleteNetworkAnalyzerConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpDeleteNetworkAnalyzerConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteNetworkAnalyzerConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteNetworkAnalyzerConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, 
 options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteNetworkAnalyzerConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "iotwireless", + OperationName: "DeleteNetworkAnalyzerConfiguration", + } +} diff --git a/service/iotwireless/api_op_DeleteQueuedMessages.go b/service/iotwireless/api_op_DeleteQueuedMessages.go index 3e533686733..753a6a6b506 100644 --- a/service/iotwireless/api_op_DeleteQueuedMessages.go +++ b/service/iotwireless/api_op_DeleteQueuedMessages.go @@ -11,7 +11,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// The operation to delete queued messages. +// Remove queued messages from the downlink queue. func (c *Client) DeleteQueuedMessages(ctx context.Context, params *DeleteQueuedMessagesInput, optFns ...func(*Options)) (*DeleteQueuedMessagesOutput, error) { if params == nil { params = &DeleteQueuedMessagesInput{} @@ -29,18 +29,19 @@ func (c *Client) DeleteQueuedMessages(ctx context.Context, params *DeleteQueuedM type DeleteQueuedMessagesInput struct { - // Id of a given wireless device which messages will be deleted + // The ID of a given wireless device for which downlink messages will be deleted. // // This member is required. Id *string - // if messageID=="*", the queue for a particular wireless deviceId will be purged, - // otherwise, the specific message with messageId will be deleted + // If message ID is "*", it clears the entire downlink queue for a given device, + // specified by the wireless device ID. Otherwise, the downlink message with the + // specified message ID will be deleted. // // This member is required. MessageId *string - // The wireless device type, it is either Sidewalk or LoRaWAN. + // The wireless device type, which can be either Sidewalk or LoRaWAN. 
WirelessDeviceType types.WirelessDeviceType noSmithyDocumentSerde diff --git a/service/iotwireless/api_op_GetEventConfigurationByResourceTypes.go b/service/iotwireless/api_op_GetEventConfigurationByResourceTypes.go new file mode 100644 index 00000000000..90e1c193e74 --- /dev/null +++ b/service/iotwireless/api_op_GetEventConfigurationByResourceTypes.go @@ -0,0 +1,121 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package iotwireless + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/iotwireless/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Get the event configuration by resource types. +func (c *Client) GetEventConfigurationByResourceTypes(ctx context.Context, params *GetEventConfigurationByResourceTypesInput, optFns ...func(*Options)) (*GetEventConfigurationByResourceTypesOutput, error) { + if params == nil { + params = &GetEventConfigurationByResourceTypesInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetEventConfigurationByResourceTypes", params, optFns, c.addOperationGetEventConfigurationByResourceTypesMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetEventConfigurationByResourceTypesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetEventConfigurationByResourceTypesInput struct { + noSmithyDocumentSerde +} + +type GetEventConfigurationByResourceTypesOutput struct { + + // Resource type event configuration for the connection status event + ConnectionStatus *types.ConnectionStatusResourceTypeEventConfiguration + + // Resource type event configuration for the device registration state event + DeviceRegistrationState *types.DeviceRegistrationStateResourceTypeEventConfiguration + + // Resource type event configuration for the join event + Join *types.JoinResourceTypeEventConfiguration + + // Resource type 
event configuration for the proximity event + Proximity *types.ProximityResourceTypeEventConfiguration + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetEventConfigurationByResourceTypesMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpGetEventConfigurationByResourceTypes{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpGetEventConfigurationByResourceTypes{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetEventConfigurationByResourceTypes(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + 
return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetEventConfigurationByResourceTypes(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "iotwireless", + OperationName: "GetEventConfigurationByResourceTypes", + } +} diff --git a/service/iotwireless/api_op_GetLogLevelsByResourceTypes.go b/service/iotwireless/api_op_GetLogLevelsByResourceTypes.go index e89185f88f6..4104b021e14 100644 --- a/service/iotwireless/api_op_GetLogLevelsByResourceTypes.go +++ b/service/iotwireless/api_op_GetLogLevelsByResourceTypes.go @@ -35,7 +35,9 @@ type GetLogLevelsByResourceTypesInput struct { type GetLogLevelsByResourceTypesOutput struct { - // The log level for a log message. + // The log level for a log message. The log levels can be disabled, or set to ERROR + // to display less verbose logs containing only error information, or to INFO for + // more detailed logs. DefaultLogLevel types.LogLevel // The list of wireless device log options. diff --git a/service/iotwireless/api_op_GetNetworkAnalyzerConfiguration.go b/service/iotwireless/api_op_GetNetworkAnalyzerConfiguration.go index 9e5b919a832..5d93a344e25 100644 --- a/service/iotwireless/api_op_GetNetworkAnalyzerConfiguration.go +++ b/service/iotwireless/api_op_GetNetworkAnalyzerConfiguration.go @@ -11,7 +11,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Get NetworkAnalyzer configuration. +// Get network analyzer configuration. 
 func (c *Client) GetNetworkAnalyzerConfiguration(ctx context.Context, params *GetNetworkAnalyzerConfigurationInput, optFns ...func(*Options)) (*GetNetworkAnalyzerConfigurationOutput, error) { if params == nil { params = &GetNetworkAnalyzerConfigurationInput{} @@ -29,7 +29,7 @@ func (c *Client) GetNetworkAnalyzerConfiguration(ctx context.Context, params *Ge type GetNetworkAnalyzerConfigurationInput struct { - // NetworkAnalyzer configuration name. + // Name of the network analyzer configuration. // // This member is required. ConfigurationName *string @@ -39,13 +39,24 @@ type GetNetworkAnalyzerConfigurationInput struct { type GetNetworkAnalyzerConfigurationOutput struct { - // Trace Content for resources. + // The Amazon Resource Name of the new resource. + Arn *string + + // The description of the new resource. + Description *string + + // Name of the network analyzer configuration. + Name *string + + // Trace content for your wireless gateway and wireless device resources. TraceContent *types.TraceContent - // List of WirelessDevices in the NetworkAnalyzerConfiguration. + // List of wireless device resources that have been added to the network analyzer + // configuration. WirelessDevices []string - // List of WirelessGateways in the NetworkAnalyzerConfiguration. + // List of wireless gateway resources that have been added to the network analyzer + // configuration. WirelessGateways []string // Metadata pertaining to the operation's result. diff --git a/service/iotwireless/api_op_GetResourceEventConfiguration.go b/service/iotwireless/api_op_GetResourceEventConfiguration.go index 51ab76d77d6..93ea38fcef2 100644 --- a/service/iotwireless/api_op_GetResourceEventConfiguration.go +++ b/service/iotwireless/api_op_GetResourceEventConfiguration.go @@ -47,9 +47,15 @@ type GetResourceEventConfigurationInput struct { type GetResourceEventConfigurationOutput struct { + // Event configuration for the connection status event. 
+ ConnectionStatus *types.ConnectionStatusEventConfiguration + // Event configuration for the device registration state event DeviceRegistrationState *types.DeviceRegistrationStateEventConfiguration + // Event configuration for the join event. + Join *types.JoinEventConfiguration + // Event configuration for the Proximity event Proximity *types.ProximityEventConfiguration diff --git a/service/iotwireless/api_op_GetResourceLogLevel.go b/service/iotwireless/api_op_GetResourceLogLevel.go index 98afd5c778a..8a5b91755b1 100644 --- a/service/iotwireless/api_op_GetResourceLogLevel.go +++ b/service/iotwireless/api_op_GetResourceLogLevel.go @@ -46,7 +46,9 @@ type GetResourceLogLevelInput struct { type GetResourceLogLevelOutput struct { - // The log level for a log message. + // The log level for a log message. The log levels can be disabled, or set to ERROR + // to display less verbose logs containing only error information, or to INFO for + // more detailed logs. LogLevel types.LogLevel // Metadata pertaining to the operation's result. diff --git a/service/iotwireless/api_op_GetServiceEndpoint.go b/service/iotwireless/api_op_GetServiceEndpoint.go index 6e5e74fefb3..b3ea2c5d7a0 100644 --- a/service/iotwireless/api_op_GetServiceEndpoint.go +++ b/service/iotwireless/api_op_GetServiceEndpoint.go @@ -32,7 +32,7 @@ type GetServiceEndpointInput struct { // The service type for which to get endpoint information about. Can be CUPS for // the Configuration and Update Server endpoint, or LNS for the LoRaWAN Network - // Server endpoint. + // Server endpoint or CLAIM for the global endpoint. ServiceType types.WirelessGatewayServiceType noSmithyDocumentSerde diff --git a/service/iotwireless/api_op_ListEventConfigurations.go b/service/iotwireless/api_op_ListEventConfigurations.go new file mode 100644 index 00000000000..70f8b1f42b0 --- /dev/null +++ b/service/iotwireless/api_op_ListEventConfigurations.go @@ -0,0 +1,132 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package iotwireless + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/iotwireless/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// List event configurations where at least one event topic has been enabled. +func (c *Client) ListEventConfigurations(ctx context.Context, params *ListEventConfigurationsInput, optFns ...func(*Options)) (*ListEventConfigurationsOutput, error) { + if params == nil { + params = &ListEventConfigurationsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListEventConfigurations", params, optFns, c.addOperationListEventConfigurationsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListEventConfigurationsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListEventConfigurationsInput struct { + + // Resource type to filter event configurations. + // + // This member is required. + ResourceType types.EventNotificationResourceType + + // The maximum number of results to return in this operation. + MaxResults int32 + + // To retrieve the next set of results, the nextToken value from a previous + // response; otherwise null to receive the first set of results. + NextToken *string + + noSmithyDocumentSerde +} + +type ListEventConfigurationsOutput struct { + + // Event configurations of all events for a single resource. + EventConfigurationsList []types.EventConfigurationItem + + // To retrieve the next set of results, the nextToken value from a previous + // response; otherwise null to receive the first set of results. + NextToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListEventConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpListEventConfigurations{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListEventConfigurations{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpListEventConfigurationsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListEventConfigurations(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} 
+ +func newServiceMetadataMiddleware_opListEventConfigurations(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "iotwireless", + OperationName: "ListEventConfigurations", + } +} diff --git a/service/iotwireless/api_op_ListNetworkAnalyzerConfigurations.go b/service/iotwireless/api_op_ListNetworkAnalyzerConfigurations.go new file mode 100644 index 00000000000..4ea42c618c2 --- /dev/null +++ b/service/iotwireless/api_op_ListNetworkAnalyzerConfigurations.go @@ -0,0 +1,214 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package iotwireless + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/iotwireless/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists the network analyzer configurations. +func (c *Client) ListNetworkAnalyzerConfigurations(ctx context.Context, params *ListNetworkAnalyzerConfigurationsInput, optFns ...func(*Options)) (*ListNetworkAnalyzerConfigurationsOutput, error) { + if params == nil { + params = &ListNetworkAnalyzerConfigurationsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListNetworkAnalyzerConfigurations", params, optFns, c.addOperationListNetworkAnalyzerConfigurationsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListNetworkAnalyzerConfigurationsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListNetworkAnalyzerConfigurationsInput struct { + + // The maximum number of results to return in this operation. + MaxResults int32 + + // To retrieve the next set of results, the nextToken value from a previous + // response; otherwise null to receive the first set of results. 
+ NextToken *string + + noSmithyDocumentSerde +} + +type ListNetworkAnalyzerConfigurationsOutput struct { + + // The list of network analyzer configurations. + NetworkAnalyzerConfigurationList []types.NetworkAnalyzerConfigurations + + // The token to use to get the next set of results, or null if there are no + // additional results. + NextToken *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListNetworkAnalyzerConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpListNetworkAnalyzerConfigurations{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListNetworkAnalyzerConfigurations{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opListNetworkAnalyzerConfigurations(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListNetworkAnalyzerConfigurationsAPIClient is a client that implements the +// ListNetworkAnalyzerConfigurations operation. +type ListNetworkAnalyzerConfigurationsAPIClient interface { + ListNetworkAnalyzerConfigurations(context.Context, *ListNetworkAnalyzerConfigurationsInput, ...func(*Options)) (*ListNetworkAnalyzerConfigurationsOutput, error) +} + +var _ ListNetworkAnalyzerConfigurationsAPIClient = (*Client)(nil) + +// ListNetworkAnalyzerConfigurationsPaginatorOptions is the paginator options for +// ListNetworkAnalyzerConfigurations +type ListNetworkAnalyzerConfigurationsPaginatorOptions struct { + // The maximum number of results to return in this operation. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListNetworkAnalyzerConfigurationsPaginator is a paginator for +// ListNetworkAnalyzerConfigurations +type ListNetworkAnalyzerConfigurationsPaginator struct { + options ListNetworkAnalyzerConfigurationsPaginatorOptions + client ListNetworkAnalyzerConfigurationsAPIClient + params *ListNetworkAnalyzerConfigurationsInput + nextToken *string + firstPage bool +} + +// NewListNetworkAnalyzerConfigurationsPaginator returns a new +// ListNetworkAnalyzerConfigurationsPaginator +func NewListNetworkAnalyzerConfigurationsPaginator(client ListNetworkAnalyzerConfigurationsAPIClient, params *ListNetworkAnalyzerConfigurationsInput, optFns ...func(*ListNetworkAnalyzerConfigurationsPaginatorOptions)) *ListNetworkAnalyzerConfigurationsPaginator { + if params == nil { + params = &ListNetworkAnalyzerConfigurationsInput{} + } + + options := ListNetworkAnalyzerConfigurationsPaginatorOptions{} + if params.MaxResults != 0 { + options.Limit = params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListNetworkAnalyzerConfigurationsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListNetworkAnalyzerConfigurationsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListNetworkAnalyzerConfigurations page. +func (p *ListNetworkAnalyzerConfigurationsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListNetworkAnalyzerConfigurationsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + params.MaxResults = p.options.Limit + + result, err := p.client.ListNetworkAnalyzerConfigurations(ctx, ¶ms, optFns...) 
 + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListNetworkAnalyzerConfigurations(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "iotwireless", + OperationName: "ListNetworkAnalyzerConfigurations", + } +} diff --git a/service/iotwireless/api_op_ListQueuedMessages.go b/service/iotwireless/api_op_ListQueuedMessages.go index a979d39e64d..2c98734833c 100644 --- a/service/iotwireless/api_op_ListQueuedMessages.go +++ b/service/iotwireless/api_op_ListQueuedMessages.go @@ -12,7 +12,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// The operation to list queued messages. +// List queued messages in the downlink queue. func (c *Client) ListQueuedMessages(ctx context.Context, params *ListQueuedMessagesInput, optFns ...func(*Options)) (*ListQueuedMessagesOutput, error) { if params == nil { params = &ListQueuedMessagesInput{} @@ -30,7 +30,8 @@ func (c *Client) ListQueuedMessages(ctx context.Context, params *ListQueuedMessa type ListQueuedMessagesInput struct { - // Id of a given wireless device which the downlink packets are targeted + // The ID of a given wireless device to which the downlink message packets are + // being sent. // // This member is required. Id *string @@ -42,7 +43,7 @@ type ListQueuedMessagesInput struct { // response; otherwise null to receive the first set of results. NextToken *string - // The wireless device type, it is either Sidewalk or LoRaWAN. + // The wireless device type, which can be either Sidewalk or LoRaWAN. 
WirelessDeviceType types.WirelessDeviceType noSmithyDocumentSerde @@ -50,7 +51,7 @@ type ListQueuedMessagesInput struct { type ListQueuedMessagesOutput struct { - // The messages in downlink queue. + // The messages in the downlink queue. DownlinkQueueMessagesList []types.DownlinkQueueMessage // To retrieve the next set of results, the nextToken value from a previous diff --git a/service/iotwireless/api_op_PutResourceLogLevel.go b/service/iotwireless/api_op_PutResourceLogLevel.go index 7404f59eeb2..de4ba6b4e43 100644 --- a/service/iotwireless/api_op_PutResourceLogLevel.go +++ b/service/iotwireless/api_op_PutResourceLogLevel.go @@ -31,7 +31,9 @@ func (c *Client) PutResourceLogLevel(ctx context.Context, params *PutResourceLog type PutResourceLogLevelInput struct { - // The log level for a log message. + // The log level for a log message. The log levels can be disabled, or set to ERROR + // to display less verbose logs containing only error information, or to INFO for + // more detailed logs. // // This member is required. LogLevel types.LogLevel diff --git a/service/iotwireless/api_op_UpdateEventConfigurationByResourceTypes.go b/service/iotwireless/api_op_UpdateEventConfigurationByResourceTypes.go new file mode 100644 index 00000000000..c5af03dd5ca --- /dev/null +++ b/service/iotwireless/api_op_UpdateEventConfigurationByResourceTypes.go @@ -0,0 +1,125 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package iotwireless + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/iotwireless/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Update the event configuration by resource types. 
+func (c *Client) UpdateEventConfigurationByResourceTypes(ctx context.Context, params *UpdateEventConfigurationByResourceTypesInput, optFns ...func(*Options)) (*UpdateEventConfigurationByResourceTypesOutput, error) { + if params == nil { + params = &UpdateEventConfigurationByResourceTypesInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateEventConfigurationByResourceTypes", params, optFns, c.addOperationUpdateEventConfigurationByResourceTypesMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateEventConfigurationByResourceTypesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateEventConfigurationByResourceTypesInput struct { + + // Connection status resource type event configuration object for enabling and + // disabling wireless gateway topic. + ConnectionStatus *types.ConnectionStatusResourceTypeEventConfiguration + + // Device registration state resource type event configuration object for enabling + // and disabling wireless gateway topic. + DeviceRegistrationState *types.DeviceRegistrationStateResourceTypeEventConfiguration + + // Join resource type event configuration object for enabling and disabling + // wireless device topic. + Join *types.JoinResourceTypeEventConfiguration + + // Proximity resource type event configuration object for enabling and disabling + // wireless gateway topic. + Proximity *types.ProximityResourceTypeEventConfiguration + + noSmithyDocumentSerde +} + +type UpdateEventConfigurationByResourceTypesOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateEventConfigurationByResourceTypesMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpUpdateEventConfigurationByResourceTypes{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpUpdateEventConfigurationByResourceTypes{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateEventConfigurationByResourceTypes(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func 
newServiceMetadataMiddleware_opUpdateEventConfigurationByResourceTypes(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "iotwireless", + OperationName: "UpdateEventConfigurationByResourceTypes", + } +} diff --git a/service/iotwireless/api_op_UpdateLogLevelsByResourceTypes.go b/service/iotwireless/api_op_UpdateLogLevelsByResourceTypes.go index 2501c9a1148..61aa449ccbc 100644 --- a/service/iotwireless/api_op_UpdateLogLevelsByResourceTypes.go +++ b/service/iotwireless/api_op_UpdateLogLevelsByResourceTypes.go @@ -31,7 +31,9 @@ func (c *Client) UpdateLogLevelsByResourceTypes(ctx context.Context, params *Upd type UpdateLogLevelsByResourceTypesInput struct { - // The log level for a log message. + // The log level for a log message. The log levels can be disabled, or set to ERROR + // to display less verbose logs containing only error information, or to INFO for + // more detailed logs. DefaultLogLevel types.LogLevel // The list of wireless device log options. diff --git a/service/iotwireless/api_op_UpdateNetworkAnalyzerConfiguration.go b/service/iotwireless/api_op_UpdateNetworkAnalyzerConfiguration.go index b0541833aaa..29174bd1b79 100644 --- a/service/iotwireless/api_op_UpdateNetworkAnalyzerConfiguration.go +++ b/service/iotwireless/api_op_UpdateNetworkAnalyzerConfiguration.go @@ -11,7 +11,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Update NetworkAnalyzer configuration. +// Update network analyzer configuration. 
func (c *Client) UpdateNetworkAnalyzerConfiguration(ctx context.Context, params *UpdateNetworkAnalyzerConfigurationInput, optFns ...func(*Options)) (*UpdateNetworkAnalyzerConfigurationOutput, error) { if params == nil { params = &UpdateNetworkAnalyzerConfigurationInput{} @@ -29,24 +29,31 @@ func (c *Client) UpdateNetworkAnalyzerConfiguration(ctx context.Context, params type UpdateNetworkAnalyzerConfigurationInput struct { - // NetworkAnalyzer configuration name. + // Name of the network analyzer configuration. // // This member is required. ConfigurationName *string - // Trace Content for resources. + // The description of the new resource. + Description *string + + // Trace content for your wireless gateway and wireless device resources. TraceContent *types.TraceContent - // WirelessDevices to add into NetworkAnalyzerConfiguration. + // Wireless device resources to add to the network analyzer configuration. Provide + // the WirelessDeviceId of the resource to add in the input array. WirelessDevicesToAdd []string - // WirelessDevices to remove from NetworkAnalyzerConfiguration. + // Wireless device resources to remove from the network analyzer configuration. + // Provide the WirelessDeviceId of the resources to remove in the input array. WirelessDevicesToRemove []string - // WirelessGateways to add into NetworkAnalyzerConfiguration. + // Wireless gateway resources to add to the network analyzer configuration. Provide + // the WirelessGatewayId of the resource to add in the input array. WirelessGatewaysToAdd []string - // WirelessGateways to remove from NetworkAnalyzerConfiguration. + // Wireless gateway resources to remove from the network analyzer configuration. + // Provide the WirelessGatewayId of the resources to remove in the input array. 
WirelessGatewaysToRemove []string noSmithyDocumentSerde diff --git a/service/iotwireless/api_op_UpdateResourceEventConfiguration.go b/service/iotwireless/api_op_UpdateResourceEventConfiguration.go index 2e109206608..e13a9e0b414 100644 --- a/service/iotwireless/api_op_UpdateResourceEventConfiguration.go +++ b/service/iotwireless/api_op_UpdateResourceEventConfiguration.go @@ -39,9 +39,15 @@ type UpdateResourceEventConfigurationInput struct { // This member is required. IdentifierType types.IdentifierType + // Event configuration for the connection status event + ConnectionStatus *types.ConnectionStatusEventConfiguration + // Event configuration for the device registration state event DeviceRegistrationState *types.DeviceRegistrationStateEventConfiguration + // Event configuration for the join event + Join *types.JoinEventConfiguration + // Partner type of the resource if the identifier type is PartnerAccountId PartnerType types.EventNotificationPartnerType diff --git a/service/iotwireless/deserializers.go b/service/iotwireless/deserializers.go index dfb5cb470b3..05ff8f31176 100644 --- a/service/iotwireless/deserializers.go +++ b/service/iotwireless/deserializers.go @@ -1642,6 +1642,181 @@ func awsRestjson1_deserializeOpDocumentCreateMulticastGroupOutput(v **CreateMult return nil } +type awsRestjson1_deserializeOpCreateNetworkAnalyzerConfiguration struct { +} + +func (*awsRestjson1_deserializeOpCreateNetworkAnalyzerConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpCreateNetworkAnalyzerConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: 
fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorCreateNetworkAnalyzerConfiguration(response, &metadata) + } + output := &CreateNetworkAnalyzerConfigurationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentCreateNetworkAnalyzerConfigurationOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorCreateNetworkAnalyzerConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := 
restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ConflictException", errorCode): + return awsRestjson1_deserializeErrorConflictException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentCreateNetworkAnalyzerConfigurationOutput(v **CreateNetworkAnalyzerConfigurationOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateNetworkAnalyzerConfigurationOutput + if *v == nil { + sv = &CreateNetworkAnalyzerConfigurationOutput{} + } else { + sv = *v + } + + for key, value := range shape { 
+ switch key { + case "Arn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NetworkAnalyzerConfigurationArn to be of type string, got %T instead", value) + } + sv.Arn = ptr.String(jtv) + } + + case "Name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NetworkAnalyzerConfigurationName to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + type awsRestjson1_deserializeOpCreateServiceProfile struct { } @@ -2900,6 +3075,104 @@ func awsRestjson1_deserializeOpErrorDeleteMulticastGroup(response *smithyhttp.Re } } +type awsRestjson1_deserializeOpDeleteNetworkAnalyzerConfiguration struct { +} + +func (*awsRestjson1_deserializeOpDeleteNetworkAnalyzerConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpDeleteNetworkAnalyzerConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorDeleteNetworkAnalyzerConfiguration(response, &metadata) + } + output := &DeleteNetworkAnalyzerConfigurationOutput{} + out.Result = output + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorDeleteNetworkAnalyzerConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return 
&smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ConflictException", errorCode): + return awsRestjson1_deserializeErrorConflictException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + type 
awsRestjson1_deserializeOpDeleteQueuedMessages struct { } @@ -4538,7 +4811,175 @@ func awsRestjson1_deserializeOpDocumentGetDeviceProfileOutput(v **GetDeviceProfi return nil } -type awsRestjson1_deserializeOpGetFuotaTask struct { +type awsRestjson1_deserializeOpGetEventConfigurationByResourceTypes struct { +} + +func (*awsRestjson1_deserializeOpGetEventConfigurationByResourceTypes) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpGetEventConfigurationByResourceTypes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorGetEventConfigurationByResourceTypes(response, &metadata) + } + output := &GetEventConfigurationByResourceTypesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentGetEventConfigurationByResourceTypesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + 
Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorGetEventConfigurationByResourceTypes(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func 
awsRestjson1_deserializeOpDocumentGetEventConfigurationByResourceTypesOutput(v **GetEventConfigurationByResourceTypesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetEventConfigurationByResourceTypesOutput + if *v == nil { + sv = &GetEventConfigurationByResourceTypesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ConnectionStatus": + if err := awsRestjson1_deserializeDocumentConnectionStatusResourceTypeEventConfiguration(&sv.ConnectionStatus, value); err != nil { + return err + } + + case "DeviceRegistrationState": + if err := awsRestjson1_deserializeDocumentDeviceRegistrationStateResourceTypeEventConfiguration(&sv.DeviceRegistrationState, value); err != nil { + return err + } + + case "Join": + if err := awsRestjson1_deserializeDocumentJoinResourceTypeEventConfiguration(&sv.Join, value); err != nil { + return err + } + + case "Proximity": + if err := awsRestjson1_deserializeDocumentProximityResourceTypeEventConfiguration(&sv.Proximity, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpGetFuotaTask struct { } func (*awsRestjson1_deserializeOpGetFuotaTask) ID() string { @@ -5473,6 +5914,33 @@ func awsRestjson1_deserializeOpDocumentGetNetworkAnalyzerConfigurationOutput(v * for key, value := range shape { switch key { + case "Arn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NetworkAnalyzerConfigurationArn to be of type string, got %T instead", value) + } + sv.Arn = ptr.String(jtv) + } + + case "Description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Description to be of type string, got %T instead", value) + } + 
sv.Description = ptr.String(jtv) + } + + case "Name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NetworkAnalyzerConfigurationName to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + case "TraceContent": if err := awsRestjson1_deserializeDocumentTraceContent(&sv.TraceContent, value); err != nil { return err @@ -5807,11 +6275,21 @@ func awsRestjson1_deserializeOpDocumentGetResourceEventConfigurationOutput(v **G for key, value := range shape { switch key { + case "ConnectionStatus": + if err := awsRestjson1_deserializeDocumentConnectionStatusEventConfiguration(&sv.ConnectionStatus, value); err != nil { + return err + } + case "DeviceRegistrationState": if err := awsRestjson1_deserializeDocumentDeviceRegistrationStateEventConfiguration(&sv.DeviceRegistrationState, value); err != nil { return err } + case "Join": + if err := awsRestjson1_deserializeDocumentJoinEventConfiguration(&sv.Join, value); err != nil { + return err + } + case "Proximity": if err := awsRestjson1_deserializeDocumentProximityEventConfiguration(&sv.Proximity, value); err != nil { return err @@ -8211,14 +8689,14 @@ func awsRestjson1_deserializeOpDocumentListDeviceProfilesOutput(v **ListDevicePr return nil } -type awsRestjson1_deserializeOpListFuotaTasks struct { +type awsRestjson1_deserializeOpListEventConfigurations struct { } -func (*awsRestjson1_deserializeOpListFuotaTasks) ID() string { +func (*awsRestjson1_deserializeOpListEventConfigurations) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpListFuotaTasks) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpListEventConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = 
next.HandleDeserialize(ctx, in) @@ -8232,9 +8710,9 @@ func (m *awsRestjson1_deserializeOpListFuotaTasks) HandleDeserialize(ctx context } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorListFuotaTasks(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorListEventConfigurations(response, &metadata) } - output := &ListFuotaTasksOutput{} + output := &ListEventConfigurationsOutput{} out.Result = output var buff [1024]byte @@ -8255,7 +8733,7 @@ func (m *awsRestjson1_deserializeOpListFuotaTasks) HandleDeserialize(ctx context return out, metadata, err } - err = awsRestjson1_deserializeOpDocumentListFuotaTasksOutput(&output, shape) + err = awsRestjson1_deserializeOpDocumentListEventConfigurationsOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -8268,7 +8746,7 @@ func (m *awsRestjson1_deserializeOpListFuotaTasks) HandleDeserialize(ctx context return out, metadata, err } -func awsRestjson1_deserializeOpErrorListFuotaTasks(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorListEventConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -8331,7 +8809,7 @@ func awsRestjson1_deserializeOpErrorListFuotaTasks(response *smithyhttp.Response } } -func awsRestjson1_deserializeOpDocumentListFuotaTasksOutput(v **ListFuotaTasksOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentListEventConfigurationsOutput(v **ListEventConfigurationsOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -8344,17 +8822,17 @@ func awsRestjson1_deserializeOpDocumentListFuotaTasksOutput(v **ListFuotaTasksOu return 
fmt.Errorf("unexpected JSON type %v", value) } - var sv *ListFuotaTasksOutput + var sv *ListEventConfigurationsOutput if *v == nil { - sv = &ListFuotaTasksOutput{} + sv = &ListEventConfigurationsOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "FuotaTaskList": - if err := awsRestjson1_deserializeDocumentFuotaTaskList(&sv.FuotaTaskList, value); err != nil { + case "EventConfigurationsList": + if err := awsRestjson1_deserializeDocumentEventConfigurationsList(&sv.EventConfigurationsList, value); err != nil { return err } @@ -8376,14 +8854,14 @@ func awsRestjson1_deserializeOpDocumentListFuotaTasksOutput(v **ListFuotaTasksOu return nil } -type awsRestjson1_deserializeOpListMulticastGroups struct { +type awsRestjson1_deserializeOpListFuotaTasks struct { } -func (*awsRestjson1_deserializeOpListMulticastGroups) ID() string { +func (*awsRestjson1_deserializeOpListFuotaTasks) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpListMulticastGroups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpListFuotaTasks) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -8397,9 +8875,9 @@ func (m *awsRestjson1_deserializeOpListMulticastGroups) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorListMulticastGroups(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorListFuotaTasks(response, &metadata) } - output := &ListMulticastGroupsOutput{} + output := &ListFuotaTasksOutput{} out.Result = output var buff [1024]byte @@ -8420,7 +8898,7 @@ func (m *awsRestjson1_deserializeOpListMulticastGroups) HandleDeserialize(ctx co return 
out, metadata, err } - err = awsRestjson1_deserializeOpDocumentListMulticastGroupsOutput(&output, shape) + err = awsRestjson1_deserializeOpDocumentListFuotaTasksOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -8433,7 +8911,7 @@ func (m *awsRestjson1_deserializeOpListMulticastGroups) HandleDeserialize(ctx co return out, metadata, err } -func awsRestjson1_deserializeOpErrorListMulticastGroups(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorListFuotaTasks(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -8496,7 +8974,7 @@ func awsRestjson1_deserializeOpErrorListMulticastGroups(response *smithyhttp.Res } } -func awsRestjson1_deserializeOpDocumentListMulticastGroupsOutput(v **ListMulticastGroupsOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentListFuotaTasksOutput(v **ListFuotaTasksOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -8509,17 +8987,17 @@ func awsRestjson1_deserializeOpDocumentListMulticastGroupsOutput(v **ListMultica return fmt.Errorf("unexpected JSON type %v", value) } - var sv *ListMulticastGroupsOutput + var sv *ListFuotaTasksOutput if *v == nil { - sv = &ListMulticastGroupsOutput{} + sv = &ListFuotaTasksOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "MulticastGroupList": - if err := awsRestjson1_deserializeDocumentMulticastGroupList(&sv.MulticastGroupList, value); err != nil { + case "FuotaTaskList": + if err := awsRestjson1_deserializeDocumentFuotaTaskList(&sv.FuotaTaskList, value); err != nil { return err } @@ -8541,14 +9019,14 @@ func awsRestjson1_deserializeOpDocumentListMulticastGroupsOutput(v **ListMultica return 
nil } -type awsRestjson1_deserializeOpListMulticastGroupsByFuotaTask struct { +type awsRestjson1_deserializeOpListMulticastGroups struct { } -func (*awsRestjson1_deserializeOpListMulticastGroupsByFuotaTask) ID() string { +func (*awsRestjson1_deserializeOpListMulticastGroups) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpListMulticastGroupsByFuotaTask) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpListMulticastGroups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -8562,9 +9040,9 @@ func (m *awsRestjson1_deserializeOpListMulticastGroupsByFuotaTask) HandleDeseria } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorListMulticastGroupsByFuotaTask(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorListMulticastGroups(response, &metadata) } - output := &ListMulticastGroupsByFuotaTaskOutput{} + output := &ListMulticastGroupsOutput{} out.Result = output var buff [1024]byte @@ -8585,7 +9063,7 @@ func (m *awsRestjson1_deserializeOpListMulticastGroupsByFuotaTask) HandleDeseria return out, metadata, err } - err = awsRestjson1_deserializeOpDocumentListMulticastGroupsByFuotaTaskOutput(&output, shape) + err = awsRestjson1_deserializeOpDocumentListMulticastGroupsOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -8598,7 +9076,7 @@ func (m *awsRestjson1_deserializeOpListMulticastGroupsByFuotaTask) HandleDeseria return out, metadata, err } -func awsRestjson1_deserializeOpErrorListMulticastGroupsByFuotaTask(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func 
awsRestjson1_deserializeOpErrorListMulticastGroups(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -8645,9 +9123,6 @@ func awsRestjson1_deserializeOpErrorListMulticastGroupsByFuotaTask(response *smi case strings.EqualFold("InternalServerException", errorCode): return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) - case strings.EqualFold("ResourceNotFoundException", errorCode): - return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) - case strings.EqualFold("ThrottlingException", errorCode): return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) @@ -8664,7 +9139,7 @@ func awsRestjson1_deserializeOpErrorListMulticastGroupsByFuotaTask(response *smi } } -func awsRestjson1_deserializeOpDocumentListMulticastGroupsByFuotaTaskOutput(v **ListMulticastGroupsByFuotaTaskOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentListMulticastGroupsOutput(v **ListMulticastGroupsOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -8677,9 +9152,9 @@ func awsRestjson1_deserializeOpDocumentListMulticastGroupsByFuotaTaskOutput(v ** return fmt.Errorf("unexpected JSON type %v", value) } - var sv *ListMulticastGroupsByFuotaTaskOutput + var sv *ListMulticastGroupsOutput if *v == nil { - sv = &ListMulticastGroupsByFuotaTaskOutput{} + sv = &ListMulticastGroupsOutput{} } else { sv = *v } @@ -8687,7 +9162,7 @@ func awsRestjson1_deserializeOpDocumentListMulticastGroupsByFuotaTaskOutput(v ** for key, value := range shape { switch key { case "MulticastGroupList": - if err := awsRestjson1_deserializeDocumentMulticastGroupListByFuotaTask(&sv.MulticastGroupList, value); err != nil { + if err := 
awsRestjson1_deserializeDocumentMulticastGroupList(&sv.MulticastGroupList, value); err != nil { return err } @@ -8709,14 +9184,14 @@ func awsRestjson1_deserializeOpDocumentListMulticastGroupsByFuotaTaskOutput(v ** return nil } -type awsRestjson1_deserializeOpListPartnerAccounts struct { +type awsRestjson1_deserializeOpListMulticastGroupsByFuotaTask struct { } -func (*awsRestjson1_deserializeOpListPartnerAccounts) ID() string { +func (*awsRestjson1_deserializeOpListMulticastGroupsByFuotaTask) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpListPartnerAccounts) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpListMulticastGroupsByFuotaTask) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -8730,9 +9205,9 @@ func (m *awsRestjson1_deserializeOpListPartnerAccounts) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorListPartnerAccounts(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorListMulticastGroupsByFuotaTask(response, &metadata) } - output := &ListPartnerAccountsOutput{} + output := &ListMulticastGroupsByFuotaTaskOutput{} out.Result = output var buff [1024]byte @@ -8753,7 +9228,7 @@ func (m *awsRestjson1_deserializeOpListPartnerAccounts) HandleDeserialize(ctx co return out, metadata, err } - err = awsRestjson1_deserializeOpDocumentListPartnerAccountsOutput(&output, shape) + err = awsRestjson1_deserializeOpDocumentListMulticastGroupsByFuotaTaskOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -8766,7 +9241,7 @@ func (m *awsRestjson1_deserializeOpListPartnerAccounts) 
HandleDeserialize(ctx co return out, metadata, err } -func awsRestjson1_deserializeOpErrorListPartnerAccounts(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorListMulticastGroupsByFuotaTask(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -8807,6 +9282,9 @@ func awsRestjson1_deserializeOpErrorListPartnerAccounts(response *smithyhttp.Res } switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + case strings.EqualFold("InternalServerException", errorCode): return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) @@ -8829,7 +9307,7 @@ func awsRestjson1_deserializeOpErrorListPartnerAccounts(response *smithyhttp.Res } } -func awsRestjson1_deserializeOpDocumentListPartnerAccountsOutput(v **ListPartnerAccountsOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentListMulticastGroupsByFuotaTaskOutput(v **ListMulticastGroupsByFuotaTaskOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -8842,15 +9320,20 @@ func awsRestjson1_deserializeOpDocumentListPartnerAccountsOutput(v **ListPartner return fmt.Errorf("unexpected JSON type %v", value) } - var sv *ListPartnerAccountsOutput + var sv *ListMulticastGroupsByFuotaTaskOutput if *v == nil { - sv = &ListPartnerAccountsOutput{} + sv = &ListMulticastGroupsByFuotaTaskOutput{} } else { sv = *v } for key, value := range shape { switch key { + case "MulticastGroupList": + if err := awsRestjson1_deserializeDocumentMulticastGroupListByFuotaTask(&sv.MulticastGroupList, value); err != nil { + return err + } + case "NextToken": if value != nil { jtv, ok := value.(string) @@ 
-8860,11 +9343,6 @@ func awsRestjson1_deserializeOpDocumentListPartnerAccountsOutput(v **ListPartner sv.NextToken = ptr.String(jtv) } - case "Sidewalk": - if err := awsRestjson1_deserializeDocumentSidewalkAccountList(&sv.Sidewalk, value); err != nil { - return err - } - default: _, _ = key, value @@ -8874,14 +9352,14 @@ func awsRestjson1_deserializeOpDocumentListPartnerAccountsOutput(v **ListPartner return nil } -type awsRestjson1_deserializeOpListQueuedMessages struct { +type awsRestjson1_deserializeOpListNetworkAnalyzerConfigurations struct { } -func (*awsRestjson1_deserializeOpListQueuedMessages) ID() string { +func (*awsRestjson1_deserializeOpListNetworkAnalyzerConfigurations) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpListQueuedMessages) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpListNetworkAnalyzerConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -8895,9 +9373,9 @@ func (m *awsRestjson1_deserializeOpListQueuedMessages) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorListQueuedMessages(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorListNetworkAnalyzerConfigurations(response, &metadata) } - output := &ListQueuedMessagesOutput{} + output := &ListNetworkAnalyzerConfigurationsOutput{} out.Result = output var buff [1024]byte @@ -8918,7 +9396,7 @@ func (m *awsRestjson1_deserializeOpListQueuedMessages) HandleDeserialize(ctx con return out, metadata, err } - err = awsRestjson1_deserializeOpDocumentListQueuedMessagesOutput(&output, shape) + err = 
awsRestjson1_deserializeOpDocumentListNetworkAnalyzerConfigurationsOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -8931,7 +9409,7 @@ func (m *awsRestjson1_deserializeOpListQueuedMessages) HandleDeserialize(ctx con return out, metadata, err } -func awsRestjson1_deserializeOpErrorListQueuedMessages(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorListNetworkAnalyzerConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -8978,9 +9456,6 @@ func awsRestjson1_deserializeOpErrorListQueuedMessages(response *smithyhttp.Resp case strings.EqualFold("InternalServerException", errorCode): return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) - case strings.EqualFold("ResourceNotFoundException", errorCode): - return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) - case strings.EqualFold("ThrottlingException", errorCode): return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) @@ -8997,7 +9472,7 @@ func awsRestjson1_deserializeOpErrorListQueuedMessages(response *smithyhttp.Resp } } -func awsRestjson1_deserializeOpDocumentListQueuedMessagesOutput(v **ListQueuedMessagesOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentListNetworkAnalyzerConfigurationsOutput(v **ListNetworkAnalyzerConfigurationsOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -9010,17 +9485,17 @@ func awsRestjson1_deserializeOpDocumentListQueuedMessagesOutput(v **ListQueuedMe return fmt.Errorf("unexpected JSON type %v", value) } - var sv *ListQueuedMessagesOutput + var sv *ListNetworkAnalyzerConfigurationsOutput if *v == nil { - sv 
= &ListQueuedMessagesOutput{} + sv = &ListNetworkAnalyzerConfigurationsOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "DownlinkQueueMessagesList": - if err := awsRestjson1_deserializeDocumentDownlinkQueueMessagesList(&sv.DownlinkQueueMessagesList, value); err != nil { + case "NetworkAnalyzerConfigurationList": + if err := awsRestjson1_deserializeDocumentNetworkAnalyzerConfigurationList(&sv.NetworkAnalyzerConfigurationList, value); err != nil { return err } @@ -9042,14 +9517,14 @@ func awsRestjson1_deserializeOpDocumentListQueuedMessagesOutput(v **ListQueuedMe return nil } -type awsRestjson1_deserializeOpListServiceProfiles struct { +type awsRestjson1_deserializeOpListPartnerAccounts struct { } -func (*awsRestjson1_deserializeOpListServiceProfiles) ID() string { +func (*awsRestjson1_deserializeOpListPartnerAccounts) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpListServiceProfiles) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpListPartnerAccounts) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -9063,9 +9538,9 @@ func (m *awsRestjson1_deserializeOpListServiceProfiles) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorListServiceProfiles(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorListPartnerAccounts(response, &metadata) } - output := &ListServiceProfilesOutput{} + output := &ListPartnerAccountsOutput{} out.Result = output var buff [1024]byte @@ -9086,7 +9561,7 @@ func (m *awsRestjson1_deserializeOpListServiceProfiles) HandleDeserialize(ctx co return out, metadata, err } - err = 
awsRestjson1_deserializeOpDocumentListServiceProfilesOutput(&output, shape) + err = awsRestjson1_deserializeOpDocumentListPartnerAccountsOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -9099,7 +9574,7 @@ func (m *awsRestjson1_deserializeOpListServiceProfiles) HandleDeserialize(ctx co return out, metadata, err } -func awsRestjson1_deserializeOpErrorListServiceProfiles(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorListPartnerAccounts(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -9140,12 +9615,12 @@ func awsRestjson1_deserializeOpErrorListServiceProfiles(response *smithyhttp.Res } switch { - case strings.EqualFold("AccessDeniedException", errorCode): - return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) - case strings.EqualFold("InternalServerException", errorCode): return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + case strings.EqualFold("ThrottlingException", errorCode): return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) @@ -9162,7 +9637,7 @@ func awsRestjson1_deserializeOpErrorListServiceProfiles(response *smithyhttp.Res } } -func awsRestjson1_deserializeOpDocumentListServiceProfilesOutput(v **ListServiceProfilesOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentListPartnerAccountsOutput(v **ListPartnerAccountsOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -9175,9 +9650,9 @@ func 
awsRestjson1_deserializeOpDocumentListServiceProfilesOutput(v **ListService return fmt.Errorf("unexpected JSON type %v", value) } - var sv *ListServiceProfilesOutput + var sv *ListPartnerAccountsOutput if *v == nil { - sv = &ListServiceProfilesOutput{} + sv = &ListPartnerAccountsOutput{} } else { sv = *v } @@ -9193,8 +9668,8 @@ func awsRestjson1_deserializeOpDocumentListServiceProfilesOutput(v **ListService sv.NextToken = ptr.String(jtv) } - case "ServiceProfileList": - if err := awsRestjson1_deserializeDocumentServiceProfileList(&sv.ServiceProfileList, value); err != nil { + case "Sidewalk": + if err := awsRestjson1_deserializeDocumentSidewalkAccountList(&sv.Sidewalk, value); err != nil { return err } @@ -9207,14 +9682,14 @@ func awsRestjson1_deserializeOpDocumentListServiceProfilesOutput(v **ListService return nil } -type awsRestjson1_deserializeOpListTagsForResource struct { +type awsRestjson1_deserializeOpListQueuedMessages struct { } -func (*awsRestjson1_deserializeOpListTagsForResource) ID() string { +func (*awsRestjson1_deserializeOpListQueuedMessages) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpListTagsForResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpListQueuedMessages) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -9228,9 +9703,9 @@ func (m *awsRestjson1_deserializeOpListTagsForResource) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorListTagsForResource(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorListQueuedMessages(response, &metadata) } - output := &ListTagsForResourceOutput{} + output := 
&ListQueuedMessagesOutput{} out.Result = output var buff [1024]byte @@ -9251,7 +9726,7 @@ func (m *awsRestjson1_deserializeOpListTagsForResource) HandleDeserialize(ctx co return out, metadata, err } - err = awsRestjson1_deserializeOpDocumentListTagsForResourceOutput(&output, shape) + err = awsRestjson1_deserializeOpDocumentListQueuedMessagesOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -9264,7 +9739,7 @@ func (m *awsRestjson1_deserializeOpListTagsForResource) HandleDeserialize(ctx co return out, metadata, err } -func awsRestjson1_deserializeOpErrorListTagsForResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorListQueuedMessages(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -9305,8 +9780,8 @@ func awsRestjson1_deserializeOpErrorListTagsForResource(response *smithyhttp.Res } switch { - case strings.EqualFold("ConflictException", errorCode): - return awsRestjson1_deserializeErrorConflictException(response, errorBody) + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) case strings.EqualFold("InternalServerException", errorCode): return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) @@ -9330,7 +9805,7 @@ func awsRestjson1_deserializeOpErrorListTagsForResource(response *smithyhttp.Res } } -func awsRestjson1_deserializeOpDocumentListTagsForResourceOutput(v **ListTagsForResourceOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentListQueuedMessagesOutput(v **ListQueuedMessagesOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -9343,20 +9818,29 @@ func 
awsRestjson1_deserializeOpDocumentListTagsForResourceOutput(v **ListTagsFor return fmt.Errorf("unexpected JSON type %v", value) } - var sv *ListTagsForResourceOutput + var sv *ListQueuedMessagesOutput if *v == nil { - sv = &ListTagsForResourceOutput{} + sv = &ListQueuedMessagesOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "Tags": - if err := awsRestjson1_deserializeDocumentTagList(&sv.Tags, value); err != nil { + case "DownlinkQueueMessagesList": + if err := awsRestjson1_deserializeDocumentDownlinkQueueMessagesList(&sv.DownlinkQueueMessagesList, value); err != nil { return err } + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + default: _, _ = key, value @@ -9366,14 +9850,14 @@ func awsRestjson1_deserializeOpDocumentListTagsForResourceOutput(v **ListTagsFor return nil } -type awsRestjson1_deserializeOpListWirelessDevices struct { +type awsRestjson1_deserializeOpListServiceProfiles struct { } -func (*awsRestjson1_deserializeOpListWirelessDevices) ID() string { +func (*awsRestjson1_deserializeOpListServiceProfiles) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpListWirelessDevices) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpListServiceProfiles) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -9387,9 +9871,9 @@ func (m *awsRestjson1_deserializeOpListWirelessDevices) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorListWirelessDevices(response, &metadata) + 
return out, metadata, awsRestjson1_deserializeOpErrorListServiceProfiles(response, &metadata) } - output := &ListWirelessDevicesOutput{} + output := &ListServiceProfilesOutput{} out.Result = output var buff [1024]byte @@ -9410,7 +9894,7 @@ func (m *awsRestjson1_deserializeOpListWirelessDevices) HandleDeserialize(ctx co return out, metadata, err } - err = awsRestjson1_deserializeOpDocumentListWirelessDevicesOutput(&output, shape) + err = awsRestjson1_deserializeOpDocumentListServiceProfilesOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -9423,7 +9907,7 @@ func (m *awsRestjson1_deserializeOpListWirelessDevices) HandleDeserialize(ctx co return out, metadata, err } -func awsRestjson1_deserializeOpErrorListWirelessDevices(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorListServiceProfiles(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -9486,7 +9970,7 @@ func awsRestjson1_deserializeOpErrorListWirelessDevices(response *smithyhttp.Res } } -func awsRestjson1_deserializeOpDocumentListWirelessDevicesOutput(v **ListWirelessDevicesOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentListServiceProfilesOutput(v **ListServiceProfilesOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -9499,9 +9983,9 @@ func awsRestjson1_deserializeOpDocumentListWirelessDevicesOutput(v **ListWireles return fmt.Errorf("unexpected JSON type %v", value) } - var sv *ListWirelessDevicesOutput + var sv *ListServiceProfilesOutput if *v == nil { - sv = &ListWirelessDevicesOutput{} + sv = &ListServiceProfilesOutput{} } else { sv = *v } @@ -9517,8 +10001,8 @@ func 
awsRestjson1_deserializeOpDocumentListWirelessDevicesOutput(v **ListWireles sv.NextToken = ptr.String(jtv) } - case "WirelessDeviceList": - if err := awsRestjson1_deserializeDocumentWirelessDeviceStatisticsList(&sv.WirelessDeviceList, value); err != nil { + case "ServiceProfileList": + if err := awsRestjson1_deserializeDocumentServiceProfileList(&sv.ServiceProfileList, value); err != nil { return err } @@ -9531,14 +10015,14 @@ func awsRestjson1_deserializeOpDocumentListWirelessDevicesOutput(v **ListWireles return nil } -type awsRestjson1_deserializeOpListWirelessGateways struct { +type awsRestjson1_deserializeOpListTagsForResource struct { } -func (*awsRestjson1_deserializeOpListWirelessGateways) ID() string { +func (*awsRestjson1_deserializeOpListTagsForResource) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpListWirelessGateways) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpListTagsForResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -9552,9 +10036,9 @@ func (m *awsRestjson1_deserializeOpListWirelessGateways) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorListWirelessGateways(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorListTagsForResource(response, &metadata) } - output := &ListWirelessGatewaysOutput{} + output := &ListTagsForResourceOutput{} out.Result = output var buff [1024]byte @@ -9575,7 +10059,7 @@ func (m *awsRestjson1_deserializeOpListWirelessGateways) HandleDeserialize(ctx c return out, metadata, err } - err = awsRestjson1_deserializeOpDocumentListWirelessGatewaysOutput(&output, shape) + err = 
awsRestjson1_deserializeOpDocumentListTagsForResourceOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -9588,7 +10072,7 @@ func (m *awsRestjson1_deserializeOpListWirelessGateways) HandleDeserialize(ctx c return out, metadata, err } -func awsRestjson1_deserializeOpErrorListWirelessGateways(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorListTagsForResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -9629,12 +10113,15 @@ func awsRestjson1_deserializeOpErrorListWirelessGateways(response *smithyhttp.Re } switch { - case strings.EqualFold("AccessDeniedException", errorCode): - return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + case strings.EqualFold("ConflictException", errorCode): + return awsRestjson1_deserializeErrorConflictException(response, errorBody) case strings.EqualFold("InternalServerException", errorCode): return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + case strings.EqualFold("ThrottlingException", errorCode): return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) @@ -9651,7 +10138,7 @@ func awsRestjson1_deserializeOpErrorListWirelessGateways(response *smithyhttp.Re } } -func awsRestjson1_deserializeOpDocumentListWirelessGatewaysOutput(v **ListWirelessGatewaysOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentListTagsForResourceOutput(v **ListTagsForResourceOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -9664,26 
+10151,17 @@ func awsRestjson1_deserializeOpDocumentListWirelessGatewaysOutput(v **ListWirele return fmt.Errorf("unexpected JSON type %v", value) } - var sv *ListWirelessGatewaysOutput + var sv *ListTagsForResourceOutput if *v == nil { - sv = &ListWirelessGatewaysOutput{} + sv = &ListTagsForResourceOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "NextToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) - } - sv.NextToken = ptr.String(jtv) - } - - case "WirelessGatewayList": - if err := awsRestjson1_deserializeDocumentWirelessGatewayStatisticsList(&sv.WirelessGatewayList, value); err != nil { + case "Tags": + if err := awsRestjson1_deserializeDocumentTagList(&sv.Tags, value); err != nil { return err } @@ -9696,14 +10174,14 @@ func awsRestjson1_deserializeOpDocumentListWirelessGatewaysOutput(v **ListWirele return nil } -type awsRestjson1_deserializeOpListWirelessGatewayTaskDefinitions struct { +type awsRestjson1_deserializeOpListWirelessDevices struct { } -func (*awsRestjson1_deserializeOpListWirelessGatewayTaskDefinitions) ID() string { +func (*awsRestjson1_deserializeOpListWirelessDevices) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpListWirelessGatewayTaskDefinitions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpListWirelessDevices) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -9717,9 +10195,9 @@ func (m *awsRestjson1_deserializeOpListWirelessGatewayTaskDefinitions) HandleDes } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, 
awsRestjson1_deserializeOpErrorListWirelessGatewayTaskDefinitions(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorListWirelessDevices(response, &metadata) } - output := &ListWirelessGatewayTaskDefinitionsOutput{} + output := &ListWirelessDevicesOutput{} out.Result = output var buff [1024]byte @@ -9740,7 +10218,7 @@ func (m *awsRestjson1_deserializeOpListWirelessGatewayTaskDefinitions) HandleDes return out, metadata, err } - err = awsRestjson1_deserializeOpDocumentListWirelessGatewayTaskDefinitionsOutput(&output, shape) + err = awsRestjson1_deserializeOpDocumentListWirelessDevicesOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -9753,7 +10231,7 @@ func (m *awsRestjson1_deserializeOpListWirelessGatewayTaskDefinitions) HandleDes return out, metadata, err } -func awsRestjson1_deserializeOpErrorListWirelessGatewayTaskDefinitions(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorListWirelessDevices(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -9816,7 +10294,7 @@ func awsRestjson1_deserializeOpErrorListWirelessGatewayTaskDefinitions(response } } -func awsRestjson1_deserializeOpDocumentListWirelessGatewayTaskDefinitionsOutput(v **ListWirelessGatewayTaskDefinitionsOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentListWirelessDevicesOutput(v **ListWirelessDevicesOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -9829,9 +10307,9 @@ func awsRestjson1_deserializeOpDocumentListWirelessGatewayTaskDefinitionsOutput( return fmt.Errorf("unexpected JSON type %v", value) } - var sv *ListWirelessGatewayTaskDefinitionsOutput + var sv *ListWirelessDevicesOutput 
if *v == nil { - sv = &ListWirelessGatewayTaskDefinitionsOutput{} + sv = &ListWirelessDevicesOutput{} } else { sv = *v } @@ -9847,8 +10325,8 @@ func awsRestjson1_deserializeOpDocumentListWirelessGatewayTaskDefinitionsOutput( sv.NextToken = ptr.String(jtv) } - case "TaskDefinitions": - if err := awsRestjson1_deserializeDocumentWirelessGatewayTaskDefinitionList(&sv.TaskDefinitions, value); err != nil { + case "WirelessDeviceList": + if err := awsRestjson1_deserializeDocumentWirelessDeviceStatisticsList(&sv.WirelessDeviceList, value); err != nil { return err } @@ -9861,14 +10339,14 @@ func awsRestjson1_deserializeOpDocumentListWirelessGatewayTaskDefinitionsOutput( return nil } -type awsRestjson1_deserializeOpPutResourceLogLevel struct { +type awsRestjson1_deserializeOpListWirelessGateways struct { } -func (*awsRestjson1_deserializeOpPutResourceLogLevel) ID() string { +func (*awsRestjson1_deserializeOpListWirelessGateways) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpPutResourceLogLevel) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpListWirelessGateways) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -9882,15 +10360,43 @@ func (m *awsRestjson1_deserializeOpPutResourceLogLevel) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorPutResourceLogLevel(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorListWirelessGateways(response, &metadata) } - output := &PutResourceLogLevelOutput{} + output := &ListWirelessGatewaysOutput{} out.Result = output + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := 
io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListWirelessGatewaysOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + return out, metadata, err } -func awsRestjson1_deserializeOpErrorPutResourceLogLevel(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorListWirelessGateways(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -9937,9 +10443,6 @@ func awsRestjson1_deserializeOpErrorPutResourceLogLevel(response *smithyhttp.Res case strings.EqualFold("InternalServerException", errorCode): return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) - case strings.EqualFold("ResourceNotFoundException", errorCode): - return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) - case strings.EqualFold("ThrottlingException", errorCode): return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) @@ -9956,19 +10459,324 @@ func awsRestjson1_deserializeOpErrorPutResourceLogLevel(response *smithyhttp.Res } } -type awsRestjson1_deserializeOpResetAllResourceLogLevels struct { -} - -func 
(*awsRestjson1_deserializeOpResetAllResourceLogLevels) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestjson1_deserializeOpResetAllResourceLogLevels) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err +func awsRestjson1_deserializeOpDocumentListWirelessGatewaysOutput(v **ListWirelessGatewaysOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListWirelessGatewaysOutput + if *v == nil { + sv = &ListWirelessGatewaysOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "WirelessGatewayList": + if err := awsRestjson1_deserializeDocumentWirelessGatewayStatisticsList(&sv.WirelessGatewayList, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpListWirelessGatewayTaskDefinitions struct { +} + +func (*awsRestjson1_deserializeOpListWirelessGatewayTaskDefinitions) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListWirelessGatewayTaskDefinitions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } 
+ + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorListWirelessGatewayTaskDefinitions(response, &metadata) + } + output := &ListWirelessGatewayTaskDefinitionsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListWirelessGatewayTaskDefinitionsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorListWirelessGatewayTaskDefinitions(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body 
:= io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentListWirelessGatewayTaskDefinitionsOutput(v **ListWirelessGatewayTaskDefinitionsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListWirelessGatewayTaskDefinitionsOutput + if *v == nil { + sv = &ListWirelessGatewayTaskDefinitionsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextToken to be of type string, got %T 
instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "TaskDefinitions": + if err := awsRestjson1_deserializeDocumentWirelessGatewayTaskDefinitionList(&sv.TaskDefinitions, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpPutResourceLogLevel struct { +} + +func (*awsRestjson1_deserializeOpPutResourceLogLevel) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpPutResourceLogLevel) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorPutResourceLogLevel(response, &metadata) + } + output := &PutResourceLogLevelOutput{} + out.Result = output + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorPutResourceLogLevel(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + 
decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestjson1_deserializeOpResetAllResourceLogLevels struct { +} + +func (*awsRestjson1_deserializeOpResetAllResourceLogLevels) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpResetAllResourceLogLevels) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err } response, ok := out.RawResponse.(*smithyhttp.Response) @@ -11112,18 +11920,113 @@ func 
awsRestjson1_deserializeOpDocumentTestWirelessDeviceOutput(v **TestWireless } } - *v = sv - return nil + *v = sv + return nil +} + +type awsRestjson1_deserializeOpUntagResource struct { +} + +func (*awsRestjson1_deserializeOpUntagResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpUntagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorUntagResource(response, &metadata) + } + output := &UntagResourceOutput{} + out.Result = output + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorUntagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: 
fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("ConflictException", errorCode): + return awsRestjson1_deserializeErrorConflictException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } } -type awsRestjson1_deserializeOpUntagResource struct { +type awsRestjson1_deserializeOpUpdateDestination struct { } -func (*awsRestjson1_deserializeOpUntagResource) ID() string { +func (*awsRestjson1_deserializeOpUpdateDestination) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpUntagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpUpdateDestination) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -11137,15 +12040,15 @@ func (m *awsRestjson1_deserializeOpUntagResource) HandleDeserialize(ctx context. 
} if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorUntagResource(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorUpdateDestination(response, &metadata) } - output := &UntagResourceOutput{} + output := &UpdateDestinationOutput{} out.Result = output return out, metadata, err } -func awsRestjson1_deserializeOpErrorUntagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorUpdateDestination(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -11186,8 +12089,8 @@ func awsRestjson1_deserializeOpErrorUntagResource(response *smithyhttp.Response, } switch { - case strings.EqualFold("ConflictException", errorCode): - return awsRestjson1_deserializeErrorConflictException(response, errorBody) + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) case strings.EqualFold("InternalServerException", errorCode): return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) @@ -11211,14 +12114,14 @@ func awsRestjson1_deserializeOpErrorUntagResource(response *smithyhttp.Response, } } -type awsRestjson1_deserializeOpUpdateDestination struct { +type awsRestjson1_deserializeOpUpdateEventConfigurationByResourceTypes struct { } -func (*awsRestjson1_deserializeOpUpdateDestination) ID() string { +func (*awsRestjson1_deserializeOpUpdateEventConfigurationByResourceTypes) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpUpdateDestination) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m 
*awsRestjson1_deserializeOpUpdateEventConfigurationByResourceTypes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -11232,15 +12135,15 @@ func (m *awsRestjson1_deserializeOpUpdateDestination) HandleDeserialize(ctx cont } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorUpdateDestination(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorUpdateEventConfigurationByResourceTypes(response, &metadata) } - output := &UpdateDestinationOutput{} + output := &UpdateEventConfigurationByResourceTypesOutput{} out.Result = output return out, metadata, err } -func awsRestjson1_deserializeOpErrorUpdateDestination(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorUpdateEventConfigurationByResourceTypes(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -11287,9 +12190,6 @@ func awsRestjson1_deserializeOpErrorUpdateDestination(response *smithyhttp.Respo case strings.EqualFold("InternalServerException", errorCode): return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) - case strings.EqualFold("ResourceNotFoundException", errorCode): - return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) - case strings.EqualFold("ThrottlingException", errorCode): return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) @@ -12358,6 +13258,19 @@ func awsRestjson1_deserializeDocumentAbpV1_0_x(v **types.AbpV1_0_x, value interf sv.DevAddr = ptr.String(jtv) } + case "FCntStart": + 
if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected FCntStart to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.FCntStart = ptr.Int32(int32(i64)) + } + case "SessionKeys": if err := awsRestjson1_deserializeDocumentSessionKeysAbpV1_0_x(&sv.SessionKeys, value); err != nil { return err @@ -12403,6 +13316,19 @@ func awsRestjson1_deserializeDocumentAbpV1_1(v **types.AbpV1_1, value interface{ sv.DevAddr = ptr.String(jtv) } + case "FCntStart": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected FCntStart to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.FCntStart = ptr.Int32(int32(i64)) + } + case "SessionKeys": if err := awsRestjson1_deserializeDocumentSessionKeysAbpV1_1(&sv.SessionKeys, value); err != nil { return err @@ -12564,6 +13490,87 @@ func awsRestjson1_deserializeDocumentConflictException(v **types.ConflictExcepti return nil } +func awsRestjson1_deserializeDocumentConnectionStatusEventConfiguration(v **types.ConnectionStatusEventConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ConnectionStatusEventConfiguration + if *v == nil { + sv = &types.ConnectionStatusEventConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "LoRaWAN": + if err := awsRestjson1_deserializeDocumentLoRaWANConnectionStatusEventNotificationConfigurations(&sv.LoRaWAN, value); err != nil { + return err + } + + case "WirelessGatewayIdEventTopic": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EventNotificationTopicStatus to be of type string, got %T instead", value) + } + 
sv.WirelessGatewayIdEventTopic = types.EventNotificationTopicStatus(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentConnectionStatusResourceTypeEventConfiguration(v **types.ConnectionStatusResourceTypeEventConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ConnectionStatusResourceTypeEventConfiguration + if *v == nil { + sv = &types.ConnectionStatusResourceTypeEventConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "LoRaWAN": + if err := awsRestjson1_deserializeDocumentLoRaWANConnectionStatusResourceTypeEventConfiguration(&sv.LoRaWAN, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsRestjson1_deserializeDocumentDestinationList(v *[]types.Destinations, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -12805,11 +13812,159 @@ func awsRestjson1_deserializeDocumentDeviceProfileList(v *[]types.DeviceProfile, cv = append(cv, col) } - *v = cv + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentDeviceRegistrationStateEventConfiguration(v **types.DeviceRegistrationStateEventConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DeviceRegistrationStateEventConfiguration + if *v == nil { + sv = &types.DeviceRegistrationStateEventConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Sidewalk": + if err := 
awsRestjson1_deserializeDocumentSidewalkEventNotificationConfigurations(&sv.Sidewalk, value); err != nil { + return err + } + + case "WirelessDeviceIdEventTopic": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EventNotificationTopicStatus to be of type string, got %T instead", value) + } + sv.WirelessDeviceIdEventTopic = types.EventNotificationTopicStatus(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentDeviceRegistrationStateResourceTypeEventConfiguration(v **types.DeviceRegistrationStateResourceTypeEventConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DeviceRegistrationStateResourceTypeEventConfiguration + if *v == nil { + sv = &types.DeviceRegistrationStateResourceTypeEventConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Sidewalk": + if err := awsRestjson1_deserializeDocumentSidewalkResourceTypeEventConfiguration(&sv.Sidewalk, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentDownlinkQueueMessage(v **types.DownlinkQueueMessage, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DownlinkQueueMessage + if *v == nil { + sv = &types.DownlinkQueueMessage{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "LoRaWAN": + if err := awsRestjson1_deserializeDocumentLoRaWANSendDataToDevice(&sv.LoRaWAN, value); err != nil { + return err + } 
+ + case "MessageId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MessageId to be of type string, got %T instead", value) + } + sv.MessageId = ptr.String(jtv) + } + + case "ReceivedAt": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ISODateTimeString to be of type string, got %T instead", value) + } + sv.ReceivedAt = ptr.String(jtv) + } + + case "TransmitMode": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected TransmitMode to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.TransmitMode = ptr.Int32(int32(i64)) + } + + default: + _, _ = key, value + + } + } + *v = sv return nil } -func awsRestjson1_deserializeDocumentDeviceRegistrationStateEventConfiguration(v **types.DeviceRegistrationStateEventConfiguration, value interface{}) error { +func awsRestjson1_deserializeDocumentDownlinkQueueMessagesList(v *[]types.DownlinkQueueMessage, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -12817,35 +13972,33 @@ func awsRestjson1_deserializeDocumentDeviceRegistrationStateEventConfiguration(v return nil } - shape, ok := value.(map[string]interface{}) + shape, ok := value.([]interface{}) if !ok { return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.DeviceRegistrationStateEventConfiguration + var cv []types.DownlinkQueueMessage if *v == nil { - sv = &types.DeviceRegistrationStateEventConfiguration{} + cv = []types.DownlinkQueueMessage{} } else { - sv = *v + cv = *v } - for key, value := range shape { - switch key { - case "Sidewalk": - if err := awsRestjson1_deserializeDocumentSidewalkEventNotificationConfigurations(&sv.Sidewalk, value); err != nil { - return err - } - - default: - _, _ = key, value - + for _, value := range shape { + var col types.DownlinkQueueMessage + destAddr := &col + if err := 
awsRestjson1_deserializeDocumentDownlinkQueueMessage(&destAddr, value); err != nil { + return err } + col = *destAddr + cv = append(cv, col) + } - *v = sv + *v = cv return nil } -func awsRestjson1_deserializeDocumentDownlinkQueueMessage(v **types.DownlinkQueueMessage, value interface{}) error { +func awsRestjson1_deserializeDocumentEventConfigurationItem(v **types.EventConfigurationItem, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -12858,49 +14011,45 @@ func awsRestjson1_deserializeDocumentDownlinkQueueMessage(v **types.DownlinkQueu return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.DownlinkQueueMessage + var sv *types.EventConfigurationItem if *v == nil { - sv = &types.DownlinkQueueMessage{} + sv = &types.EventConfigurationItem{} } else { sv = *v } for key, value := range shape { switch key { - case "LoRaWAN": - if err := awsRestjson1_deserializeDocumentLoRaWANSendDataToDevice(&sv.LoRaWAN, value); err != nil { + case "Events": + if err := awsRestjson1_deserializeDocumentEventNotificationItemConfigurations(&sv.Events, value); err != nil { return err } - case "MessageId": + case "Identifier": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected MessageId to be of type string, got %T instead", value) + return fmt.Errorf("expected Identifier to be of type string, got %T instead", value) } - sv.MessageId = ptr.String(jtv) + sv.Identifier = ptr.String(jtv) } - case "ReceivedAt": + case "IdentifierType": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected ISODateTimeString to be of type string, got %T instead", value) + return fmt.Errorf("expected IdentifierType to be of type string, got %T instead", value) } - sv.ReceivedAt = ptr.String(jtv) + sv.IdentifierType = types.IdentifierType(jtv) } - case "TransmitMode": + case "PartnerType": if value != nil { - jtv, ok := value.(json.Number) + jtv, ok := value.(string) if !ok { - return 
fmt.Errorf("expected TransmitMode to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err + return fmt.Errorf("expected EventNotificationPartnerType to be of type string, got %T instead", value) } - sv.TransmitMode = ptr.Int32(int32(i64)) + sv.PartnerType = types.EventNotificationPartnerType(jtv) } default: @@ -12912,7 +14061,7 @@ func awsRestjson1_deserializeDocumentDownlinkQueueMessage(v **types.DownlinkQueu return nil } -func awsRestjson1_deserializeDocumentDownlinkQueueMessagesList(v *[]types.DownlinkQueueMessage, value interface{}) error { +func awsRestjson1_deserializeDocumentEventConfigurationsList(v *[]types.EventConfigurationItem, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -12925,17 +14074,17 @@ func awsRestjson1_deserializeDocumentDownlinkQueueMessagesList(v *[]types.Downli return fmt.Errorf("unexpected JSON type %v", value) } - var cv []types.DownlinkQueueMessage + var cv []types.EventConfigurationItem if *v == nil { - cv = []types.DownlinkQueueMessage{} + cv = []types.EventConfigurationItem{} } else { cv = *v } for _, value := range shape { - var col types.DownlinkQueueMessage + var col types.EventConfigurationItem destAddr := &col - if err := awsRestjson1_deserializeDocumentDownlinkQueueMessage(&destAddr, value); err != nil { + if err := awsRestjson1_deserializeDocumentEventConfigurationItem(&destAddr, value); err != nil { return err } col = *destAddr @@ -12946,6 +14095,57 @@ func awsRestjson1_deserializeDocumentDownlinkQueueMessagesList(v *[]types.Downli return nil } +func awsRestjson1_deserializeDocumentEventNotificationItemConfigurations(v **types.EventNotificationItemConfigurations, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv 
*types.EventNotificationItemConfigurations + if *v == nil { + sv = &types.EventNotificationItemConfigurations{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ConnectionStatus": + if err := awsRestjson1_deserializeDocumentConnectionStatusEventConfiguration(&sv.ConnectionStatus, value); err != nil { + return err + } + + case "DeviceRegistrationState": + if err := awsRestjson1_deserializeDocumentDeviceRegistrationStateEventConfiguration(&sv.DeviceRegistrationState, value); err != nil { + return err + } + + case "Join": + if err := awsRestjson1_deserializeDocumentJoinEventConfiguration(&sv.Join, value); err != nil { + return err + } + + case "Proximity": + if err := awsRestjson1_deserializeDocumentProximityEventConfiguration(&sv.Proximity, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsRestjson1_deserializeDocumentFactoryPresetFreqsList(v *[]int32, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -13208,19 +14408,176 @@ func awsRestjson1_deserializeDocumentJoinEuiFilters(v *[][]string, value interfa cv = *v } - for _, value := range shape { - var col []string - if err := awsRestjson1_deserializeDocumentJoinEuiRange(&col, value); err != nil { - return err + for _, value := range shape { + var col []string + if err := awsRestjson1_deserializeDocumentJoinEuiRange(&col, value); err != nil { + return err + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentJoinEuiRange(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok 
:= value.(string) + if !ok { + return fmt.Errorf("expected JoinEui to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentJoinEventConfiguration(v **types.JoinEventConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.JoinEventConfiguration + if *v == nil { + sv = &types.JoinEventConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "LoRaWAN": + if err := awsRestjson1_deserializeDocumentLoRaWANJoinEventNotificationConfigurations(&sv.LoRaWAN, value); err != nil { + return err + } + + case "WirelessDeviceIdEventTopic": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EventNotificationTopicStatus to be of type string, got %T instead", value) + } + sv.WirelessDeviceIdEventTopic = types.EventNotificationTopicStatus(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentJoinResourceTypeEventConfiguration(v **types.JoinResourceTypeEventConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.JoinResourceTypeEventConfiguration + if *v == nil { + sv = &types.JoinResourceTypeEventConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "LoRaWAN": + if err := awsRestjson1_deserializeDocumentLoRaWANJoinResourceTypeEventConfiguration(&sv.LoRaWAN, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv 
+ return nil +} + +func awsRestjson1_deserializeDocumentLoRaWANConnectionStatusEventNotificationConfigurations(v **types.LoRaWANConnectionStatusEventNotificationConfigurations, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.LoRaWANConnectionStatusEventNotificationConfigurations + if *v == nil { + sv = &types.LoRaWANConnectionStatusEventNotificationConfigurations{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "GatewayEuiEventTopic": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EventNotificationTopicStatus to be of type string, got %T instead", value) + } + sv.GatewayEuiEventTopic = types.EventNotificationTopicStatus(jtv) + } + + default: + _, _ = key, value + } - cv = append(cv, col) - } - *v = cv + *v = sv return nil } -func awsRestjson1_deserializeDocumentJoinEuiRange(v *[]string, value interface{}) error { +func awsRestjson1_deserializeDocumentLoRaWANConnectionStatusResourceTypeEventConfiguration(v **types.LoRaWANConnectionStatusResourceTypeEventConfiguration, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -13228,31 +14585,35 @@ func awsRestjson1_deserializeDocumentJoinEuiRange(v *[]string, value interface{} return nil } - shape, ok := value.([]interface{}) + shape, ok := value.(map[string]interface{}) if !ok { return fmt.Errorf("unexpected JSON type %v", value) } - var cv []string + var sv *types.LoRaWANConnectionStatusResourceTypeEventConfiguration if *v == nil { - cv = []string{} + sv = &types.LoRaWANConnectionStatusResourceTypeEventConfiguration{} } else { - cv = *v + sv = *v } - for _, value := range shape { - var col string - if value != nil { - jtv, ok := value.(string) - if !ok { - return 
fmt.Errorf("expected JoinEui to be of type string, got %T instead", value) + for key, value := range shape { + switch key { + case "WirelessGatewayEventTopic": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EventNotificationTopicStatus to be of type string, got %T instead", value) + } + sv.WirelessGatewayEventTopic = types.EventNotificationTopicStatus(jtv) } - col = jtv - } - cv = append(cv, col) + default: + _, _ = key, value + + } } - *v = cv + *v = sv return nil } @@ -14265,6 +15626,86 @@ func awsRestjson1_deserializeDocumentLoRaWANGetServiceProfileInfo(v **types.LoRa return nil } +func awsRestjson1_deserializeDocumentLoRaWANJoinEventNotificationConfigurations(v **types.LoRaWANJoinEventNotificationConfigurations, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.LoRaWANJoinEventNotificationConfigurations + if *v == nil { + sv = &types.LoRaWANJoinEventNotificationConfigurations{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "DevEuiEventTopic": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EventNotificationTopicStatus to be of type string, got %T instead", value) + } + sv.DevEuiEventTopic = types.EventNotificationTopicStatus(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentLoRaWANJoinResourceTypeEventConfiguration(v **types.LoRaWANJoinResourceTypeEventConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv 
*types.LoRaWANJoinResourceTypeEventConfiguration + if *v == nil { + sv = &types.LoRaWANJoinResourceTypeEventConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "WirelessDeviceEventTopic": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EventNotificationTopicStatus to be of type string, got %T instead", value) + } + sv.WirelessDeviceEventTopic = types.EventNotificationTopicStatus(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsRestjson1_deserializeDocumentLoRaWANListDevice(v **types.LoRaWANListDevice, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -14813,6 +16254,89 @@ func awsRestjson1_deserializeDocumentNetIdFilters(v *[]string, value interface{} return nil } +func awsRestjson1_deserializeDocumentNetworkAnalyzerConfigurationList(v *[]types.NetworkAnalyzerConfigurations, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.NetworkAnalyzerConfigurations + if *v == nil { + cv = []types.NetworkAnalyzerConfigurations{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.NetworkAnalyzerConfigurations + destAddr := &col + if err := awsRestjson1_deserializeDocumentNetworkAnalyzerConfigurations(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentNetworkAnalyzerConfigurations(v **types.NetworkAnalyzerConfigurations, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + 
+ var sv *types.NetworkAnalyzerConfigurations + if *v == nil { + sv = &types.NetworkAnalyzerConfigurations{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Arn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NetworkAnalyzerConfigurationArn to be of type string, got %T instead", value) + } + sv.Arn = ptr.String(jtv) + } + + case "Name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NetworkAnalyzerConfigurationName to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsRestjson1_deserializeDocumentOtaaV1_0_x(v **types.OtaaV1_0_x, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -14956,6 +16480,51 @@ func awsRestjson1_deserializeDocumentProximityEventConfiguration(v **types.Proxi return err } + case "WirelessDeviceIdEventTopic": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EventNotificationTopicStatus to be of type string, got %T instead", value) + } + sv.WirelessDeviceIdEventTopic = types.EventNotificationTopicStatus(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentProximityResourceTypeEventConfiguration(v **types.ProximityResourceTypeEventConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ProximityResourceTypeEventConfiguration + if *v == nil { + sv = &types.ProximityResourceTypeEventConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Sidewalk": + if err := 
awsRestjson1_deserializeDocumentSidewalkResourceTypeEventConfiguration(&sv.Sidewalk, value); err != nil { + return err + } + default: _, _ = key, value @@ -15609,6 +17178,46 @@ func awsRestjson1_deserializeDocumentSidewalkListDevice(v **types.SidewalkListDe return nil } +func awsRestjson1_deserializeDocumentSidewalkResourceTypeEventConfiguration(v **types.SidewalkResourceTypeEventConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SidewalkResourceTypeEventConfiguration + if *v == nil { + sv = &types.SidewalkResourceTypeEventConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "WirelessDeviceEventTopic": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EventNotificationTopicStatus to be of type string, got %T instead", value) + } + sv.WirelessDeviceEventTopic = types.EventNotificationTopicStatus(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsRestjson1_deserializeDocumentSubBands(v *[]int32, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/service/iotwireless/generated.json b/service/iotwireless/generated.json index fdd120558f1..dc91c0fc686 100644 --- a/service/iotwireless/generated.json +++ b/service/iotwireless/generated.json @@ -20,6 +20,7 @@ "api_op_CreateDeviceProfile.go", "api_op_CreateFuotaTask.go", "api_op_CreateMulticastGroup.go", + "api_op_CreateNetworkAnalyzerConfiguration.go", "api_op_CreateServiceProfile.go", "api_op_CreateWirelessDevice.go", "api_op_CreateWirelessGateway.go", @@ -29,6 +30,7 @@ "api_op_DeleteDeviceProfile.go", "api_op_DeleteFuotaTask.go", "api_op_DeleteMulticastGroup.go", + "api_op_DeleteNetworkAnalyzerConfiguration.go", 
"api_op_DeleteQueuedMessages.go", "api_op_DeleteServiceProfile.go", "api_op_DeleteWirelessDevice.go", @@ -44,6 +46,7 @@ "api_op_DisassociateWirelessGatewayFromThing.go", "api_op_GetDestination.go", "api_op_GetDeviceProfile.go", + "api_op_GetEventConfigurationByResourceTypes.go", "api_op_GetFuotaTask.go", "api_op_GetLogLevelsByResourceTypes.go", "api_op_GetMulticastGroup.go", @@ -64,9 +67,11 @@ "api_op_GetWirelessGatewayTaskDefinition.go", "api_op_ListDestinations.go", "api_op_ListDeviceProfiles.go", + "api_op_ListEventConfigurations.go", "api_op_ListFuotaTasks.go", "api_op_ListMulticastGroups.go", "api_op_ListMulticastGroupsByFuotaTask.go", + "api_op_ListNetworkAnalyzerConfigurations.go", "api_op_ListPartnerAccounts.go", "api_op_ListQueuedMessages.go", "api_op_ListServiceProfiles.go", @@ -87,6 +92,7 @@ "api_op_TestWirelessDevice.go", "api_op_UntagResource.go", "api_op_UpdateDestination.go", + "api_op_UpdateEventConfigurationByResourceTypes.go", "api_op_UpdateFuotaTask.go", "api_op_UpdateLogLevelsByResourceTypes.go", "api_op_UpdateMulticastGroup.go", diff --git a/service/iotwireless/serializers.go b/service/iotwireless/serializers.go index d80b445a9a6..cd447c05d86 100644 --- a/service/iotwireless/serializers.go +++ b/service/iotwireless/serializers.go @@ -1022,6 +1022,112 @@ func awsRestjson1_serializeOpDocumentCreateMulticastGroupInput(v *CreateMulticas return nil } +type awsRestjson1_serializeOpCreateNetworkAnalyzerConfiguration struct { +} + +func (*awsRestjson1_serializeOpCreateNetworkAnalyzerConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpCreateNetworkAnalyzerConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", 
in.Request)} + } + + input, ok := in.Parameters.(*CreateNetworkAnalyzerConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/network-analyzer-configurations") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentCreateNetworkAnalyzerConfigurationInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsCreateNetworkAnalyzerConfigurationInput(v *CreateNetworkAnalyzerConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentCreateNetworkAnalyzerConfigurationInput(v *CreateNetworkAnalyzerConfigurationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClientRequestToken != nil { + ok := object.Key("ClientRequestToken") + ok.String(*v.ClientRequestToken) + } + + if v.Description != nil { + ok := object.Key("Description") + 
ok.String(*v.Description) + } + + if v.Name != nil { + ok := object.Key("Name") + ok.String(*v.Name) + } + + if v.Tags != nil { + ok := object.Key("Tags") + if err := awsRestjson1_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + + if v.TraceContent != nil { + ok := object.Key("TraceContent") + if err := awsRestjson1_serializeDocumentTraceContent(v.TraceContent, ok); err != nil { + return err + } + } + + if v.WirelessDevices != nil { + ok := object.Key("WirelessDevices") + if err := awsRestjson1_serializeDocumentWirelessDeviceList(v.WirelessDevices, ok); err != nil { + return err + } + } + + if v.WirelessGateways != nil { + ok := object.Key("WirelessGateways") + if err := awsRestjson1_serializeDocumentWirelessGatewayList(v.WirelessGateways, ok); err != nil { + return err + } + } + + return nil +} + type awsRestjson1_serializeOpCreateServiceProfile struct { } @@ -1708,6 +1814,64 @@ func awsRestjson1_serializeOpHttpBindingsDeleteMulticastGroupInput(v *DeleteMult return nil } +type awsRestjson1_serializeOpDeleteNetworkAnalyzerConfiguration struct { +} + +func (*awsRestjson1_serializeOpDeleteNetworkAnalyzerConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpDeleteNetworkAnalyzerConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteNetworkAnalyzerConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/network-analyzer-configurations/{ConfigurationName}") + request.URL.Path = 
smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsDeleteNetworkAnalyzerConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsDeleteNetworkAnalyzerConfigurationInput(v *DeleteNetworkAnalyzerConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ConfigurationName == nil || len(*v.ConfigurationName) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member ConfigurationName must not be empty")} + } + if v.ConfigurationName != nil { + if err := encoder.SetURI("ConfigurationName").String(*v.ConfigurationName); err != nil { + return err + } + } + + return nil +} + type awsRestjson1_serializeOpDeleteQueuedMessages struct { } @@ -2617,6 +2781,51 @@ func awsRestjson1_serializeOpHttpBindingsGetDeviceProfileInput(v *GetDeviceProfi return nil } +type awsRestjson1_serializeOpGetEventConfigurationByResourceTypes struct { +} + +func (*awsRestjson1_serializeOpGetEventConfigurationByResourceTypes) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpGetEventConfigurationByResourceTypes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := 
in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetEventConfigurationByResourceTypesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/event-configurations-resource-types") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsGetEventConfigurationByResourceTypesInput(v *GetEventConfigurationByResourceTypesInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + type awsRestjson1_serializeOpGetFuotaTask struct { } @@ -3781,6 +3990,67 @@ func awsRestjson1_serializeOpHttpBindingsListDeviceProfilesInput(v *ListDevicePr return nil } +type awsRestjson1_serializeOpListEventConfigurations struct { +} + +func (*awsRestjson1_serializeOpListEventConfigurations) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListEventConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, 
&smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListEventConfigurationsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/event-configurations") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsListEventConfigurationsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsListEventConfigurationsInput(v *ListEventConfigurationsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.MaxResults != 0 { + encoder.SetQuery("maxResults").Integer(v.MaxResults) + } + + if v.NextToken != nil { + encoder.SetQuery("nextToken").String(*v.NextToken) + } + + if len(v.ResourceType) > 0 { + encoder.SetQuery("resourceType").String(string(v.ResourceType)) + } + + return nil +} + type awsRestjson1_serializeOpListFuotaTasks struct { } @@ -3961,14 +4231,14 @@ func awsRestjson1_serializeOpHttpBindingsListMulticastGroupsByFuotaTaskInput(v * return nil } -type awsRestjson1_serializeOpListPartnerAccounts struct { +type awsRestjson1_serializeOpListNetworkAnalyzerConfigurations struct { } -func (*awsRestjson1_serializeOpListPartnerAccounts) ID() 
string { +func (*awsRestjson1_serializeOpListNetworkAnalyzerConfigurations) ID() string { return "OperationSerializer" } -func (m *awsRestjson1_serializeOpListPartnerAccounts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsRestjson1_serializeOpListNetworkAnalyzerConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { request, ok := in.Request.(*smithyhttp.Request) @@ -3976,13 +4246,13 @@ func (m *awsRestjson1_serializeOpListPartnerAccounts) HandleSerialize(ctx contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ListPartnerAccountsInput) + input, ok := in.Parameters.(*ListNetworkAnalyzerConfigurationsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/partner-accounts") + opPath, opQuery := httpbinding.SplitURI("/network-analyzer-configurations") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "GET" @@ -3991,7 +4261,7 @@ func (m *awsRestjson1_serializeOpListPartnerAccounts) HandleSerialize(ctx contex return out, metadata, &smithy.SerializationError{Err: err} } - if err := awsRestjson1_serializeOpHttpBindingsListPartnerAccountsInput(input, restEncoder); err != nil { + if err := awsRestjson1_serializeOpHttpBindingsListNetworkAnalyzerConfigurationsInput(input, restEncoder); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -4002,7 +4272,7 @@ func (m *awsRestjson1_serializeOpListPartnerAccounts) HandleSerialize(ctx contex return next.HandleSerialize(ctx, in) } -func 
awsRestjson1_serializeOpHttpBindingsListPartnerAccountsInput(v *ListPartnerAccountsInput, encoder *httpbinding.Encoder) error { +func awsRestjson1_serializeOpHttpBindingsListNetworkAnalyzerConfigurationsInput(v *ListNetworkAnalyzerConfigurationsInput, encoder *httpbinding.Encoder) error { if v == nil { return fmt.Errorf("unsupported serialization of nil %T", v) } @@ -4018,14 +4288,14 @@ func awsRestjson1_serializeOpHttpBindingsListPartnerAccountsInput(v *ListPartner return nil } -type awsRestjson1_serializeOpListQueuedMessages struct { +type awsRestjson1_serializeOpListPartnerAccounts struct { } -func (*awsRestjson1_serializeOpListQueuedMessages) ID() string { +func (*awsRestjson1_serializeOpListPartnerAccounts) ID() string { return "OperationSerializer" } -func (m *awsRestjson1_serializeOpListQueuedMessages) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsRestjson1_serializeOpListPartnerAccounts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { request, ok := in.Request.(*smithyhttp.Request) @@ -4033,13 +4303,13 @@ func (m *awsRestjson1_serializeOpListQueuedMessages) HandleSerialize(ctx context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ListQueuedMessagesInput) + input, ok := in.Parameters.(*ListPartnerAccountsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/wireless-devices/{Id}/data") + opPath, opQuery := httpbinding.SplitURI("/partner-accounts") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "GET" @@ -4048,7 +4318,7 @@ 
func (m *awsRestjson1_serializeOpListQueuedMessages) HandleSerialize(ctx context return out, metadata, &smithy.SerializationError{Err: err} } - if err := awsRestjson1_serializeOpHttpBindingsListQueuedMessagesInput(input, restEncoder); err != nil { + if err := awsRestjson1_serializeOpHttpBindingsListPartnerAccountsInput(input, restEncoder); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -4059,7 +4329,64 @@ func (m *awsRestjson1_serializeOpListQueuedMessages) HandleSerialize(ctx context return next.HandleSerialize(ctx, in) } -func awsRestjson1_serializeOpHttpBindingsListQueuedMessagesInput(v *ListQueuedMessagesInput, encoder *httpbinding.Encoder) error { +func awsRestjson1_serializeOpHttpBindingsListPartnerAccountsInput(v *ListPartnerAccountsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.MaxResults != 0 { + encoder.SetQuery("maxResults").Integer(v.MaxResults) + } + + if v.NextToken != nil { + encoder.SetQuery("nextToken").String(*v.NextToken) + } + + return nil +} + +type awsRestjson1_serializeOpListQueuedMessages struct { +} + +func (*awsRestjson1_serializeOpListQueuedMessages) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListQueuedMessages) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListQueuedMessagesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/wireless-devices/{Id}/data") + request.URL.Path = 
smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsListQueuedMessagesInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsListQueuedMessagesInput(v *ListQueuedMessagesInput, encoder *httpbinding.Encoder) error { if v == nil { return fmt.Errorf("unsupported serialization of nil %T", v) } @@ -5403,6 +5730,97 @@ func awsRestjson1_serializeOpDocumentUpdateDestinationInput(v *UpdateDestination return nil } +type awsRestjson1_serializeOpUpdateEventConfigurationByResourceTypes struct { +} + +func (*awsRestjson1_serializeOpUpdateEventConfigurationByResourceTypes) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpUpdateEventConfigurationByResourceTypes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateEventConfigurationByResourceTypesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/event-configurations-resource-types") + 
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PATCH" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentUpdateEventConfigurationByResourceTypesInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsUpdateEventConfigurationByResourceTypesInput(v *UpdateEventConfigurationByResourceTypesInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentUpdateEventConfigurationByResourceTypesInput(v *UpdateEventConfigurationByResourceTypesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ConnectionStatus != nil { + ok := object.Key("ConnectionStatus") + if err := awsRestjson1_serializeDocumentConnectionStatusResourceTypeEventConfiguration(v.ConnectionStatus, ok); err != nil { + return err + } + } + + if v.DeviceRegistrationState != nil { + ok := object.Key("DeviceRegistrationState") + if err := awsRestjson1_serializeDocumentDeviceRegistrationStateResourceTypeEventConfiguration(v.DeviceRegistrationState, ok); err != nil { + return err + } + } + + if 
v.Join != nil { + ok := object.Key("Join") + if err := awsRestjson1_serializeDocumentJoinResourceTypeEventConfiguration(v.Join, ok); err != nil { + return err + } + } + + if v.Proximity != nil { + ok := object.Key("Proximity") + if err := awsRestjson1_serializeDocumentProximityResourceTypeEventConfiguration(v.Proximity, ok); err != nil { + return err + } + } + + return nil +} + type awsRestjson1_serializeOpUpdateFuotaTask struct { } @@ -5754,6 +6172,11 @@ func awsRestjson1_serializeOpDocumentUpdateNetworkAnalyzerConfigurationInput(v * object := value.Object() defer object.Close() + if v.Description != nil { + ok := object.Key("Description") + ok.String(*v.Description) + } + if v.TraceContent != nil { ok := object.Key("TraceContent") if err := awsRestjson1_serializeDocumentTraceContent(v.TraceContent, ok); err != nil { @@ -5960,6 +6383,13 @@ func awsRestjson1_serializeOpDocumentUpdateResourceEventConfigurationInput(v *Up object := value.Object() defer object.Close() + if v.ConnectionStatus != nil { + ok := object.Key("ConnectionStatus") + if err := awsRestjson1_serializeDocumentConnectionStatusEventConfiguration(v.ConnectionStatus, ok); err != nil { + return err + } + } + if v.DeviceRegistrationState != nil { ok := object.Key("DeviceRegistrationState") if err := awsRestjson1_serializeDocumentDeviceRegistrationStateEventConfiguration(v.DeviceRegistrationState, ok); err != nil { @@ -5967,6 +6397,13 @@ func awsRestjson1_serializeOpDocumentUpdateResourceEventConfigurationInput(v *Up } } + if v.Join != nil { + ok := object.Key("Join") + if err := awsRestjson1_serializeDocumentJoinEventConfiguration(v.Join, ok); err != nil { + return err + } + } + if v.Proximity != nil { ok := object.Key("Proximity") if err := awsRestjson1_serializeDocumentProximityEventConfiguration(v.Proximity, ok); err != nil { @@ -6184,6 +6621,11 @@ func awsRestjson1_serializeDocumentAbpV1_0_x(v *types.AbpV1_0_x, value smithyjso ok.String(*v.DevAddr) } + if v.FCntStart != nil { + ok := 
object.Key("FCntStart") + ok.Integer(*v.FCntStart) + } + if v.SessionKeys != nil { ok := object.Key("SessionKeys") if err := awsRestjson1_serializeDocumentSessionKeysAbpV1_0_x(v.SessionKeys, ok); err != nil { @@ -6203,6 +6645,11 @@ func awsRestjson1_serializeDocumentAbpV1_1(v *types.AbpV1_1, value smithyjson.Va ok.String(*v.DevAddr) } + if v.FCntStart != nil { + ok := object.Key("FCntStart") + ok.Integer(*v.FCntStart) + } + if v.SessionKeys != nil { ok := object.Key("SessionKeys") if err := awsRestjson1_serializeDocumentSessionKeysAbpV1_1(v.SessionKeys, ok); err != nil { @@ -6213,6 +6660,39 @@ func awsRestjson1_serializeDocumentAbpV1_1(v *types.AbpV1_1, value smithyjson.Va return nil } +func awsRestjson1_serializeDocumentConnectionStatusEventConfiguration(v *types.ConnectionStatusEventConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.LoRaWAN != nil { + ok := object.Key("LoRaWAN") + if err := awsRestjson1_serializeDocumentLoRaWANConnectionStatusEventNotificationConfigurations(v.LoRaWAN, ok); err != nil { + return err + } + } + + if len(v.WirelessGatewayIdEventTopic) > 0 { + ok := object.Key("WirelessGatewayIdEventTopic") + ok.String(string(v.WirelessGatewayIdEventTopic)) + } + + return nil +} + +func awsRestjson1_serializeDocumentConnectionStatusResourceTypeEventConfiguration(v *types.ConnectionStatusResourceTypeEventConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.LoRaWAN != nil { + ok := object.Key("LoRaWAN") + if err := awsRestjson1_serializeDocumentLoRaWANConnectionStatusResourceTypeEventConfiguration(v.LoRaWAN, ok); err != nil { + return err + } + } + + return nil +} + func awsRestjson1_serializeDocumentDeviceRegistrationStateEventConfiguration(v *types.DeviceRegistrationStateEventConfiguration, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -6224,6 +6704,25 @@ func 
awsRestjson1_serializeDocumentDeviceRegistrationStateEventConfiguration(v * } } + if len(v.WirelessDeviceIdEventTopic) > 0 { + ok := object.Key("WirelessDeviceIdEventTopic") + ok.String(string(v.WirelessDeviceIdEventTopic)) + } + + return nil +} + +func awsRestjson1_serializeDocumentDeviceRegistrationStateResourceTypeEventConfiguration(v *types.DeviceRegistrationStateResourceTypeEventConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Sidewalk != nil { + ok := object.Key("Sidewalk") + if err := awsRestjson1_serializeDocumentSidewalkResourceTypeEventConfiguration(v.Sidewalk, ok); err != nil { + return err + } + } + return nil } @@ -6287,6 +6786,63 @@ func awsRestjson1_serializeDocumentJoinEuiRange(v []string, value smithyjson.Val return nil } +func awsRestjson1_serializeDocumentJoinEventConfiguration(v *types.JoinEventConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.LoRaWAN != nil { + ok := object.Key("LoRaWAN") + if err := awsRestjson1_serializeDocumentLoRaWANJoinEventNotificationConfigurations(v.LoRaWAN, ok); err != nil { + return err + } + } + + if len(v.WirelessDeviceIdEventTopic) > 0 { + ok := object.Key("WirelessDeviceIdEventTopic") + ok.String(string(v.WirelessDeviceIdEventTopic)) + } + + return nil +} + +func awsRestjson1_serializeDocumentJoinResourceTypeEventConfiguration(v *types.JoinResourceTypeEventConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.LoRaWAN != nil { + ok := object.Key("LoRaWAN") + if err := awsRestjson1_serializeDocumentLoRaWANJoinResourceTypeEventConfiguration(v.LoRaWAN, ok); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeDocumentLoRaWANConnectionStatusEventNotificationConfigurations(v *types.LoRaWANConnectionStatusEventNotificationConfigurations, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if 
len(v.GatewayEuiEventTopic) > 0 { + ok := object.Key("GatewayEuiEventTopic") + ok.String(string(v.GatewayEuiEventTopic)) + } + + return nil +} + +func awsRestjson1_serializeDocumentLoRaWANConnectionStatusResourceTypeEventConfiguration(v *types.LoRaWANConnectionStatusResourceTypeEventConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.WirelessGatewayEventTopic) > 0 { + ok := object.Key("WirelessGatewayEventTopic") + ok.String(string(v.WirelessGatewayEventTopic)) + } + + return nil +} + func awsRestjson1_serializeDocumentLoRaWANDevice(v *types.LoRaWANDevice, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -6520,6 +7076,30 @@ func awsRestjson1_serializeDocumentLoRaWANGatewayVersion(v *types.LoRaWANGateway return nil } +func awsRestjson1_serializeDocumentLoRaWANJoinEventNotificationConfigurations(v *types.LoRaWANJoinEventNotificationConfigurations, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.DevEuiEventTopic) > 0 { + ok := object.Key("DevEuiEventTopic") + ok.String(string(v.DevEuiEventTopic)) + } + + return nil +} + +func awsRestjson1_serializeDocumentLoRaWANJoinResourceTypeEventConfiguration(v *types.LoRaWANJoinResourceTypeEventConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.WirelessDeviceEventTopic) > 0 { + ok := object.Key("WirelessDeviceEventTopic") + ok.String(string(v.WirelessDeviceEventTopic)) + } + + return nil +} + func awsRestjson1_serializeDocumentLoRaWANMulticast(v *types.LoRaWANMulticast, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -6616,6 +7196,20 @@ func awsRestjson1_serializeDocumentLoRaWANUpdateDevice(v *types.LoRaWANUpdateDev object := value.Object() defer object.Close() + if v.AbpV1_0_x != nil { + ok := object.Key("AbpV1_0_x") + if err := awsRestjson1_serializeDocumentUpdateAbpV1_0_x(v.AbpV1_0_x, ok); err != nil { 
+ return err + } + } + + if v.AbpV1_1 != nil { + ok := object.Key("AbpV1_1") + if err := awsRestjson1_serializeDocumentUpdateAbpV1_1(v.AbpV1_1, ok); err != nil { + return err + } + } + if v.DeviceProfileId != nil { ok := object.Key("DeviceProfileId") ok.String(*v.DeviceProfileId) @@ -6740,6 +7334,25 @@ func awsRestjson1_serializeDocumentProximityEventConfiguration(v *types.Proximit } } + if len(v.WirelessDeviceIdEventTopic) > 0 { + ok := object.Key("WirelessDeviceIdEventTopic") + ok.String(string(v.WirelessDeviceIdEventTopic)) + } + + return nil +} + +func awsRestjson1_serializeDocumentProximityResourceTypeEventConfiguration(v *types.ProximityResourceTypeEventConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Sidewalk != nil { + ok := object.Key("Sidewalk") + if err := awsRestjson1_serializeDocumentSidewalkResourceTypeEventConfiguration(v.Sidewalk, ok); err != nil { + return err + } + } + return nil } @@ -6816,6 +7429,18 @@ func awsRestjson1_serializeDocumentSidewalkEventNotificationConfigurations(v *ty return nil } +func awsRestjson1_serializeDocumentSidewalkResourceTypeEventConfiguration(v *types.SidewalkResourceTypeEventConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.WirelessDeviceEventTopic) > 0 { + ok := object.Key("WirelessDeviceEventTopic") + ok.String(string(v.WirelessDeviceEventTopic)) + } + + return nil +} + func awsRestjson1_serializeDocumentSidewalkSendDataToDevice(v *types.SidewalkSendDataToDevice, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -6903,6 +7528,30 @@ func awsRestjson1_serializeDocumentTraceContent(v *types.TraceContent, value smi return nil } +func awsRestjson1_serializeDocumentUpdateAbpV1_0_x(v *types.UpdateAbpV1_0_x, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.FCntStart != nil { + ok := object.Key("FCntStart") + ok.Integer(*v.FCntStart) + } + 
+ return nil +} + +func awsRestjson1_serializeDocumentUpdateAbpV1_1(v *types.UpdateAbpV1_1, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.FCntStart != nil { + ok := object.Key("FCntStart") + ok.Integer(*v.FCntStart) + } + + return nil +} + func awsRestjson1_serializeDocumentUpdateWirelessGatewayTaskCreate(v *types.UpdateWirelessGatewayTaskCreate, value smithyjson.Value) error { object := value.Object() defer object.Close() diff --git a/service/iotwireless/types/enums.go b/service/iotwireless/types/enums.go index 3243efd4d00..839d76a3e67 100644 --- a/service/iotwireless/types/enums.go +++ b/service/iotwireless/types/enums.go @@ -120,6 +120,27 @@ func (EventNotificationPartnerType) Values() []EventNotificationPartnerType { } } +type EventNotificationResourceType string + +// Enum values for EventNotificationResourceType +const ( + EventNotificationResourceTypeSidewalkAccount EventNotificationResourceType = "SidewalkAccount" + EventNotificationResourceTypeWirelessDevice EventNotificationResourceType = "WirelessDevice" + EventNotificationResourceTypeWirelessGateway EventNotificationResourceType = "WirelessGateway" +) + +// Values returns all known values for EventNotificationResourceType. Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. The ordering of this slice is not guaranteed to be stable across +// updates. 
+func (EventNotificationResourceType) Values() []EventNotificationResourceType { + return []EventNotificationResourceType{ + "SidewalkAccount", + "WirelessDevice", + "WirelessGateway", + } +} + type EventNotificationTopicStatus string // Enum values for EventNotificationTopicStatus @@ -220,7 +241,11 @@ type IdentifierType string // Enum values for IdentifierType const ( - IdentifierTypePartnerAccountId IdentifierType = "PartnerAccountId" + IdentifierTypePartnerAccountId IdentifierType = "PartnerAccountId" + IdentifierTypeDevEui IdentifierType = "DevEui" + IdentifierTypeGatewayEui IdentifierType = "GatewayEui" + IdentifierTypeWirelessDeviceId IdentifierType = "WirelessDeviceId" + IdentifierTypeWirelessGatewayId IdentifierType = "WirelessGatewayId" ) // Values returns all known values for IdentifierType. Note that this can be @@ -229,6 +254,10 @@ const ( func (IdentifierType) Values() []IdentifierType { return []IdentifierType{ "PartnerAccountId", + "DevEui", + "GatewayEui", + "WirelessDeviceId", + "WirelessGatewayId", } } diff --git a/service/iotwireless/types/types.go b/service/iotwireless/types/types.go index 1cc318e8983..50dd9c9116e 100644 --- a/service/iotwireless/types/types.go +++ b/service/iotwireless/types/types.go @@ -13,6 +13,9 @@ type AbpV1_0_x struct { // The DevAddr value. DevAddr *string + // The FCnt init value. + FCntStart *int32 + // Session keys for ABP v1.0.x SessionKeys *SessionKeysAbpV1_0_x @@ -25,6 +28,9 @@ type AbpV1_1 struct { // The DevAddr value. DevAddr *string + // The FCnt init value. + FCntStart *int32 + // Session keys for ABP v1.1 SessionKeys *SessionKeysAbpV1_1 @@ -47,6 +53,31 @@ type CertificateList struct { noSmithyDocumentSerde } +// Connection status event configuration object for enabling or disabling topic. +type ConnectionStatusEventConfiguration struct { + + // Connection status event configuration object for enabling or disabling LoRaWAN + // related event topics. 
+ LoRaWAN *LoRaWANConnectionStatusEventNotificationConfigurations + + // Enum to denote whether the wireless gateway id connection status event topic is + // enabled or disabled . + WirelessGatewayIdEventTopic EventNotificationTopicStatus + + noSmithyDocumentSerde +} + +// Connection status resource type event configuration object for enabling or +// disabling topic. +type ConnectionStatusResourceTypeEventConfiguration struct { + + // Connection status resource type event configuration object for enabling or + // disabling LoRaWAN related event topics. + LoRaWAN *LoRaWANConnectionStatusResourceTypeEventConfiguration + + noSmithyDocumentSerde +} + // Describes a destination. type Destinations struct { @@ -94,28 +125,80 @@ type DeviceRegistrationStateEventConfiguration struct { // Sidewalk related event topics. Sidewalk *SidewalkEventNotificationConfigurations + // Enum to denote whether the wireless device id device registration state event + // topic is enabled or disabled. + WirelessDeviceIdEventTopic EventNotificationTopicStatus + + noSmithyDocumentSerde +} + +// Device registration state resource type event configuration object for enabling +// or disabling topic. +type DeviceRegistrationStateResourceTypeEventConfiguration struct { + + // Device registration resource type state event configuration object for enabling + // or disabling Sidewalk related event topics. + Sidewalk *SidewalkResourceTypeEventConfiguration + noSmithyDocumentSerde } -// The message in downlink queue. +// The message in the downlink queue. type DownlinkQueueMessage struct { // LoRaWAN router info. LoRaWAN *LoRaWANSendDataToDevice - // The messageId allocated by IoT Wireless for tracing purpose + // The message ID assigned by IoT Wireless to each downlink message, which helps + // identify the message. MessageId *string - // The timestamp that Iot Wireless received the message. + // The time at which Iot Wireless received the downlink message. 
ReceivedAt *string - // The transmit mode to use to send data to the wireless device. Can be: 0 for UM - // (unacknowledge mode) or 1 for AM (acknowledge mode). + // The transmit mode to use for sending data to the wireless device. This can be 0 + // for UM (unacknowledge mode) or 1 for AM (acknowledge mode). TransmitMode *int32 noSmithyDocumentSerde } +// Event configuration object for a single resource. +type EventConfigurationItem struct { + + // Object of all event configurations and the status of the event topics. + Events *EventNotificationItemConfigurations + + // Resource identifier opted in for event messaging. + Identifier *string + + // Identifier type of the particular resource identifier for event configuration. + IdentifierType IdentifierType + + // Partner type of the resource if the identifier type is PartnerAccountId. + PartnerType EventNotificationPartnerType + + noSmithyDocumentSerde +} + +// Object of all event configurations and the status of the event topics. +type EventNotificationItemConfigurations struct { + + // Connection status event configuration for an event configuration item. + ConnectionStatus *ConnectionStatusEventConfiguration + + // Device registration state event configuration for an event configuration item. + DeviceRegistrationState *DeviceRegistrationStateEventConfiguration + + // Join event configuration for an event configuration item. + Join *JoinEventConfiguration + + // Proximity event configuration for an event configuration item. + Proximity *ProximityEventConfiguration + + noSmithyDocumentSerde +} + // List of FPort assigned for different LoRaWAN application packages to use type FPorts struct { @@ -146,6 +229,50 @@ type FuotaTask struct { noSmithyDocumentSerde } +// Join event configuration object for enabling or disabling topic. +type JoinEventConfiguration struct { + + // Join event configuration object for enabling or disabling LoRaWAN related event + // topics. 
+ LoRaWAN *LoRaWANJoinEventNotificationConfigurations + + // Enum to denote whether the wireless device id join event topic is enabled or + // disabled. + WirelessDeviceIdEventTopic EventNotificationTopicStatus + + noSmithyDocumentSerde +} + +// Join resource type event configuration object for enabling or disabling topic. +type JoinResourceTypeEventConfiguration struct { + + // Join resource type event configuration object for enabling or disabling LoRaWAN + // related event topics. + LoRaWAN *LoRaWANJoinResourceTypeEventConfiguration + + noSmithyDocumentSerde +} + +// Object for LoRaWAN connection status resource type event configuration. +type LoRaWANConnectionStatusEventNotificationConfigurations struct { + + // Enum to denote whether the gateway eui connection status event topic is enabled + // or disabled. + GatewayEuiEventTopic EventNotificationTopicStatus + + noSmithyDocumentSerde +} + +// Object for LoRaWAN connection status resource type event configuration. +type LoRaWANConnectionStatusResourceTypeEventConfiguration struct { + + // Enum to denote whether the wireless gateway connection status event topic is + // enabled or disabled. + WirelessGatewayEventTopic EventNotificationTopicStatus + + noSmithyDocumentSerde +} + // LoRaWAN object for create functions. type LoRaWANDevice struct { @@ -409,6 +536,25 @@ type LoRaWANGetServiceProfileInfo struct { noSmithyDocumentSerde } +// Object for LoRaWAN join resource type event configuration. +type LoRaWANJoinEventNotificationConfigurations struct { + + // Enum to denote whether the dev eui join event topic is enabled or disabled. + DevEuiEventTopic EventNotificationTopicStatus + + noSmithyDocumentSerde +} + +// Object for LoRaWAN join resource type event configuration. +type LoRaWANJoinResourceTypeEventConfiguration struct { + + // Enum to denote whether the wireless device join event topic is enabled or + // disabled. 
+ WirelessDeviceEventTopic EventNotificationTopicStatus + + noSmithyDocumentSerde +} + // LoRaWAN object for list functions. type LoRaWANListDevice struct { @@ -506,6 +652,12 @@ type LoRaWANStartFuotaTask struct { // LoRaWAN object for update functions. type LoRaWANUpdateDevice struct { + // ABP device object for update APIs for v1.0.x + AbpV1_0_x *UpdateAbpV1_0_x + + // ABP device object for update APIs for v1.1 + AbpV1_1 *UpdateAbpV1_1 + // The ID of the device profile for the wireless device. DeviceProfileId *string @@ -578,6 +730,18 @@ type MulticastWirelessMetadata struct { noSmithyDocumentSerde } +// Network analyzer configurations. +type NetworkAnalyzerConfigurations struct { + + // The Amazon Resource Name of the new resource. + Arn *string + + // Name of the network analyzer configuration. + Name *string + + noSmithyDocumentSerde +} + // OTAA device object for v1.0.x type OtaaV1_0_x struct { @@ -615,6 +779,21 @@ type ProximityEventConfiguration struct { // event topics. Sidewalk *SidewalkEventNotificationConfigurations + // Enum to denote whether the wireless device id proximity event topic is enabled + // or disabled. + WirelessDeviceIdEventTopic EventNotificationTopicStatus + + noSmithyDocumentSerde +} + +// Proximity resource type event configuration object for enabling or disabling +// topic. +type ProximityResourceTypeEventConfiguration struct { + + // Proximity resource type event configuration object for enabling and disabling + // wireless device topic. + Sidewalk *SidewalkResourceTypeEventConfiguration + noSmithyDocumentSerde } @@ -754,6 +933,17 @@ type SidewalkListDevice struct { noSmithyDocumentSerde } +// Sidewalk resource type event configuration object for enabling or disabling +// topic. +type SidewalkResourceTypeEventConfiguration struct { + + // Enum to denote whether the wireless device join event topic is enabled or + // disabled. 
+ WirelessDeviceEventTopic EventNotificationTopicStatus + + noSmithyDocumentSerde +} + // Information about a Sidewalk router. type SidewalkSendDataToDevice struct { @@ -791,18 +981,40 @@ type Tag struct { noSmithyDocumentSerde } -// Trace Content for resources. +// Trace content for your wireless gateway and wireless device resources. type TraceContent struct { - // The log level for a log message. + // The log level for a log message. The log levels can be disabled, or set to ERROR + // to display less verbose logs containing only error information, or to INFO for + // more detailed logs. LogLevel LogLevel - // WirelessDevice FrameInfo for trace content. + // FrameInfo of your wireless device resources for the trace content. Use FrameInfo + // to debug the communication between your LoRaWAN end devices and the network + // server. WirelessDeviceFrameInfo WirelessDeviceFrameInfo noSmithyDocumentSerde } +// ABP device object for LoRaWAN specification v1.0.x +type UpdateAbpV1_0_x struct { + + // The FCnt init value. + FCntStart *int32 + + noSmithyDocumentSerde +} + +// ABP device object for LoRaWAN specification v1.1 +type UpdateAbpV1_1 struct { + + // The FCnt init value. + FCntStart *int32 + + noSmithyDocumentSerde +} + // UpdateWirelessGatewayTaskCreate object. type UpdateWirelessGatewayTaskCreate struct { @@ -845,7 +1057,9 @@ type WirelessDeviceEventLogOption struct { // This member is required. Event WirelessDeviceEvent - // The log level for a log message. + // The log level for a log message. The log levels can be disabled, or set to ERROR + // to display less verbose logs containing only error information, or to INFO for + // more detailed logs. // // This member is required. LogLevel LogLevel @@ -857,7 +1071,9 @@ type WirelessDeviceEventLogOption struct { // specific type of wireless device. type WirelessDeviceLogOption struct { - // The log level for a log message. + // The log level for a log message. 
The log levels can be disabled, or set to ERROR + // to display less verbose logs containing only error information, or to INFO for + // more detailed logs. // // This member is required. LogLevel LogLevel @@ -922,7 +1138,9 @@ type WirelessGatewayEventLogOption struct { // This member is required. Event WirelessGatewayEvent - // The log level for a log message. + // The log level for a log message. The log levels can be disabled, or set to ERROR + // to display less verbose logs containing only error information, or to INFO for + // more detailed logs. // // This member is required. LogLevel LogLevel @@ -934,7 +1152,9 @@ type WirelessGatewayEventLogOption struct { // specific type of wireless gateway. type WirelessGatewayLogOption struct { - // The log level for a log message. + // The log level for a log message. The log levels can be disabled, or set to ERROR + // to display less verbose logs containing only error information, or to INFO for + // more detailed logs. // // This member is required. 
LogLevel LogLevel diff --git a/service/iotwireless/validators.go b/service/iotwireless/validators.go index 8b837d05b5a..13163b31002 100644 --- a/service/iotwireless/validators.go +++ b/service/iotwireless/validators.go @@ -250,6 +250,26 @@ func (m *validateOpCreateMulticastGroup) HandleInitialize(ctx context.Context, i return next.HandleInitialize(ctx, in) } +type validateOpCreateNetworkAnalyzerConfiguration struct { +} + +func (*validateOpCreateNetworkAnalyzerConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateNetworkAnalyzerConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateNetworkAnalyzerConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateNetworkAnalyzerConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpCreateServiceProfile struct { } @@ -430,6 +450,26 @@ func (m *validateOpDeleteMulticastGroup) HandleInitialize(ctx context.Context, i return next.HandleInitialize(ctx, in) } +type validateOpDeleteNetworkAnalyzerConfiguration struct { +} + +func (*validateOpDeleteNetworkAnalyzerConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteNetworkAnalyzerConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteNetworkAnalyzerConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteNetworkAnalyzerConfigurationInput(input); err != nil { + return out, metadata, 
err + } + return next.HandleInitialize(ctx, in) +} + type validateOpDeleteQueuedMessages struct { } @@ -1050,6 +1090,26 @@ func (m *validateOpGetWirelessGatewayTask) HandleInitialize(ctx context.Context, return next.HandleInitialize(ctx, in) } +type validateOpListEventConfigurations struct { +} + +func (*validateOpListEventConfigurations) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListEventConfigurations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListEventConfigurationsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListEventConfigurationsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpListMulticastGroupsByFuotaTask struct { } @@ -1558,6 +1618,10 @@ func addOpCreateMulticastGroupValidationMiddleware(stack *middleware.Stack) erro return stack.Initialize.Add(&validateOpCreateMulticastGroup{}, middleware.After) } +func addOpCreateNetworkAnalyzerConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateNetworkAnalyzerConfiguration{}, middleware.After) +} + func addOpCreateServiceProfileValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpCreateServiceProfile{}, middleware.After) } @@ -1594,6 +1658,10 @@ func addOpDeleteMulticastGroupValidationMiddleware(stack *middleware.Stack) erro return stack.Initialize.Add(&validateOpDeleteMulticastGroup{}, middleware.After) } +func addOpDeleteNetworkAnalyzerConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteNetworkAnalyzerConfiguration{}, middleware.After) +} + func addOpDeleteQueuedMessagesValidationMiddleware(stack 
*middleware.Stack) error { return stack.Initialize.Add(&validateOpDeleteQueuedMessages{}, middleware.After) } @@ -1718,6 +1786,10 @@ func addOpGetWirelessGatewayTaskValidationMiddleware(stack *middleware.Stack) er return stack.Initialize.Add(&validateOpGetWirelessGatewayTask{}, middleware.After) } +func addOpListEventConfigurationsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListEventConfigurations{}, middleware.After) +} + func addOpListMulticastGroupsByFuotaTaskValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpListMulticastGroupsByFuotaTask{}, middleware.After) } @@ -2227,6 +2299,26 @@ func validateOpCreateMulticastGroupInput(v *CreateMulticastGroupInput) error { } } +func validateOpCreateNetworkAnalyzerConfigurationInput(v *CreateNetworkAnalyzerConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateNetworkAnalyzerConfigurationInput"} + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if v.Tags != nil { + if err := validateTagList(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpCreateServiceProfileInput(v *CreateServiceProfileInput) error { if v == nil { return nil @@ -2382,6 +2474,21 @@ func validateOpDeleteMulticastGroupInput(v *DeleteMulticastGroupInput) error { } } +func validateOpDeleteNetworkAnalyzerConfigurationInput(v *DeleteNetworkAnalyzerConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteNetworkAnalyzerConfigurationInput"} + if v.ConfigurationName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ConfigurationName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func 
validateOpDeleteQueuedMessagesInput(v *DeleteQueuedMessagesInput) error { if v == nil { return nil @@ -2877,6 +2984,21 @@ func validateOpGetWirelessGatewayTaskInput(v *GetWirelessGatewayTaskInput) error } } +func validateOpListEventConfigurationsInput(v *ListEventConfigurationsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListEventConfigurationsInput"} + if len(v.ResourceType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("ResourceType")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpListMulticastGroupsByFuotaTaskInput(v *ListMulticastGroupsByFuotaTaskInput) error { if v == nil { return nil diff --git a/service/lookoutequipment/api_op_CreateDataset.go b/service/lookoutequipment/api_op_CreateDataset.go index 47b5b4ae5b6..ec1b96f8ff5 100644 --- a/service/lookoutequipment/api_op_CreateDataset.go +++ b/service/lookoutequipment/api_op_CreateDataset.go @@ -47,8 +47,6 @@ type CreateDatasetInput struct { // A JSON description of the data that is in each time series dataset, including // names, column names, and data types. - // - // This member is required. DatasetSchema *types.DatasetSchema // Provides the identifier of the KMS key used to encrypt dataset data by Amazon diff --git a/service/lookoutequipment/api_op_DescribeDataIngestionJob.go b/service/lookoutequipment/api_op_DescribeDataIngestionJob.go index 916ae885490..4643df5a4a5 100644 --- a/service/lookoutequipment/api_op_DescribeDataIngestionJob.go +++ b/service/lookoutequipment/api_op_DescribeDataIngestionJob.go @@ -13,7 +13,7 @@ import ( ) // Provides information on a specific data ingestion job such as creation time, -// dataset ARN, status, and so on. +// dataset ARN, and status. 
func (c *Client) DescribeDataIngestionJob(ctx context.Context, params *DescribeDataIngestionJobInput, optFns ...func(*Options)) (*DescribeDataIngestionJobOutput, error) { if params == nil { params = &DescribeDataIngestionJobInput{} @@ -44,6 +44,20 @@ type DescribeDataIngestionJobOutput struct { // The time at which the data ingestion job was created. CreatedAt *time.Time + // Indicates the latest timestamp corresponding to data that was successfully + // ingested during this specific ingestion job. + DataEndTime *time.Time + + // Gives statistics about a completed ingestion job. These statistics primarily + // relate to quantifying incorrect data such as MissingCompleteSensorData, + // MissingSensorData, UnsupportedDateFormats, InsufficientSensorData, and + // DuplicateTimeStamps. + DataQualitySummary *types.DataQualitySummary + + // Indicates the earliest timestamp corresponding to data that was successfully + // ingested during this specific ingestion job. + DataStartTime *time.Time + // The Amazon Resource Name (ARN) of the dataset being used in the data ingestion // job. DatasetArn *string @@ -51,6 +65,13 @@ type DescribeDataIngestionJobOutput struct { // Specifies the reason for failure when a data ingestion job has failed. FailedReason *string + // Indicates the size of the ingested dataset. + IngestedDataSize *int64 + + // Gives statistics about how many files have been ingested, and which files have + // not been ingested, for a particular ingestion job. + IngestedFilesSummary *types.IngestedFilesSummary + // Specifies the S3 location configuration for the data input for the data // ingestion job. IngestionInputConfiguration *types.IngestionInputConfiguration @@ -65,6 +86,10 @@ type DescribeDataIngestionJobOutput struct { // Indicates the status of the DataIngestionJob operation. Status types.IngestionJobStatus + // Provides details about status of the ingestion job that is currently in + // progress. 
+ StatusDetail *string + // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata diff --git a/service/lookoutequipment/api_op_DescribeDataset.go b/service/lookoutequipment/api_op_DescribeDataset.go index c2a5c096a29..1735d013afa 100644 --- a/service/lookoutequipment/api_op_DescribeDataset.go +++ b/service/lookoutequipment/api_op_DescribeDataset.go @@ -12,8 +12,8 @@ import ( "time" ) -// Provides a JSON description of the data that is in each time series dataset, -// including names, column names, and data types. +// Provides a JSON description of the data in each time series dataset, including +// names, column names, and data types. func (c *Client) DescribeDataset(ctx context.Context, params *DescribeDatasetInput, optFns ...func(*Options)) (*DescribeDatasetOutput, error) { if params == nil { params = &DescribeDatasetInput{} @@ -44,12 +44,30 @@ type DescribeDatasetOutput struct { // Specifies the time the dataset was created in Amazon Lookout for Equipment. CreatedAt *time.Time + // Indicates the latest timestamp corresponding to data that was successfully + // ingested during the most recent ingestion of this particular dataset. + DataEndTime *time.Time + + // Gives statistics associated with the given dataset for the latest successful + // associated ingestion job id. These statistics primarily relate to quantifying + // incorrect data such as MissingCompleteSensorData, MissingSensorData, + // UnsupportedDateFormats, InsufficientSensorData, and DuplicateTimeStamps. + DataQualitySummary *types.DataQualitySummary + + // Indicates the earliest timestamp corresponding to data that was successfully + // ingested during the most recent ingestion of this particular dataset. + DataStartTime *time.Time + // The Amazon Resource Name (ARN) of the dataset being described. DatasetArn *string // The name of the dataset being described. 
DatasetName *string + // IngestedFilesSummary associated with the given dataset for the latest successful + // associated ingestion job id. + IngestedFilesSummary *types.IngestedFilesSummary + // Specifies the S3 location configuration for the data input for the data + // ingestion job. IngestionInputConfiguration *types.IngestionInputConfiguration @@ -57,6 +75,10 @@ type DescribeDatasetOutput struct { // Specifies the time the dataset was last updated, if it was. LastUpdatedAt *time.Time + // The Amazon Resource Name (ARN) of the IAM role that you are using for the + // data ingestion job. + RoleArn *string + // A JSON description of the data that is in each time series dataset, including // names, column names, and data types. // diff --git a/service/lookoutequipment/api_op_ListSensorStatistics.go b/service/lookoutequipment/api_op_ListSensorStatistics.go new file mode 100644 index 00000000000..b428c79c731 --- /dev/null +++ b/service/lookoutequipment/api_op_ListSensorStatistics.go @@ -0,0 +1,234 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package lookoutequipment + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/lookoutequipment/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists statistics about the data collected for each of the sensors that have been +// successfully ingested in the particular dataset. Can also be used to retrieve +// Sensor Statistics for a previous ingestion job. 
+func (c *Client) ListSensorStatistics(ctx context.Context, params *ListSensorStatisticsInput, optFns ...func(*Options)) (*ListSensorStatisticsOutput, error) { + if params == nil { + params = &ListSensorStatisticsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListSensorStatistics", params, optFns, c.addOperationListSensorStatisticsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListSensorStatisticsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListSensorStatisticsInput struct { + + // The name of the dataset associated with the list of Sensor Statistics. + // + // This member is required. + DatasetName *string + + // The ingestion job id associated with the list of Sensor Statistics. To get + // sensor statistics for a particular ingestion job id, both dataset name and + // ingestion job id must be submitted as inputs. + IngestionJobId *string + + // Specifies the maximum number of sensors for which to retrieve statistics. + MaxResults *int32 + + // An opaque pagination token indicating where to continue the listing of sensor + // statistics. + NextToken *string + + noSmithyDocumentSerde +} + +type ListSensorStatisticsOutput struct { + + // An opaque pagination token indicating where to continue the listing of sensor + // statistics. + NextToken *string + + // Provides ingestion-based statistics regarding the specified sensor with respect + // to various validation types, such as whether data exists, the number and + // percentage of missing values, and the number and percentage of duplicate + // timestamps. + SensorStatisticsSummaries []types.SensorStatisticsSummary + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListSensorStatisticsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson10_serializeOpListSensorStatistics{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpListSensorStatistics{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpListSensorStatisticsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListSensorStatistics(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// 
ListSensorStatisticsAPIClient is a client that implements the +// ListSensorStatistics operation. +type ListSensorStatisticsAPIClient interface { + ListSensorStatistics(context.Context, *ListSensorStatisticsInput, ...func(*Options)) (*ListSensorStatisticsOutput, error) +} + +var _ ListSensorStatisticsAPIClient = (*Client)(nil) + +// ListSensorStatisticsPaginatorOptions is the paginator options for +// ListSensorStatistics +type ListSensorStatisticsPaginatorOptions struct { + // Specifies the maximum number of sensors for which to retrieve statistics. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListSensorStatisticsPaginator is a paginator for ListSensorStatistics +type ListSensorStatisticsPaginator struct { + options ListSensorStatisticsPaginatorOptions + client ListSensorStatisticsAPIClient + params *ListSensorStatisticsInput + nextToken *string + firstPage bool +} + +// NewListSensorStatisticsPaginator returns a new ListSensorStatisticsPaginator +func NewListSensorStatisticsPaginator(client ListSensorStatisticsAPIClient, params *ListSensorStatisticsInput, optFns ...func(*ListSensorStatisticsPaginatorOptions)) *ListSensorStatisticsPaginator { + if params == nil { + params = &ListSensorStatisticsInput{} + } + + options := ListSensorStatisticsPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListSensorStatisticsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListSensorStatisticsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListSensorStatistics page. 
+func (p *ListSensorStatisticsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListSensorStatisticsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + result, err := p.client.ListSensorStatistics(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListSensorStatistics(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "lookoutequipment", + OperationName: "ListSensorStatistics", + } +} diff --git a/service/lookoutequipment/deserializers.go b/service/lookoutequipment/deserializers.go index 435ed206e22..1c67616ff7a 100644 --- a/service/lookoutequipment/deserializers.go +++ b/service/lookoutequipment/deserializers.go @@ -17,6 +17,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" "io" "io/ioutil" + "math" "strings" ) @@ -1805,6 +1806,129 @@ func awsAwsjson10_deserializeOpErrorListModels(response *smithyhttp.Response, me } } +type awsAwsjson10_deserializeOpListSensorStatistics struct { +} + +func (*awsAwsjson10_deserializeOpListSensorStatistics) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpListSensorStatistics) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return 
out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorListSensorStatistics(response, &metadata) + } + output := &ListSensorStatisticsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentListSensorStatisticsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorListSensorStatistics(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + 
decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsAwsjson10_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson10_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsAwsjson10_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + type awsAwsjson10_deserializeOpListTagsForResource struct { } @@ -2947,6 +3071,59 @@ func awsAwsjson10_deserializeDocumentAccessDeniedException(v **types.AccessDenie return nil } +func awsAwsjson10_deserializeDocumentCategoricalValues(v **types.CategoricalValues, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CategoricalValues + if *v == nil { + sv = &types.CategoricalValues{} + } else { + sv 
= *v + } + + for key, value := range shape { + switch key { + case "NumberOfCategory": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.NumberOfCategory = ptr.Int32(int32(i64)) + } + + case "Status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected StatisticalIssueStatus to be of type string, got %T instead", value) + } + sv.Status = types.StatisticalIssueStatus(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson10_deserializeDocumentConflictException(v **types.ConflictException, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -2987,6 +3164,84 @@ func awsAwsjson10_deserializeDocumentConflictException(v **types.ConflictExcepti return nil } +func awsAwsjson10_deserializeDocumentCountPercent(v **types.CountPercent, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CountPercent + if *v == nil { + sv = &types.CountPercent{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Count": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Count = ptr.Int32(int32(i64)) + } + + case "Percentage": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.Percentage = float32(f64) + + case string: + var f64 float64 + switch { + case strings.EqualFold(jtv, "NaN"): + f64 = math.NaN() + + case 
strings.EqualFold(jtv, "Infinity"): + f64 = math.Inf(1) + + case strings.EqualFold(jtv, "-Infinity"): + f64 = math.Inf(-1) + + default: + return fmt.Errorf("unknown JSON number value: %s", jtv) + + } + sv.Percentage = float32(f64) + + default: + return fmt.Errorf("expected Float to be a JSON Number, got %T instead", value) + + } + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson10_deserializeDocumentDataIngestionJobSummaries(v *[]types.DataIngestionJobSummary, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -3133,7 +3388,7 @@ func awsAwsjson10_deserializeDocumentDataPreProcessingConfiguration(v **types.Da return nil } -func awsAwsjson10_deserializeDocumentDatasetSummaries(v *[]types.DatasetSummary, value interface{}) error { +func awsAwsjson10_deserializeDocumentDataQualitySummary(v **types.DataQualitySummary, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -3141,33 +3396,89 @@ func awsAwsjson10_deserializeDocumentDatasetSummaries(v *[]types.DatasetSummary, return nil } - shape, ok := value.([]interface{}) + shape, ok := value.(map[string]interface{}) if !ok { return fmt.Errorf("unexpected JSON type %v", value) } - var cv []types.DatasetSummary + var sv *types.DataQualitySummary if *v == nil { - cv = []types.DatasetSummary{} + sv = &types.DataQualitySummary{} } else { - cv = *v + sv = *v } - for _, value := range shape { - var col types.DatasetSummary - destAddr := &col - if err := awsAwsjson10_deserializeDocumentDatasetSummary(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) + for key, value := range shape { + switch key { + case "DuplicateTimestamps": + if err := awsAwsjson10_deserializeDocumentDuplicateTimestamps(&sv.DuplicateTimestamps, value); err != nil { + return err + } + + case "InsufficientSensorData": + if err := 
awsAwsjson10_deserializeDocumentInsufficientSensorData(&sv.InsufficientSensorData, value); err != nil { + return err + } + case "InvalidSensorData": + if err := awsAwsjson10_deserializeDocumentInvalidSensorData(&sv.InvalidSensorData, value); err != nil { + return err + } + + case "MissingSensorData": + if err := awsAwsjson10_deserializeDocumentMissingSensorData(&sv.MissingSensorData, value); err != nil { + return err + } + + case "UnsupportedTimestamps": + if err := awsAwsjson10_deserializeDocumentUnsupportedTimestamps(&sv.UnsupportedTimestamps, value); err != nil { + return err + } + + default: + _, _ = key, value + + } } - *v = cv + *v = sv return nil } -func awsAwsjson10_deserializeDocumentDatasetSummary(v **types.DatasetSummary, value interface{}) error { +func awsAwsjson10_deserializeDocumentDatasetSummaries(v *[]types.DatasetSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.DatasetSummary + if *v == nil { + cv = []types.DatasetSummary{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.DatasetSummary + destAddr := &col + if err := awsAwsjson10_deserializeDocumentDatasetSummary(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentDatasetSummary(v **types.DatasetSummary, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -3241,6 +3552,50 @@ func awsAwsjson10_deserializeDocumentDatasetSummary(v **types.DatasetSummary, va return nil } +func awsAwsjson10_deserializeDocumentDuplicateTimestamps(v **types.DuplicateTimestamps, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok 
:= value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DuplicateTimestamps + if *v == nil { + sv = &types.DuplicateTimestamps{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "TotalNumberOfDuplicateTimestamps": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.TotalNumberOfDuplicateTimestamps = ptr.Int32(int32(i64)) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson10_deserializeDocumentInferenceExecutionSummaries(v *[]types.InferenceExecutionSummary, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -3797,6 +4152,68 @@ func awsAwsjson10_deserializeDocumentInferenceSchedulerSummary(v **types.Inferen return nil } +func awsAwsjson10_deserializeDocumentIngestedFilesSummary(v **types.IngestedFilesSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.IngestedFilesSummary + if *v == nil { + sv = &types.IngestedFilesSummary{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "DiscardedFiles": + if err := awsAwsjson10_deserializeDocumentListOfDiscardedFiles(&sv.DiscardedFiles, value); err != nil { + return err + } + + case "IngestedNumberOfFiles": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.IngestedNumberOfFiles = ptr.Int32(int32(i64)) + } + + case "TotalNumberOfFiles": + if value != nil { + jtv, ok := 
value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.TotalNumberOfFiles = ptr.Int32(int32(i64)) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson10_deserializeDocumentIngestionInputConfiguration(v **types.IngestionInputConfiguration, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -3864,6 +4281,15 @@ func awsAwsjson10_deserializeDocumentIngestionS3InputConfiguration(v **types.Ing sv.Bucket = ptr.String(jtv) } + case "KeyPattern": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyPattern to be of type string, got %T instead", value) + } + sv.KeyPattern = ptr.String(jtv) + } + case "Prefix": if value != nil { jtv, ok := value.(string) @@ -3882,7 +4308,7 @@ func awsAwsjson10_deserializeDocumentIngestionS3InputConfiguration(v **types.Ing return nil } -func awsAwsjson10_deserializeDocumentInternalServerException(v **types.InternalServerException, value interface{}) error { +func awsAwsjson10_deserializeDocumentInsufficientSensorData(v **types.InsufficientSensorData, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -3895,22 +4321,23 @@ func awsAwsjson10_deserializeDocumentInternalServerException(v **types.InternalS return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.InternalServerException + var sv *types.InsufficientSensorData if *v == nil { - sv = &types.InternalServerException{} + sv = &types.InsufficientSensorData{} } else { sv = *v } for key, value := range shape { switch key { - case "Message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected BoundedLengthString to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) + case "MissingCompleteSensorData": + if err := 
awsAwsjson10_deserializeDocumentMissingCompleteSensorData(&sv.MissingCompleteSensorData, value); err != nil { + return err + } + + case "SensorsWithShortDateRange": + if err := awsAwsjson10_deserializeDocumentSensorsWithShortDateRange(&sv.SensorsWithShortDateRange, value); err != nil { + return err } default: @@ -3922,7 +4349,7 @@ func awsAwsjson10_deserializeDocumentInternalServerException(v **types.InternalS return nil } -func awsAwsjson10_deserializeDocumentLabelsInputConfiguration(v **types.LabelsInputConfiguration, value interface{}) error { +func awsAwsjson10_deserializeDocumentInternalServerException(v **types.InternalServerException, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -3935,18 +4362,22 @@ func awsAwsjson10_deserializeDocumentLabelsInputConfiguration(v **types.LabelsIn return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.LabelsInputConfiguration + var sv *types.InternalServerException if *v == nil { - sv = &types.LabelsInputConfiguration{} + sv = &types.InternalServerException{} } else { sv = *v } for key, value := range shape { switch key { - case "S3InputConfiguration": - if err := awsAwsjson10_deserializeDocumentLabelsS3InputConfiguration(&sv.S3InputConfiguration, value); err != nil { - return err + case "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected BoundedLengthString to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) } default: @@ -3958,7 +4389,7 @@ func awsAwsjson10_deserializeDocumentLabelsInputConfiguration(v **types.LabelsIn return nil } -func awsAwsjson10_deserializeDocumentLabelsS3InputConfiguration(v **types.LabelsS3InputConfiguration, value interface{}) error { +func awsAwsjson10_deserializeDocumentInvalidSensorData(v **types.InvalidSensorData, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -3971,31 +4402,39 @@ func 
awsAwsjson10_deserializeDocumentLabelsS3InputConfiguration(v **types.Labels return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.LabelsS3InputConfiguration + var sv *types.InvalidSensorData if *v == nil { - sv = &types.LabelsS3InputConfiguration{} + sv = &types.InvalidSensorData{} } else { sv = *v } for key, value := range shape { switch key { - case "Bucket": + case "AffectedSensorCount": if value != nil { - jtv, ok := value.(string) + jtv, ok := value.(json.Number) if !ok { - return fmt.Errorf("expected S3Bucket to be of type string, got %T instead", value) + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) } - sv.Bucket = ptr.String(jtv) + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.AffectedSensorCount = ptr.Int32(int32(i64)) } - case "Prefix": + case "TotalNumberOfInvalidValues": if value != nil { - jtv, ok := value.(string) + jtv, ok := value.(json.Number) if !ok { - return fmt.Errorf("expected S3Prefix to be of type string, got %T instead", value) + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) } - sv.Prefix = ptr.String(jtv) + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.TotalNumberOfInvalidValues = ptr.Int32(int32(i64)) } default: @@ -4007,7 +4446,7 @@ func awsAwsjson10_deserializeDocumentLabelsS3InputConfiguration(v **types.Labels return nil } -func awsAwsjson10_deserializeDocumentModelSummaries(v *[]types.ModelSummary, value interface{}) error { +func awsAwsjson10_deserializeDocumentLabelsInputConfiguration(v **types.LabelsInputConfiguration, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -4015,33 +4454,35 @@ func awsAwsjson10_deserializeDocumentModelSummaries(v *[]types.ModelSummary, val return nil } - shape, ok := value.([]interface{}) + shape, ok := value.(map[string]interface{}) if !ok { return fmt.Errorf("unexpected JSON type %v", value) } - var cv []types.ModelSummary + var sv 
*types.LabelsInputConfiguration if *v == nil { - cv = []types.ModelSummary{} + sv = &types.LabelsInputConfiguration{} } else { - cv = *v + sv = *v } - for _, value := range shape { - var col types.ModelSummary - destAddr := &col - if err := awsAwsjson10_deserializeDocumentModelSummary(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) + for key, value := range shape { + switch key { + case "S3InputConfiguration": + if err := awsAwsjson10_deserializeDocumentLabelsS3InputConfiguration(&sv.S3InputConfiguration, value); err != nil { + return err + } + + default: + _, _ = key, value + } } - *v = cv + *v = sv return nil } -func awsAwsjson10_deserializeDocumentModelSummary(v **types.ModelSummary, value interface{}) error { +func awsAwsjson10_deserializeDocumentLabelsS3InputConfiguration(v **types.LabelsS3InputConfiguration, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -4054,114 +4495,700 @@ func awsAwsjson10_deserializeDocumentModelSummary(v **types.ModelSummary, value return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.ModelSummary + var sv *types.LabelsS3InputConfiguration if *v == nil { - sv = &types.ModelSummary{} + sv = &types.LabelsS3InputConfiguration{} } else { sv = *v } for key, value := range shape { switch key { - case "CreatedAt": - if value != nil { - switch jtv := value.(type) { - case json.Number: - f64, err := jtv.Float64() - if err != nil { - return err - } - sv.CreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) - - default: - return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) - - } - } - - case "DatasetArn": + case "Bucket": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected DatasetArn to be of type string, got %T instead", value) + return fmt.Errorf("expected S3Bucket to be of type string, got %T instead", value) } - sv.DatasetArn = ptr.String(jtv) + sv.Bucket = ptr.String(jtv) } 
- case "DatasetName": + case "Prefix": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected DatasetName to be of type string, got %T instead", value) + return fmt.Errorf("expected S3Prefix to be of type string, got %T instead", value) } - sv.DatasetName = ptr.String(jtv) + sv.Prefix = ptr.String(jtv) } - case "ModelArn": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ModelArn to be of type string, got %T instead", value) + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentLargeTimestampGaps(v **types.LargeTimestampGaps, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.LargeTimestampGaps + if *v == nil { + sv = &types.LargeTimestampGaps{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "MaxTimestampGapInDays": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.MaxTimestampGapInDays = ptr.Int32(int32(i64)) + } + + case "NumberOfLargeTimestampGaps": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.NumberOfLargeTimestampGaps = ptr.Int32(int32(i64)) + } + + case "Status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected StatisticalIssueStatus to be of type string, got %T instead", value) + } + sv.Status = types.StatisticalIssueStatus(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func 
awsAwsjson10_deserializeDocumentListOfDiscardedFiles(v *[]types.S3Object, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.S3Object + if *v == nil { + cv = []types.S3Object{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.S3Object + destAddr := &col + if err := awsAwsjson10_deserializeDocumentS3Object(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentMissingCompleteSensorData(v **types.MissingCompleteSensorData, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.MissingCompleteSensorData + if *v == nil { + sv = &types.MissingCompleteSensorData{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "AffectedSensorCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.AffectedSensorCount = ptr.Int32(int32(i64)) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentMissingSensorData(v **types.MissingSensorData, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.MissingSensorData + if *v == nil { + sv = &types.MissingSensorData{} + } else 
{ + sv = *v + } + + for key, value := range shape { + switch key { + case "AffectedSensorCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.AffectedSensorCount = ptr.Int32(int32(i64)) + } + + case "TotalNumberOfMissingValues": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.TotalNumberOfMissingValues = ptr.Int32(int32(i64)) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentModelSummaries(v *[]types.ModelSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ModelSummary + if *v == nil { + cv = []types.ModelSummary{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ModelSummary + destAddr := &col + if err := awsAwsjson10_deserializeDocumentModelSummary(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentModelSummary(v **types.ModelSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ModelSummary + if *v == nil { + sv = &types.ModelSummary{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CreatedAt": + if value != nil { + switch jtv := value.(type) { + case 
json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "DatasetArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DatasetArn to be of type string, got %T instead", value) + } + sv.DatasetArn = ptr.String(jtv) + } + + case "DatasetName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DatasetName to be of type string, got %T instead", value) + } + sv.DatasetName = ptr.String(jtv) + } + + case "ModelArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ModelArn to be of type string, got %T instead", value) + } + sv.ModelArn = ptr.String(jtv) + } + + case "ModelName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ModelName to be of type string, got %T instead", value) + } + sv.ModelName = ptr.String(jtv) + } + + case "Status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ModelStatus to be of type string, got %T instead", value) + } + sv.Status = types.ModelStatus(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentMonotonicValues(v **types.MonotonicValues, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.MonotonicValues + if *v == nil { + sv = &types.MonotonicValues{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Monotonicity": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Monotonicity to be of type 
string, got %T instead", value) + } + sv.Monotonicity = types.Monotonicity(jtv) + } + + case "Status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected StatisticalIssueStatus to be of type string, got %T instead", value) + } + sv.Status = types.StatisticalIssueStatus(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentMultipleOperatingModes(v **types.MultipleOperatingModes, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.MultipleOperatingModes + if *v == nil { + sv = &types.MultipleOperatingModes{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected StatisticalIssueStatus to be of type string, got %T instead", value) + } + sv.Status = types.StatisticalIssueStatus(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentResourceNotFoundException(v **types.ResourceNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ResourceNotFoundException + if *v == nil { + sv = &types.ResourceNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected BoundedLengthString to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } 
+ *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentS3Object(v **types.S3Object, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.S3Object + if *v == nil { + sv = &types.S3Object{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Bucket": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected S3Bucket to be of type string, got %T instead", value) + } + sv.Bucket = ptr.String(jtv) + } + + case "Key": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected S3Key to be of type string, got %T instead", value) + } + sv.Key = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentSensorStatisticsSummaries(v *[]types.SensorStatisticsSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.SensorStatisticsSummary + if *v == nil { + cv = []types.SensorStatisticsSummary{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.SensorStatisticsSummary + destAddr := &col + if err := awsAwsjson10_deserializeDocumentSensorStatisticsSummary(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentSensorStatisticsSummary(v **types.SensorStatisticsSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok 
{ + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SensorStatisticsSummary + if *v == nil { + sv = &types.SensorStatisticsSummary{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CategoricalValues": + if err := awsAwsjson10_deserializeDocumentCategoricalValues(&sv.CategoricalValues, value); err != nil { + return err + } + + case "ComponentName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ComponentName to be of type string, got %T instead", value) + } + sv.ComponentName = ptr.String(jtv) + } + + case "DataEndTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.DataEndTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "DataExists": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.DataExists = jtv + } + + case "DataStartTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.DataStartTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + } - sv.ModelArn = ptr.String(jtv) } - case "ModelName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ModelName to be of type string, got %T instead", value) - } - sv.ModelName = ptr.String(jtv) + case "DuplicateTimestamps": + if err := awsAwsjson10_deserializeDocumentCountPercent(&sv.DuplicateTimestamps, value); err != nil { + return err } - case "Status": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ModelStatus to be of type string, got %T instead", 
value) - } - sv.Status = types.ModelStatus(jtv) + case "InvalidDateEntries": + if err := awsAwsjson10_deserializeDocumentCountPercent(&sv.InvalidDateEntries, value); err != nil { + return err } - default: - _, _ = key, value + case "InvalidValues": + if err := awsAwsjson10_deserializeDocumentCountPercent(&sv.InvalidValues, value); err != nil { + return err + } - } - } - *v = sv - return nil -} + case "LargeTimestampGaps": + if err := awsAwsjson10_deserializeDocumentLargeTimestampGaps(&sv.LargeTimestampGaps, value); err != nil { + return err + } -func awsAwsjson10_deserializeDocumentResourceNotFoundException(v **types.ResourceNotFoundException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } + case "MissingValues": + if err := awsAwsjson10_deserializeDocumentCountPercent(&sv.MissingValues, value); err != nil { + return err + } - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } + case "MonotonicValues": + if err := awsAwsjson10_deserializeDocumentMonotonicValues(&sv.MonotonicValues, value); err != nil { + return err + } - var sv *types.ResourceNotFoundException - if *v == nil { - sv = &types.ResourceNotFoundException{} - } else { - sv = *v - } + case "MultipleOperatingModes": + if err := awsAwsjson10_deserializeDocumentMultipleOperatingModes(&sv.MultipleOperatingModes, value); err != nil { + return err + } - for key, value := range shape { - switch key { - case "Message": + case "SensorName": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected BoundedLengthString to be of type string, got %T instead", value) + return fmt.Errorf("expected SensorName to be of type string, got %T instead", value) } - sv.Message = ptr.String(jtv) + sv.SensorName = ptr.String(jtv) } default: @@ -4173,7 +5200,7 @@ func awsAwsjson10_deserializeDocumentResourceNotFoundException(v **types.Resourc return nil } 
-func awsAwsjson10_deserializeDocumentS3Object(v **types.S3Object, value interface{}) error { +func awsAwsjson10_deserializeDocumentSensorsWithShortDateRange(v **types.SensorsWithShortDateRange, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -4186,31 +5213,26 @@ func awsAwsjson10_deserializeDocumentS3Object(v **types.S3Object, value interfac return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.S3Object + var sv *types.SensorsWithShortDateRange if *v == nil { - sv = &types.S3Object{} + sv = &types.SensorsWithShortDateRange{} } else { sv = *v } for key, value := range shape { switch key { - case "Bucket": + case "AffectedSensorCount": if value != nil { - jtv, ok := value.(string) + jtv, ok := value.(json.Number) if !ok { - return fmt.Errorf("expected S3Bucket to be of type string, got %T instead", value) + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) } - sv.Bucket = ptr.String(jtv) - } - - case "Key": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected S3Key to be of type string, got %T instead", value) + i64, err := jtv.Int64() + if err != nil { + return err } - sv.Key = ptr.String(jtv) + sv.AffectedSensorCount = ptr.Int32(int32(i64)) } default: @@ -4385,6 +5407,50 @@ func awsAwsjson10_deserializeDocumentThrottlingException(v **types.ThrottlingExc return nil } +func awsAwsjson10_deserializeDocumentUnsupportedTimestamps(v **types.UnsupportedTimestamps, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.UnsupportedTimestamps + if *v == nil { + sv = &types.UnsupportedTimestamps{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "TotalNumberOfUnsupportedTimestamps": + if value != nil { 
+ jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.TotalNumberOfUnsupportedTimestamps = ptr.Int32(int32(i64)) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson10_deserializeDocumentValidationException(v **types.ValidationException, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -4628,6 +5694,27 @@ func awsAwsjson10_deserializeOpDocumentDescribeDataIngestionJobOutput(v **Descri } } + case "DataEndTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.DataEndTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "DataQualitySummary": + if err := awsAwsjson10_deserializeDocumentDataQualitySummary(&sv.DataQualitySummary, value); err != nil { + return err + } + case "DatasetArn": if value != nil { jtv, ok := value.(string) @@ -4637,6 +5724,22 @@ func awsAwsjson10_deserializeOpDocumentDescribeDataIngestionJobOutput(v **Descri sv.DatasetArn = ptr.String(jtv) } + case "DataStartTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.DataStartTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + case "FailedReason": if value != nil { jtv, ok := value.(string) @@ -4646,6 +5749,24 @@ func awsAwsjson10_deserializeOpDocumentDescribeDataIngestionJobOutput(v **Descri sv.FailedReason = ptr.String(jtv) } + case "IngestedDataSize": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected DataSizeInBytes to be json.Number, got 
%T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.IngestedDataSize = ptr.Int64(i64) + } + + case "IngestedFilesSummary": + if err := awsAwsjson10_deserializeDocumentIngestedFilesSummary(&sv.IngestedFilesSummary, value); err != nil { + return err + } + case "IngestionInputConfiguration": if err := awsAwsjson10_deserializeDocumentIngestionInputConfiguration(&sv.IngestionInputConfiguration, value); err != nil { return err @@ -4678,6 +5799,15 @@ func awsAwsjson10_deserializeOpDocumentDescribeDataIngestionJobOutput(v **Descri sv.Status = types.IngestionJobStatus(jtv) } + case "StatusDetail": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected BoundedLengthString to be of type string, got %T instead", value) + } + sv.StatusDetail = ptr.String(jtv) + } + default: _, _ = key, value @@ -4725,6 +5855,27 @@ func awsAwsjson10_deserializeOpDocumentDescribeDatasetOutput(v **DescribeDataset } } + case "DataEndTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.DataEndTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "DataQualitySummary": + if err := awsAwsjson10_deserializeDocumentDataQualitySummary(&sv.DataQualitySummary, value); err != nil { + return err + } + case "DatasetArn": if value != nil { jtv, ok := value.(string) @@ -4743,6 +5894,27 @@ func awsAwsjson10_deserializeOpDocumentDescribeDatasetOutput(v **DescribeDataset sv.DatasetName = ptr.String(jtv) } + case "DataStartTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.DataStartTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case 
"IngestedFilesSummary": + if err := awsAwsjson10_deserializeDocumentIngestedFilesSummary(&sv.IngestedFilesSummary, value); err != nil { + return err + } + case "IngestionInputConfiguration": if err := awsAwsjson10_deserializeDocumentIngestionInputConfiguration(&sv.IngestionInputConfiguration, value); err != nil { return err @@ -4764,6 +5936,15 @@ func awsAwsjson10_deserializeOpDocumentDescribeDatasetOutput(v **DescribeDataset } } + case "RoleArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected IamRoleArn to be of type string, got %T instead", value) + } + sv.RoleArn = ptr.String(jtv) + } + case "Schema": if value != nil { jtv, ok := value.(string) @@ -5451,6 +6632,51 @@ func awsAwsjson10_deserializeOpDocumentListModelsOutput(v **ListModelsOutput, va return nil } +func awsAwsjson10_deserializeOpDocumentListSensorStatisticsOutput(v **ListSensorStatisticsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListSensorStatisticsOutput + if *v == nil { + sv = &ListSensorStatisticsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "SensorStatisticsSummaries": + if err := awsAwsjson10_deserializeDocumentSensorStatisticsSummaries(&sv.SensorStatisticsSummaries, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson10_deserializeOpDocumentListTagsForResourceOutput(v **ListTagsForResourceOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git 
a/service/lookoutequipment/generated.json b/service/lookoutequipment/generated.json index 5a09943d55d..70d4eeb4e0f 100644 --- a/service/lookoutequipment/generated.json +++ b/service/lookoutequipment/generated.json @@ -23,6 +23,7 @@ "api_op_ListInferenceExecutions.go", "api_op_ListInferenceSchedulers.go", "api_op_ListModels.go", + "api_op_ListSensorStatistics.go", "api_op_ListTagsForResource.go", "api_op_StartDataIngestionJob.go", "api_op_StartInferenceScheduler.go", diff --git a/service/lookoutequipment/serializers.go b/service/lookoutequipment/serializers.go index d182a53c0fe..8898c1e43d6 100644 --- a/service/lookoutequipment/serializers.go +++ b/service/lookoutequipment/serializers.go @@ -841,6 +841,61 @@ func (m *awsAwsjson10_serializeOpListModels) HandleSerialize(ctx context.Context return next.HandleSerialize(ctx, in) } +type awsAwsjson10_serializeOpListSensorStatistics struct { +} + +func (*awsAwsjson10_serializeOpListSensorStatistics) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpListSensorStatistics) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListSensorStatisticsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + 
httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AWSLookoutEquipmentFrontendService.ListSensorStatistics") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentListSensorStatisticsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + type awsAwsjson10_serializeOpListTagsForResource struct { } @@ -1368,6 +1423,11 @@ func awsAwsjson10_serializeDocumentIngestionS3InputConfiguration(v *types.Ingest ok.String(*v.Bucket) } + if v.KeyPattern != nil { + ok := object.Key("KeyPattern") + ok.String(*v.KeyPattern) + } + if v.Prefix != nil { ok := object.Key("Prefix") ok.String(*v.Prefix) @@ -1861,6 +1921,33 @@ func awsAwsjson10_serializeOpDocumentListModelsInput(v *ListModelsInput, value s return nil } +func awsAwsjson10_serializeOpDocumentListSensorStatisticsInput(v *ListSensorStatisticsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DatasetName != nil { + ok := object.Key("DatasetName") + ok.String(*v.DatasetName) + } + + if v.IngestionJobId != nil { + ok := object.Key("IngestionJobId") + ok.String(*v.IngestionJobId) + } + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + 
ok.String(*v.NextToken) + } + + return nil +} + func awsAwsjson10_serializeOpDocumentListTagsForResourceInput(v *ListTagsForResourceInput, value smithyjson.Value) error { object := value.Object() defer object.Close() diff --git a/service/lookoutequipment/types/enums.go b/service/lookoutequipment/types/enums.go index e9632c3dbce..23ab8150e1a 100644 --- a/service/lookoutequipment/types/enums.go +++ b/service/lookoutequipment/types/enums.go @@ -128,6 +128,44 @@ func (ModelStatus) Values() []ModelStatus { } } +type Monotonicity string + +// Enum values for Monotonicity +const ( + MonotonicityDecreasing Monotonicity = "DECREASING" + MonotonicityIncreasing Monotonicity = "INCREASING" + MonotonicityStatic Monotonicity = "STATIC" +) + +// Values returns all known values for Monotonicity. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. +func (Monotonicity) Values() []Monotonicity { + return []Monotonicity{ + "DECREASING", + "INCREASING", + "STATIC", + } +} + +type StatisticalIssueStatus string + +// Enum values for StatisticalIssueStatus +const ( + StatisticalIssueStatusPotentialIssueDetected StatisticalIssueStatus = "POTENTIAL_ISSUE_DETECTED" + StatisticalIssueStatusNoIssueDetected StatisticalIssueStatus = "NO_ISSUE_DETECTED" +) + +// Values returns all known values for StatisticalIssueStatus. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. 
+func (StatisticalIssueStatus) Values() []StatisticalIssueStatus { + return []StatisticalIssueStatus{ + "POTENTIAL_ISSUE_DETECTED", + "NO_ISSUE_DETECTED", + } +} + type TargetSamplingRate string // Enum values for TargetSamplingRate diff --git a/service/lookoutequipment/types/types.go b/service/lookoutequipment/types/types.go index 5954035b7fc..03df130b34d 100644 --- a/service/lookoutequipment/types/types.go +++ b/service/lookoutequipment/types/types.go @@ -7,6 +7,36 @@ import ( "time" ) +// Entity that comprises information on categorical values in data. +type CategoricalValues struct { + + // Indicates whether there is a potential data issue related to categorical values. + // + // This member is required. + Status StatisticalIssueStatus + + // Indicates the number of categories in the data. + NumberOfCategory *int32 + + noSmithyDocumentSerde +} + +// Entity that comprises information of count and percentage. +type CountPercent struct { + + // Indicates the count of occurences of the given statistic. + // + // This member is required. + Count *int32 + + // Indicates the percentage of occurances of the given statistic. + // + // This member is required. + Percentage float32 + + noSmithyDocumentSerde +} + // Provides information about a specified data ingestion job, including dataset // information, data ingestion configuration, and status. type DataIngestionJobSummary struct { @@ -18,7 +48,7 @@ type DataIngestionJobSummary struct { DatasetName *string // Specifies information for the input data for the data inference job, including - // data S3 location parameters. + // data Amazon S3 location parameters. IngestionInputConfiguration *IngestionInputConfiguration // Indicates the job ID of the data ingestion job. @@ -52,6 +82,44 @@ type DataPreProcessingConfiguration struct { noSmithyDocumentSerde } +// DataQualitySummary gives aggregated statistics over all the sensors about a +// completed ingestion job. 
It primarily gives more information about statistics +// over different incorrect data like MissingCompleteSensorData, MissingSensorData, +// UnsupportedDateFormats, InsufficientSensorData, DuplicateTimeStamps. +type DataQualitySummary struct { + + // Parameter that gives information about duplicate timestamps in the input data. + // + // This member is required. + DuplicateTimestamps *DuplicateTimestamps + + // Parameter that gives information about insufficient data for sensors in the + // dataset. This includes information about those sensors that have complete data + // missing and those with a short date range. + // + // This member is required. + InsufficientSensorData *InsufficientSensorData + + // Parameter that gives information about data that is invalid over all the sensors + // in the input data. + // + // This member is required. + InvalidSensorData *InvalidSensorData + + // Parameter that gives information about data that is missing over all the sensors + // in the input data. + // + // This member is required. + MissingSensorData *MissingSensorData + + // Parameter that gives information about unsupported timestamps in the input data. + // + // This member is required. + UnsupportedTimestamps *UnsupportedTimestamps + + noSmithyDocumentSerde +} + // Provides information about the data schema used with the given dataset. type DatasetSchema struct { @@ -82,6 +150,17 @@ type DatasetSummary struct { noSmithyDocumentSerde } +// Entity that comprises information abount duplicate timestamps in the dataset. +type DuplicateTimestamps struct { + + // Indicates the total number of duplicate timestamps. + // + // This member is required. + TotalNumberOfDuplicateTimestamps *int32 + + noSmithyDocumentSerde +} + // Contains information about the specific inference execution, including input and // output data configuration, inference scheduling information, status, and so on. 
type InferenceExecutionSummary struct { @@ -98,7 +177,7 @@ type InferenceExecutionSummary struct { DataInputConfiguration *InferenceInputConfiguration // Specifies configuration information for the output results from for the - // inference execution, including the output S3 location. + // inference execution, including the output Amazon S3 location. DataOutputConfiguration *InferenceOutputConfiguration // Indicates the time reference in the dataset at which the inference execution @@ -132,18 +211,19 @@ type InferenceExecutionSummary struct { } // Specifies configuration information for the input data for the inference, -// including S3 location of input data.. +// including Amazon S3 location of input data.. type InferenceInputConfiguration struct { // Specifies configuration information for the input data for the inference, // including timestamp format and delimiter. InferenceInputNameConfiguration *InferenceInputNameConfiguration - // Indicates the difference between your time zone and Greenwich Mean Time (GMT). + // Indicates the difference between your time zone and Coordinated Universal Time + // (UTC). InputTimeZoneOffset *string // Specifies configuration information for the input data for the inference, - // including S3 location of input data.. + // including Amazon S3 location of input data. S3InputConfiguration *InferenceS3InputConfiguration noSmithyDocumentSerde @@ -249,6 +329,27 @@ type InferenceSchedulerSummary struct { noSmithyDocumentSerde } +// Gives statistics about how many files have been ingested, and which files have +// not been ingested, for a particular ingestion job. +type IngestedFilesSummary struct { + + // Indicates the number of files that were successfully ingested. + // + // This member is required. + IngestedNumberOfFiles *int32 + + // Indicates the total number of files that were submitted for ingestion. + // + // This member is required. + TotalNumberOfFiles *int32 + + // Indicates the number of files that were discarded. 
A file could be discarded + // because its format is invalid (for example, a jpg or pdf) or not readable. + DiscardedFiles []S3Object + + noSmithyDocumentSerde +} + // Specifies configuration information for the input data for the data ingestion // job, including input data S3 location. type IngestionInputConfiguration struct { @@ -271,6 +372,11 @@ type IngestionS3InputConfiguration struct { // This member is required. Bucket *string + // Pattern for matching the Amazon S3 files which will be used for ingestion. If no + // KeyPattern is provided, we will use the default hierarchy file structure, which + // is same as KeyPattern {prefix}/{component_name}/* + KeyPattern *string + // The prefix for the S3 location being used for the input data for the data // ingestion. Prefix *string @@ -278,6 +384,42 @@ type IngestionS3InputConfiguration struct { noSmithyDocumentSerde } +// Entity that comprises aggregated information on sensors having insufficient +// data. +type InsufficientSensorData struct { + + // Parameter that describes the total number of sensors that have data completely + // missing for it. + // + // This member is required. + MissingCompleteSensorData *MissingCompleteSensorData + + // Parameter that describes the total number of sensors that have a short date + // range of less than 90 days of data overall. + // + // This member is required. + SensorsWithShortDateRange *SensorsWithShortDateRange + + noSmithyDocumentSerde +} + +// Entity that comprises aggregated information on sensors having insufficient +// data. +type InvalidSensorData struct { + + // Indicates the number of sensors that have at least some invalid values. + // + // This member is required. + AffectedSensorCount *int32 + + // Indicates the total number of invalid values across all the sensors. + // + // This member is required. + TotalNumberOfInvalidValues *int32 + + noSmithyDocumentSerde +} + // Contains the configuration information for the S3 location being used to hold // label data. 
type LabelsInputConfiguration struct { @@ -305,6 +447,53 @@ type LabelsS3InputConfiguration struct { noSmithyDocumentSerde } +// Entity that comprises information on large gaps between consecutive timestamps +// in data. +type LargeTimestampGaps struct { + + // Indicates whether there is a potential data issue related to large gaps in + // timestamps. + // + // This member is required. + Status StatisticalIssueStatus + + // Indicates the size of the largest timestamp gap, in days. + MaxTimestampGapInDays *int32 + + // Indicates the number of large timestamp gaps, if there are any. + NumberOfLargeTimestampGaps *int32 + + noSmithyDocumentSerde +} + +// Entity that comprises information on sensors that have sensor data completely +// missing. +type MissingCompleteSensorData struct { + + // Indicates the number of sensors that have data missing completely. + // + // This member is required. + AffectedSensorCount *int32 + + noSmithyDocumentSerde +} + +// Entity that comprises aggregated information on sensors having missing data. +type MissingSensorData struct { + + // Indicates the number of sensors that have atleast some data missing. + // + // This member is required. + AffectedSensorCount *int32 + + // Indicates the total number of missing values across all the sensors. + // + // This member is required. + TotalNumberOfMissingValues *int32 + + noSmithyDocumentSerde +} + // Provides information about the specified ML model, including dataset and model // names and ARNs, as well as status. type ModelSummary struct { @@ -330,6 +519,33 @@ type ModelSummary struct { noSmithyDocumentSerde } +// Entity that comprises information on monotonic values in the data. +type MonotonicValues struct { + + // Indicates whether there is a potential data issue related to having monotonic + // values. + // + // This member is required. + Status StatisticalIssueStatus + + // Indicates the monotonicity of values. Can be INCREASING, DECREASING, or STATIC. 
+ Monotonicity Monotonicity + + noSmithyDocumentSerde +} + +// Entity that comprises information on operating modes in data. +type MultipleOperatingModes struct { + + // Indicates whether there is a potential data issue related to having multiple + // operating modes. + // + // This member is required. + Status StatisticalIssueStatus + + noSmithyDocumentSerde +} + // Contains information about an S3 bucket. type S3Object struct { @@ -347,6 +563,75 @@ type S3Object struct { noSmithyDocumentSerde } +// Summary of ingestion statistics like whether data exists, number of missing +// values, number of invalid values and so on related to the particular sensor. +type SensorStatisticsSummary struct { + + // Parameter that describes potential risk about whether data associated with the + // sensor is categorical. + CategoricalValues *CategoricalValues + + // Name of the component to which the particular sensor belongs for which the + // statistics belong to. + ComponentName *string + + // Indicates the time reference to indicate the end of valid data associated with + // the sensor that the statistics belong to. + DataEndTime *time.Time + + // Parameter that indicates whether data exists for the sensor that the statistics + // belong to. + DataExists bool + + // Indicates the time reference to indicate the beginning of valid data associated + // with the sensor that the statistics belong to. + DataStartTime *time.Time + + // Parameter that describes the total number of duplicate timestamp records + // associated with the sensor that the statistics belong to. + DuplicateTimestamps *CountPercent + + // Parameter that describes the total number of invalid date entries associated + // with the sensor that the statistics belong to. + InvalidDateEntries *CountPercent + + // Parameter that describes the total number of, and percentage of, values that are + // invalid for the sensor that the statistics belong to. 
+ InvalidValues *CountPercent + + // Parameter that describes potential risk about whether data associated with the + // sensor contains one or more large gaps between consecutive timestamps. + LargeTimestampGaps *LargeTimestampGaps + + // Parameter that describes the total number of, and percentage of, values that are + // missing for the sensor that the statistics belong to. + MissingValues *CountPercent + + // Parameter that describes potential risk about whether data associated with the + // sensor is mostly monotonic. + MonotonicValues *MonotonicValues + + // Parameter that describes potential risk about whether data associated with the + // sensor has more than one operating mode. + MultipleOperatingModes *MultipleOperatingModes + + // Name of the sensor that the statistics belong to. + SensorName *string + + noSmithyDocumentSerde +} + +// Entity that comprises information on sensors that have shorter date range. +type SensorsWithShortDateRange struct { + + // Indicates the number of sensors that have less than 90 days of data. + // + // This member is required. + AffectedSensorCount *int32 + + noSmithyDocumentSerde +} + // A tag is a key-value pair that can be added to a resource as metadata. type Tag struct { @@ -363,4 +648,15 @@ type Tag struct { noSmithyDocumentSerde } +// Entity that comprises information abount unsupported timestamps in the dataset. +type UnsupportedTimestamps struct { + + // Indicates the total number of unsupported timestamps across the ingested data. + // + // This member is required. 
+ TotalNumberOfUnsupportedTimestamps *int32 + + noSmithyDocumentSerde +} + type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/service/lookoutequipment/validators.go b/service/lookoutequipment/validators.go index 183e826bdca..9d44fc83bb1 100644 --- a/service/lookoutequipment/validators.go +++ b/service/lookoutequipment/validators.go @@ -230,6 +230,26 @@ func (m *validateOpListInferenceExecutions) HandleInitialize(ctx context.Context return next.HandleInitialize(ctx, in) } +type validateOpListSensorStatistics struct { +} + +func (*validateOpListSensorStatistics) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListSensorStatistics) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListSensorStatisticsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListSensorStatisticsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpListTagsForResource struct { } @@ -414,6 +434,10 @@ func addOpListInferenceExecutionsValidationMiddleware(stack *middleware.Stack) e return stack.Initialize.Add(&validateOpListInferenceExecutions{}, middleware.After) } +func addOpListSensorStatisticsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListSensorStatistics{}, middleware.After) +} + func addOpListTagsForResourceValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpListTagsForResource{}, middleware.After) } @@ -619,9 +643,6 @@ func validateOpCreateDatasetInput(v *CreateDatasetInput) error { if v.DatasetName == nil { invalidParams.Add(smithy.NewErrParamRequired("DatasetName")) } - if v.DatasetSchema == nil { - 
invalidParams.Add(smithy.NewErrParamRequired("DatasetSchema")) - } if v.ClientToken == nil { invalidParams.Add(smithy.NewErrParamRequired("ClientToken")) } @@ -834,6 +855,21 @@ func validateOpListInferenceExecutionsInput(v *ListInferenceExecutionsInput) err } } +func validateOpListSensorStatisticsInput(v *ListSensorStatisticsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListSensorStatisticsInput"} + if v.DatasetName == nil { + invalidParams.Add(smithy.NewErrParamRequired("DatasetName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpListTagsForResourceInput(v *ListTagsForResourceInput) error { if v == nil { return nil diff --git a/service/rekognition/api_op_CreateCollection.go b/service/rekognition/api_op_CreateCollection.go index 07ffc64a821..afc5efa9514 100644 --- a/service/rekognition/api_op_CreateCollection.go +++ b/service/rekognition/api_op_CreateCollection.go @@ -54,9 +54,8 @@ type CreateCollectionOutput struct { // permissions on your resources. CollectionArn *string - // Latest face model being used with the collection. For more information, see - // Model versioning - // (https://docs.aws.amazon.com/rekognition/latest/dg/face-detection-model.html). + // Version number of the face detection model associated with the collection you + // are creating. FaceModelVersion *string // HTTP status code indicating the result of the operation. diff --git a/service/rekognition/api_op_CreateStreamProcessor.go b/service/rekognition/api_op_CreateStreamProcessor.go index 5964f29889b..86f857571e1 100644 --- a/service/rekognition/api_op_CreateStreamProcessor.go +++ b/service/rekognition/api_op_CreateStreamProcessor.go @@ -12,19 +12,35 @@ import ( ) // Creates an Amazon Rekognition stream processor that you can use to detect and -// recognize faces in a streaming video. Amazon Rekognition Video is a consumer of -// live video from Amazon Kinesis Video Streams. 
Amazon Rekognition Video sends -// analysis results to Amazon Kinesis Data Streams. You provide as input a Kinesis -// video stream (Input) and a Kinesis data stream (Output) stream. You also specify -// the face recognition criteria in Settings. For example, the collection -// containing faces that you want to recognize. Use Name to assign an identifier +// recognize faces or to detect labels in a streaming video. Amazon Rekognition +// Video is a consumer of live video from Amazon Kinesis Video Streams. There are +// two different settings for stream processors in Amazon Rekognition: detecting +// faces and detecting labels. +// +// * If you are creating a stream processor for +// detecting faces, you provide as input a Kinesis video stream (Input) and a +// Kinesis data stream (Output) stream. You also specify the face recognition +// criteria in Settings. For example, the collection containing faces that you want +// to recognize. After you have finished analyzing a streaming video, use +// StopStreamProcessor to stop processing. +// +// * If you are creating a stream +// processor to detect labels, you provide as input a Kinesis video stream (Input), +// Amazon S3 bucket information (Output), and an Amazon SNS topic ARN +// (NotificationChannel). You can also provide a KMS key ID to encrypt the data +// sent to your Amazon S3 bucket. You specify what you want to detect in +// ConnectedHomeSettings, such as people, packages and people, or pets, people, and +// packages. You can also specify where in the frame you want Amazon Rekognition to +// monitor with RegionsOfInterest. When you run the StartStreamProcessor operation +// on a label detection stream processor, you input start and stop information to +// determine the length of the processing time. +// +// Use Name to assign an identifier // for the stream processor. You use Name to manage the stream processor. 
For // example, you can start processing the source video by calling -// StartStreamProcessor with the Name field. After you have finished analyzing a -// streaming video, use StopStreamProcessor to stop processing. You can delete the -// stream processor by calling DeleteStreamProcessor. This operation requires -// permissions to perform the rekognition:CreateStreamProcessor action. If you want -// to tag your stream processor, you also require permission to perform the +// StartStreamProcessor with the Name field. This operation requires permissions to +// perform the rekognition:CreateStreamProcessor action. If you want to tag your +// stream processor, you also require permission to perform the // rekognition:TagResource operation. func (c *Client) CreateStreamProcessor(ctx context.Context, params *CreateStreamProcessorInput, optFns ...func(*Options)) (*CreateStreamProcessorOutput, error) { if params == nil { @@ -44,36 +60,77 @@ func (c *Client) CreateStreamProcessor(ctx context.Context, params *CreateStream type CreateStreamProcessorInput struct { // Kinesis video stream stream that provides the source streaming video. If you are - // using the AWS CLI, the parameter name is StreamProcessorInput. + // using the AWS CLI, the parameter name is StreamProcessorInput. This is required + // for both face search and label detection stream processors. // // This member is required. Input *types.StreamProcessorInput // An identifier you assign to the stream processor. You can use Name to manage the // stream processor. For example, you can get the current status of the stream - // processor by calling DescribeStreamProcessor. Name is idempotent. + // processor by calling DescribeStreamProcessor. Name is idempotent. This is + // required for both face search and label detection stream processors. // // This member is required. Name *string - // Kinesis data stream stream to which Amazon Rekognition Video puts the analysis - // results. 
If you are using the AWS CLI, the parameter name is - // StreamProcessorOutput. + // Kinesis data stream stream or Amazon S3 bucket location to which Amazon + // Rekognition Video puts the analysis results. If you are using the AWS CLI, the + // parameter name is StreamProcessorOutput. This must be a S3Destination of an + // Amazon S3 bucket that you own for a label detection stream processor or a + // Kinesis data stream ARN for a face search stream processor. // // This member is required. Output *types.StreamProcessorOutput - // ARN of the IAM role that allows access to the stream processor. + // The Amazon Resource Number (ARN) of the IAM role that allows access to the + // stream processor. The IAM role provides Rekognition read permissions for a + // Kinesis stream. It also provides write permissions to an Amazon S3 bucket and + // Amazon Simple Notification Service topic for a label detection stream processor. + // This is required for both face search and label detection stream processors. // // This member is required. RoleArn *string - // Face recognition input parameters to be used by the stream processor. Includes - // the collection to use for face recognition and the face attributes to detect. + // Input parameters used in a streaming video analyzed by a stream processor. You + // can use FaceSearch to recognize faces in a streaming video, or you can use + // ConnectedHome to detect labels. // // This member is required. Settings *types.StreamProcessorSettings + // Shows whether you are sharing data with Rekognition to improve model + // performance. You can choose this option at the account level or on a per-stream + // basis. Note that if you opt out at the account level this setting is ignored on + // individual streams. + DataSharingPreference *types.StreamProcessorDataSharingPreference + + // The identifier for your AWS Key Management Service key (AWS KMS key). 
This is an + // optional parameter for label detection stream processors and should not be used + // to create a face search stream processor. You can supply the Amazon Resource + // Name (ARN) of your KMS key, the ID of your KMS key, an alias for your KMS key, + // or an alias ARN. The key is used to encrypt results and data published to your + // Amazon S3 bucket, which includes image frames and hero images. Your source + // images are unaffected. + KmsKeyId *string + + // The Amazon Simple Notification Service topic to which Amazon Rekognition + // publishes the object detection results and completion status of a video analysis + // operation. Amazon Rekognition publishes a notification the first time an object + // of interest or a person is detected in the video stream. For example, if Amazon + // Rekognition detects a person at second 2, a pet at second 4, and a person again + // at second 5, Amazon Rekognition sends 2 object class detected notifications, one + // for a person at second 2 and one for a pet at second 4. Amazon Rekognition also + // publishes an an end-of-session notification with a summary when the stream + // processing session is complete. + NotificationChannel *types.StreamProcessorNotificationChannel + + // Specifies locations in the frames where Amazon Rekognition checks for objects or + // people. You can specify up to 10 regions of interest. This is an optional + // parameter for label detection stream processors and should not be used to create + // a face search stream processor. + RegionsOfInterest []types.RegionOfInterest + // A set of tags (key-value pairs) that you want to attach to the stream processor. Tags map[string]string @@ -82,7 +139,7 @@ type CreateStreamProcessorInput struct { type CreateStreamProcessorOutput struct { - // ARN for the newly create stream processor. + // Amazon Resource Number for the newly created stream processor. StreamProcessorArn *string // Metadata pertaining to the operation's result. 
diff --git a/service/rekognition/api_op_DeleteCollection.go b/service/rekognition/api_op_DeleteCollection.go index 023f49cd100..8902fdf5059 100644 --- a/service/rekognition/api_op_DeleteCollection.go +++ b/service/rekognition/api_op_DeleteCollection.go @@ -11,8 +11,10 @@ import ( ) // Deletes the specified collection. Note that this operation removes all faces in -// the collection. For an example, see delete-collection-procedure. This operation -// requires permissions to perform the rekognition:DeleteCollection action. +// the collection. For an example, see Deleting a collection +// (https://docs.aws.amazon.com/rekognition/latest/dg/delete-collection-procedure.html). +// This operation requires permissions to perform the rekognition:DeleteCollection +// action. func (c *Client) DeleteCollection(ctx context.Context, params *DeleteCollectionInput, optFns ...func(*Options)) (*DeleteCollectionOutput, error) { if params == nil { params = &DeleteCollectionInput{} diff --git a/service/rekognition/api_op_DescribeCollection.go b/service/rekognition/api_op_DescribeCollection.go index 60efd4130fc..935979dd0ab 100644 --- a/service/rekognition/api_op_DescribeCollection.go +++ b/service/rekognition/api_op_DescribeCollection.go @@ -56,7 +56,7 @@ type DescribeCollectionOutput struct { FaceCount *int64 // The version of the face model that's used by the collection for face detection. - // For more information, see Model Versioning in the Amazon Rekognition Developer + // For more information, see Model versioning in the Amazon Rekognition Developer // Guide. 
FaceModelVersion *string diff --git a/service/rekognition/api_op_DescribeStreamProcessor.go b/service/rekognition/api_op_DescribeStreamProcessor.go index 1e8ff41be26..f84b612ac6f 100644 --- a/service/rekognition/api_op_DescribeStreamProcessor.go +++ b/service/rekognition/api_op_DescribeStreamProcessor.go @@ -46,9 +46,19 @@ type DescribeStreamProcessorOutput struct { // Date and time the stream processor was created CreationTimestamp *time.Time + // Shows whether you are sharing data with Rekognition to improve model + // performance. You can choose this option at the account level or on a per-stream + // basis. Note that if you opt out at the account level this setting is ignored on + // individual streams. + DataSharingPreference *types.StreamProcessorDataSharingPreference + // Kinesis video stream that provides the source streaming video. Input *types.StreamProcessorInput + // The identifier for your AWS Key Management Service key (AWS KMS key). This is an + // optional parameter for label detection stream processors. + KmsKeyId *string + // The time, in Unix format, the stream processor was last updated. For example, // when the stream processor moves from a running state to a failed state, or when // the user starts or stops the stream processor. @@ -57,15 +67,30 @@ type DescribeStreamProcessorOutput struct { // Name of the stream processor. Name *string + // The Amazon Simple Notification Service topic to which Amazon Rekognition + // publishes the object detection results and completion status of a video analysis + // operation. Amazon Rekognition publishes a notification the first time an object + // of interest or a person is detected in the video stream. For example, if Amazon + // Rekognition detects a person at second 2, a pet at second 4, and a person again + // at second 5, Amazon Rekognition sends 2 object class detected notifications, one + // for a person at second 2 and one for a pet at second 4. 
Amazon Rekognition also + // publishes an an end-of-session notification with a summary when the stream + // processing session is complete. + NotificationChannel *types.StreamProcessorNotificationChannel + // Kinesis data stream to which Amazon Rekognition Video puts the analysis results. Output *types.StreamProcessorOutput + // Specifies locations in the frames where Amazon Rekognition checks for objects or + // people. This is an optional parameter for label detection stream processors. + RegionsOfInterest []types.RegionOfInterest + // ARN of the IAM role that allows access to the stream processor. RoleArn *string - // Face recognition input parameters that are being used by the stream processor. - // Includes the collection to use for face recognition and the face attributes to - // detect. + // Input parameters used in a streaming video analyzed by a stream processor. You + // can use FaceSearch to recognize faces in a streaming video, or you can use + // ConnectedHome to detect labels. Settings *types.StreamProcessorSettings // Current status of the stream processor. diff --git a/service/rekognition/api_op_DetectCustomLabels.go b/service/rekognition/api_op_DetectCustomLabels.go index 8add44dd213..f2d7f67f29a 100644 --- a/service/rekognition/api_op_DetectCustomLabels.go +++ b/service/rekognition/api_op_DetectCustomLabels.go @@ -70,8 +70,8 @@ type DetectCustomLabelsInput struct { // is not supported. You must first upload the image to an Amazon S3 bucket and // then call the operation using the S3Object property. For Amazon Rekognition to // process an S3 object, the user must have permission to access the S3 object. For - // more information, see Resource Based Policies in the Amazon Rekognition - // Developer Guide. + // more information, see How Amazon Rekognition works with IAM in the Amazon + // Rekognition Developer Guide. // // This member is required. 
Image *types.Image diff --git a/service/rekognition/api_op_DetectLabels.go b/service/rekognition/api_op_DetectLabels.go index 9e07a60873c..130f460013c 100644 --- a/service/rekognition/api_op_DetectLabels.go +++ b/service/rekognition/api_op_DetectLabels.go @@ -14,7 +14,7 @@ import ( // Detects instances of real-world entities within an image (JPEG or PNG) provided // as input. This includes objects like flower, tree, and table; events like // wedding, graduation, and birthday party; and concepts like landscape, evening, -// and nature. For an example, see Analyzing Images Stored in an Amazon S3 Bucket +// and nature. For an example, see Analyzing images stored in an Amazon S3 bucket // in the Amazon Rekognition Developer Guide. DetectLabels does not support the // detection of activities. However, activity detection is supported for label // detection in videos. For more information, see StartLabelDetection in the Amazon diff --git a/service/rekognition/api_op_DetectText.go b/service/rekognition/api_op_DetectText.go index d2d7b776020..542d3226be1 100644 --- a/service/rekognition/api_op_DetectText.go +++ b/service/rekognition/api_op_DetectText.go @@ -30,8 +30,8 @@ import ( // spans multiple lines, the DetectText operation returns multiple lines. To // determine whether a TextDetection element is a line of text or a word, use the // TextDetection object Type field. To be detected, text must be within +/- 90 -// degrees orientation of the horizontal axis. For more information, see DetectText -// in the Amazon Rekognition Developer Guide. +// degrees orientation of the horizontal axis. For more information, see Detecting +// text in the Amazon Rekognition Developer Guide. 
func (c *Client) DetectText(ctx context.Context, params *DetectTextInput, optFns ...func(*Options)) (*DetectTextOutput, error) { if params == nil { params = &DetectTextInput{} diff --git a/service/rekognition/api_op_GetCelebrityInfo.go b/service/rekognition/api_op_GetCelebrityInfo.go index a925bff1f5d..b1606360539 100644 --- a/service/rekognition/api_op_GetCelebrityInfo.go +++ b/service/rekognition/api_op_GetCelebrityInfo.go @@ -14,7 +14,7 @@ import ( // Gets the name and additional information about a celebrity based on their Amazon // Rekognition ID. The additional information is returned as an array of URLs. If // there is no additional information about the celebrity, this list is empty. For -// more information, see Recognizing Celebrities in an Image in the Amazon +// more information, see Getting information about a celebrity in the Amazon // Rekognition Developer Guide. This operation requires permissions to perform the // rekognition:GetCelebrityInfo action. func (c *Client) GetCelebrityInfo(ctx context.Context, params *GetCelebrityInfoInput, optFns ...func(*Options)) (*GetCelebrityInfoOutput, error) { diff --git a/service/rekognition/api_op_GetContentModeration.go b/service/rekognition/api_op_GetContentModeration.go index 6e22505a005..ea1e9c10f74 100644 --- a/service/rekognition/api_op_GetContentModeration.go +++ b/service/rekognition/api_op_GetContentModeration.go @@ -39,7 +39,7 @@ import ( // getting the next set of results. To get the next page of results, call // GetContentModeration and populate the NextToken request parameter with the value // of NextToken returned from the previous call to GetContentModeration. For more -// information, see Content moderation in the Amazon Rekognition Developer Guide. +// information, see moderating content in the Amazon Rekognition Developer Guide. 
func (c *Client) GetContentModeration(ctx context.Context, params *GetContentModerationInput, optFns ...func(*Options)) (*GetContentModerationOutput, error) { if params == nil { params = &GetContentModerationInput{} diff --git a/service/rekognition/api_op_GetSegmentDetection.go b/service/rekognition/api_op_GetSegmentDetection.go index d3bf24906b2..835b17c50b9 100644 --- a/service/rekognition/api_op_GetSegmentDetection.go +++ b/service/rekognition/api_op_GetSegmentDetection.go @@ -34,7 +34,7 @@ import ( // pagination token for getting the next set of results. To get the next page of // results, call GetSegmentDetection and populate the NextToken request parameter // with the token value returned from the previous call to GetSegmentDetection. For -// more information, see Detecting Video Segments in Stored Video in the Amazon +// more information, see Detecting video segments in stored video in the Amazon // Rekognition Developer Guide. func (c *Client) GetSegmentDetection(ctx context.Context, params *GetSegmentDetectionInput, optFns ...func(*Options)) (*GetSegmentDetectionOutput, error) { if params == nil { diff --git a/service/rekognition/api_op_IndexFaces.go b/service/rekognition/api_op_IndexFaces.go index 796fa25e9c1..cf7e3b3dd20 100644 --- a/service/rekognition/api_op_IndexFaces.go +++ b/service/rekognition/api_op_IndexFaces.go @@ -17,8 +17,8 @@ import ( // each face, the algorithm extracts facial features into a feature vector, and // stores it in the backend database. Amazon Rekognition uses feature vectors when // it performs face match and search operations using the SearchFaces and -// SearchFacesByImage operations. For more information, see Adding Faces to a -// Collection in the Amazon Rekognition Developer Guide. To get the number of faces +// SearchFacesByImage operations. For more information, see Adding faces to a +// collection in the Amazon Rekognition Developer Guide. To get the number of faces // in a collection, call DescribeCollection. 
If you're using version 1.0 of the // face detection model, IndexFaces indexes the 15 largest faces in the input // image. Later versions of the face detection model index the 100 largest faces in @@ -83,13 +83,13 @@ import ( // attributes (by using the detectionAttributes parameter), Amazon Rekognition // returns detailed facial attributes, such as facial landmarks (for example, // location of eye and mouth) and other facial attributes. If you provide the same -// image, specify the same collection, use the same external ID, and use the same -// model version in the IndexFaces operation, Amazon Rekognition doesn't save -// duplicate face metadata. The input image is passed either as base64-encoded -// image bytes, or as a reference to an image in an Amazon S3 bucket. If you use -// the AWS CLI to call Amazon Rekognition operations, passing image bytes isn't -// supported. The image must be formatted as a PNG or JPEG file. This operation -// requires permissions to perform the rekognition:IndexFaces action. +// image, specify the same collection, and use the same external ID in the +// IndexFaces operation, Amazon Rekognition doesn't save duplicate face metadata. +// The input image is passed either as base64-encoded image bytes, or as a +// reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call +// Amazon Rekognition operations, passing image bytes isn't supported. The image +// must be formatted as a PNG or JPEG file. This operation requires permissions to +// perform the rekognition:IndexFaces action. func (c *Client) IndexFaces(ctx context.Context, params *IndexFacesInput, optFns ...func(*Options)) (*IndexFacesOutput, error) { if params == nil { params = &IndexFacesInput{} @@ -165,9 +165,8 @@ type IndexFacesInput struct { type IndexFacesOutput struct { - // Latest face model being used with the collection. For more information, see - // Model versioning - // (https://docs.aws.amazon.com/rekognition/latest/dg/face-detection-model.html). 
+ // The version number of the face detection model that's associated with the input + // collection (CollectionId). FaceModelVersion *string // An array of faces detected and added to the collection. For more information, diff --git a/service/rekognition/api_op_ListCollections.go b/service/rekognition/api_op_ListCollections.go index e611b2798d6..85facd98182 100644 --- a/service/rekognition/api_op_ListCollections.go +++ b/service/rekognition/api_op_ListCollections.go @@ -13,7 +13,7 @@ import ( // Returns list of collection IDs in your account. If the result is truncated, the // response also provides a NextToken that you can use in the subsequent request to -// fetch the next set of collection IDs. For an example, see Listing Collections in +// fetch the next set of collection IDs. For an example, see Listing collections in // the Amazon Rekognition Developer Guide. This operation requires permissions to // perform the rekognition:ListCollections action. func (c *Client) ListCollections(ctx context.Context, params *ListCollectionsInput, optFns ...func(*Options)) (*ListCollectionsOutput, error) { @@ -47,11 +47,10 @@ type ListCollectionsOutput struct { // An array of collection IDs. CollectionIds []string - // Latest face models being used with the corresponding collections in the array. - // For more information, see Model versioning - // (https://docs.aws.amazon.com/rekognition/latest/dg/face-detection-model.html). - // For example, the value of FaceModelVersions[2] is the version number for the - // face detection model used by the collection in CollectionId[2]. + // Version numbers of the face detection models associated with the collections in + // the array CollectionIds. For example, the value of FaceModelVersions[2] is the + // version number for the face detection model used by the collection in + // CollectionId[2]. 
FaceModelVersions []string // If the result is truncated, the response provides a NextToken that you can use diff --git a/service/rekognition/api_op_ListFaces.go b/service/rekognition/api_op_ListFaces.go index 3ca95baff06..260f4eb28b0 100644 --- a/service/rekognition/api_op_ListFaces.go +++ b/service/rekognition/api_op_ListFaces.go @@ -52,9 +52,8 @@ type ListFacesInput struct { type ListFacesOutput struct { - // Latest face model being used with the collection. For more information, see - // Model versioning - // (https://docs.aws.amazon.com/rekognition/latest/dg/face-detection-model.html). + // Version number of the face detection model associated with the input collection + // (CollectionId). FaceModelVersion *string // An array of Face objects. diff --git a/service/rekognition/api_op_RecognizeCelebrities.go b/service/rekognition/api_op_RecognizeCelebrities.go index c0879f0fa0e..a4751d45df6 100644 --- a/service/rekognition/api_op_RecognizeCelebrities.go +++ b/service/rekognition/api_op_RecognizeCelebrities.go @@ -12,7 +12,7 @@ import ( ) // Returns an array of celebrities recognized in the input image. For more -// information, see Recognizing Celebrities in the Amazon Rekognition Developer +// information, see Recognizing celebrities in the Amazon Rekognition Developer // Guide. RecognizeCelebrities returns the 64 largest faces in the image. It lists // the recognized celebrities in the CelebrityFaces array and any unrecognized // faces in the UnrecognizedFaces array. RecognizeCelebrities doesn't return @@ -29,8 +29,8 @@ import ( // pass the input image either as base64-encoded image bytes or as a reference to // an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon // Rekognition operations, passing image bytes is not supported. The image must be -// either a PNG or JPEG formatted file. For an example, see Recognizing Celebrities -// in an Image in the Amazon Rekognition Developer Guide. 
This operation requires +// either a PNG or JPEG formatted file. For an example, see Recognizing celebrities +// in an image in the Amazon Rekognition Developer Guide. This operation requires // permissions to perform the rekognition:RecognizeCelebrities operation. func (c *Client) RecognizeCelebrities(ctx context.Context, params *RecognizeCelebritiesInput, optFns ...func(*Options)) (*RecognizeCelebritiesOutput, error) { if params == nil { diff --git a/service/rekognition/api_op_SearchFaces.go b/service/rekognition/api_op_SearchFaces.go index 1b6cd7c1127..e4a52c49346 100644 --- a/service/rekognition/api_op_SearchFaces.go +++ b/service/rekognition/api_op_SearchFaces.go @@ -20,8 +20,8 @@ import ( // highest similarity first. More specifically, it is an array of metadata for each // face match that is found. Along with the metadata, the response also includes a // confidence value for each face match, indicating the confidence that the -// specific face matches the input face. For an example, see Searching for a Face -// Using Its Face ID in the Amazon Rekognition Developer Guide. This operation +// specific face matches the input face. For an example, see Searching for a face +// using its face ID in the Amazon Rekognition Developer Guide. This operation // requires permissions to perform the rekognition:SearchFaces action. func (c *Client) SearchFaces(ctx context.Context, params *SearchFacesInput, optFns ...func(*Options)) (*SearchFacesOutput, error) { if params == nil { @@ -68,9 +68,8 @@ type SearchFacesOutput struct { // match. FaceMatches []types.FaceMatch - // Latest face model being used with the collection. For more information, see - // Model versioning - // (https://docs.aws.amazon.com/rekognition/latest/dg/face-detection-model.html). + // Version number of the face detection model associated with the input collection + // (CollectionId). FaceModelVersion *string // ID of the face that was searched for matches in a collection. 
diff --git a/service/rekognition/api_op_SearchFacesByImage.go b/service/rekognition/api_op_SearchFacesByImage.go index b25a38dc6ad..314a6537861 100644 --- a/service/rekognition/api_op_SearchFacesByImage.go +++ b/service/rekognition/api_op_SearchFacesByImage.go @@ -101,9 +101,8 @@ type SearchFacesByImageOutput struct { // match. FaceMatches []types.FaceMatch - // Latest face model being used with the collection. For more information, see - // Model versioning - // (https://docs.aws.amazon.com/rekognition/latest/dg/face-detection-model.html). + // Version number of the face detection model associated with the input collection + // (CollectionId). FaceModelVersion *string // The bounding box around the face in the input image that Amazon Rekognition used diff --git a/service/rekognition/api_op_StartCelebrityRecognition.go b/service/rekognition/api_op_StartCelebrityRecognition.go index 082012d537f..450c4d93fe6 100644 --- a/service/rekognition/api_op_StartCelebrityRecognition.go +++ b/service/rekognition/api_op_StartCelebrityRecognition.go @@ -22,7 +22,7 @@ import ( // published to the Amazon SNS topic is SUCCEEDED. If so, call // GetCelebrityRecognition and pass the job identifier (JobId) from the initial // call to StartCelebrityRecognition. For more information, see Recognizing -// Celebrities in the Amazon Rekognition Developer Guide. +// celebrities in the Amazon Rekognition Developer Guide. 
func (c *Client) StartCelebrityRecognition(ctx context.Context, params *StartCelebrityRecognitionInput, optFns ...func(*Options)) (*StartCelebrityRecognitionOutput, error) { if params == nil { params = &StartCelebrityRecognitionInput{} diff --git a/service/rekognition/api_op_StartContentModeration.go b/service/rekognition/api_op_StartContentModeration.go index 995d592fccc..5196f3848d1 100644 --- a/service/rekognition/api_op_StartContentModeration.go +++ b/service/rekognition/api_op_StartContentModeration.go @@ -24,7 +24,7 @@ import ( // analysis, first check that the status value published to the Amazon SNS topic is // SUCCEEDED. If so, call GetContentModeration and pass the job identifier (JobId) // from the initial call to StartContentModeration. For more information, see -// Content moderation in the Amazon Rekognition Developer Guide. +// Moderating content in the Amazon Rekognition Developer Guide. func (c *Client) StartContentModeration(ctx context.Context, params *StartContentModerationInput, optFns ...func(*Options)) (*StartContentModerationOutput, error) { if params == nil { params = &StartContentModerationInput{} diff --git a/service/rekognition/api_op_StartFaceDetection.go b/service/rekognition/api_op_StartFaceDetection.go index 1c5fb4bde95..47467e17c56 100644 --- a/service/rekognition/api_op_StartFaceDetection.go +++ b/service/rekognition/api_op_StartFaceDetection.go @@ -21,7 +21,7 @@ import ( // operation, first check that the status value published to the Amazon SNS topic // is SUCCEEDED. If so, call GetFaceDetection and pass the job identifier (JobId) // from the initial call to StartFaceDetection. For more information, see Detecting -// Faces in a Stored Video in the Amazon Rekognition Developer Guide. +// faces in a stored video in the Amazon Rekognition Developer Guide. 
func (c *Client) StartFaceDetection(ctx context.Context, params *StartFaceDetectionInput, optFns ...func(*Options)) (*StartFaceDetectionOutput, error) { if params == nil { params = &StartFaceDetectionInput{} diff --git a/service/rekognition/api_op_StartFaceSearch.go b/service/rekognition/api_op_StartFaceSearch.go index 8c75ca3e3f9..9d04bf335c2 100644 --- a/service/rekognition/api_op_StartFaceSearch.go +++ b/service/rekognition/api_op_StartFaceSearch.go @@ -21,7 +21,8 @@ import ( // search results, first check that the status value published to the Amazon SNS // topic is SUCCEEDED. If so, call GetFaceSearch and pass the job identifier // (JobId) from the initial call to StartFaceSearch. For more information, see -// procedure-person-search-videos. +// Searching stored videos for faces +// (https://docs.aws.amazon.com/rekognition/latest/dg/procedure-person-search-videos.html). func (c *Client) StartFaceSearch(ctx context.Context, params *StartFaceSearchInput, optFns ...func(*Options)) (*StartFaceSearchOutput, error) { if params == nil { params = &StartFaceSearchInput{} diff --git a/service/rekognition/api_op_StartSegmentDetection.go b/service/rekognition/api_op_StartSegmentDetection.go index 1bc0d6fd96e..0c76a9f0c2e 100644 --- a/service/rekognition/api_op_StartSegmentDetection.go +++ b/service/rekognition/api_op_StartSegmentDetection.go @@ -25,7 +25,7 @@ import ( // of the segment detection operation, first check that the status value published // to the Amazon SNS topic is SUCCEEDED. if so, call GetSegmentDetection and pass // the job identifier (JobId) from the initial call to StartSegmentDetection. For -// more information, see Detecting Video Segments in Stored Video in the Amazon +// more information, see Detecting video segments in stored video in the Amazon // Rekognition Developer Guide. 
func (c *Client) StartSegmentDetection(ctx context.Context, params *StartSegmentDetectionInput, optFns ...func(*Options)) (*StartSegmentDetectionOutput, error) { if params == nil { diff --git a/service/rekognition/api_op_StartStreamProcessor.go b/service/rekognition/api_op_StartStreamProcessor.go index 0085b2250f5..4a3a95cadf7 100644 --- a/service/rekognition/api_op_StartStreamProcessor.go +++ b/service/rekognition/api_op_StartStreamProcessor.go @@ -6,6 +6,7 @@ import ( "context" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/rekognition/types" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -13,7 +14,9 @@ import ( // Starts processing a stream processor. You create a stream processor by calling // CreateStreamProcessor. To tell StartStreamProcessor which stream processor to // start, use the value of the Name field specified in the call to -// CreateStreamProcessor. +// CreateStreamProcessor. If you are using a label detection stream processor to +// detect labels, you need to provide a Start selector and a Stop selector to +// determine the length of the stream processing time. func (c *Client) StartStreamProcessor(ctx context.Context, params *StartStreamProcessorInput, optFns ...func(*Options)) (*StartStreamProcessorOutput, error) { if params == nil { params = &StartStreamProcessorInput{} @@ -36,10 +39,28 @@ type StartStreamProcessorInput struct { // This member is required. Name *string + // Specifies the starting point in the Kinesis stream to start processing. You can + // use the producer timestamp or the fragment number. For more information, see + // Fragment + // (https://docs.aws.amazon.com/kinesisvideostreams/latest/dg/API_reader_Fragment.html). + // This is a required parameter for label detection stream processors and should + // not be used to start a face search stream processor. 
+ StartSelector *types.StreamProcessingStartSelector + + // Specifies when to stop processing the stream. You can specify a maximum amount + // of time to process the video. This is a required parameter for label detection + // stream processors and should not be used to start a face search stream + // processor. + StopSelector *types.StreamProcessingStopSelector + noSmithyDocumentSerde } type StartStreamProcessorOutput struct { + + // A unique identifier for the stream processing session. + SessionId *string + // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata diff --git a/service/rekognition/api_op_StartTextDetection.go b/service/rekognition/api_op_StartTextDetection.go index 9ca86972232..63e08904329 100644 --- a/service/rekognition/api_op_StartTextDetection.go +++ b/service/rekognition/api_op_StartTextDetection.go @@ -62,10 +62,11 @@ type StartTextDetectionInput struct { // The Amazon Simple Notification Service topic to which Amazon Rekognition // publishes the completion status of a video analysis operation. For more - // information, see api-video. Note that the Amazon SNS topic must have a topic - // name that begins with AmazonRekognition if you are using the - // AmazonRekognitionServiceRole permissions policy to access the topic. For more - // information, see Giving access to multiple Amazon SNS topics + // information, see Calling Amazon Rekognition Video operations + // (https://docs.aws.amazon.com/rekognition/latest/dg/api-video.html). Note that + // the Amazon SNS topic must have a topic name that begins with AmazonRekognition + // if you are using the AmazonRekognitionServiceRole permissions policy to access + // the topic. For more information, see Giving access to multiple Amazon SNS topics // (https://docs.aws.amazon.com/rekognition/latest/dg/api-video-roles.html#api-video-roles-all-topics). 
NotificationChannel *types.NotificationChannel diff --git a/service/rekognition/api_op_UpdateStreamProcessor.go b/service/rekognition/api_op_UpdateStreamProcessor.go new file mode 100644 index 00000000000..d9c0d1cb163 --- /dev/null +++ b/service/rekognition/api_op_UpdateStreamProcessor.go @@ -0,0 +1,135 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package rekognition + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/rekognition/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Allows you to update a stream processor. You can change some settings and +// regions of interest and delete certain parameters. +func (c *Client) UpdateStreamProcessor(ctx context.Context, params *UpdateStreamProcessorInput, optFns ...func(*Options)) (*UpdateStreamProcessorOutput, error) { + if params == nil { + params = &UpdateStreamProcessorInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateStreamProcessor", params, optFns, c.addOperationUpdateStreamProcessorMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateStreamProcessorOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateStreamProcessorInput struct { + + // Name of the stream processor that you want to update. + // + // This member is required. + Name *string + + // Shows whether you are sharing data with Rekognition to improve model + // performance. You can choose this option at the account level or on a per-stream + // basis. Note that if you opt out at the account level this setting is ignored on + // individual streams. + DataSharingPreferenceForUpdate *types.StreamProcessorDataSharingPreference + + // A list of parameters you want to delete from the stream processor. 
+ ParametersToDelete []types.StreamProcessorParameterToDelete + + // Specifies locations in the frames where Amazon Rekognition checks for objects or + // people. This is an optional parameter for label detection stream processors. + RegionsOfInterestForUpdate []types.RegionOfInterest + + // The stream processor settings that you want to update. Label detection settings + // can be updated to detect different labels with a different minimum confidence. + SettingsForUpdate *types.StreamProcessorSettingsForUpdate + + noSmithyDocumentSerde +} + +type UpdateStreamProcessorOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateStreamProcessorMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateStreamProcessor{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateStreamProcessor{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = 
smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUpdateStreamProcessorValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateStreamProcessor(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateStreamProcessor(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "rekognition", + OperationName: "UpdateStreamProcessor", + } +} diff --git a/service/rekognition/deserializers.go b/service/rekognition/deserializers.go index d590713ad0c..76c010e57c4 100644 --- a/service/rekognition/deserializers.go +++ b/service/rekognition/deserializers.go @@ -7607,6 +7607,132 @@ func awsAwsjson11_deserializeOpErrorUpdateDatasetEntries(response *smithyhttp.Re } } +type awsAwsjson11_deserializeOpUpdateStreamProcessor struct { +} + +func (*awsAwsjson11_deserializeOpUpdateStreamProcessor) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUpdateStreamProcessor) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", 
out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUpdateStreamProcessor(response, &metadata) + } + output := &UpdateStreamProcessorOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUpdateStreamProcessorOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUpdateStreamProcessor(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) 
+ err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsAwsjson11_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson11_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ProvisionedThroughputExceededException", errorCode): + return awsAwsjson11_deserializeErrorProvisionedThroughputExceededException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsAwsjson11_deserializeErrorThrottlingException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + func awsAwsjson11_deserializeErrorAccessDeniedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) @@ -9501,6 +9627,112 @@ func awsAwsjson11_deserializeDocumentCompareFacesUnmatchList(v *[]types.Compared return nil } +func awsAwsjson11_deserializeDocumentConnectedHomeLabels(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + 
var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ConnectedHomeLabel to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentConnectedHomeSettings(v **types.ConnectedHomeSettings, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ConnectedHomeSettings + if *v == nil { + sv = &types.ConnectedHomeSettings{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Labels": + if err := awsAwsjson11_deserializeDocumentConnectedHomeLabels(&sv.Labels, value); err != nil { + return err + } + + case "MinConfidence": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.MinConfidence = ptr.Float32(float32(f64)) + + case string: + var f64 float64 + switch { + case strings.EqualFold(jtv, "NaN"): + f64 = math.NaN() + + case strings.EqualFold(jtv, "Infinity"): + f64 = math.Inf(1) + + case strings.EqualFold(jtv, "-Infinity"): + f64 = math.Inf(-1) + + default: + return fmt.Errorf("unknown JSON number value: %s", jtv) + + } + sv.MinConfidence = ptr.Float32(float32(f64)) + + default: + return fmt.Errorf("expected Percent to be a JSON Number, got %T instead", value) + + } + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeDocumentContentModerationDetection(v **types.ContentModerationDetection, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -14412,6 +14644,81 @@ func 
awsAwsjson11_deserializeDocumentReasons(v *[]types.Reason, value interface{ return nil } +func awsAwsjson11_deserializeDocumentRegionOfInterest(v **types.RegionOfInterest, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.RegionOfInterest + if *v == nil { + sv = &types.RegionOfInterest{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "BoundingBox": + if err := awsAwsjson11_deserializeDocumentBoundingBox(&sv.BoundingBox, value); err != nil { + return err + } + + case "Polygon": + if err := awsAwsjson11_deserializeDocumentPolygon(&sv.Polygon, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentRegionsOfInterest(v *[]types.RegionOfInterest, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.RegionOfInterest + if *v == nil { + cv = []types.RegionOfInterest{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.RegionOfInterest + destAddr := &col + if err := awsAwsjson11_deserializeDocumentRegionOfInterest(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + func awsAwsjson11_deserializeDocumentResourceAlreadyExistsException(v **types.ResourceAlreadyExistsException, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -14644,6 +14951,55 @@ func awsAwsjson11_deserializeDocumentResourceNotReadyException(v **types.Resourc return nil } +func awsAwsjson11_deserializeDocumentS3Destination(v 
**types.S3Destination, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.S3Destination + if *v == nil { + sv = &types.S3Destination{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Bucket": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected S3Bucket to be of type string, got %T instead", value) + } + sv.Bucket = ptr.String(jtv) + } + + case "KeyPrefix": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected S3KeyPrefix to be of type string, got %T instead", value) + } + sv.KeyPrefix = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeDocumentS3Object(v **types.S3Object, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -15233,6 +15589,46 @@ func awsAwsjson11_deserializeDocumentStreamProcessor(v **types.StreamProcessor, return nil } +func awsAwsjson11_deserializeDocumentStreamProcessorDataSharingPreference(v **types.StreamProcessorDataSharingPreference, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.StreamProcessorDataSharingPreference + if *v == nil { + sv = &types.StreamProcessorDataSharingPreference{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "OptIn": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.OptIn = jtv + } + + default: + _, _ = key, value + + } + } + *v = sv + 
return nil +} + func awsAwsjson11_deserializeDocumentStreamProcessorInput(v **types.StreamProcessorInput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -15303,6 +15699,46 @@ func awsAwsjson11_deserializeDocumentStreamProcessorList(v *[]types.StreamProces return nil } +func awsAwsjson11_deserializeDocumentStreamProcessorNotificationChannel(v **types.StreamProcessorNotificationChannel, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.StreamProcessorNotificationChannel + if *v == nil { + sv = &types.StreamProcessorNotificationChannel{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "SNSTopicArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SNSTopicArn to be of type string, got %T instead", value) + } + sv.SNSTopicArn = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeDocumentStreamProcessorOutput(v **types.StreamProcessorOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -15330,6 +15766,11 @@ func awsAwsjson11_deserializeDocumentStreamProcessorOutput(v **types.StreamProce return err } + case "S3Destination": + if err := awsAwsjson11_deserializeDocumentS3Destination(&sv.S3Destination, value); err != nil { + return err + } + default: _, _ = key, value @@ -15361,6 +15802,11 @@ func awsAwsjson11_deserializeDocumentStreamProcessorSettings(v **types.StreamPro for key, value := range shape { switch key { + case "ConnectedHome": + if err := awsAwsjson11_deserializeDocumentConnectedHomeSettings(&sv.ConnectedHome, value); err != nil { + return err + } + case "FaceSearch": if err := 
awsAwsjson11_deserializeDocumentFaceSearchSettings(&sv.FaceSearch, value); err != nil { return err @@ -17177,11 +17623,25 @@ func awsAwsjson11_deserializeOpDocumentDescribeStreamProcessorOutput(v **Describ } } + case "DataSharingPreference": + if err := awsAwsjson11_deserializeDocumentStreamProcessorDataSharingPreference(&sv.DataSharingPreference, value); err != nil { + return err + } + case "Input": if err := awsAwsjson11_deserializeDocumentStreamProcessorInput(&sv.Input, value); err != nil { return err } + case "KmsKeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KmsKeyId to be of type string, got %T instead", value) + } + sv.KmsKeyId = ptr.String(jtv) + } + case "LastUpdateTimestamp": if value != nil { switch jtv := value.(type) { @@ -17207,11 +17667,21 @@ func awsAwsjson11_deserializeOpDocumentDescribeStreamProcessorOutput(v **Describ sv.Name = ptr.String(jtv) } + case "NotificationChannel": + if err := awsAwsjson11_deserializeDocumentStreamProcessorNotificationChannel(&sv.NotificationChannel, value); err != nil { + return err + } + case "Output": if err := awsAwsjson11_deserializeDocumentStreamProcessorOutput(&sv.Output, value); err != nil { return err } + case "RegionsOfInterest": + if err := awsAwsjson11_deserializeDocumentRegionsOfInterest(&sv.RegionsOfInterest, value); err != nil { + return err + } + case "RoleArn": if value != nil { jtv, ok := value.(string) @@ -19068,6 +19538,15 @@ func awsAwsjson11_deserializeOpDocumentStartStreamProcessorOutput(v **StartStrea for key, value := range shape { switch key { + case "SessionId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected StartStreamProcessorSessionId to be of type string, got %T instead", value) + } + sv.SessionId = ptr.String(jtv) + } + default: _, _ = key, value @@ -19280,3 +19759,34 @@ func awsAwsjson11_deserializeOpDocumentUpdateDatasetEntriesOutput(v **UpdateData *v = sv return nil } + +func 
awsAwsjson11_deserializeOpDocumentUpdateStreamProcessorOutput(v **UpdateStreamProcessorOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateStreamProcessorOutput + if *v == nil { + sv = &UpdateStreamProcessorOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} diff --git a/service/rekognition/doc.go b/service/rekognition/doc.go index fa9dc290f33..ca173ba27fa 100644 --- a/service/rekognition/doc.go +++ b/service/rekognition/doc.go @@ -3,5 +3,23 @@ // Package rekognition provides the API client, operations, and parameter types for // Amazon Rekognition. // -// This is the Amazon Rekognition API reference. +// This is the API Reference for Amazon Rekognition Image +// (https://docs.aws.amazon.com/rekognition/latest/dg/images.html), Amazon +// Rekognition Custom Labels +// (https://docs.aws.amazon.com/rekognition/latest/customlabels-dg/what-is.html), +// Amazon Rekognition Stored Video +// (https://docs.aws.amazon.com/rekognition/latest/dg/video.html), Amazon +// Rekognition Streaming Video +// (https://docs.aws.amazon.com/rekognition/latest/dg/streaming-video.html). It +// provides descriptions of actions, data types, common parameters, and common +// errors. 
+// +// Amazon Rekognition Image +// +// Amazon Rekognition Custom Labels +// +// Amazon +// Rekognition Video Stored Video +// +// Amazon Rekognition Video Streaming Video package rekognition diff --git a/service/rekognition/generated.json b/service/rekognition/generated.json index 0d01289b672..9b688f08bd2 100644 --- a/service/rekognition/generated.json +++ b/service/rekognition/generated.json @@ -67,6 +67,7 @@ "api_op_TagResource.go", "api_op_UntagResource.go", "api_op_UpdateDatasetEntries.go", + "api_op_UpdateStreamProcessor.go", "deserializers.go", "doc.go", "endpoints.go", diff --git a/service/rekognition/serializers.go b/service/rekognition/serializers.go index 3dee2a3d00d..0d44d779078 100644 --- a/service/rekognition/serializers.go +++ b/service/rekognition/serializers.go @@ -3205,6 +3205,61 @@ func (m *awsAwsjson11_serializeOpUpdateDatasetEntries) HandleSerialize(ctx conte return next.HandleSerialize(ctx, in) } + +type awsAwsjson11_serializeOpUpdateStreamProcessor struct { +} + +func (*awsAwsjson11_serializeOpUpdateStreamProcessor) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdateStreamProcessor) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateStreamProcessorInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && 
operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("RekognitionService.UpdateStreamProcessor") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdateStreamProcessorInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} func awsAwsjson11_serializeDocumentAsset(v *types.Asset, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -3365,6 +3420,81 @@ func awsAwsjson11_serializeDocumentBoundingBox(v *types.BoundingBox, value smith return nil } +func awsAwsjson11_serializeDocumentConnectedHomeLabels(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentConnectedHomeSettings(v *types.ConnectedHomeSettings, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Labels != nil { + ok := object.Key("Labels") + if err := awsAwsjson11_serializeDocumentConnectedHomeLabels(v.Labels, ok); err != nil { + return err + } + } + + if v.MinConfidence != nil { + ok := object.Key("MinConfidence") + switch { + case 
math.IsNaN(float64(*v.MinConfidence)): + ok.String("NaN") + + case math.IsInf(float64(*v.MinConfidence), 1): + ok.String("Infinity") + + case math.IsInf(float64(*v.MinConfidence), -1): + ok.String("-Infinity") + + default: + ok.Float(*v.MinConfidence) + + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentConnectedHomeSettingsForUpdate(v *types.ConnectedHomeSettingsForUpdate, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Labels != nil { + ok := object.Key("Labels") + if err := awsAwsjson11_serializeDocumentConnectedHomeLabels(v.Labels, ok); err != nil { + return err + } + } + + if v.MinConfidence != nil { + ok := object.Key("MinConfidence") + switch { + case math.IsNaN(float64(*v.MinConfidence)): + ok.String("NaN") + + case math.IsInf(float64(*v.MinConfidence), 1): + ok.String("Infinity") + + case math.IsInf(float64(*v.MinConfidence), -1): + ok.String("-Infinity") + + default: + ok.Float(*v.MinConfidence) + + } + } + + return nil +} + func awsAwsjson11_serializeDocumentContentClassifiers(v []types.ContentClassifier, value smithyjson.Value) error { array := value.Array() defer array.Close() @@ -3661,6 +3791,23 @@ func awsAwsjson11_serializeDocumentKinesisVideoStream(v *types.KinesisVideoStrea return nil } +func awsAwsjson11_serializeDocumentKinesisVideoStreamStartSelector(v *types.KinesisVideoStreamStartSelector, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.FragmentNumber != nil { + ok := object.Key("FragmentNumber") + ok.String(*v.FragmentNumber) + } + + if v.ProducerTimestamp != nil { + ok := object.Key("ProducerTimestamp") + ok.Long(*v.ProducerTimestamp) + } + + return nil +} + func awsAwsjson11_serializeDocumentNotificationChannel(v *types.NotificationChannel, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -3695,6 +3842,62 @@ func awsAwsjson11_serializeDocumentOutputConfig(v *types.OutputConfig, value smi return nil } +func 
awsAwsjson11_serializeDocumentPoint(v *types.Point, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.X != nil { + ok := object.Key("X") + switch { + case math.IsNaN(float64(*v.X)): + ok.String("NaN") + + case math.IsInf(float64(*v.X), 1): + ok.String("Infinity") + + case math.IsInf(float64(*v.X), -1): + ok.String("-Infinity") + + default: + ok.Float(*v.X) + + } + } + + if v.Y != nil { + ok := object.Key("Y") + switch { + case math.IsNaN(float64(*v.Y)): + ok.String("NaN") + + case math.IsInf(float64(*v.Y), 1): + ok.String("Infinity") + + case math.IsInf(float64(*v.Y), -1): + ok.String("-Infinity") + + default: + ok.Float(*v.Y) + + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentPolygon(v []types.Point, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentPoint(&v[i], av); err != nil { + return err + } + } + return nil +} + func awsAwsjson11_serializeDocumentProjectNames(v []string, value smithyjson.Value) error { array := value.Array() defer array.Close() @@ -3760,6 +3963,13 @@ func awsAwsjson11_serializeDocumentRegionOfInterest(v *types.RegionOfInterest, v } } + if v.Polygon != nil { + ok := object.Key("Polygon") + if err := awsAwsjson11_serializeDocumentPolygon(v.Polygon, ok); err != nil { + return err + } + } + return nil } @@ -3776,6 +3986,23 @@ func awsAwsjson11_serializeDocumentRegionsOfInterest(v []types.RegionOfInterest, return nil } +func awsAwsjson11_serializeDocumentS3Destination(v *types.S3Destination, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Bucket != nil { + ok := object.Key("Bucket") + ok.String(*v.Bucket) + } + + if v.KeyPrefix != nil { + ok := object.Key("KeyPrefix") + ok.String(*v.KeyPrefix) + } + + return nil +} + func awsAwsjson11_serializeDocumentS3Object(v *types.S3Object, value smithyjson.Value) error { object := value.Object() 
defer object.Close() @@ -3908,6 +4135,44 @@ func awsAwsjson11_serializeDocumentStartTextDetectionFilters(v *types.StartTextD return nil } +func awsAwsjson11_serializeDocumentStreamProcessingStartSelector(v *types.StreamProcessingStartSelector, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KVSStreamStartSelector != nil { + ok := object.Key("KVSStreamStartSelector") + if err := awsAwsjson11_serializeDocumentKinesisVideoStreamStartSelector(v.KVSStreamStartSelector, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentStreamProcessingStopSelector(v *types.StreamProcessingStopSelector, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxDurationInSeconds != nil { + ok := object.Key("MaxDurationInSeconds") + ok.Long(*v.MaxDurationInSeconds) + } + + return nil +} + +func awsAwsjson11_serializeDocumentStreamProcessorDataSharingPreference(v *types.StreamProcessorDataSharingPreference, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + { + ok := object.Key("OptIn") + ok.Boolean(v.OptIn) + } + + return nil +} + func awsAwsjson11_serializeDocumentStreamProcessorInput(v *types.StreamProcessorInput, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -3922,6 +4187,18 @@ func awsAwsjson11_serializeDocumentStreamProcessorInput(v *types.StreamProcessor return nil } +func awsAwsjson11_serializeDocumentStreamProcessorNotificationChannel(v *types.StreamProcessorNotificationChannel, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.SNSTopicArn != nil { + ok := object.Key("SNSTopicArn") + ok.String(*v.SNSTopicArn) + } + + return nil +} + func awsAwsjson11_serializeDocumentStreamProcessorOutput(v *types.StreamProcessorOutput, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -3933,6 +4210,24 @@ func 
awsAwsjson11_serializeDocumentStreamProcessorOutput(v *types.StreamProcesso } } + if v.S3Destination != nil { + ok := object.Key("S3Destination") + if err := awsAwsjson11_serializeDocumentS3Destination(v.S3Destination, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentStreamProcessorParametersToDelete(v []types.StreamProcessorParameterToDelete, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(string(v[i])) + } return nil } @@ -3940,6 +4235,13 @@ func awsAwsjson11_serializeDocumentStreamProcessorSettings(v *types.StreamProces object := value.Object() defer object.Close() + if v.ConnectedHome != nil { + ok := object.Key("ConnectedHome") + if err := awsAwsjson11_serializeDocumentConnectedHomeSettings(v.ConnectedHome, ok); err != nil { + return err + } + } + if v.FaceSearch != nil { ok := object.Key("FaceSearch") if err := awsAwsjson11_serializeDocumentFaceSearchSettings(v.FaceSearch, ok); err != nil { @@ -3950,6 +4252,20 @@ func awsAwsjson11_serializeDocumentStreamProcessorSettings(v *types.StreamProces return nil } +func awsAwsjson11_serializeDocumentStreamProcessorSettingsForUpdate(v *types.StreamProcessorSettingsForUpdate, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ConnectedHomeForUpdate != nil { + ok := object.Key("ConnectedHomeForUpdate") + if err := awsAwsjson11_serializeDocumentConnectedHomeSettingsForUpdate(v.ConnectedHomeForUpdate, ok); err != nil { + return err + } + } + + return nil +} + func awsAwsjson11_serializeDocumentTagKeyList(v []string, value smithyjson.Value) error { array := value.Array() defer array.Close() @@ -4183,6 +4499,13 @@ func awsAwsjson11_serializeOpDocumentCreateStreamProcessorInput(v *CreateStreamP object := value.Object() defer object.Close() + if v.DataSharingPreference != nil { + ok := object.Key("DataSharingPreference") + if err := 
awsAwsjson11_serializeDocumentStreamProcessorDataSharingPreference(v.DataSharingPreference, ok); err != nil { + return err + } + } + if v.Input != nil { ok := object.Key("Input") if err := awsAwsjson11_serializeDocumentStreamProcessorInput(v.Input, ok); err != nil { @@ -4190,11 +4513,23 @@ func awsAwsjson11_serializeOpDocumentCreateStreamProcessorInput(v *CreateStreamP } } + if v.KmsKeyId != nil { + ok := object.Key("KmsKeyId") + ok.String(*v.KmsKeyId) + } + if v.Name != nil { ok := object.Key("Name") ok.String(*v.Name) } + if v.NotificationChannel != nil { + ok := object.Key("NotificationChannel") + if err := awsAwsjson11_serializeDocumentStreamProcessorNotificationChannel(v.NotificationChannel, ok); err != nil { + return err + } + } + if v.Output != nil { ok := object.Key("Output") if err := awsAwsjson11_serializeDocumentStreamProcessorOutput(v.Output, ok); err != nil { @@ -4202,6 +4537,13 @@ func awsAwsjson11_serializeOpDocumentCreateStreamProcessorInput(v *CreateStreamP } } + if v.RegionsOfInterest != nil { + ok := object.Key("RegionsOfInterest") + if err := awsAwsjson11_serializeDocumentRegionsOfInterest(v.RegionsOfInterest, ok); err != nil { + return err + } + } + if v.RoleArn != nil { ok := object.Key("RoleArn") ok.String(*v.RoleArn) @@ -5397,6 +5739,20 @@ func awsAwsjson11_serializeOpDocumentStartStreamProcessorInput(v *StartStreamPro ok.String(*v.Name) } + if v.StartSelector != nil { + ok := object.Key("StartSelector") + if err := awsAwsjson11_serializeDocumentStreamProcessingStartSelector(v.StartSelector, ok); err != nil { + return err + } + } + + if v.StopSelector != nil { + ok := object.Key("StopSelector") + if err := awsAwsjson11_serializeDocumentStreamProcessingStopSelector(v.StopSelector, ok); err != nil { + return err + } + } + return nil } @@ -5518,3 +5874,43 @@ func awsAwsjson11_serializeOpDocumentUpdateDatasetEntriesInput(v *UpdateDatasetE return nil } + +func awsAwsjson11_serializeOpDocumentUpdateStreamProcessorInput(v 
*UpdateStreamProcessorInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DataSharingPreferenceForUpdate != nil { + ok := object.Key("DataSharingPreferenceForUpdate") + if err := awsAwsjson11_serializeDocumentStreamProcessorDataSharingPreference(v.DataSharingPreferenceForUpdate, ok); err != nil { + return err + } + } + + if v.Name != nil { + ok := object.Key("Name") + ok.String(*v.Name) + } + + if v.ParametersToDelete != nil { + ok := object.Key("ParametersToDelete") + if err := awsAwsjson11_serializeDocumentStreamProcessorParametersToDelete(v.ParametersToDelete, ok); err != nil { + return err + } + } + + if v.RegionsOfInterestForUpdate != nil { + ok := object.Key("RegionsOfInterestForUpdate") + if err := awsAwsjson11_serializeDocumentRegionsOfInterest(v.RegionsOfInterestForUpdate, ok); err != nil { + return err + } + } + + if v.SettingsForUpdate != nil { + ok := object.Key("SettingsForUpdate") + if err := awsAwsjson11_serializeDocumentStreamProcessorSettingsForUpdate(v.SettingsForUpdate, ok); err != nil { + return err + } + } + + return nil +} diff --git a/service/rekognition/types/enums.go b/service/rekognition/types/enums.go index 06804ae487f..3215a767feb 100644 --- a/service/rekognition/types/enums.go +++ b/service/rekognition/types/enums.go @@ -544,6 +544,25 @@ func (SegmentType) Values() []SegmentType { } } +type StreamProcessorParameterToDelete string + +// Enum values for StreamProcessorParameterToDelete +const ( + StreamProcessorParameterToDeleteConnectedHomeMinConfidence StreamProcessorParameterToDelete = "ConnectedHomeMinConfidence" + StreamProcessorParameterToDeleteRegionsOfInterest StreamProcessorParameterToDelete = "RegionsOfInterest" +) + +// Values returns all known values for StreamProcessorParameterToDelete. Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. The ordering of this slice is not guaranteed to be stable across +// updates. 
+func (StreamProcessorParameterToDelete) Values() []StreamProcessorParameterToDelete { + return []StreamProcessorParameterToDelete{ + "ConnectedHomeMinConfidence", + "RegionsOfInterest", + } +} + type StreamProcessorStatus string // Enum values for StreamProcessorStatus @@ -553,6 +572,7 @@ const ( StreamProcessorStatusRunning StreamProcessorStatus = "RUNNING" StreamProcessorStatusFailed StreamProcessorStatus = "FAILED" StreamProcessorStatusStopping StreamProcessorStatus = "STOPPING" + StreamProcessorStatusUpdating StreamProcessorStatus = "UPDATING" ) // Values returns all known values for StreamProcessorStatus. Note that this can be @@ -565,6 +585,7 @@ func (StreamProcessorStatus) Values() []StreamProcessorStatus { "RUNNING", "FAILED", "STOPPING", + "UPDATING", } } diff --git a/service/rekognition/types/errors.go b/service/rekognition/types/errors.go index 97ba80254cb..9e9bae2c58f 100644 --- a/service/rekognition/types/errors.go +++ b/service/rekognition/types/errors.go @@ -87,8 +87,8 @@ func (e *IdempotentParameterMismatchException) ErrorFault() smithy.ErrorFault { // The input image size exceeds the allowed limit. If you are calling // DetectProtectiveEquipment, the image size or resolution exceeds the allowed -// limit. For more information, see Limits in Amazon Rekognition in the Amazon -// Rekognition Developer Guide. +// limit. For more information, see Guidelines and quotas in Amazon Rekognition in +// the Amazon Rekognition Developer Guide. type ImageTooLargeException struct { Message *string @@ -366,7 +366,8 @@ func (e *ResourceNotReadyException) ErrorCode() string { return "Res func (e *ResourceNotReadyException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The size of the collection exceeds the allowed limit. For more information, see -// Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide. +// Guidelines and quotas in Amazon Rekognition in the Amazon Rekognition Developer +// Guide. 
type ServiceQuotaExceededException struct { Message *string diff --git a/service/rekognition/types/types.go b/service/rekognition/types/types.go index a567a7f997a..6e690c79f38 100644 --- a/service/rekognition/types/types.go +++ b/service/rekognition/types/types.go @@ -97,21 +97,21 @@ type BlackFrame struct { noSmithyDocumentSerde } -// Identifies the bounding box around the label, face, text or personal protective -// equipment. The left (x-coordinate) and top (y-coordinate) are coordinates -// representing the top and left sides of the bounding box. Note that the -// upper-left corner of the image is the origin (0,0). The top and left values -// returned are ratios of the overall image size. For example, if the input image -// is 700x200 pixels, and the top-left coordinate of the bounding box is 350x50 -// pixels, the API returns a left value of 0.5 (350/700) and a top value of 0.25 -// (50/200). The width and height values represent the dimensions of the bounding -// box as a ratio of the overall image dimension. For example, if the input image -// is 700x200 pixels, and the bounding box width is 70 pixels, the width returned -// is 0.1. The bounding box coordinates can have negative values. For example, if -// Amazon Rekognition is able to detect a face that is at the image edge and is -// only partially visible, the service can return coordinates that are outside the -// image bounds and, depending on the image edge, you might get negative values or -// values greater than 1 for the left or top values. +// Identifies the bounding box around the label, face, text, object of interest, or +// personal protective equipment. The left (x-coordinate) and top (y-coordinate) +// are coordinates representing the top and left sides of the bounding box. Note +// that the upper-left corner of the image is the origin (0,0). The top and left +// values returned are ratios of the overall image size. 
For example, if the input +// image is 700x200 pixels, and the top-left coordinate of the bounding box is +// 350x50 pixels, the API returns a left value of 0.5 (350/700) and a top value of +// 0.25 (50/200). The width and height values represent the dimensions of the +// bounding box as a ratio of the overall image dimension. For example, if the +// input image is 700x200 pixels, and the bounding box width is 70 pixels, the +// width returned is 0.1. The bounding box coordinates can have negative values. +// For example, if Amazon Rekognition is able to detect a face that is at the image +// edge and is only partially visible, the service can return coordinates that are +// outside the image bounds and, depending on the image edge, you might get +// negative values or values greater than 1 for the left or top values. type BoundingBox struct { // Height of the bounding box as a ratio of the overall image height. @@ -263,6 +263,46 @@ type CompareFacesMatch struct { noSmithyDocumentSerde } +// Label detection settings to use on a streaming video. Defining the settings is +// required in the request parameter for CreateStreamProcessor. Including this +// setting in the CreateStreamProcessor request enables you to use the stream +// processor for label detection. You can then select what you want the stream +// processor to detect, such as people or pets. When the stream processor has +// started, one notification is sent for each object class specified. For example, +// if packages and pets are selected, one SNS notification is published the first +// time a package is detected and one SNS notification is published the first time +// a pet is detected, as well as an end-of-session summary. +type ConnectedHomeSettings struct { + + // Specifies what you want to detect in the video, such as people, packages, or + // pets. The current valid labels you can include in this list are: "PERSON", + // "PET", "PACKAGE", and "ALL". + // + // This member is required. 
+ Labels []string + + // The minimum confidence required to label an object in the video. + MinConfidence *float32 + + noSmithyDocumentSerde +} + +// The label detection settings you want to use in your stream processor. This +// includes the labels you want the stream processor to detect and the minimum +// confidence level allowed to label objects. +type ConnectedHomeSettingsForUpdate struct { + + // Specifies what you want to detect in the video, such as people, packages, or + // pets. The current valid labels you can include in this list are: "PERSON", + // "PET", "PACKAGE", and "ALL". + Labels []string + + // The minimum confidence required to label an object in the video. + MinConfidence *float32 + + noSmithyDocumentSerde +} + // Information about an inappropriate, unwanted, or offensive content label // detection in a stored video. type ContentModerationDetection struct { @@ -462,8 +502,8 @@ type DetectionFilter struct { MinBoundingBoxWidth *float32 // Sets the confidence of word detection. Words with detection confidence below - // this will be excluded from the result. Values should be between 50 and 100 as - // Text in Video will not return any result below 50. + // this will be excluded from the result. Values should be between 0 and 100. The + // default MinConfidence is 80. MinConfidence *float32 noSmithyDocumentSerde @@ -729,7 +769,9 @@ type FaceRecord struct { } // Input face recognition parameters for an Amazon Rekognition stream processor. -// FaceRecognitionSettings is a request parameter for CreateStreamProcessor. +// Includes the collection to use for face recognition and the face attributes to +// detect. Defining the settings is required in the request parameter for +// CreateStreamProcessor. type FaceSearchSettings struct { // The ID of a collection that contains faces that you want to search for. @@ -754,7 +796,7 @@ type FaceSearchSettings struct { // gender distribution statistics need to be analyzed without identifying specific // users. 
For example, the percentage of female users compared to male users on a // social media platform. We don't recommend using gender binary predictions to -// make decisions that impact
 an individual's rights, privacy, or access to +// make decisions that impact an individual's rights, privacy, or access to // services. type Gender struct { @@ -788,8 +830,8 @@ type GroundTruthManifest struct { // Provides the S3 bucket name and object name. The region for the S3 bucket // containing the S3 object must match the region you use for Amazon Rekognition // operations. For Amazon Rekognition to process an S3 object, the user must have - // permission to access the S3 object. For more information, see Resource-Based - // Policies in the Amazon Rekognition Developer Guide. + // permission to access the S3 object. For more information, see How Amazon + // Rekognition works with IAM in the Amazon Rekognition Developer Guide. S3Object *S3Object noSmithyDocumentSerde @@ -863,8 +905,8 @@ type HumanLoopDataAttributes struct { // is not supported. You must first upload the image to an Amazon S3 bucket and // then call the operation using the S3Object property. For Amazon Rekognition to // process an S3 object, the user must have permission to access the S3 object. For -// more information, see Resource Based Policies in the Amazon Rekognition -// Developer Guide. +// more information, see How Amazon Rekognition works with IAM in the Amazon +// Rekognition Developer Guide. type Image struct { // Blob of image bytes up to 5 MBs. @@ -925,6 +967,22 @@ type KinesisVideoStream struct { noSmithyDocumentSerde } +// Specifies the starting point in a Kinesis stream to start processing. You can +// use the producer timestamp or the fragment number. For more information, see +// Fragment +// (https://docs.aws.amazon.com/kinesisvideostreams/latest/dg/API_reader_Fragment.html). +type KinesisVideoStreamStartSelector struct { + + // The unique identifier of the fragment. This value monotonically increases based + // on the ingestion order. + FragmentNumber *string + + // The timestamp from the producer corresponding to the fragment. 
+ ProducerTimestamp *int64 + + noSmithyDocumentSerde +} + // The known gender identity for the celebrity that matches the provided ID. The // known gender identity can be Male, Female, Nonbinary, or Unlisted. type KnownGender struct { @@ -1040,10 +1098,11 @@ type Mustache struct { // The Amazon Simple Notification Service topic to which Amazon Rekognition // publishes the completion status of a video analysis operation. For more -// information, see api-video. Note that the Amazon SNS topic must have a topic -// name that begins with AmazonRekognition if you are using the -// AmazonRekognitionServiceRole permissions policy to access the topic. For more -// information, see Giving access to multiple Amazon SNS topics +// information, see Calling Amazon Rekognition Video operations +// (https://docs.aws.amazon.com/rekognition/latest/dg/api-video.html). Note that +// the Amazon SNS topic must have a topic name that begins with AmazonRekognition +// if you are using the AmazonRekognitionServiceRole permissions policy to access +// the topic. For more information, see Giving access to multiple Amazon SNS topics // (https://docs.aws.amazon.com/rekognition/latest/dg/api-video-roles.html#api-video-roles-all-topics). type NotificationChannel struct { @@ -1053,7 +1112,7 @@ type NotificationChannel struct { // This member is required. RoleArn *string - // The Amazon SNS topic to which Amazon Rekognition to posts the completion status. + // The Amazon SNS topic to which Amazon Rekognition posts the completion status. // // This member is required. SNSTopicArn *string @@ -1137,13 +1196,14 @@ type PersonMatch struct { noSmithyDocumentSerde } -// The X and Y coordinates of a point on an image. The X and Y values returned are -// ratios of the overall image size. For example, if the input image is 700x200 and -// the operation returns X=0.5 and Y=0.25, then the point is at the (350,50) pixel -// coordinate on the image. 
An array of Point objects, Polygon, is returned by -// DetectText and by DetectCustomLabels. Polygon represents a fine-grained polygon -// around a detected item. For more information, see Geometry in the Amazon -// Rekognition Developer Guide. +// The X and Y coordinates of a point on an image or video frame. The X and Y +// values are ratios of the overall image size or video resolution. For example, if +// an input image is 700x200 and the values are X=0.5 and Y=0.25, then the point is +// at the (350,50) pixel coordinate on the image. An array of Point objects makes +// up a Polygon. A Polygon is returned by DetectText and by +// DetectCustomLabelsPolygon represents a fine-grained polygon around a detected +// item. For more information, see Geometry in the Amazon Rekognition Developer +// Guide. type Point struct { // The value of the X coordinate for a point on a Polygon. @@ -1348,24 +1408,47 @@ type ProtectiveEquipmentSummary struct { noSmithyDocumentSerde } -// Specifies a location within the frame that Rekognition checks for text. Uses a -// BoundingBox object to set a region of the screen. A word is included in the -// region if the word is more than half in that region. If there is more than one -// region, the word will be compared with all regions of the screen. Any word more -// than half in a region is kept in the results. +// Specifies a location within the frame that Rekognition checks for objects of +// interest such as text, labels, or faces. It uses a BoundingBox or object or +// Polygon to set a region of the screen. A word, face, or label is included in the +// region if it is more than half in that region. If there is more than one region, +// the word, face, or label is compared with all regions of the screen. Any object +// of interest that is more than half in a region is kept in the results. type RegionOfInterest struct { // The box representing a region of interest on screen. 
BoundingBox *BoundingBox + // Specifies a shape made up of up to 10 Point objects to define a region of + // interest. + Polygon []Point + + noSmithyDocumentSerde +} + +// The Amazon S3 bucket location to which Amazon Rekognition publishes the detailed +// inference results of a video analysis operation. These results include the name +// of the stream processor resource, the session ID of the stream processing +// session, and labeled timestamps and bounding boxes for detected labels. +type S3Destination struct { + + // The name of the Amazon S3 bucket you want to associate with the streaming video + // project. You must be the owner of the Amazon S3 bucket. + Bucket *string + + // The prefix value of the location within the bucket that you want the information + // to be published to. For more information, see Using prefixes + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-prefixes.html). + KeyPrefix *string + noSmithyDocumentSerde } // Provides the S3 bucket name and object name. The region for the S3 bucket // containing the S3 object must match the region you use for Amazon Rekognition // operations. For Amazon Rekognition to process an S3 object, the user must have -// permission to access the S3 object. For more information, see Resource-Based -// Policies in the Amazon Rekognition Developer Guide. +// permission to access the S3 object. For more information, see How Amazon +// Rekognition works with IAM in the Amazon Rekognition Developer Guide. type S3Object struct { // Name of the S3 bucket. @@ -1542,11 +1625,32 @@ type StartTextDetectionFilters struct { noSmithyDocumentSerde } -// An object that recognizes faces in a streaming video. An Amazon Rekognition -// stream processor is created by a call to CreateStreamProcessor. The request -// parameters for CreateStreamProcessor describe the Kinesis video stream source -// for the streaming video, face recognition parameters, and where to stream the -// analysis resullts. 
+// +type StreamProcessingStartSelector struct { + + // Specifies the starting point in the stream to start processing. This can be done + // with a timestamp or a fragment number in a Kinesis stream. + KVSStreamStartSelector *KinesisVideoStreamStartSelector + + noSmithyDocumentSerde +} + +// Specifies when to stop processing the stream. You can specify a maximum amount +// of time to process the video. +type StreamProcessingStopSelector struct { + + // Specifies the maximum amount of time in seconds that you want the stream to be + // processed. The largest amount of time is 2 minutes. The default is 10 seconds. + MaxDurationInSeconds *int64 + + noSmithyDocumentSerde +} + +// An object that recognizes faces or labels in a streaming video. An Amazon +// Rekognition stream processor is created by a call to CreateStreamProcessor. The +// request parameters for CreateStreamProcessor describe the Kinesis video stream +// source for the streaming video, face recognition parameters, and where to stream +// the analysis resullts. type StreamProcessor struct { // Name of the Amazon Rekognition stream processor. @@ -1558,6 +1662,21 @@ type StreamProcessor struct { noSmithyDocumentSerde } +// Allows you to opt in or opt out to share data with Rekognition to improve model +// performance. You can choose this option at the account level or on a per-stream +// basis. Note that if you opt out at the account level this setting is ignored on +// individual streams. +type StreamProcessorDataSharingPreference struct { + + // If this option is set to true, you choose to share data with Rekognition to + // improve model performance. + // + // This member is required. + OptIn bool + + noSmithyDocumentSerde +} + // Information about the source streaming video. 
type StreamProcessorInput struct { @@ -1567,6 +1686,26 @@ type StreamProcessorInput struct { noSmithyDocumentSerde } +// The Amazon Simple Notification Service topic to which Amazon Rekognition +// publishes the object detection results and completion status of a video analysis +// operation. Amazon Rekognition publishes a notification the first time an object +// of interest or a person is detected in the video stream. For example, if Amazon +// Rekognition detects a person at second 2, a pet at second 4, and a person again +// at second 5, Amazon Rekognition sends 2 object class detected notifications, one +// for a person at second 2 and one for a pet at second 4. Amazon Rekognition also +// publishes an an end-of-session notification with a summary when the stream +// processing session is complete. +type StreamProcessorNotificationChannel struct { + + // The Amazon Resource Number (ARN) of the Amazon Amazon Simple Notification + // Service topic to which Amazon Rekognition posts the completion status. + // + // This member is required. + SNSTopicArn *string + + noSmithyDocumentSerde +} + // Information about the Amazon Kinesis Data Streams stream to which a Amazon // Rekognition Video stream processor streams the results of a video analysis. For // more information, see CreateStreamProcessor in the Amazon Rekognition Developer @@ -1577,19 +1716,45 @@ type StreamProcessorOutput struct { // processor streams the analysis results. KinesisDataStream *KinesisDataStream + // The Amazon S3 bucket location to which Amazon Rekognition publishes the detailed + // inference results of a video analysis operation. + S3Destination *S3Destination + noSmithyDocumentSerde } -// Input parameters used to recognize faces in a streaming video analyzed by a -// Amazon Rekognition stream processor. +// Input parameters used in a streaming video analyzed by a Amazon Rekognition +// stream processor. 
You can use FaceSearch to recognize faces in a streaming +// video, or you can use ConnectedHome to detect labels. type StreamProcessorSettings struct { + // Label detection settings to use on a streaming video. Defining the settings is + // required in the request parameter for CreateStreamProcessor. Including this + // setting in the CreateStreamProcessor request enables you to use the stream + // processor for label detection. You can then select what you want the stream + // processor to detect, such as people or pets. When the stream processor has + // started, one notification is sent for each object class specified. For example, + // if packages and pets are selected, one SNS notification is published the first + // time a package is detected and one SNS notification is published the first time + // a pet is detected, as well as an end-of-session summary. + ConnectedHome *ConnectedHomeSettings + // Face search settings to use on a streaming video. FaceSearch *FaceSearchSettings noSmithyDocumentSerde } +// The stream processor settings that you want to update. ConnectedHome settings +// can be updated to detect different labels with a different minimum confidence. +type StreamProcessorSettingsForUpdate struct { + + // The label detection settings you want to use for your stream processor. + ConnectedHomeForUpdate *ConnectedHomeSettingsForUpdate + + noSmithyDocumentSerde +} + // The S3 bucket that contains the training summary. The training summary includes // aggregated evaluation metrics for the entire testing dataset and metrics for // each individual label. You get the training summary S3 bucket location by @@ -1599,8 +1764,8 @@ type Summary struct { // Provides the S3 bucket name and object name. The region for the S3 bucket // containing the S3 object must match the region you use for Amazon Rekognition // operations. For Amazon Rekognition to process an S3 object, the user must have - // permission to access the S3 object. 
For more information, see Resource-Based - // Policies in the Amazon Rekognition Developer Guide. + // permission to access the S3 object. For more information, see How Amazon + // Rekognition works with IAM in the Amazon Rekognition Developer Guide. S3Object *S3Object noSmithyDocumentSerde @@ -1673,7 +1838,7 @@ type TestingDataResult struct { // image. Every word and line has an identifier (Id). Each word belongs to a line // and has a parent identifier (ParentId) that identifies the line of text in which // the word appears. The word Id is also an index for the word within a line of -// words. For more information, see Detecting Text in the Amazon Rekognition +// words. For more information, see Detecting text in the Amazon Rekognition // Developer Guide. type TextDetection struct { diff --git a/service/rekognition/validators.go b/service/rekognition/validators.go index 2ee38870c28..f21f1b376d5 100644 --- a/service/rekognition/validators.go +++ b/service/rekognition/validators.go @@ -1110,6 +1110,26 @@ func (m *validateOpUpdateDatasetEntries) HandleInitialize(ctx context.Context, i return next.HandleInitialize(ctx, in) } +type validateOpUpdateStreamProcessor struct { +} + +func (*validateOpUpdateStreamProcessor) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateStreamProcessor) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateStreamProcessorInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateStreamProcessorInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + func addOpCompareFacesValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpCompareFaces{}, middleware.After) } @@ -1330,6 +1350,25 @@ 
func addOpUpdateDatasetEntriesValidationMiddleware(stack *middleware.Stack) erro return stack.Initialize.Add(&validateOpUpdateDatasetEntries{}, middleware.After) } +func addOpUpdateStreamProcessorValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateStreamProcessor{}, middleware.After) +} + +func validateConnectedHomeSettings(v *types.ConnectedHomeSettings) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ConnectedHomeSettings"} + if v.Labels == nil { + invalidParams.Add(smithy.NewErrParamRequired("Labels")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateDatasetChanges(v *types.DatasetChanges) error { if v == nil { return nil @@ -1431,6 +1470,50 @@ func validateProtectiveEquipmentSummarizationAttributes(v *types.ProtectiveEquip } } +func validateStreamProcessorDataSharingPreference(v *types.StreamProcessorDataSharingPreference) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "StreamProcessorDataSharingPreference"} + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateStreamProcessorNotificationChannel(v *types.StreamProcessorNotificationChannel) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "StreamProcessorNotificationChannel"} + if v.SNSTopicArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("SNSTopicArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateStreamProcessorSettings(v *types.StreamProcessorSettings) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "StreamProcessorSettings"} + if v.ConnectedHome != nil { + if err := validateConnectedHomeSettings(v.ConnectedHome); err != nil { + invalidParams.AddNested("ConnectedHome", err.(smithy.InvalidParamsError)) + } 
+ } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpCompareFacesInput(v *CompareFacesInput) error { if v == nil { return nil @@ -1534,10 +1617,24 @@ func validateOpCreateStreamProcessorInput(v *CreateStreamProcessorInput) error { } if v.Settings == nil { invalidParams.Add(smithy.NewErrParamRequired("Settings")) + } else if v.Settings != nil { + if err := validateStreamProcessorSettings(v.Settings); err != nil { + invalidParams.AddNested("Settings", err.(smithy.InvalidParamsError)) + } } if v.RoleArn == nil { invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) } + if v.NotificationChannel != nil { + if err := validateStreamProcessorNotificationChannel(v.NotificationChannel); err != nil { + invalidParams.AddNested("NotificationChannel", err.(smithy.InvalidParamsError)) + } + } + if v.DataSharingPreference != nil { + if err := validateStreamProcessorDataSharingPreference(v.DataSharingPreference); err != nil { + invalidParams.AddNested("DataSharingPreference", err.(smithy.InvalidParamsError)) + } + } if invalidParams.Len() > 0 { return invalidParams } else { @@ -2370,3 +2467,23 @@ func validateOpUpdateDatasetEntriesInput(v *UpdateDatasetEntriesInput) error { return nil } } + +func validateOpUpdateStreamProcessorInput(v *UpdateStreamProcessorInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateStreamProcessorInput"} + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if v.DataSharingPreferenceForUpdate != nil { + if err := validateStreamProcessorDataSharingPreference(v.DataSharingPreferenceForUpdate); err != nil { + invalidParams.AddNested("DataSharingPreferenceForUpdate", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/service/sagemaker/api_op_AddTags.go b/service/sagemaker/api_op_AddTags.go index 511444f50e3..d8d049ee671 100644 --- 
a/service/sagemaker/api_op_AddTags.go +++ b/service/sagemaker/api_op_AddTags.go @@ -11,9 +11,9 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Adds or overwrites one or more tags for the specified Amazon SageMaker resource. -// You can add tags to notebook instances, training jobs, hyperparameter tuning -// jobs, batch transform jobs, models, labeling jobs, work teams, endpoint +// Adds or overwrites one or more tags for the specified SageMaker resource. You +// can add tags to notebook instances, training jobs, hyperparameter tuning jobs, +// batch transform jobs, models, labeling jobs, work teams, endpoint // configurations, and endpoints. Each tag consists of a key and an optional value. // Tag keys must be unique per resource. For more information about tags, see For // more information, see Amazon Web Services Tagging Strategies @@ -68,7 +68,7 @@ type AddTagsInput struct { type AddTagsOutput struct { - // A list of tags associated with the Amazon SageMaker resource. + // A list of tags associated with the SageMaker resource. Tags []types.Tag // Metadata pertaining to the operation's result. diff --git a/service/sagemaker/api_op_CreateAlgorithm.go b/service/sagemaker/api_op_CreateAlgorithm.go index 4c9de7b1918..a4d7d5ef216 100644 --- a/service/sagemaker/api_op_CreateAlgorithm.go +++ b/service/sagemaker/api_op_CreateAlgorithm.go @@ -11,8 +11,8 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Create a machine learning algorithm that you can use in Amazon SageMaker and -// list in the Amazon Web Services Marketplace. +// Create a machine learning algorithm that you can use in SageMaker and list in +// the Amazon Web Services Marketplace. 
func (c *Client) CreateAlgorithm(ctx context.Context, params *CreateAlgorithmInput, optFns ...func(*Options)) (*CreateAlgorithmOutput, error) { if params == nil { params = &CreateAlgorithmInput{} @@ -88,10 +88,9 @@ type CreateAlgorithmInput struct { // (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). Tags []types.Tag - // Specifies configurations for one or more training jobs and that Amazon SageMaker - // runs to test the algorithm's training code and, optionally, one or more batch - // transform jobs that Amazon SageMaker runs to test the algorithm's inference - // code. + // Specifies configurations for one or more training jobs and that SageMaker runs + // to test the algorithm's training code and, optionally, one or more batch + // transform jobs that SageMaker runs to test the algorithm's inference code. ValidationSpecification *types.AlgorithmValidationSpecification noSmithyDocumentSerde diff --git a/service/sagemaker/api_op_CreateCodeRepository.go b/service/sagemaker/api_op_CreateCodeRepository.go index a3ba0a06e60..a7e70f65dea 100644 --- a/service/sagemaker/api_op_CreateCodeRepository.go +++ b/service/sagemaker/api_op_CreateCodeRepository.go @@ -11,13 +11,13 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates a Git repository as a resource in your Amazon SageMaker account. You can +// Creates a Git repository as a resource in your SageMaker account. You can // associate the repository with notebook instances so that you can use Git source // control for the notebooks you create. The Git repository is a resource in your -// Amazon SageMaker account, so it can be associated with more than one notebook -// instance, and it persists independently from the lifecycle of any notebook -// instances it is associated with. 
The repository can be hosted either in Amazon -// Web Services CodeCommit +// SageMaker account, so it can be associated with more than one notebook instance, +// and it persists independently from the lifecycle of any notebook instances it is +// associated with. The repository can be hosted either in Amazon Web Services +// CodeCommit // (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or in any // other Git repository. func (c *Client) CreateCodeRepository(ctx context.Context, params *CreateCodeRepositoryInput, optFns ...func(*Options)) (*CreateCodeRepositoryOutput, error) { diff --git a/service/sagemaker/api_op_CreateEndpoint.go b/service/sagemaker/api_op_CreateEndpoint.go index 3ebb3935440..9eb2ce30d3b 100644 --- a/service/sagemaker/api_op_CreateEndpoint.go +++ b/service/sagemaker/api_op_CreateEndpoint.go @@ -12,17 +12,17 @@ import ( ) // Creates an endpoint using the endpoint configuration specified in the request. -// Amazon SageMaker uses the endpoint to provision resources and deploy models. You -// create the endpoint configuration with the CreateEndpointConfig API. Use this -// API to deploy models using Amazon SageMaker hosting services. For an example -// that calls this method when deploying a model to Amazon SageMaker hosting -// services, see the Create Endpoint example notebook. +// SageMaker uses the endpoint to provision resources and deploy models. You create +// the endpoint configuration with the CreateEndpointConfig API. Use this API to +// deploy models using SageMaker hosting services. For an example that calls this +// method when deploying a model to SageMaker hosting services, see the Create +// Endpoint example notebook. 
// (https://github.com/aws/amazon-sagemaker-examples/blob/master/sagemaker-fundamentals/create-endpoint/create_endpoint.ipynb) // You must not delete an EndpointConfig that is in use by an endpoint that is live // or while the UpdateEndpoint or CreateEndpoint operations are being performed on // the endpoint. To update an endpoint, you must create a new EndpointConfig. The // endpoint name must be unique within an Amazon Web Services Region in your Amazon -// Web Services account. When it receives the request, Amazon SageMaker creates the +// Web Services account. When it receives the request, SageMaker creates the // endpoint, launches the resources (ML compute instances), and deploys the // model(s) on them. When you call CreateEndpoint, a load call is made to DynamoDB // to verify that your endpoint configuration exists. When you read data from a @@ -34,18 +34,18 @@ import ( // request after a short time, the response should return the latest data. So retry // logic is recommended to handle these possible issues. We also recommend that // customers call DescribeEndpointConfig before calling CreateEndpoint to minimize -// the potential impact of a DynamoDB eventually consistent read. When Amazon -// SageMaker receives the request, it sets the endpoint status to Creating. After -// it creates the endpoint, it sets the status to InService. Amazon SageMaker can -// then process incoming requests for inferences. To check the status of an -// endpoint, use the DescribeEndpoint API. If any of the models hosted at this -// endpoint get model data from an Amazon S3 location, Amazon SageMaker uses Amazon -// Web Services Security Token Service to download model artifacts from the S3 path -// you provided. Amazon Web Services STS is activated in your IAM user account by -// default. If you previously deactivated Amazon Web Services STS for a region, you -// need to reactivate Amazon Web Services STS for that region. 
For more -// information, see Activating and Deactivating Amazon Web Services STS in an -// Amazon Web Services Region +// the potential impact of a DynamoDB eventually consistent read. When SageMaker +// receives the request, it sets the endpoint status to Creating. After it creates +// the endpoint, it sets the status to InService. SageMaker can then process +// incoming requests for inferences. To check the status of an endpoint, use the +// DescribeEndpoint API. If any of the models hosted at this endpoint get model +// data from an Amazon S3 location, SageMaker uses Amazon Web Services Security +// Token Service to download model artifacts from the S3 path you provided. Amazon +// Web Services STS is activated in your IAM user account by default. If you +// previously deactivated Amazon Web Services STS for a region, you need to +// reactivate Amazon Web Services STS for that region. For more information, see +// Activating and Deactivating Amazon Web Services STS in an Amazon Web Services +// Region // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the Amazon Web Services Identity and Access Management User Guide. To add the // IAM role policies for using this API operation, go to the IAM console diff --git a/service/sagemaker/api_op_CreateEndpointConfig.go b/service/sagemaker/api_op_CreateEndpointConfig.go index fcbd3bd112c..9c5fcd603d8 100644 --- a/service/sagemaker/api_op_CreateEndpointConfig.go +++ b/service/sagemaker/api_op_CreateEndpointConfig.go @@ -11,22 +11,21 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates an endpoint configuration that Amazon SageMaker hosting services uses to -// deploy models. In the configuration, you identify one or more models, created -// using the CreateModel API, to deploy and the resources that you want Amazon -// SageMaker to provision. Then you call the CreateEndpoint API. 
Use this API if -// you want to use Amazon SageMaker hosting services to deploy models into -// production. In the request, you define a ProductionVariant, for each model that -// you want to deploy. Each ProductionVariant parameter also describes the -// resources that you want Amazon SageMaker to provision. This includes the number -// and type of ML compute instances to deploy. If you are hosting multiple models, -// you also assign a VariantWeight to specify how much traffic you want to allocate -// to each model. For example, suppose that you want to host two models, A and B, -// and you assign traffic weight 2 for model A and 1 for model B. Amazon SageMaker -// distributes two-thirds of the traffic to Model A, and one-third to model B. When -// you call CreateEndpoint, a load call is made to DynamoDB to verify that your -// endpoint configuration exists. When you read data from a DynamoDB table -// supporting Eventually Consistent Reads +// Creates an endpoint configuration that SageMaker hosting services uses to deploy +// models. In the configuration, you identify one or more models, created using the +// CreateModel API, to deploy and the resources that you want SageMaker to +// provision. Then you call the CreateEndpoint API. Use this API if you want to use +// SageMaker hosting services to deploy models into production. In the request, you +// define a ProductionVariant, for each model that you want to deploy. Each +// ProductionVariant parameter also describes the resources that you want SageMaker +// to provision. This includes the number and type of ML compute instances to +// deploy. If you are hosting multiple models, you also assign a VariantWeight to +// specify how much traffic you want to allocate to each model. For example, +// suppose that you want to host two models, A and B, and you assign traffic weight +// 2 for model A and 1 for model B. SageMaker distributes two-thirds of the traffic +// to Model A, and one-third to model B. 
When you call CreateEndpoint, a load call +// is made to DynamoDB to verify that your endpoint configuration exists. When you +// read data from a DynamoDB table supporting Eventually Consistent Reads // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html), // the response might not reflect the results of a recently completed write // operation. The response might include some stale data. If the dependent entities @@ -74,8 +73,8 @@ type CreateEndpointConfigInput struct { DataCaptureConfig *types.DataCaptureConfig // The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service - // key that Amazon SageMaker uses to encrypt data on the storage volume attached to - // the ML compute instance that hosts the endpoint. The KmsKeyId can be any of the + // key that SageMaker uses to encrypt data on the storage volume attached to the ML + // compute instance that hosts the endpoint. The KmsKeyId can be any of the // following formats: // // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab diff --git a/service/sagemaker/api_op_CreateHyperParameterTuningJob.go b/service/sagemaker/api_op_CreateHyperParameterTuningJob.go index d3e4904ed84..40750bc93db 100644 --- a/service/sagemaker/api_op_CreateHyperParameterTuningJob.go +++ b/service/sagemaker/api_op_CreateHyperParameterTuningJob.go @@ -88,8 +88,8 @@ type CreateHyperParameterTuningJobInput struct { type CreateHyperParameterTuningJobOutput struct { - // The Amazon Resource Name (ARN) of the tuning job. Amazon SageMaker assigns an - // ARN to a hyperparameter tuning job when you create it. + // The Amazon Resource Name (ARN) of the tuning job. SageMaker assigns an ARN to a + // hyperparameter tuning job when you create it. // // This member is required. 
HyperParameterTuningJobArn *string diff --git a/service/sagemaker/api_op_CreateImage.go b/service/sagemaker/api_op_CreateImage.go index 94392924626..6cfce92b091 100644 --- a/service/sagemaker/api_op_CreateImage.go +++ b/service/sagemaker/api_op_CreateImage.go @@ -12,9 +12,9 @@ import ( ) // Creates a custom SageMaker image. A SageMaker image is a set of image versions. -// Each image version represents a container image stored in Amazon Container -// Registry (ECR). For more information, see Bring your own SageMaker image -// (https://docs.aws.amazon.com/sagemaker/latest/dg/studio-byoi.html). +// Each image version represents a container image stored in Amazon Elastic +// Container Registry (ECR). For more information, see Bring your own SageMaker +// image (https://docs.aws.amazon.com/sagemaker/latest/dg/studio-byoi.html). func (c *Client) CreateImage(ctx context.Context, params *CreateImageInput, optFns ...func(*Options)) (*CreateImageOutput, error) { if params == nil { params = &CreateImageInput{} diff --git a/service/sagemaker/api_op_CreateImageVersion.go b/service/sagemaker/api_op_CreateImageVersion.go index d498c5b356a..33a24f9a355 100644 --- a/service/sagemaker/api_op_CreateImageVersion.go +++ b/service/sagemaker/api_op_CreateImageVersion.go @@ -12,8 +12,8 @@ import ( ) // Creates a version of the SageMaker image specified by ImageName. The version -// represents the Amazon Container Registry (ECR) container image specified by -// BaseImage. +// represents the Amazon Elastic Container Registry (ECR) container image specified +// by BaseImage. 
func (c *Client) CreateImageVersion(ctx context.Context, params *CreateImageVersionInput, optFns ...func(*Options)) (*CreateImageVersionOutput, error) { if params == nil { params = &CreateImageVersionInput{} @@ -32,8 +32,8 @@ func (c *Client) CreateImageVersion(ctx context.Context, params *CreateImageVers type CreateImageVersionInput struct { // The registry path of the container image to use as the starting point for this - // version. The path is an Amazon Container Registry (ECR) URI in the following - // format: .dkr.ecr..amazonaws.com/ + // version. The path is an Amazon Elastic Container Registry (ECR) URI in the + // following format: .dkr.ecr..amazonaws.com/ // // This member is required. BaseImage *string diff --git a/service/sagemaker/api_op_CreateModel.go b/service/sagemaker/api_op_CreateModel.go index 513b7940951..72b7e6cdf17 100644 --- a/service/sagemaker/api_op_CreateModel.go +++ b/service/sagemaker/api_op_CreateModel.go @@ -11,29 +11,27 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates a model in Amazon SageMaker. In the request, you name the model and -// describe a primary container. For the primary container, you specify the Docker -// image that contains inference code, artifacts (from prior training), and a -// custom environment map that the inference code uses when you deploy the model -// for predictions. Use this API to create a model if you want to use Amazon -// SageMaker hosting services or run a batch transform job. To host your model, you -// create an endpoint configuration with the CreateEndpointConfig API, and then -// create an endpoint with the CreateEndpoint API. Amazon SageMaker then deploys -// all of the containers that you defined for the model in the hosting environment. -// For an example that calls this method when deploying a model to Amazon SageMaker -// hosting services, see Deploy the Model to Amazon SageMaker Hosting Services -// (Amazon Web Services SDK for Python (Boto 3)). 
+// Creates a model in SageMaker. In the request, you name the model and describe a +// primary container. For the primary container, you specify the Docker image that +// contains inference code, artifacts (from prior training), and a custom +// environment map that the inference code uses when you deploy the model for +// predictions. Use this API to create a model if you want to use SageMaker hosting +// services or run a batch transform job. To host your model, you create an +// endpoint configuration with the CreateEndpointConfig API, and then create an +// endpoint with the CreateEndpoint API. SageMaker then deploys all of the +// containers that you defined for the model in the hosting environment. For an +// example that calls this method when deploying a model to SageMaker hosting +// services, see Deploy the Model to Amazon SageMaker Hosting Services (Amazon Web +// Services SDK for Python (Boto 3)). // (https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-deploy-model.html#ex1-deploy-model-boto) // To run a batch transform using your model, you start a job with the -// CreateTransformJob API. Amazon SageMaker uses your model and your dataset to get -// inferences which are then saved to a specified S3 location. In the CreateModel -// request, you must define a container with the PrimaryContainer parameter. In the -// request, you also provide an IAM role that Amazon SageMaker can assume to access -// model artifacts and docker image for deployment on ML compute hosting instances -// or for batch transform jobs. In addition, you also use the IAM role to manage -// permissions the inference code needs. For example, if the inference code access -// any other Amazon Web Services resources, you grant necessary permissions via -// this role. +// CreateTransformJob API. SageMaker uses your model and your dataset to get +// inferences which are then saved to a specified S3 location. 
In the request, you +// also provide an IAM role that SageMaker can assume to access model artifacts and +// docker image for deployment on ML compute hosting instances or for batch +// transform jobs. In addition, you also use the IAM role to manage permissions the +// inference code needs. For example, if the inference code access any other Amazon +// Web Services resources, you grant necessary permissions via this role. func (c *Client) CreateModel(ctx context.Context, params *CreateModelInput, optFns ...func(*Options)) (*CreateModelOutput, error) { if params == nil { params = &CreateModelInput{} @@ -51,12 +49,12 @@ func (c *Client) CreateModel(ctx context.Context, params *CreateModelInput, optF type CreateModelInput struct { - // The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker can assume - // to access model artifacts and docker image for deployment on ML compute - // instances or for batch transform jobs. Deploying on ML compute instances is part - // of model hosting. For more information, see Amazon SageMaker Roles + // The Amazon Resource Name (ARN) of the IAM role that SageMaker can assume to + // access model artifacts and docker image for deployment on ML compute instances + // or for batch transform jobs. Deploying on ML compute instances is part of model + // hosting. For more information, see SageMaker Roles // (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html). To be - // able to pass this role to Amazon SageMaker, the caller of this API must have the + // able to pass this role to SageMaker, the caller of this API must have the // iam:PassRole permission. // // This member is required. @@ -102,7 +100,7 @@ type CreateModelInput struct { type CreateModelOutput struct { - // The ARN of the model created in Amazon SageMaker. + // The ARN of the model created in SageMaker. // // This member is required. 
ModelArn *string diff --git a/service/sagemaker/api_op_CreateModelPackage.go b/service/sagemaker/api_op_CreateModelPackage.go index e844afa6774..268275fd804 100644 --- a/service/sagemaker/api_op_CreateModelPackage.go +++ b/service/sagemaker/api_op_CreateModelPackage.go @@ -12,14 +12,14 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates a model package that you can use to create Amazon SageMaker models or -// list on Amazon Web Services Marketplace, or a versioned model that is part of a -// model group. Buyers can subscribe to model packages listed on Amazon Web -// Services Marketplace to create models in Amazon SageMaker. To create a model -// package by specifying a Docker container that contains your inference code and -// the Amazon S3 location of your model artifacts, provide values for -// InferenceSpecification. To create a model from an algorithm resource that you -// created or subscribed to in Amazon Web Services Marketplace, provide a value for +// Creates a model package that you can use to create SageMaker models or list on +// Amazon Web Services Marketplace, or a versioned model that is part of a model +// group. Buyers can subscribe to model packages listed on Amazon Web Services +// Marketplace to create models in SageMaker. To create a model package by +// specifying a Docker container that contains your inference code and the Amazon +// S3 location of your model artifacts, provide values for InferenceSpecification. +// To create a model from an algorithm resource that you created or subscribed to +// in Amazon Web Services Marketplace, provide a value for // SourceAlgorithmSpecification. There are two types of model packages: // // * @@ -129,8 +129,8 @@ type CreateModelPackageInput struct { // learning tasks include object detection and image classification. Task *string - // Specifies configurations for one or more transform jobs that Amazon SageMaker - // runs to test the model package. 
+ // Specifies configurations for one or more transform jobs that SageMaker runs to + // test the model package. ValidationSpecification *types.ModelPackageValidationSpecification noSmithyDocumentSerde diff --git a/service/sagemaker/api_op_CreateNotebookInstance.go b/service/sagemaker/api_op_CreateNotebookInstance.go index 6e9f620dee9..14b41720be3 100644 --- a/service/sagemaker/api_op_CreateNotebookInstance.go +++ b/service/sagemaker/api_op_CreateNotebookInstance.go @@ -11,39 +11,38 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates an Amazon SageMaker notebook instance. A notebook instance is a machine +// Creates an SageMaker notebook instance. A notebook instance is a machine // learning (ML) compute instance running on a Jupyter notebook. In a // CreateNotebookInstance request, specify the type of ML compute instance that you -// want to run. Amazon SageMaker launches the instance, installs common libraries -// that you can use to explore datasets for model training, and attaches an ML -// storage volume to the notebook instance. Amazon SageMaker also provides a set of -// example notebooks. Each notebook demonstrates how to use Amazon SageMaker with a -// specific algorithm or with a machine learning framework. After receiving the -// request, Amazon SageMaker does the following: +// want to run. SageMaker launches the instance, installs common libraries that you +// can use to explore datasets for model training, and attaches an ML storage +// volume to the notebook instance. SageMaker also provides a set of example +// notebooks. Each notebook demonstrates how to use SageMaker with a specific +// algorithm or with a machine learning framework. After receiving the request, +// SageMaker does the following: // -// * Creates a network interface in -// the Amazon SageMaker VPC. +// * Creates a network interface in the SageMaker +// VPC. 
// -// * (Option) If you specified SubnetId, Amazon -// SageMaker creates a network interface in your own VPC, which is inferred from -// the subnet ID that you provide in the input. When creating this network -// interface, Amazon SageMaker attaches the security group that you specified in -// the request to the network interface that it creates in your VPC. +// * (Option) If you specified SubnetId, SageMaker creates a network +// interface in your own VPC, which is inferred from the subnet ID that you provide +// in the input. When creating this network interface, SageMaker attaches the +// security group that you specified in the request to the network interface that +// it creates in your VPC. // -// * Launches an -// EC2 instance of the type specified in the request in the Amazon SageMaker VPC. -// If you specified SubnetId of your VPC, Amazon SageMaker specifies both network -// interfaces when launching this instance. This enables inbound traffic from your -// own VPC to the notebook instance, assuming that the security groups allow -// it. +// * Launches an EC2 instance of the type specified in the +// request in the SageMaker VPC. If you specified SubnetId of your VPC, SageMaker +// specifies both network interfaces when launching this instance. This enables +// inbound traffic from your own VPC to the notebook instance, assuming that the +// security groups allow it. // -// After creating the notebook instance, Amazon SageMaker returns its Amazon -// Resource Name (ARN). You can't change the name of a notebook instance after you -// create it. After Amazon SageMaker creates the notebook instance, you can connect -// to the Jupyter server and work in Jupyter notebooks. For example, you can write -// code to explore a dataset that you can use for model training, train a model, -// host models by creating Amazon SageMaker endpoints, and validate hosted models. 
-// For more information, see How It Works +// After creating the notebook instance, SageMaker +// returns its Amazon Resource Name (ARN). You can't change the name of a notebook +// instance after you create it. After SageMaker creates the notebook instance, you +// can connect to the Jupyter server and work in Jupyter notebooks. For example, +// you can write code to explore a dataset that you can use for model training, +// train a model, host models by creating SageMaker endpoints, and validate hosted +// models. For more information, see How It Works // (https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works.html). func (c *Client) CreateNotebookInstance(ctx context.Context, params *CreateNotebookInstanceInput, optFns ...func(*Options)) (*CreateNotebookInstanceOutput, error) { if params == nil { @@ -73,13 +72,12 @@ type CreateNotebookInstanceInput struct { NotebookInstanceName *string // When you send any requests to Amazon Web Services resources from the notebook - // instance, Amazon SageMaker assumes this role to perform tasks on your behalf. - // You must grant this role necessary permissions so Amazon SageMaker can perform - // these tasks. The policy must allow the Amazon SageMaker service principal - // (sagemaker.amazonaws.com) permissions to assume this role. For more information, - // see Amazon SageMaker Roles + // instance, SageMaker assumes this role to perform tasks on your behalf. You must + // grant this role necessary permissions so SageMaker can perform these tasks. The + // policy must allow the SageMaker service principal (sagemaker.amazonaws.com) + // permissions to assume this role. For more information, see SageMaker Roles // (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html). To be - // able to pass this role to Amazon SageMaker, the caller of this API must have the + // able to pass this role to SageMaker, the caller of this API must have the // iam:PassRole permission. // // This member is required. 
@@ -98,7 +96,7 @@ type CreateNotebookInstanceInput struct { // (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or in any // other Git repository. These repositories are cloned at the same level as the // default repository of your notebook instance. For more information, see - // Associating Git Repositories with Amazon SageMaker Notebook Instances + // Associating Git Repositories with SageMaker Notebook Instances // (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). AdditionalCodeRepositories []string @@ -109,24 +107,24 @@ type CreateNotebookInstanceInput struct { // (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or in any // other Git repository. When you open a notebook instance, it opens in the // directory that contains this repository. For more information, see Associating - // Git Repositories with Amazon SageMaker Notebook Instances + // Git Repositories with SageMaker Notebook Instances // (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). DefaultCodeRepository *string - // Sets whether Amazon SageMaker provides internet access to the notebook instance. - // If you set this to Disabled this notebook instance is able to access resources - // only in your VPC, and is not be able to connect to Amazon SageMaker training and - // endpoint services unless you configure a NAT Gateway in your VPC. For more - // information, see Notebook Instances Are Internet-Enabled by Default + // Sets whether SageMaker provides internet access to the notebook instance. If you + // set this to Disabled this notebook instance is able to access resources only in + // your VPC, and is not be able to connect to SageMaker training and endpoint + // services unless you configure a NAT Gateway in your VPC. 
For more information, + // see Notebook Instances Are Internet-Enabled by Default // (https://docs.aws.amazon.com/sagemaker/latest/dg/appendix-additional-considerations.html#appendix-notebook-and-internet-access). // You can set the value of this parameter to Disabled only if you set a value for // the SubnetId parameter. DirectInternetAccess types.DirectInternetAccess // The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service - // key that Amazon SageMaker uses to encrypt data on the storage volume attached to - // your notebook instance. The KMS key you provide must be enabled. For - // information, see Enabling and Disabling Keys + // key that SageMaker uses to encrypt data on the storage volume attached to your + // notebook instance. The KMS key you provide must be enabled. For information, see + // Enabling and Disabling Keys // (https://docs.aws.amazon.com/kms/latest/developerguide/enabling-keys.html) in // the Amazon Web Services Key Management Service Developer Guide. KmsKeyId *string diff --git a/service/sagemaker/api_op_CreatePresignedNotebookInstanceUrl.go b/service/sagemaker/api_op_CreatePresignedNotebookInstanceUrl.go index 9138f35299b..8320247a105 100644 --- a/service/sagemaker/api_op_CreatePresignedNotebookInstanceUrl.go +++ b/service/sagemaker/api_op_CreatePresignedNotebookInstanceUrl.go @@ -11,19 +11,19 @@ import ( ) // Returns a URL that you can use to connect to the Jupyter server from a notebook -// instance. In the Amazon SageMaker console, when you choose Open next to a -// notebook instance, Amazon SageMaker opens a new tab showing the Jupyter server -// home page from the notebook instance. The console uses this API to get the URL -// and show the page. The IAM role or user used to call this API defines the -// permissions to access the notebook instance. Once the presigned URL is created, -// no additional permission is required to access this URL. 
IAM authorization -// policies for this API are also enforced for every HTTP request and WebSocket -// frame that attempts to connect to the notebook instance. You can restrict access -// to this API and to the URL that it returns to a list of IP addresses that you -// specify. Use the NotIpAddress condition operator and the aws:SourceIP condition -// context key to specify the list of IP addresses that you want to have access to -// the notebook instance. For more information, see Limit Access to a Notebook -// Instance by IP Address +// instance. In the SageMaker console, when you choose Open next to a notebook +// instance, SageMaker opens a new tab showing the Jupyter server home page from +// the notebook instance. The console uses this API to get the URL and show the +// page. The IAM role or user used to call this API defines the permissions to +// access the notebook instance. Once the presigned URL is created, no additional +// permission is required to access this URL. IAM authorization policies for this +// API are also enforced for every HTTP request and WebSocket frame that attempts +// to connect to the notebook instance. You can restrict access to this API and to +// the URL that it returns to a list of IP addresses that you specify. Use the +// NotIpAddress condition operator and the aws:SourceIP condition context key to +// specify the list of IP addresses that you want to have access to the notebook +// instance. For more information, see Limit Access to a Notebook Instance by IP +// Address // (https://docs.aws.amazon.com/sagemaker/latest/dg/security_iam_id-based-policy-examples.html#nbi-ip-filter). // The URL that you get from a call to CreatePresignedNotebookInstanceUrl is valid // only for 5 minutes. 
If you try to use the URL after the 5-minute limit expires, diff --git a/service/sagemaker/api_op_CreateTrainingJob.go b/service/sagemaker/api_op_CreateTrainingJob.go index 5f81375ee2d..093b1f6a0ea 100644 --- a/service/sagemaker/api_op_CreateTrainingJob.go +++ b/service/sagemaker/api_op_CreateTrainingJob.go @@ -11,21 +11,21 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Starts a model training job. After training completes, Amazon SageMaker saves -// the resulting model artifacts to an Amazon S3 location that you specify. If you -// choose to host your model using Amazon SageMaker hosting services, you can use -// the resulting model artifacts as part of the model. You can also use the -// artifacts in a machine learning service other than Amazon SageMaker, provided -// that you know how to use them for inference. In the request body, you provide -// the following: +// Starts a model training job. After training completes, SageMaker saves the +// resulting model artifacts to an Amazon S3 location that you specify. If you +// choose to host your model using SageMaker hosting services, you can use the +// resulting model artifacts as part of the model. You can also use the artifacts +// in a machine learning service other than SageMaker, provided that you know how +// to use them for inference. In the request body, you provide the following: // -// * AlgorithmSpecification - Identifies the training algorithm to -// use. +// * +// AlgorithmSpecification - Identifies the training algorithm to use. // -// * HyperParameters - Specify these algorithm-specific parameters to enable -// the estimation of model parameters during training. Hyperparameters can be tuned -// to optimize this learning process. For a list of hyperparameters for each -// training algorithm provided by Amazon SageMaker, see Algorithms +// * +// HyperParameters - Specify these algorithm-specific parameters to enable the +// estimation of model parameters during training. 
Hyperparameters can be tuned to +// optimize this learning process. For a list of hyperparameters for each training +// algorithm provided by SageMaker, see Algorithms // (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). // // * InputDataConfig @@ -33,7 +33,7 @@ import ( // it is stored. // // * OutputDataConfig - Identifies the Amazon S3 bucket where you -// want Amazon SageMaker to save the results of model training. +// want SageMaker to save the results of model training. // // * ResourceConfig - // Identifies the resources, ML compute instances, and ML storage volumes to deploy @@ -46,24 +46,24 @@ import ( // (https://docs.aws.amazon.com/sagemaker/latest/dg/model-managed-spot-training.html). // // * -// RoleArn - The Amazon Resource Name (ARN) that Amazon SageMaker assumes to -// perform tasks on your behalf during model training. You must grant this role the -// necessary permissions so that Amazon SageMaker can successfully complete model -// training. +// RoleArn - The Amazon Resource Name (ARN) that SageMaker assumes to perform tasks +// on your behalf during model training. You must grant this role the necessary +// permissions so that SageMaker can successfully complete model training. // -// * StoppingCondition - To help cap training costs, use -// MaxRuntimeInSeconds to set a time limit for training. Use MaxWaitTimeInSeconds -// to specify how long a managed spot training job has to complete. +// * +// StoppingCondition - To help cap training costs, use MaxRuntimeInSeconds to set a +// time limit for training. Use MaxWaitTimeInSeconds to specify how long a managed +// spot training job has to complete. // -// * Environment -// - The environment variables to set in the Docker container. +// * Environment - The environment variables to +// set in the Docker container. // -// * RetryStrategy - -// The number of times to retry the job when the job fails due to an -// InternalServerError. 
+// * RetryStrategy - The number of times to retry the +// job when the job fails due to an InternalServerError. // -// For more information about Amazon SageMaker, see How It -// Works (https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works.html). +// For more information +// about SageMaker, see How It Works +// (https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works.html). func (c *Client) CreateTrainingJob(ctx context.Context, params *CreateTrainingJobInput, optFns ...func(*Options)) (*CreateTrainingJobOutput, error) { if params == nil { params = &CreateTrainingJobInput{} @@ -83,7 +83,7 @@ type CreateTrainingJobInput struct { // The registry path of the Docker image that contains the training algorithm and // algorithm-specific metadata, including the input mode. For more information - // about algorithms provided by Amazon SageMaker, see Algorithms + // about algorithms provided by SageMaker, see Algorithms // (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). For information // about providing your own algorithms, see Using Your Own Algorithms with Amazon // SageMaker @@ -93,7 +93,7 @@ type CreateTrainingJobInput struct { AlgorithmSpecification *types.AlgorithmSpecification // Specifies the path to the S3 location where you want to store model artifacts. - // Amazon SageMaker creates subfolders for the artifacts. + // SageMaker creates subfolders for the artifacts. // // This member is required. OutputDataConfig *types.OutputDataConfig @@ -101,22 +101,22 @@ type CreateTrainingJobInput struct { // The resources, including the ML compute instances and ML storage volumes, to use // for model training. ML storage volumes store model artifacts and incremental // states. Training algorithms might also use ML storage volumes for scratch space. - // If you want Amazon SageMaker to use the ML storage volume to store the training - // data, choose File as the TrainingInputMode in the algorithm specification. 
For + // If you want SageMaker to use the ML storage volume to store the training data, + // choose File as the TrainingInputMode in the algorithm specification. For // distributed training algorithms, specify an instance count greater than 1. // // This member is required. ResourceConfig *types.ResourceConfig - // The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume - // to perform tasks on your behalf. During model training, Amazon SageMaker needs - // your permission to read input data from an S3 bucket, download a Docker image - // that contains training code, write model artifacts to an S3 bucket, write logs - // to Amazon CloudWatch Logs, and publish metrics to Amazon CloudWatch. You grant + // The Amazon Resource Name (ARN) of an IAM role that SageMaker can assume to + // perform tasks on your behalf. During model training, SageMaker needs your + // permission to read input data from an S3 bucket, download a Docker image that + // contains training code, write model artifacts to an S3 bucket, write logs to + // Amazon CloudWatch Logs, and publish metrics to Amazon CloudWatch. You grant // permissions for all of these tasks to an IAM role. For more information, see - // Amazon SageMaker Roles + // SageMaker Roles // (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html). To be - // able to pass this role to Amazon SageMaker, the caller of this API must have the + // able to pass this role to SageMaker, the caller of this API must have the // iam:PassRole permission. // // This member is required. @@ -124,11 +124,10 @@ type CreateTrainingJobInput struct { // Specifies a limit to how long a model training job can run. It also specifies // how long a managed Spot training job has to complete. When the job reaches the - // time limit, Amazon SageMaker ends the training job. Use this API to cap model - // training costs. 
To stop a job, Amazon SageMaker sends the algorithm the SIGTERM - // signal, which delays job termination for 120 seconds. Algorithms can use this - // 120-second window to save the model artifacts, so the results of training are - // not lost. + // time limit, SageMaker ends the training job. Use this API to cap model training + // costs. To stop a job, SageMaker sends the algorithm the SIGTERM signal, which + // delays job termination for 120 seconds. Algorithms can use this 120-second + // window to save the model artifacts, so the results of training are not lost. // // This member is required. StoppingCondition *types.StoppingCondition @@ -175,9 +174,9 @@ type CreateTrainingJobInput struct { // Isolates the training container. No inbound or outbound network calls can be // made, except for calls between peers within a training cluster for distributed // training. If you enable network isolation for training jobs that are configured - // to use a VPC, Amazon SageMaker downloads and uploads customer data and model - // artifacts through the specified VPC, but the training container does not have - // network access. + // to use a VPC, SageMaker downloads and uploads customer data and model artifacts + // through the specified VPC, but the training container does not have network + // access. EnableNetworkIsolation bool // The environment variables to set in the Docker container. @@ -196,7 +195,7 @@ type CreateTrainingJobInput struct { // Algorithm-specific parameters that influence the quality of the model. You set // hyperparameters before you start the learning process. For a list of - // hyperparameters for each training algorithm provided by Amazon SageMaker, see + // hyperparameters for each training algorithm provided by SageMaker, see // Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). You can // specify a maximum of 100 hyperparameters. Each hyperparameter is a key-value // pair. 
Each key and value is limited to 256 characters, as specified by the @@ -210,11 +209,11 @@ type CreateTrainingJobInput struct { // each channel provides the S3, EFS, or FSx location where the input data is // stored. It also provides information about the stored data: the MIME type, // compression method, and whether the data is wrapped in RecordIO format. - // Depending on the input mode that the algorithm supports, Amazon SageMaker either - // copies input data files from an S3 bucket to a local directory in the Docker - // container, or makes it available as input streams. For example, if you specify - // an EFS location, input data files will be made available as input streams. They - // do not need to be downloaded. + // Depending on the input mode that the algorithm supports, SageMaker either copies + // input data files from an S3 bucket to a local directory in the Docker container, + // or makes it available as input streams. For example, if you specify an EFS + // location, input data files are available as input streams. They do not need to + // be downloaded. InputDataConfig []types.Channel // Configuration information for Debugger system monitoring, framework profiling, diff --git a/service/sagemaker/api_op_CreateTransformJob.go b/service/sagemaker/api_op_CreateTransformJob.go index cfa1cda9bf6..63578254d56 100644 --- a/service/sagemaker/api_op_CreateTransformJob.go +++ b/service/sagemaker/api_op_CreateTransformJob.go @@ -136,10 +136,13 @@ type CreateTransformJobInput struct { // or equal to, the size of a single record. To estimate the size of a record in // MB, divide the size of your dataset by the number of records. To ensure that the // records fit within the maximum payload size, we recommend using a slightly - // larger value. The default value is 6 MB. For cases where the payload might be - // arbitrarily large and is transmitted using HTTP chunked encoding, set the value - // to 0. This feature works only in supported algorithms. 
Currently, Amazon - // SageMaker built-in algorithms do not support HTTP chunked encoding. + // larger value. The default value is 6 MB. The value of MaxPayloadInMB cannot be + // greater than 100 MB. If you specify the MaxConcurrentTransforms parameter, the + // value of (MaxConcurrentTransforms * MaxPayloadInMB) also cannot exceed 100 MB. + // For cases where the payload might be arbitrarily large and is transmitted using + // HTTP chunked encoding, set the value to 0. This feature works only in supported + // algorithms. Currently, Amazon SageMaker built-in algorithms do not support HTTP + // chunked encoding. MaxPayloadInMB *int32 // Configures the timeout and maximum number of retries for processing a transform diff --git a/service/sagemaker/api_op_DeleteEndpoint.go b/service/sagemaker/api_op_DeleteEndpoint.go index 6fe079ded28..2dd8d904477 100644 --- a/service/sagemaker/api_op_DeleteEndpoint.go +++ b/service/sagemaker/api_op_DeleteEndpoint.go @@ -10,12 +10,16 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes an endpoint. Amazon SageMaker frees up all of the resources that were -// deployed when the endpoint was created. Amazon SageMaker retires any custom KMS -// key grants associated with the endpoint, meaning you don't need to use the -// RevokeGrant +// Deletes an endpoint. SageMaker frees up all of the resources that were deployed +// when the endpoint was created. SageMaker retires any custom KMS key grants +// associated with the endpoint, meaning you don't need to use the RevokeGrant // (http://docs.aws.amazon.com/kms/latest/APIReference/API_RevokeGrant.html) API -// call. +// call. When you delete your endpoint, SageMaker asynchronously deletes associated +// endpoint resources such as KMS key grants. You might still see these resources +// in your account for a few minutes after deleting your endpoint. 
Do not delete or +// revoke the permissions for your ExecutionRoleArn +// (https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateModel.html#sagemaker-CreateModel-request-ExecutionRoleArn), +// otherwise SageMaker cannot delete these resources. func (c *Client) DeleteEndpoint(ctx context.Context, params *DeleteEndpointInput, optFns ...func(*Options)) (*DeleteEndpointOutput, error) { if params == nil { params = &DeleteEndpointInput{} diff --git a/service/sagemaker/api_op_DeleteModel.go b/service/sagemaker/api_op_DeleteModel.go index d8edaca9b7b..16fb536697e 100644 --- a/service/sagemaker/api_op_DeleteModel.go +++ b/service/sagemaker/api_op_DeleteModel.go @@ -11,8 +11,8 @@ import ( ) // Deletes a model. The DeleteModel API deletes only the model entry that was -// created in Amazon SageMaker when you called the CreateModel API. It does not -// delete model artifacts, inference code, or the IAM role that you specified when +// created in SageMaker when you called the CreateModel API. It does not delete +// model artifacts, inference code, or the IAM role that you specified when // creating the model. func (c *Client) DeleteModel(ctx context.Context, params *DeleteModelInput, optFns ...func(*Options)) (*DeleteModelOutput, error) { if params == nil { diff --git a/service/sagemaker/api_op_DeleteModelPackage.go b/service/sagemaker/api_op_DeleteModelPackage.go index a1d1ed6bcb6..a95d8d2e618 100644 --- a/service/sagemaker/api_op_DeleteModelPackage.go +++ b/service/sagemaker/api_op_DeleteModelPackage.go @@ -10,10 +10,9 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes a model package. A model package is used to create Amazon SageMaker -// models or list on Amazon Web Services Marketplace. Buyers can subscribe to model -// packages listed on Amazon Web Services Marketplace to create models in Amazon -// SageMaker. +// Deletes a model package. 
A model package is used to create SageMaker models or +// list on Amazon Web Services Marketplace. Buyers can subscribe to model packages +// listed on Amazon Web Services Marketplace to create models in SageMaker. func (c *Client) DeleteModelPackage(ctx context.Context, params *DeleteModelPackageInput, optFns ...func(*Options)) (*DeleteModelPackageOutput, error) { if params == nil { params = &DeleteModelPackageInput{} diff --git a/service/sagemaker/api_op_DeleteNotebookInstance.go b/service/sagemaker/api_op_DeleteNotebookInstance.go index 46429f3a631..44aee406e23 100644 --- a/service/sagemaker/api_op_DeleteNotebookInstance.go +++ b/service/sagemaker/api_op_DeleteNotebookInstance.go @@ -10,11 +10,11 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes an Amazon SageMaker notebook instance. Before you can delete a notebook +// Deletes an SageMaker notebook instance. Before you can delete a notebook // instance, you must call the StopNotebookInstance API. When you delete a notebook -// instance, you lose all of your data. Amazon SageMaker removes the ML compute -// instance, and deletes the ML storage volume and the network interface associated -// with the notebook instance. +// instance, you lose all of your data. SageMaker removes the ML compute instance, +// and deletes the ML storage volume and the network interface associated with the +// notebook instance. func (c *Client) DeleteNotebookInstance(ctx context.Context, params *DeleteNotebookInstanceInput, optFns ...func(*Options)) (*DeleteNotebookInstanceOutput, error) { if params == nil { params = &DeleteNotebookInstanceInput{} @@ -32,7 +32,7 @@ func (c *Client) DeleteNotebookInstance(ctx context.Context, params *DeleteNoteb type DeleteNotebookInstanceInput struct { - // The name of the Amazon SageMaker notebook instance to delete. + // The name of the SageMaker notebook instance to delete. // // This member is required. 
NotebookInstanceName *string diff --git a/service/sagemaker/api_op_DeleteTags.go b/service/sagemaker/api_op_DeleteTags.go index 2a7feb7d4a8..8f6fb769c40 100644 --- a/service/sagemaker/api_op_DeleteTags.go +++ b/service/sagemaker/api_op_DeleteTags.go @@ -10,13 +10,13 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes the specified tags from an Amazon SageMaker resource. To list a -// resource's tags, use the ListTags API. When you call this API to delete tags -// from a hyperparameter tuning job, the deleted tags are not removed from training -// jobs that the hyperparameter tuning job launched before you called this API. -// When you call this API to delete tags from a SageMaker Studio Domain or User -// Profile, the deleted tags are not removed from Apps that the SageMaker Studio -// Domain or User Profile launched before you called this API. +// Deletes the specified tags from an SageMaker resource. To list a resource's +// tags, use the ListTags API. When you call this API to delete tags from a +// hyperparameter tuning job, the deleted tags are not removed from training jobs +// that the hyperparameter tuning job launched before you called this API. When you +// call this API to delete tags from a SageMaker Studio Domain or User Profile, the +// deleted tags are not removed from Apps that the SageMaker Studio Domain or User +// Profile launched before you called this API. func (c *Client) DeleteTags(ctx context.Context, params *DeleteTagsInput, optFns ...func(*Options)) (*DeleteTagsOutput, error) { if params == nil { params = &DeleteTagsInput{} diff --git a/service/sagemaker/api_op_DescribeAlgorithm.go b/service/sagemaker/api_op_DescribeAlgorithm.go index d364c7dd28c..153fc9c6307 100644 --- a/service/sagemaker/api_op_DescribeAlgorithm.go +++ b/service/sagemaker/api_op_DescribeAlgorithm.go @@ -83,8 +83,8 @@ type DescribeAlgorithmOutput struct { // The product identifier of the algorithm. 
ProductId *string - // Details about configurations for one or more training jobs that Amazon SageMaker - // runs to test the algorithm. + // Details about configurations for one or more training jobs that SageMaker runs + // to test the algorithm. ValidationSpecification *types.AlgorithmValidationSpecification // Metadata pertaining to the operation's result. diff --git a/service/sagemaker/api_op_DescribeEndpointConfig.go b/service/sagemaker/api_op_DescribeEndpointConfig.go index 8d0003249d8..741f39e177f 100644 --- a/service/sagemaker/api_op_DescribeEndpointConfig.go +++ b/service/sagemaker/api_op_DescribeEndpointConfig.go @@ -51,7 +51,7 @@ type DescribeEndpointConfigOutput struct { // This member is required. EndpointConfigArn *string - // Name of the Amazon SageMaker endpoint configuration. + // Name of the SageMaker endpoint configuration. // // This member is required. EndpointConfigName *string diff --git a/service/sagemaker/api_op_DescribeLabelingJob.go b/service/sagemaker/api_op_DescribeLabelingJob.go index 5d442e03abe..d531fb6d924 100644 --- a/service/sagemaker/api_op_DescribeLabelingJob.go +++ b/service/sagemaker/api_op_DescribeLabelingJob.go @@ -96,8 +96,8 @@ type DescribeLabelingJobOutput struct { // This member is required. OutputConfig *types.LabelingJobOutputConfig - // The Amazon Resource Name (ARN) that Amazon SageMaker assumes to perform tasks on - // your behalf during data labeling. + // The Amazon Resource Name (ARN) that SageMaker assumes to perform tasks on your + // behalf during data labeling. // // This member is required. RoleArn *string diff --git a/service/sagemaker/api_op_DescribeModel.go b/service/sagemaker/api_op_DescribeModel.go index 6628343df25..c1b1eb93605 100644 --- a/service/sagemaker/api_op_DescribeModel.go +++ b/service/sagemaker/api_op_DescribeModel.go @@ -55,7 +55,7 @@ type DescribeModelOutput struct { // This member is required. ModelArn *string - // Name of the Amazon SageMaker model. + // Name of the SageMaker model. 
// // This member is required. ModelName *string diff --git a/service/sagemaker/api_op_DescribeModelPackage.go b/service/sagemaker/api_op_DescribeModelPackage.go index d98e3bf5087..4ce0d7e8108 100644 --- a/service/sagemaker/api_op_DescribeModelPackage.go +++ b/service/sagemaker/api_op_DescribeModelPackage.go @@ -109,7 +109,7 @@ type DescribeModelPackageOutput struct { // component, lineage group, or project. LastModifiedBy *types.UserContext - // The last time the model package was modified. + // The last time that the model package was modified. LastModifiedTime *time.Time // Metadata properties of the tracking entity, trial, or trial component. diff --git a/service/sagemaker/api_op_DescribeModelPackageGroup.go b/service/sagemaker/api_op_DescribeModelPackageGroup.go index 1456a06b45f..ac13993b068 100644 --- a/service/sagemaker/api_op_DescribeModelPackageGroup.go +++ b/service/sagemaker/api_op_DescribeModelPackageGroup.go @@ -30,7 +30,7 @@ func (c *Client) DescribeModelPackageGroup(ctx context.Context, params *Describe type DescribeModelPackageGroupInput struct { - // The name of the model group to describe. + // The name of the model group to describe. // // This member is required. ModelPackageGroupName *string diff --git a/service/sagemaker/api_op_DescribeNotebookInstance.go b/service/sagemaker/api_op_DescribeNotebookInstance.go index 30b7281a511..a80ea030ac9 100644 --- a/service/sagemaker/api_op_DescribeNotebookInstance.go +++ b/service/sagemaker/api_op_DescribeNotebookInstance.go @@ -58,7 +58,7 @@ type DescribeNotebookInstanceOutput struct { // (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or in any // other Git repository. These repositories are cloned at the same level as the // default repository of your notebook instance. 
For more information, see - // Associating Git Repositories with Amazon SageMaker Notebook Instances + // Associating Git Repositories with SageMaker Notebook Instances // (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). AdditionalCodeRepositories []string @@ -73,15 +73,14 @@ type DescribeNotebookInstanceOutput struct { // (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or in any // other Git repository. When you open a notebook instance, it opens in the // directory that contains this repository. For more information, see Associating - // Git Repositories with Amazon SageMaker Notebook Instances + // Git Repositories with SageMaker Notebook Instances // (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). DefaultCodeRepository *string - // Describes whether Amazon SageMaker provides internet access to the notebook - // instance. If this value is set to Disabled, the notebook instance does not have - // internet access, and cannot connect to Amazon SageMaker training and endpoint - // services. For more information, see Notebook Instances Are Internet-Enabled by - // Default + // Describes whether SageMaker provides internet access to the notebook instance. + // If this value is set to Disabled, the notebook instance does not have internet + // access, and cannot connect to SageMaker training and endpoint services. For more + // information, see Notebook Instances Are Internet-Enabled by Default // (https://docs.aws.amazon.com/sagemaker/latest/dg/appendix-additional-considerations.html#appendix-notebook-and-internet-access). DirectInternetAccess types.DirectInternetAccess @@ -91,16 +90,16 @@ type DescribeNotebookInstanceOutput struct { // The type of ML compute instance running on the notebook instance. InstanceType types.InstanceType - // The Amazon Web Services KMS key ID Amazon SageMaker uses to encrypt data when - // storing it on the ML storage volume attached to the instance. 
+ // The Amazon Web Services KMS key ID SageMaker uses to encrypt data when storing + // it on the ML storage volume attached to the instance. KmsKeyId *string // A timestamp. Use this parameter to retrieve the time when the notebook instance // was last modified. LastModifiedTime *time.Time - // The network interface IDs that Amazon SageMaker created at the time of creating - // the instance. + // The network interface IDs that SageMaker created at the time of creating the + // instance. NetworkInterfaceId *string // The Amazon Resource Name (ARN) of the notebook instance. @@ -112,7 +111,7 @@ type DescribeNotebookInstanceOutput struct { // (https://docs.aws.amazon.com/sagemaker/latest/dg/notebook-lifecycle-config.html) NotebookInstanceLifecycleConfigName *string - // The name of the Amazon SageMaker notebook instance. + // The name of the SageMaker notebook instance. NotebookInstanceName *string // The status of the notebook instance. diff --git a/service/sagemaker/api_op_DescribeTrainingJob.go b/service/sagemaker/api_op_DescribeTrainingJob.go index ce552a125b7..88189469132 100644 --- a/service/sagemaker/api_op_DescribeTrainingJob.go +++ b/service/sagemaker/api_op_DescribeTrainingJob.go @@ -74,11 +74,11 @@ type DescribeTrainingJobOutput struct { // Provides detailed information about the state of the training job. For detailed // information on the secondary status of the training job, see StatusMessage under - // SecondaryStatusTransition. Amazon SageMaker provides primary statuses and - // secondary statuses that apply to each of them: InProgress + // SecondaryStatusTransition. SageMaker provides primary statuses and secondary + // statuses that apply to each of them: InProgress // - // * Starting - Starting - // the training job. + // * Starting - Starting the + // training job. // // * Downloading - An optional stage for algorithms that support // File training input mode. 
It indicates that data is being downloaded to the ML @@ -135,11 +135,10 @@ type DescribeTrainingJobOutput struct { // Specifies a limit to how long a model training job can run. It also specifies // how long a managed Spot training job has to complete. When the job reaches the - // time limit, Amazon SageMaker ends the training job. Use this API to cap model - // training costs. To stop a job, Amazon SageMaker sends the algorithm the SIGTERM - // signal, which delays job termination for 120 seconds. Algorithms can use this - // 120-second window to save the model artifacts, so the results of training are - // not lost. + // time limit, SageMaker ends the training job. Use this API to cap model training + // costs. To stop a job, SageMaker sends the algorithm the SIGTERM signal, which + // delays job termination for 120 seconds. Algorithms can use this 120-second + // window to save the model artifacts, so the results of training are not lost. // // This member is required. StoppingCondition *types.StoppingCondition @@ -154,8 +153,8 @@ type DescribeTrainingJobOutput struct { // This member is required. TrainingJobName *string - // The status of the training job. Amazon SageMaker provides the following training - // job statuses: + // The status of the training job. SageMaker provides the following training job + // statuses: // // * InProgress - The training is in progress. // @@ -182,12 +181,12 @@ type DescribeTrainingJobOutput struct { // The billable time in seconds. Billable time refers to the absolute wall-clock // time. Multiply BillableTimeInSeconds by the number of instances (InstanceCount) - // in your training cluster to get the total compute time SageMaker will bill you - // if you run distributed training. The formula is as follows: - // BillableTimeInSeconds * InstanceCount . You can calculate the savings from using - // managed spot training using the formula (1 - BillableTimeInSeconds / - // TrainingTimeInSeconds) * 100. 
For example, if BillableTimeInSeconds is 100 and - // TrainingTimeInSeconds is 500, the savings is 80%. + // in your training cluster to get the total compute time SageMaker bills you if + // you run distributed training. The formula is as follows: BillableTimeInSeconds * + // InstanceCount . You can calculate the savings from using managed spot training + // using the formula (1 - BillableTimeInSeconds / TrainingTimeInSeconds) * 100. For + // example, if BillableTimeInSeconds is 100 and TrainingTimeInSeconds is 500, the + // savings is 80%. BillableTimeInSeconds *int32 // Contains information about the output location for managed spot training @@ -221,8 +220,8 @@ type DescribeTrainingJobOutput struct { // If you want to allow inbound or outbound network calls, except for calls between // peers within a training cluster for distributed training, choose True. If you // enable network isolation for training jobs that are configured to use a VPC, - // Amazon SageMaker downloads and uploads customer data and model artifacts through - // the specified VPC, but the training container does not have network access. + // SageMaker downloads and uploads customer data and model artifacts through the + // specified VPC, but the training container does not have network access. EnableNetworkIsolation bool // The environment variables to set in the Docker container. @@ -252,8 +251,8 @@ type DescribeTrainingJobOutput struct { // An array of Channel objects that describes each data input channel. InputDataConfig []types.Channel - // The Amazon Resource Name (ARN) of the Amazon SageMaker Ground Truth labeling job - // that created the transform or training job. + // The Amazon Resource Name (ARN) of the SageMaker Ground Truth labeling job that + // created the transform or training job. 
LabelingJobArn *string // A timestamp that indicates when the status of the training job was last @@ -261,7 +260,7 @@ type DescribeTrainingJobOutput struct { LastModifiedTime *time.Time // The S3 path where model artifacts that you configured when creating the job are - // stored. Amazon SageMaker creates subfolders for model artifacts. + // stored. SageMaker creates subfolders for model artifacts. OutputDataConfig *types.OutputDataConfig // Configuration information for Debugger system monitoring, framework profiling, @@ -296,8 +295,8 @@ type DescribeTrainingJobOutput struct { // Indicates the time when the training job ends on training instances. You are // billed for the time interval between the value of TrainingStartTime and this // time. For successful jobs and stopped jobs, this is the time after model - // artifacts are uploaded. For failed jobs, this is the time when Amazon SageMaker - // detects a job failure. + // artifacts are uploaded. For failed jobs, this is the time when SageMaker detects + // a job failure. TrainingEndTime *time.Time // Indicates the time when the training job starts on training instances. You are diff --git a/service/sagemaker/api_op_ListAlgorithms.go b/service/sagemaker/api_op_ListAlgorithms.go index af9e58636d8..7596dbc591b 100644 --- a/service/sagemaker/api_op_ListAlgorithms.go +++ b/service/sagemaker/api_op_ListAlgorithms.go @@ -67,8 +67,8 @@ type ListAlgorithmsOutput struct { // This member is required. AlgorithmSummaryList []types.AlgorithmSummary - // If the response is truncated, Amazon SageMaker returns this token. To retrieve - // the next set of algorithms, use it in the subsequent request. + // If the response is truncated, SageMaker returns this token. To retrieve the next + // set of algorithms, use it in the subsequent request. NextToken *string // Metadata pertaining to the operation's result. 
diff --git a/service/sagemaker/api_op_ListEndpointConfigs.go b/service/sagemaker/api_op_ListEndpointConfigs.go index 25337a66495..dbd0a738dc3 100644 --- a/service/sagemaker/api_op_ListEndpointConfigs.go +++ b/service/sagemaker/api_op_ListEndpointConfigs.go @@ -67,8 +67,8 @@ type ListEndpointConfigsOutput struct { // This member is required. EndpointConfigs []types.EndpointConfigSummary - // If the response is truncated, Amazon SageMaker returns this token. To retrieve - // the next set of endpoint configurations, use it in the subsequent request + // If the response is truncated, SageMaker returns this token. To retrieve the next + // set of endpoint configurations, use it in the subsequent request NextToken *string // Metadata pertaining to the operation's result. diff --git a/service/sagemaker/api_op_ListEndpoints.go b/service/sagemaker/api_op_ListEndpoints.go index e8be21ba40f..9d3c51a6d29 100644 --- a/service/sagemaker/api_op_ListEndpoints.go +++ b/service/sagemaker/api_op_ListEndpoints.go @@ -79,8 +79,8 @@ type ListEndpointsOutput struct { // This member is required. Endpoints []types.EndpointSummary - // If the response is truncated, Amazon SageMaker returns this token. To retrieve - // the next set of training jobs, use it in the subsequent request. + // If the response is truncated, SageMaker returns this token. To retrieve the next + // set of training jobs, use it in the subsequent request. NextToken *string // Metadata pertaining to the operation's result. diff --git a/service/sagemaker/api_op_ListLabelingJobs.go b/service/sagemaker/api_op_ListLabelingJobs.go index a205f5a3526..318ec207a64 100644 --- a/service/sagemaker/api_op_ListLabelingJobs.go +++ b/service/sagemaker/api_op_ListLabelingJobs.go @@ -76,8 +76,8 @@ type ListLabelingJobsOutput struct { // An array of LabelingJobSummary objects, each describing a labeling job. LabelingJobSummaryList []types.LabelingJobSummary - // If the response is truncated, Amazon SageMaker returns this token. 
To retrieve - // the next set of labeling jobs, use it in the subsequent request. + // If the response is truncated, SageMaker returns this token. To retrieve the next + // set of labeling jobs, use it in the subsequent request. NextToken *string // Metadata pertaining to the operation's result. diff --git a/service/sagemaker/api_op_ListLabelingJobsForWorkteam.go b/service/sagemaker/api_op_ListLabelingJobsForWorkteam.go index 585573560e7..c8e2b2a53d1 100644 --- a/service/sagemaker/api_op_ListLabelingJobsForWorkteam.go +++ b/service/sagemaker/api_op_ListLabelingJobsForWorkteam.go @@ -73,8 +73,8 @@ type ListLabelingJobsForWorkteamOutput struct { // This member is required. LabelingJobSummaryList []types.LabelingJobForWorkteamSummary - // If the response is truncated, Amazon SageMaker returns this token. To retrieve - // the next set of labeling jobs, use it in the subsequent request. + // If the response is truncated, SageMaker returns this token. To retrieve the next + // set of labeling jobs, use it in the subsequent request. NextToken *string // Metadata pertaining to the operation's result. diff --git a/service/sagemaker/api_op_ListModelPackages.go b/service/sagemaker/api_op_ListModelPackages.go index bdd3938e5ef..90f741247cf 100644 --- a/service/sagemaker/api_op_ListModelPackages.go +++ b/service/sagemaker/api_op_ListModelPackages.go @@ -88,8 +88,8 @@ type ListModelPackagesOutput struct { // This member is required. ModelPackageSummaryList []types.ModelPackageSummary - // If the response is truncated, Amazon SageMaker returns this token. To retrieve - // the next set of model packages, use it in the subsequent request. + // If the response is truncated, SageMaker returns this token. To retrieve the next + // set of model packages, use it in the subsequent request. NextToken *string // Metadata pertaining to the operation's result. 
diff --git a/service/sagemaker/api_op_ListModels.go b/service/sagemaker/api_op_ListModels.go index 03cbe2a8372..b5224c0b7ca 100644 --- a/service/sagemaker/api_op_ListModels.go +++ b/service/sagemaker/api_op_ListModels.go @@ -66,8 +66,8 @@ type ListModelsOutput struct { // This member is required. Models []types.ModelSummary - // If the response is truncated, Amazon SageMaker returns this token. To retrieve - // the next set of models, use it in the subsequent request. + // If the response is truncated, SageMaker returns this token. To retrieve the next + // set of models, use it in the subsequent request. NextToken *string // Metadata pertaining to the operation's result. diff --git a/service/sagemaker/api_op_ListNotebookInstanceLifecycleConfigs.go b/service/sagemaker/api_op_ListNotebookInstanceLifecycleConfigs.go index 8a1778b3d61..7887590bb49 100644 --- a/service/sagemaker/api_op_ListNotebookInstanceLifecycleConfigs.go +++ b/service/sagemaker/api_op_ListNotebookInstanceLifecycleConfigs.go @@ -71,8 +71,8 @@ type ListNotebookInstanceLifecycleConfigsInput struct { type ListNotebookInstanceLifecycleConfigsOutput struct { - // If the response is truncated, Amazon SageMaker returns this token. To get the - // next set of lifecycle configurations, use it in the next request. + // If the response is truncated, SageMaker returns this token. To get the next set + // of lifecycle configurations, use it in the next request. NextToken *string // An array of NotebookInstanceLifecycleConfiguration objects, each listing a diff --git a/service/sagemaker/api_op_ListNotebookInstances.go b/service/sagemaker/api_op_ListNotebookInstances.go index 48743ba23cf..b231559b023 100644 --- a/service/sagemaker/api_op_ListNotebookInstances.go +++ b/service/sagemaker/api_op_ListNotebookInstances.go @@ -13,8 +13,8 @@ import ( "time" ) -// Returns a list of the Amazon SageMaker notebook instances in the requester's -// account in an Amazon Web Services Region. 
+// Returns a list of the SageMaker notebook instances in the requester's account in +// an Amazon Web Services Region. func (c *Client) ListNotebookInstances(ctx context.Context, params *ListNotebookInstancesInput, optFns ...func(*Options)) (*ListNotebookInstancesOutput, error) { if params == nil { params = &ListNotebookInstancesInput{} @@ -93,8 +93,8 @@ type ListNotebookInstancesInput struct { type ListNotebookInstancesOutput struct { // If the response to the previous ListNotebookInstances request was truncated, - // Amazon SageMaker returns this token. To retrieve the next set of notebook - // instances, use the token in the next request. + // SageMaker returns this token. To retrieve the next set of notebook instances, + // use the token in the next request. NextToken *string // An array of NotebookInstanceSummary objects, one for each notebook instance. diff --git a/service/sagemaker/api_op_ListTags.go b/service/sagemaker/api_op_ListTags.go index a767bb4fb59..c78e3ff2842 100644 --- a/service/sagemaker/api_op_ListTags.go +++ b/service/sagemaker/api_op_ListTags.go @@ -12,7 +12,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the tags for the specified Amazon SageMaker resource. +// Returns the tags for the specified SageMaker resource. func (c *Client) ListTags(ctx context.Context, params *ListTagsInput, optFns ...func(*Options)) (*ListTagsOutput, error) { if params == nil { params = &ListTagsInput{} @@ -38,9 +38,8 @@ type ListTagsInput struct { // Maximum number of tags to return. MaxResults *int32 - // If the response to the previous ListTags request is truncated, Amazon SageMaker - // returns this token. To retrieve the next set of tags, use it in the subsequent - // request. + // If the response to the previous ListTags request is truncated, SageMaker returns + // this token. To retrieve the next set of tags, use it in the subsequent request. 
NextToken *string noSmithyDocumentSerde @@ -48,8 +47,8 @@ type ListTagsInput struct { type ListTagsOutput struct { - // If response is truncated, Amazon SageMaker includes a token in the response. You - // can use this token in your subsequent request to fetch next set of tokens. + // If response is truncated, SageMaker includes a token in the response. You can + // use this token in your subsequent request to fetch next set of tokens. NextToken *string // An array of Tag objects, each with a tag key and a value. diff --git a/service/sagemaker/api_op_ListTrainingJobs.go b/service/sagemaker/api_op_ListTrainingJobs.go index 30f103fc920..bab4844876d 100644 --- a/service/sagemaker/api_op_ListTrainingJobs.go +++ b/service/sagemaker/api_op_ListTrainingJobs.go @@ -87,8 +87,8 @@ type ListTrainingJobsOutput struct { // This member is required. TrainingJobSummaries []types.TrainingJobSummary - // If the response is truncated, Amazon SageMaker returns this token. To retrieve - // the next set of training jobs, use it in the subsequent request. + // If the response is truncated, SageMaker returns this token. To retrieve the next + // set of training jobs, use it in the subsequent request. NextToken *string // Metadata pertaining to the operation's result. diff --git a/service/sagemaker/api_op_QueryLineage.go b/service/sagemaker/api_op_QueryLineage.go index 75f7ad98683..a95e4ac14e4 100644 --- a/service/sagemaker/api_op_QueryLineage.go +++ b/service/sagemaker/api_op_QueryLineage.go @@ -39,8 +39,8 @@ type QueryLineageInput struct { // This member is required. StartArns []string - // Associations between lineage entities are directed. This parameter determines - // the direction from the StartArn(s) the query will look. + // Associations between lineage entities have a direction. This parameter + // determines the direction from the StartArn(s) that the query traverses. 
Direction types.Direction // A set of filtering parameters that allow you to specify which entities should be @@ -61,14 +61,14 @@ type QueryLineageInput struct { // * ModifiedAfter - Filter entities modified after this date. Filters *types.QueryFilters - // Setting this value to True will retrieve not only the entities of interest but - // also the Associations + // Setting this value to True retrieves not only the entities of interest but also + // the Associations // (https://docs.aws.amazon.com/sagemaker/latest/dg/lineage-tracking-entities.html) // and lineage entities on the path. Set to False to only return lineage entities // that match your query. IncludeEdges bool - // The maximum depth in lineage relationships from the StartArns that will be + // The maximum depth in lineage relationships from the StartArns that are // traversed. Depth is a measure of the number of Associations from the StartArn // entity to the matched results. MaxDepth *int32 diff --git a/service/sagemaker/api_op_StartNotebookInstance.go b/service/sagemaker/api_op_StartNotebookInstance.go index 2e397746e4b..5261b21a896 100644 --- a/service/sagemaker/api_op_StartNotebookInstance.go +++ b/service/sagemaker/api_op_StartNotebookInstance.go @@ -11,7 +11,7 @@ import ( ) // Launches an ML compute instance with the latest version of the libraries and -// attaches your ML storage volume. After configuring the notebook instance, Amazon +// attaches your ML storage volume. After configuring the notebook instance, // SageMaker sets the notebook instance status to InService. A notebook instance's // status must be InService before you can connect to your Jupyter notebook. 
func (c *Client) StartNotebookInstance(ctx context.Context, params *StartNotebookInstanceInput, optFns ...func(*Options)) (*StartNotebookInstanceOutput, error) { diff --git a/service/sagemaker/api_op_StopNotebookInstance.go b/service/sagemaker/api_op_StopNotebookInstance.go index e7e05001e28..5ad40aafe13 100644 --- a/service/sagemaker/api_op_StopNotebookInstance.go +++ b/service/sagemaker/api_op_StopNotebookInstance.go @@ -10,14 +10,13 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Terminates the ML compute instance. Before terminating the instance, Amazon -// SageMaker disconnects the ML storage volume from it. Amazon SageMaker preserves -// the ML storage volume. Amazon SageMaker stops charging you for the ML compute -// instance when you call StopNotebookInstance. To access data on the ML storage -// volume for a notebook instance that has been terminated, call the -// StartNotebookInstance API. StartNotebookInstance launches another ML compute -// instance, configures it, and attaches the preserved ML storage volume so you can -// continue your work. +// Terminates the ML compute instance. Before terminating the instance, SageMaker +// disconnects the ML storage volume from it. SageMaker preserves the ML storage +// volume. SageMaker stops charging you for the ML compute instance when you call +// StopNotebookInstance. To access data on the ML storage volume for a notebook +// instance that has been terminated, call the StartNotebookInstance API. +// StartNotebookInstance launches another ML compute instance, configures it, and +// attaches the preserved ML storage volume so you can continue your work. 
func (c *Client) StopNotebookInstance(ctx context.Context, params *StopNotebookInstanceInput, optFns ...func(*Options)) (*StopNotebookInstanceOutput, error) { if params == nil { params = &StopNotebookInstanceInput{} diff --git a/service/sagemaker/api_op_StopTrainingJob.go b/service/sagemaker/api_op_StopTrainingJob.go index f32ab28e516..cafe3790a10 100644 --- a/service/sagemaker/api_op_StopTrainingJob.go +++ b/service/sagemaker/api_op_StopTrainingJob.go @@ -10,12 +10,12 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Stops a training job. To stop a job, Amazon SageMaker sends the algorithm the -// SIGTERM signal, which delays job termination for 120 seconds. Algorithms might -// use this 120-second window to save the model artifacts, so the results of the -// training is not lost. When it receives a StopTrainingJob request, Amazon -// SageMaker changes the status of the job to Stopping. After Amazon SageMaker -// stops the job, it sets the status to Stopped. +// Stops a training job. To stop a job, SageMaker sends the algorithm the SIGTERM +// signal, which delays job termination for 120 seconds. Algorithms might use this +// 120-second window to save the model artifacts, so the results of the training is +// not lost. When it receives a StopTrainingJob request, SageMaker changes the +// status of the job to Stopping. After SageMaker stops the job, it sets the status +// to Stopped. 
func (c *Client) StopTrainingJob(ctx context.Context, params *StopTrainingJobInput, optFns ...func(*Options)) (*StopTrainingJobOutput, error) { if params == nil { params = &StopTrainingJobInput{} diff --git a/service/sagemaker/api_op_UpdateEndpoint.go b/service/sagemaker/api_op_UpdateEndpoint.go index f9d3bbb83b1..85be019b93b 100644 --- a/service/sagemaker/api_op_UpdateEndpoint.go +++ b/service/sagemaker/api_op_UpdateEndpoint.go @@ -13,11 +13,11 @@ import ( // Deploys the new EndpointConfig specified in the request, switches to using newly // created endpoint, and then deletes resources provisioned for the endpoint using -// the previous EndpointConfig (there is no availability loss). When Amazon -// SageMaker receives the request, it sets the endpoint status to Updating. After -// updating the endpoint, it sets the status to InService. To check the status of -// an endpoint, use the DescribeEndpoint API. You must not delete an EndpointConfig -// in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint +// the previous EndpointConfig (there is no availability loss). When SageMaker +// receives the request, it sets the endpoint status to Updating. After updating +// the endpoint, it sets the status to InService. To check the status of an +// endpoint, use the DescribeEndpoint API. You must not delete an EndpointConfig in +// use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint // operations are being performed on the endpoint. To update an endpoint, you must // create a new EndpointConfig. 
If you delete the EndpointConfig of an endpoint // that is active or being created or updated you may lose visibility into the diff --git a/service/sagemaker/api_op_UpdateEndpointWeightsAndCapacities.go b/service/sagemaker/api_op_UpdateEndpointWeightsAndCapacities.go index 1f38925bcf6..10e984c0505 100644 --- a/service/sagemaker/api_op_UpdateEndpointWeightsAndCapacities.go +++ b/service/sagemaker/api_op_UpdateEndpointWeightsAndCapacities.go @@ -13,9 +13,9 @@ import ( // Updates variant weight of one or more variants associated with an existing // endpoint, or capacity of one variant associated with an existing endpoint. When -// it receives the request, Amazon SageMaker sets the endpoint status to Updating. -// After updating the endpoint, it sets the status to InService. To check the -// status of an endpoint, use the DescribeEndpoint API. +// it receives the request, SageMaker sets the endpoint status to Updating. After +// updating the endpoint, it sets the status to InService. To check the status of +// an endpoint, use the DescribeEndpoint API. func (c *Client) UpdateEndpointWeightsAndCapacities(ctx context.Context, params *UpdateEndpointWeightsAndCapacitiesInput, optFns ...func(*Options)) (*UpdateEndpointWeightsAndCapacitiesOutput, error) { if params == nil { params = &UpdateEndpointWeightsAndCapacitiesInput{} @@ -38,7 +38,7 @@ type UpdateEndpointWeightsAndCapacitiesInput struct { // This member is required. DesiredWeightsAndCapacities []types.DesiredWeightAndCapacity - // The name of an existing Amazon SageMaker endpoint. + // The name of an existing SageMaker endpoint. // // This member is required. 
EndpointName *string diff --git a/service/sagemaker/api_op_UpdateNotebookInstance.go b/service/sagemaker/api_op_UpdateNotebookInstance.go index db8cbadd86b..6c94a2e5fbf 100644 --- a/service/sagemaker/api_op_UpdateNotebookInstance.go +++ b/service/sagemaker/api_op_UpdateNotebookInstance.go @@ -49,7 +49,7 @@ type UpdateNotebookInstanceInput struct { // (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or in any // other Git repository. These repositories are cloned at the same level as the // default repository of your notebook instance. For more information, see - // Associating Git Repositories with Amazon SageMaker Notebook Instances + // Associating Git Repositories with SageMaker Notebook Instances // (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). AdditionalCodeRepositories []string @@ -60,7 +60,7 @@ type UpdateNotebookInstanceInput struct { // (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or in any // other Git repository. When you open a notebook instance, it opens in the // directory that contains this repository. For more information, see Associating - // Git Repositories with Amazon SageMaker Notebook Instances + // Git Repositories with SageMaker Notebook Instances // (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). DefaultCodeRepository *string @@ -97,11 +97,11 @@ type UpdateNotebookInstanceInput struct { // (https://docs.aws.amazon.com/sagemaker/latest/dg/notebook-lifecycle-config.html). LifecycleConfigName *string - // The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker can assume - // to access the notebook instance. For more information, see Amazon SageMaker - // Roles (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html). To - // be able to pass this role to Amazon SageMaker, the caller of this API must have - // the iam:PassRole permission. 
+ // The Amazon Resource Name (ARN) of the IAM role that SageMaker can assume to + // access the notebook instance. For more information, see SageMaker Roles + // (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html). To be + // able to pass this role to SageMaker, the caller of this API must have the + // iam:PassRole permission. RoleArn *string // Whether root access is enabled or disabled for users of the notebook instance. @@ -111,11 +111,11 @@ type UpdateNotebookInstanceInput struct { RootAccess types.RootAccess // The size, in GB, of the ML storage volume to attach to the notebook instance. - // The default value is 5 GB. ML storage volumes are encrypted, so Amazon SageMaker - // can't determine the amount of available free space on the volume. Because of - // this, you can increase the volume size when you update a notebook instance, but - // you can't decrease the volume size. If you want to decrease the size of the ML - // storage volume in use, create a new notebook instance with the desired size. + // The default value is 5 GB. ML storage volumes are encrypted, so SageMaker can't + // determine the amount of available free space on the volume. Because of this, you + // can increase the volume size when you update a notebook instance, but you can't + // decrease the volume size. If you want to decrease the size of the ML storage + // volume in use, create a new notebook instance with the desired size. 
VolumeSizeInGB *int32 noSmithyDocumentSerde diff --git a/service/sagemaker/deserializers.go b/service/sagemaker/deserializers.go index 98e71abe414..88744b70d1a 100644 --- a/service/sagemaker/deserializers.go +++ b/service/sagemaker/deserializers.go @@ -29272,6 +29272,15 @@ func awsAwsjson11_deserializeDocumentAutoMLChannel(v **types.AutoMLChannel, valu for key, value := range shape { switch key { + case "ChannelType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AutoMLChannelType to be of type string, got %T instead", value) + } + sv.ChannelType = types.AutoMLChannelType(jtv) + } + case "CompressionType": if value != nil { jtv, ok := value.(string) @@ -29437,6 +29446,71 @@ func awsAwsjson11_deserializeDocumentAutoMLDataSource(v **types.AutoMLDataSource return nil } +func awsAwsjson11_deserializeDocumentAutoMLDataSplitConfig(v **types.AutoMLDataSplitConfig, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AutoMLDataSplitConfig + if *v == nil { + sv = &types.AutoMLDataSplitConfig{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ValidationFraction": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.ValidationFraction = ptr.Float32(float32(f64)) + + case string: + var f64 float64 + switch { + case strings.EqualFold(jtv, "NaN"): + f64 = math.NaN() + + case strings.EqualFold(jtv, "Infinity"): + f64 = math.Inf(1) + + case strings.EqualFold(jtv, "-Infinity"): + f64 = math.Inf(-1) + + default: + return fmt.Errorf("unknown JSON number value: %s", jtv) + + } + sv.ValidationFraction = ptr.Float32(float32(f64)) + + default: + return fmt.Errorf("expected ValidationFraction to be a JSON Number, 
got %T instead", value) + + } + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeDocumentAutoMLInputDataConfig(v *[]types.AutoMLChannel, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -29617,6 +29691,11 @@ func awsAwsjson11_deserializeDocumentAutoMLJobConfig(v **types.AutoMLJobConfig, return err } + case "DataSplitConfig": + if err := awsAwsjson11_deserializeDocumentAutoMLDataSplitConfig(&sv.DataSplitConfig, value); err != nil { + return err + } + case "SecurityConfig": if err := awsAwsjson11_deserializeDocumentAutoMLSecurityConfig(&sv.SecurityConfig, value); err != nil { return err diff --git a/service/sagemaker/doc.go b/service/sagemaker/doc.go index 523fa99cbcc..4b9421e88f5 100644 --- a/service/sagemaker/doc.go +++ b/service/sagemaker/doc.go @@ -3,10 +3,10 @@ // Package sagemaker provides the API client, operations, and parameter types for // Amazon SageMaker Service. // -// Provides APIs for creating and managing Amazon SageMaker resources. Other -// Resources: +// Provides APIs for creating and managing SageMaker resources. 
Other Resources: // -// * Amazon SageMaker Developer Guide +// * +// SageMaker Developer Guide // (https://docs.aws.amazon.com/sagemaker/latest/dg/whatis.html#first-time-user) // // * diff --git a/service/sagemaker/serializers.go b/service/sagemaker/serializers.go index a228204f088..b73db8b54e9 100644 --- a/service/sagemaker/serializers.go +++ b/service/sagemaker/serializers.go @@ -14259,6 +14259,11 @@ func awsAwsjson11_serializeDocumentAutoMLChannel(v *types.AutoMLChannel, value s object := value.Object() defer object.Close() + if len(v.ChannelType) > 0 { + ok := object.Key("ChannelType") + ok.String(string(v.ChannelType)) + } + if len(v.CompressionType) > 0 { ok := object.Key("CompressionType") ok.String(string(v.CompressionType)) @@ -14298,6 +14303,31 @@ func awsAwsjson11_serializeDocumentAutoMLDataSource(v *types.AutoMLDataSource, v return nil } +func awsAwsjson11_serializeDocumentAutoMLDataSplitConfig(v *types.AutoMLDataSplitConfig, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ValidationFraction != nil { + ok := object.Key("ValidationFraction") + switch { + case math.IsNaN(float64(*v.ValidationFraction)): + ok.String("NaN") + + case math.IsInf(float64(*v.ValidationFraction), 1): + ok.String("Infinity") + + case math.IsInf(float64(*v.ValidationFraction), -1): + ok.String("-Infinity") + + default: + ok.Float(*v.ValidationFraction) + + } + } + + return nil +} + func awsAwsjson11_serializeDocumentAutoMLInputDataConfig(v []types.AutoMLChannel, value smithyjson.Value) error { array := value.Array() defer array.Close() @@ -14344,6 +14374,13 @@ func awsAwsjson11_serializeDocumentAutoMLJobConfig(v *types.AutoMLJobConfig, val } } + if v.DataSplitConfig != nil { + ok := object.Key("DataSplitConfig") + if err := awsAwsjson11_serializeDocumentAutoMLDataSplitConfig(v.DataSplitConfig, ok); err != nil { + return err + } + } + if v.SecurityConfig != nil { ok := object.Key("SecurityConfig") if err := 
awsAwsjson11_serializeDocumentAutoMLSecurityConfig(v.SecurityConfig, ok); err != nil { diff --git a/service/sagemaker/types/enums.go b/service/sagemaker/types/enums.go index b13d8917d19..4916bca5016 100644 --- a/service/sagemaker/types/enums.go +++ b/service/sagemaker/types/enums.go @@ -426,6 +426,24 @@ func (AuthMode) Values() []AuthMode { } } +type AutoMLChannelType string + +// Enum values for AutoMLChannelType +const ( + AutoMLChannelTypeTraining AutoMLChannelType = "training" + AutoMLChannelTypeValidation AutoMLChannelType = "validation" +) + +// Values returns all known values for AutoMLChannelType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (AutoMLChannelType) Values() []AutoMLChannelType { + return []AutoMLChannelType{ + "training", + "validation", + } +} + type AutoMLJobObjectiveType string // Enum values for AutoMLJobObjectiveType diff --git a/service/sagemaker/types/errors.go b/service/sagemaker/types/errors.go index d8173320bc6..a76dab9965f 100644 --- a/service/sagemaker/types/errors.go +++ b/service/sagemaker/types/errors.go @@ -46,8 +46,8 @@ func (e *ResourceInUse) ErrorMessage() string { func (e *ResourceInUse) ErrorCode() string { return "ResourceInUse" } func (e *ResourceInUse) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// You have exceeded an Amazon SageMaker resource limit. For example, you might -// have too many training jobs created. +// You have exceeded an SageMaker resource limit. For example, you might have too +// many training jobs created. 
type ResourceLimitExceeded struct { Message *string diff --git a/service/sagemaker/types/types.go b/service/sagemaker/types/types.go index b358785577d..c1ef64f64f2 100644 --- a/service/sagemaker/types/types.go +++ b/service/sagemaker/types/types.go @@ -116,7 +116,7 @@ type Alarm struct { } // Specifies the training algorithm to use in a CreateTrainingJob request. For more -// information about algorithms provided by Amazon SageMaker, see Algorithms +// information about algorithms provided by SageMaker, see Algorithms // (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). For information // about using your own algorithms, see Using Your Own Algorithms with Amazon // SageMaker @@ -159,10 +159,10 @@ type AlgorithmSpecification struct { // default is false and time-series metrics aren't generated except in the // following cases: // - // * You use one of the Amazon SageMaker built-in algorithms + // * You use one of the SageMaker built-in algorithms // - // * - // You use one of the following Prebuilt Amazon SageMaker Docker Images + // * You use + // one of the following Prebuilt SageMaker Docker Images // (https://docs.aws.amazon.com/sagemaker/latest/dg/pre-built-containers-frameworks-deep-learning.html): // // * @@ -177,15 +177,15 @@ type AlgorithmSpecification struct { EnableSageMakerMetricsTimeSeries bool // A list of metric definition objects. Each object specifies the metric name and - // regular expressions used to parse algorithm logs. Amazon SageMaker publishes - // each metric to Amazon CloudWatch. + // regular expressions used to parse algorithm logs. SageMaker publishes each + // metric to Amazon CloudWatch. MetricDefinitions []MetricDefinition // The registry path of the Docker image that contains the training algorithm. 
For // information about docker registry paths for built-in algorithms, see Algorithms // Provided by Amazon SageMaker: Common Parameters // (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-algo-docker-registry-paths.html). - // Amazon SageMaker supports both registry/repository[:tag] and + // SageMaker supports both registry/repository[:tag] and // registry/repository[@digest] image path formats. For more information, see Using // Your Own Algorithms with Amazon SageMaker // (https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms.html). @@ -254,9 +254,9 @@ type AlgorithmSummary struct { noSmithyDocumentSerde } -// Defines a training job and a batch transform job that Amazon SageMaker runs to -// validate your algorithm. The data provided in the validation profile is made -// available to your buyers on Amazon Web Services Marketplace. +// Defines a training job and a batch transform job that SageMaker runs to validate +// your algorithm. The data provided in the validation profile is made available to +// your buyers on Amazon Web Services Marketplace. type AlgorithmValidationProfile struct { // The name of the profile for the algorithm. The name must have 1 to 63 @@ -265,31 +265,31 @@ type AlgorithmValidationProfile struct { // This member is required. ProfileName *string - // The TrainingJobDefinition object that describes the training job that Amazon - // SageMaker runs to validate your algorithm. + // The TrainingJobDefinition object that describes the training job that SageMaker + // runs to validate your algorithm. // // This member is required. TrainingJobDefinition *TrainingJobDefinition - // The TransformJobDefinition object that describes the transform job that Amazon + // The TransformJobDefinition object that describes the transform job that // SageMaker runs to validate your algorithm. 
TransformJobDefinition *TransformJobDefinition noSmithyDocumentSerde } -// Specifies configurations for one or more training jobs that Amazon SageMaker -// runs to test the algorithm. +// Specifies configurations for one or more training jobs that SageMaker runs to +// test the algorithm. type AlgorithmValidationSpecification struct { // An array of AlgorithmValidationProfile objects, each of which specifies a - // training job and batch transform job that Amazon SageMaker runs to validate your + // training job and batch transform job that SageMaker runs to validate your // algorithm. // // This member is required. ValidationProfiles []AlgorithmValidationProfile - // The IAM roles that Amazon SageMaker uses to run the training jobs. + // The IAM roles that SageMaker uses to run the training jobs. // // This member is required. ValidationRole *string @@ -1378,13 +1378,12 @@ type AssociationSummary struct { noSmithyDocumentSerde } -// Configures the behavior of the client used by Amazon SageMaker to interact with -// the model container during asynchronous inference. +// Configures the behavior of the client used by SageMaker to interact with the +// model container during asynchronous inference. type AsyncInferenceClientConfig struct { // The maximum number of concurrent requests sent by the SageMaker client to the - // model container. If no value is provided, Amazon SageMaker will choose an - // optimal value for you. + // model container. If no value is provided, SageMaker chooses an optimal value. MaxConcurrentInvocationsPerInstance *int32 noSmithyDocumentSerde @@ -1398,8 +1397,8 @@ type AsyncInferenceConfig struct { // This member is required. OutputConfig *AsyncInferenceOutputConfig - // Configures the behavior of the client used by Amazon SageMaker to interact with - // the model container during asynchronous inference. + // Configures the behavior of the client used by SageMaker to interact with the + // model container during asynchronous inference. 
ClientConfig *AsyncInferenceClientConfig noSmithyDocumentSerde @@ -1429,8 +1428,7 @@ type AsyncInferenceOutputConfig struct { S3OutputPath *string // The Amazon Web Services Key Management Service (Amazon Web Services KMS) key - // that Amazon SageMaker uses to encrypt the asynchronous inference output in - // Amazon S3. + // that SageMaker uses to encrypt the asynchronous inference output in Amazon S3. KmsKeyId *string // Specifies the configuration for notifications of inference results for @@ -1555,8 +1553,10 @@ type AutoMLCandidateStep struct { noSmithyDocumentSerde } -// A channel is a named input source that training algorithms can consume. For more -// information, see . +// A channel is a named input source that training algorithms can consume. The +// validation dataset size is limited to less than 2 GB. The training dataset size +// must be less than 100 GB. For more information, see . A validation dataset must +// contain the same headers as the training dataset. type AutoMLChannel struct { // The data source for an AutoML channel. @@ -1570,6 +1570,11 @@ type AutoMLChannel struct { // This member is required. TargetAttributeName *string + // The channel type (optional) is an enum string. The default value is training. + // Channels for training and validation must share the same ContentType and + // TargetAttributeName. + ChannelType AutoMLChannelType + // You can use Gzip or None. The default value is None. CompressionType CompressionType @@ -1614,6 +1619,19 @@ type AutoMLDataSource struct { noSmithyDocumentSerde } +// This structure specifies how to split the data into train and test datasets. The +// validation and training datasets must contain the same headers. The validation +// dataset must be less than 2 GB in size. +type AutoMLDataSplitConfig struct { + + // The validation fraction (optional) is a float that specifies the portion of the + // training dataset to be used for validation. 
The default value is 0.2, and values + // can range from 0 to 1. We recommend setting this value to be less than 0.5. + ValidationFraction *float32 + + noSmithyDocumentSerde +} + // The artifacts that are generated during an AutoML job. type AutoMLJobArtifacts struct { @@ -1656,6 +1674,10 @@ type AutoMLJobConfig struct { // allowed to generate. CompletionCriteria *AutoMLJobCompletionCriteria + // The configuration for splitting the input training dataset. Type: + // AutoMLDataSplitConfig + DataSplitConfig *AutoMLDataSplitConfig + // The security configuration for traffic encryption or Amazon VPC settings. SecurityConfig *AutoMLSecurityConfig @@ -2112,7 +2134,7 @@ type Channel struct { ContentType *string // (Optional) The input mode to use for the data channel in a training job. If you - // don't set a value for InputMode, Amazon SageMaker uses the value set for + // don't set a value for InputMode, SageMaker uses the value set for // TrainingInputMode. Use this parameter to override the TrainingInputMode setting // in a AlgorithmSpecification request when you have a channel that needs a // different input mode from the training job's general setting. To download the @@ -2123,8 +2145,8 @@ type Channel struct { InputMode TrainingInputMode // Specify RecordIO as the value when input data is in raw format but the training - // algorithm requires the RecordIO format. In this case, Amazon SageMaker wraps - // each individual S3 object in a RecordIO record. If the input data is already in + // algorithm requires the RecordIO format. In this case, SageMaker wraps each + // individual S3 object in a RecordIO record. If the input data is already in // RecordIO format, you don't need to set this attribute. For more information, see // Create a Dataset Using RecordIO // (https://mxnet.apache.org/api/architecture/note_data_loading#data-format). In @@ -2186,7 +2208,7 @@ type ChannelSpecification struct { // checkpoint data. 
type CheckpointConfig struct { - // Identifies the S3 path where you want Amazon SageMaker to store checkpoints. For + // Identifies the S3 path where you want SageMaker to store checkpoints. For // example, s3://bucket-name/key-name-prefix. // // This member is required. @@ -2417,10 +2439,10 @@ type ContainerDefinition struct { // The path where inference code is stored. This can be either in Amazon EC2 // Container Registry or in a Docker registry that is accessible from the same VPC // that you configure for your endpoint. If you are using your own custom algorithm - // instead of an algorithm provided by Amazon SageMaker, the inference code must - // meet Amazon SageMaker requirements. Amazon SageMaker supports both - // registry/repository[:tag] and registry/repository[@digest] image path formats. - // For more information, see Using Your Own Algorithms with Amazon SageMaker + // instead of an algorithm provided by SageMaker, the inference code must meet + // SageMaker requirements. SageMaker supports both registry/repository[:tag] and + // registry/repository[@digest] image path formats. For more information, see Using + // Your Own Algorithms with Amazon SageMaker // (https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms.html) Image *string @@ -2439,22 +2461,22 @@ type ContainerDefinition struct { // The S3 path where the model artifacts, which result from model training, are // stored. This path must point to a single gzip compressed tar archive (.tar.gz - // suffix). The S3 path is required for Amazon SageMaker built-in algorithms, but - // not if you use your own algorithms. For more information on built-in algorithms, - // see Common Parameters + // suffix). The S3 path is required for SageMaker built-in algorithms, but not if + // you use your own algorithms. For more information on built-in algorithms, see + // Common Parameters // (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-algo-docker-registry-paths.html). 
// The model artifacts must be in an S3 bucket that is in the same region as the // model or endpoint you are creating. If you provide a value for this parameter, - // Amazon SageMaker uses Amazon Web Services Security Token Service to download - // model artifacts from the S3 path you provide. Amazon Web Services STS is - // activated in your IAM user account by default. If you previously deactivated - // Amazon Web Services STS for a region, you need to reactivate Amazon Web Services - // STS for that region. For more information, see Activating and Deactivating - // Amazon Web Services STS in an Amazon Web Services Region + // SageMaker uses Amazon Web Services Security Token Service to download model + // artifacts from the S3 path you provide. Amazon Web Services STS is activated in + // your IAM user account by default. If you previously deactivated Amazon Web + // Services STS for a region, you need to reactivate Amazon Web Services STS for + // that region. For more information, see Activating and Deactivating Amazon Web + // Services STS in an Amazon Web Services Region // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the Amazon Web Services Identity and Access Management User Guide. If you use - // a built-in algorithm to create a model, Amazon SageMaker requires that you - // provide a S3 path to the model artifacts in ModelDataUrl. + // a built-in algorithm to create a model, SageMaker requires that you provide a S3 + // path to the model artifacts in ModelDataUrl. ModelDataUrl *string // The name or Amazon Resource Name (ARN) of the model package to use to create the @@ -2533,8 +2555,8 @@ type ContinuousParameterRange struct { // For information about choosing a hyperparameter scale, see Hyperparameter // Scaling // (https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-define-ranges.html#scaling-type). 
- // One of the following values: Auto Amazon SageMaker hyperparameter tuning chooses - // the best scale for the hyperparameter. Linear Hyperparameter tuning searches the + // One of the following values: Auto SageMaker hyperparameter tuning chooses the + // best scale for the hyperparameter. Linear Hyperparameter tuning searches the // values in the hyperparameter range by using a linear scale. Logarithmic // Hyperparameter tuning searches the values in the hyperparameter range by using a // logarithmic scale. Logarithmic scaling works only for ranges that have only @@ -2680,8 +2702,8 @@ type DataProcessing struct { // (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform-data-processing.html#data-processing-operators) // expression used to select a portion of the input data to pass to the algorithm. // Use the InputFilter parameter to exclude fields, such as an ID column, from the - // input. If you want Amazon SageMaker to pass the entire input dataset to the - // algorithm, accept the default value $. Examples: "$", "$[1:]", "$.features" + // input. If you want SageMaker to pass the entire input dataset to the algorithm, + // accept the default value $. Examples: "$", "$[1:]", "$.features" InputFilter *string // Specifies the source of the data to join with the transformed data. The valid @@ -2706,10 +2728,10 @@ type DataProcessing struct { // A JSONPath // (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform-data-processing.html#data-processing-operators) // expression used to select a portion of the joined dataset to save in the output - // file for a batch transform job. If you want Amazon SageMaker to store the entire - // input dataset in the output file, leave the default value, $. If you specify - // indexes that aren't within the dimension size of the joined dataset, you get an - // error. Examples: "$", "$[0,5:]", "$['id','SageMakerOutput']" + // file for a batch transform job. 
If you want SageMaker to store the entire input + // dataset in the output file, leave the default value, $. If you specify indexes + // that aren't within the dimension size of the joined dataset, you get an error. + // Examples: "$", "$[0,5:]", "$['id','SageMakerOutput']" OutputFilter *string noSmithyDocumentSerde @@ -5570,7 +5592,7 @@ type HyperParameterAlgorithmSpecification struct { // information about Docker registry paths for built-in algorithms, see Algorithms // Provided by Amazon SageMaker: Common Parameters // (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-algo-docker-registry-paths.html). - // Amazon SageMaker supports both registry/repository[:tag] and + // SageMaker supports both registry/repository[:tag] and // registry/repository[@digest] image path formats. For more information, see Using // Your Own Algorithms with Amazon SageMaker // (https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms.html). @@ -5630,10 +5652,10 @@ type HyperParameterTrainingJobDefinition struct { // The resources, including the compute instances and storage volumes, to use for // the training jobs that the tuning job launches. Storage volumes store model // artifacts and incremental states. Training algorithms might also use storage - // volumes for scratch space. If you want Amazon SageMaker to use the storage - // volume to store the training data, choose File as the TrainingInputMode in the - // algorithm specification. For distributed training algorithms, specify an - // instance count greater than 1. + // volumes for scratch space. If you want SageMaker to use the storage volume to + // store the training data, choose File as the TrainingInputMode in the algorithm + // specification. For distributed training algorithms, specify an instance count + // greater than 1. // // This member is required. 
ResourceConfig *ResourceConfig @@ -5646,8 +5668,8 @@ type HyperParameterTrainingJobDefinition struct { // Specifies a limit to how long a model hyperparameter training job can run. It // also specifies how long a managed spot training job has to complete. When the - // job reaches the time limit, Amazon SageMaker ends the training job. Use this API - // to cap model training costs. + // job reaches the time limit, SageMaker ends the training job. Use this API to cap + // model training costs. // // This member is required. StoppingCondition *StoppingCondition @@ -5673,9 +5695,9 @@ type HyperParameterTrainingJobDefinition struct { // Isolates the training container. No inbound or outbound network calls can be // made, except for calls between peers within a training cluster for distributed // training. If network isolation is used for training jobs that are configured to - // use a VPC, Amazon SageMaker downloads and uploads customer data and model - // artifacts through the specified VPC, but the training container does not have - // network access. + // use a VPC, SageMaker downloads and uploads customer data and model artifacts + // through the specified VPC, but the training container does not have network + // access. EnableNetworkIsolation bool // Specifies ranges of integer, continuous, and categorical hyperparameters that a @@ -5715,7 +5737,7 @@ type HyperParameterTrainingJobDefinition struct { noSmithyDocumentSerde } -// Specifies summary information about a training job. +// The container for the summary information about a training job. type HyperParameterTrainingJobSummary struct { // The date and time that the training job was created. @@ -5768,8 +5790,8 @@ type HyperParameterTrainingJobSummary struct { // Specifies the time when the training job ends on training instances. You are // billed for the time interval between the value of TrainingStartTime and this // time. 
For successful jobs and stopped jobs, this is the time after model - // artifacts are uploaded. For failed jobs, this is the time when Amazon SageMaker - // detects a job failure. + // artifacts are uploaded. For failed jobs, this is the time when SageMaker detects + // a job failure. TrainingEndTime *time.Time // The training job definition name. @@ -5813,8 +5835,8 @@ type HyperParameterTuningJobConfig struct { // Specifies whether to use early stopping for training jobs launched by the // hyperparameter tuning job. This can be one of the following values (the default // value is OFF): OFF Training jobs launched by the hyperparameter tuning job do - // not use early stopping. AUTO Amazon SageMaker stops training jobs launched by - // the hyperparameter tuning job when they are unlikely to perform better than + // not use early stopping. AUTO SageMaker stops training jobs launched by the + // hyperparameter tuning job when they are unlikely to perform better than // previously completed training jobs. For more information, see Stop Training Jobs // Early // (https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-early-stopping.html). @@ -6433,8 +6455,8 @@ type IntegerParameterRange struct { // For information about choosing a hyperparameter scale, see Hyperparameter // Scaling // (https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-define-ranges.html#scaling-type). - // One of the following values: Auto Amazon SageMaker hyperparameter tuning chooses - // the best scale for the hyperparameter. Linear Hyperparameter tuning searches the + // One of the following values: Auto SageMaker hyperparameter tuning chooses the + // best scale for the hyperparameter. Linear Hyperparameter tuning searches the // values in the hyperparameter range by using a linear scale. Logarithmic // Hyperparameter tuning searches the values in the hyperparameter range by using a // logarithmic scale. 
Logarithmic scaling works only for ranges that have only @@ -6464,11 +6486,14 @@ type IntegerParameterRangeSpecification struct { type JupyterServerAppSettings struct { // The default instance type and the Amazon Resource Name (ARN) of the default - // SageMaker image used by the JupyterServer app. + // SageMaker image used by the JupyterServer app. If you use the + // LifecycleConfigArns parameter, then this parameter is also required. DefaultResourceSpec *ResourceSpec // The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the - // JupyterServerApp. + // JupyterServerApp. If you use this parameter, the DefaultResourceSpec parameter + // is also required. To remove a Lifecycle Config, you must set LifecycleConfigArns + // to an empty list. LifecycleConfigArns []string noSmithyDocumentSerde @@ -6482,11 +6507,16 @@ type KernelGatewayAppSettings struct { CustomImages []CustomImage // The default instance type and the Amazon Resource Name (ARN) of the default - // SageMaker image used by the KernelGateway app. + // SageMaker image used by the KernelGateway app. The Amazon SageMaker Studio UI + // does not use the default instance type value set here. The default instance type + // set here is used when Apps are created using the Amazon Web Services Command + // Line Interface or Amazon Web Services CloudFormation and the instance type + // parameter value is not passed. DefaultResourceSpec *ResourceSpec // The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the - // the user profile or domain. + // the user profile or domain. To remove a Lifecycle Config, you must set + // LifecycleConfigArns to an empty list. LifecycleConfigArns []string noSmithyDocumentSerde @@ -6602,8 +6632,8 @@ type LabelingJobAlgorithmsConfig struct { type LabelingJobDataAttributes struct { // Declares that your content is free of personally identifiable information or - // adult content. 
Amazon SageMaker may restrict the Amazon Mechanical Turk workers - // that can view your task based on this information. + // adult content. SageMaker may restrict the Amazon Mechanical Turk workers that + // can view your task based on this information. ContentClassifiers []ContentClassifier noSmithyDocumentSerde @@ -6683,8 +6713,8 @@ type LabelingJobOutput struct { // This member is required. OutputDatasetS3Uri *string - // The Amazon Resource Name (ARN) for the most recent Amazon SageMaker model - // trained as part of automated data labeling. + // The Amazon Resource Name (ARN) for the most recent SageMaker model trained as + // part of automated data labeling. FinalActiveLearningModelArn *string noSmithyDocumentSerde @@ -6972,9 +7002,9 @@ type MetricDatum struct { } // Specifies a metric that the training algorithm writes to stderr or stdout. -// Amazon SageMakerhyperparameter tuning captures all defined metrics. You specify -// one metric that a hyperparameter tuning job uses as its objective metric to -// choose the best training job. +// SageMakerhyperparameter tuning captures all defined metrics. You specify one +// metric that a hyperparameter tuning job uses as its objective metric to choose +// the best training job. type MetricDefinition struct { // The name of the metric. @@ -7080,10 +7110,12 @@ type ModelBiasJobInput struct { // job invocation. type ModelClientConfig struct { - // The maximum number of retries when invocation requests are failing. + // The maximum number of retries when invocation requests are failing. The default + // value is 3. InvocationsMaxRetries *int32 - // The timeout value in seconds for an invocation request. + // The timeout value in seconds for an invocation request. The default value is + // 600. InvocationsTimeoutInSeconds *int32 noSmithyDocumentSerde @@ -7411,7 +7443,7 @@ type ModelPackage struct { // learning tasks include object detection and image classification. 
Task *string - // Specifies batch transform jobs that Amazon SageMaker runs to validate your model + // Specifies batch transform jobs that SageMaker runs to validate your model // package. ValidationSpecification *ModelPackageValidationSpecification @@ -7423,8 +7455,8 @@ type ModelPackageContainerDefinition struct { // The Amazon EC2 Container Registry (Amazon ECR) path where inference code is // stored. If you are using your own custom algorithm instead of an algorithm - // provided by Amazon SageMaker, the inference code must meet Amazon SageMaker - // requirements. Amazon SageMaker supports both registry/repository[:tag] and + // provided by SageMaker, the inference code must meet SageMaker requirements. + // SageMaker supports both registry/repository[:tag] and // registry/repository[@digest] image path formats. For more information, see Using // Your Own Algorithms with Amazon SageMaker // (https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms.html). @@ -7646,12 +7678,12 @@ type ModelPackageValidationProfile struct { noSmithyDocumentSerde } -// Specifies batch transform jobs that Amazon SageMaker runs to validate your model +// Specifies batch transform jobs that SageMaker runs to validate your model // package. type ModelPackageValidationSpecification struct { // An array of ModelPackageValidationProfile objects, each of which specifies a - // batch transform job that Amazon SageMaker runs to validate your model package. + // batch transform job that SageMaker runs to validate your model package. // // This member is required. ValidationProfiles []ModelPackageValidationProfile @@ -8350,7 +8382,7 @@ type NotebookInstanceLifecycleHook struct { noSmithyDocumentSerde } -// Provides summary information for an Amazon SageMaker notebook instance. +// Provides summary information for an SageMaker notebook instance. type NotebookInstanceSummary struct { // The Amazon Resource Name (ARN) of the notebook instance. 
@@ -8369,7 +8401,7 @@ type NotebookInstanceSummary struct { // (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or in any // other Git repository. These repositories are cloned at the same level as the // default repository of your notebook instance. For more information, see - // Associating Git Repositories with Amazon SageMaker Notebook Instances + // Associating Git Repositories with SageMaker Notebook Instances // (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). AdditionalCodeRepositories []string @@ -8383,7 +8415,7 @@ type NotebookInstanceSummary struct { // (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or in any // other Git repository. When you open a notebook instance, it opens in the // directory that contains this repository. For more information, see Associating - // Git Repositories with Amazon SageMaker Notebook Instances + // Git Repositories with SageMaker Notebook Instances // (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). DefaultCodeRepository *string @@ -8402,7 +8434,7 @@ type NotebookInstanceSummary struct { // The status of the notebook instance. NotebookInstanceStatus NotebookInstanceStatus - // The URL that you use to connect to the Jupyter instance running in your notebook + // The URL that you use to connect to the Jupyter notebook running in your notebook // instance. Url *string @@ -8801,18 +8833,18 @@ type OutputConfig struct { // artifacts). type OutputDataConfig struct { - // Identifies the S3 path where you want Amazon SageMaker to store the model - // artifacts. For example, s3://bucket-name/key-name-prefix. + // Identifies the S3 path where you want SageMaker to store the model artifacts. + // For example, s3://bucket-name/key-name-prefix. // // This member is required. 
S3OutputPath *string // The Amazon Web Services Key Management Service (Amazon Web Services KMS) key - // that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon - // S3 server-side encryption. The KmsKeyId can be any of the following formats: + // that SageMaker uses to encrypt the model artifacts at rest using Amazon S3 + // server-side encryption. The KmsKeyId can be any of the following formats: // - // * - // // KMS Key ID "1234abcd-12ab-34cd-56ef-1234567890ab" + // * // + // KMS Key ID "1234abcd-12ab-34cd-56ef-1234567890ab" // // * // Amazon Resource Name // (ARN) of a KMS Key @@ -8825,10 +8857,10 @@ type OutputDataConfig struct { // Key Alias "arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias" // // If you use a - // KMS key ID or an alias of your KMS key, the Amazon SageMaker execution role must + // KMS key ID or an alias of your KMS key, the SageMaker execution role must // include permissions to call kms:Encrypt. If you don't provide a KMS key ID, - // Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. - // Amazon SageMaker uses server-side encryption with KMS-managed keys for + // SageMaker uses the default KMS key for Amazon S3 for your role's account. + // SageMaker uses server-side encryption with KMS-managed keys for // OutputDataConfig. If you use a bucket policy with an s3:PutObject permission // that only allows objects with server-side encryption, set the condition key of // s3:x-amz-server-side-encryption to "aws:kms". For more information, see @@ -8998,9 +9030,7 @@ type PendingProductionVariantSummary struct { // The number of instances associated with the variant. CurrentInstanceCount *int32 - // The serverless configuration for the endpoint. Serverless Inference is in - // preview release for Amazon SageMaker and is subject to change. We do not - // recommend using this feature in production environments. + // The serverless configuration for the endpoint. 
CurrentServerlessConfig *ProductionVariantServerlessConfig // The weight associated with the variant. @@ -9016,9 +9046,7 @@ type PendingProductionVariantSummary struct { DesiredInstanceCount *int32 // The serverless configuration requested for this deployment, as specified in the - // endpoint configuration for the endpoint. Serverless Inference is in preview - // release for Amazon SageMaker and is subject to change. We do not recommend using - // this feature in production environments. + // endpoint configuration for the endpoint. DesiredServerlessConfig *ProductionVariantServerlessConfig // The requested weight for the variant in this deployment, as specified in the @@ -9739,7 +9767,7 @@ type ProcessingStoppingCondition struct { } // Identifies a model that you want to host and the resources chosen to deploy for -// hosting it. If you are deploying multiple models, tell Amazon SageMaker how to +// hosting it. If you are deploying multiple models, tell SageMaker how to // distribute traffic among the models by specifying variant weights. type ProductionVariant struct { @@ -9777,9 +9805,7 @@ type ProductionVariant struct { InstanceType ProductionVariantInstanceType // The serverless configuration for an endpoint. Specifies a serverless endpoint - // configuration instead of an instance-based endpoint configuration. Serverless - // Inference is in preview release for Amazon SageMaker and is subject to change. - // We do not recommend using this feature in production environments. + // configuration instead of an instance-based endpoint configuration. 
ServerlessConfig *ProductionVariantServerlessConfig noSmithyDocumentSerde @@ -9795,7 +9821,7 @@ type ProductionVariantCoreDumpConfig struct { DestinationS3Uri *string // The Amazon Web Services Key Management Service (Amazon Web Services KMS) key - // that Amazon SageMaker uses to encrypt the core dump data at rest using Amazon S3 + // that SageMaker uses to encrypt the core dump data at rest using Amazon S3 // server-side encryption. The KmsKeyId can be any of the following formats: // // * // @@ -9812,10 +9838,10 @@ type ProductionVariantCoreDumpConfig struct { // Key Alias "arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias" // // If you use a - // KMS key ID or an alias of your KMS key, the Amazon SageMaker execution role must + // KMS key ID or an alias of your KMS key, the SageMaker execution role must // include permissions to call kms:Encrypt. If you don't provide a KMS key ID, - // Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. - // Amazon SageMaker uses server-side encryption with KMS-managed keys for + // SageMaker uses the default KMS key for Amazon S3 for your role's account. + // SageMaker uses server-side encryption with KMS-managed keys for // OutputDataConfig. If you use a bucket policy with an s3:PutObject permission // that only allows objects with server-side encryption, set the condition key of // s3:x-amz-server-side-encryption to "aws:kms". For more information, see @@ -9832,8 +9858,6 @@ type ProductionVariantCoreDumpConfig struct { noSmithyDocumentSerde } -// Serverless Inference is in preview release for Amazon SageMaker and is subject -// to change. We do not recommend using this feature in production environments. // Specifies the serverless configuration for an endpoint variant. type ProductionVariantServerlessConfig struct { @@ -9899,9 +9923,7 @@ type ProductionVariantSummary struct { // The number of instances associated with the variant. 
CurrentInstanceCount *int32 - // The serverless configuration for the endpoint. Serverless Inference is in - // preview release for Amazon SageMaker and is subject to change. We do not - // recommend using this feature in production environments. + // The serverless configuration for the endpoint. CurrentServerlessConfig *ProductionVariantServerlessConfig // The weight associated with the variant. @@ -9915,9 +9937,7 @@ type ProductionVariantSummary struct { // request. DesiredInstanceCount *int32 - // The serverless configuration requested for the endpoint update. Serverless - // Inference is in preview release for Amazon SageMaker and is subject to change. - // We do not recommend using this feature in production environments. + // The serverless configuration requested for the endpoint update. DesiredServerlessConfig *ProductionVariantServerlessConfig // The requested weight, as specified in the UpdateEndpointWeightsAndCapacities @@ -10457,8 +10477,8 @@ type QueryFilters struct { ModifiedBefore *time.Time // Filter the lineage entities connected to the StartArn(s) by a set if property - // key value pairs. If multiple pairs are provided, an entity will be included in - // the results if it matches any of the provided pairs. + // key value pairs. If multiple pairs are provided, an entity is included in the + // results if it matches any of the provided pairs. Properties map[string]string // Filter the lineage entities connected to the StartArn by type. For example: @@ -10760,10 +10780,10 @@ type ResourceConfig struct { // store model artifacts and incremental states. Training algorithms might also use // the ML storage volume for scratch space. If you want to store the training data // in the ML storage volume, choose File as the TrainingInputMode in the algorithm - // specification. You must specify sufficient ML storage for your scenario. Amazon + // specification. You must specify sufficient ML storage for your scenario. 
// SageMaker supports only the General Purpose SSD (gp2) ML storage volume type. // Certain Nitro-based instances include local storage with a fixed total size, - // dependent on the instance type. When using these instances for training, Amazon + // dependent on the instance type. When using these instances for training, // SageMaker mounts the local instance storage instead of Amazon EBS gp2 storage. // You can't request a VolumeSizeInGB greater than the total size of the local // instance storage. For a list of instance types that support local instance @@ -10773,13 +10793,13 @@ type ResourceConfig struct { // This member is required. VolumeSizeInGB int32 - // The Amazon Web Services KMS key that Amazon SageMaker uses to encrypt data on - // the storage volume attached to the ML compute instance(s) that run the training - // job. Certain Nitro-based instances include local storage, dependent on the - // instance type. Local storage volumes are encrypted using a hardware module on - // the instance. You can't request a VolumeKmsKeyId when using an instance type - // with local storage. For a list of instance types that support local instance - // storage, see Instance Store Volumes + // The Amazon Web Services KMS key that SageMaker uses to encrypt data on the + // storage volume attached to the ML compute instance(s) that run the training job. + // Certain Nitro-based instances include local storage, dependent on the instance + // type. Local storage volumes are encrypted using a hardware module on the + // instance. You can't request a VolumeKmsKeyId when using an instance type with + // local storage. For a list of instance types that support local instance storage, + // see Instance Store Volumes // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html#instance-store-volumes). 
// For more information about local instance storage encryption, see SSD Instance // Store Volumes @@ -10819,7 +10839,9 @@ type ResourceLimits struct { // instance type that the version runs on. type ResourceSpec struct { - // The instance type that the image version runs on. + // The instance type that the image version runs on. JupyterServer Apps only + // support the system value. KernelGateway Apps do not support the system value, + // but support all other values for available instance types. InstanceType AppInstanceType // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the @@ -10922,10 +10944,10 @@ type RStudioServerProDomainSettingsForUpdate struct { // Describes the S3 data source. type S3DataSource struct { - // If you choose S3Prefix, S3Uri identifies a key name prefix. Amazon SageMaker - // uses all objects that match the specified key name prefix for model training. If - // you choose ManifestFile, S3Uri identifies an object that is a manifest file - // containing a list of object keys that you want Amazon SageMaker to use for model + // If you choose S3Prefix, S3Uri identifies a key name prefix. SageMaker uses all + // objects that match the specified key name prefix for model training. If you + // choose ManifestFile, S3Uri identifies an object that is a manifest file + // containing a list of object keys that you want SageMaker to use for model // training. If you choose AugmentedManifestFile, S3Uri identifies an object that // is an augmented manifest file in JSON lines format. This file contains the data // you want to use for model training. AugmentedManifestFile can only be used if @@ -10953,7 +10975,7 @@ type S3DataSource struct { // s3://customer_bucket/some/prefix/relative/path/to/custdata-1s3://customer_bucket/some/prefix/relative/path/custdata-2...s3://customer_bucket/some/prefix/relative/path/custdata-N // The complete set of S3Uri in this manifest is the input data for the channel for // this data source. 
The object that each S3Uri points to must be readable by the - // IAM role that Amazon SageMaker uses to perform tasks on your behalf. + // IAM role that SageMaker uses to perform tasks on your behalf. // // This member is required. S3Uri *string @@ -10962,18 +10984,18 @@ type S3DataSource struct { // augmented manifest file. AttributeNames []string - // If you want Amazon SageMaker to replicate the entire dataset on each ML compute + // If you want SageMaker to replicate the entire dataset on each ML compute // instance that is launched for model training, specify FullyReplicated. If you - // want Amazon SageMaker to replicate a subset of data on each ML compute instance - // that is launched for model training, specify ShardedByS3Key. If there are n ML - // compute instances launched for a training job, each instance gets approximately - // 1/n of the number of S3 objects. In this case, model training on each machine - // uses only the subset of training data. Don't choose more ML compute instances - // for training than available S3 objects. If you do, some nodes won't get any data - // and you will pay for nodes that aren't getting any training data. This applies - // in both File and Pipe modes. Keep this in mind when developing algorithms. In - // distributed training, where you use multiple ML compute EC2 instances, you might - // choose ShardedByS3Key. If the algorithm requires copying training data to the ML + // want SageMaker to replicate a subset of data on each ML compute instance that is + // launched for model training, specify ShardedByS3Key. If there are n ML compute + // instances launched for a training job, each instance gets approximately 1/n of + // the number of S3 objects. In this case, model training on each machine uses only + // the subset of training data. Don't choose more ML compute instances for training + // than available S3 objects. 
If you do, some nodes won't get any data and you will + // pay for nodes that aren't getting any training data. This applies in both File + // and Pipe modes. Keep this in mind when developing algorithms. In distributed + // training, where you use multiple ML compute EC2 instances, you might choose + // ShardedByS3Key. If the algorithm requires copying training data to the ML // storage volume (when TrainingInputMode is set to File), this copies 1/n of the // number of objects. S3DataDistributionType S3DataDistribution @@ -11137,9 +11159,8 @@ type SearchRecord struct { // transitioned through. A training job can be in one of several states, for // example, starting, downloading, training, or uploading. Within each state, there // are a number of intermediate states. For example, within the starting state, -// Amazon SageMaker could be starting the training job or launching the ML -// instances. These transitional states are referred to as the job's secondary -// status. +// SageMaker could be starting the training job or launching the ML instances. +// These transitional states are referred to as the job's secondary status. type SecondaryStatusTransition struct { // A timestamp that shows when the training job transitioned to the current @@ -11205,29 +11226,29 @@ type SecondaryStatusTransition struct { // ended. EndTime *time.Time - // A detailed description of the progress within a secondary status. Amazon - // SageMaker provides secondary statuses and status messages that apply to each of - // them: Starting + // A detailed description of the progress within a secondary status. SageMaker + // provides secondary statuses and status messages that apply to each of them: + // Starting // // * Starting the training job. // - // * Launching requested ML - // instances. + // * Launching requested ML instances. // - // * Insufficient capacity error from EC2 while launching instances, - // retrying! 
+ // * + // Insufficient capacity error from EC2 while launching instances, retrying! // - // * Launched instance was unhealthy, replacing it! + // * + // Launched instance was unhealthy, replacing it! // - // * Preparing the - // instances for training. + // * Preparing the instances for + // training. // // Training // // * Downloading the training image. // - // * Training - // image download completed. Training in progress. + // * Training image + // download completed. Training in progress. // // Status messages are subject to // change. Therefore, we recommend not including them in code that programmatically @@ -11367,13 +11388,13 @@ type ShuffleConfig struct { } // Specifies an algorithm that was used to create the model package. The algorithm -// must be either an algorithm resource in your Amazon SageMaker account or an -// algorithm in Amazon Web Services Marketplace that you are subscribed to. +// must be either an algorithm resource in your SageMaker account or an algorithm +// in Amazon Web Services Marketplace that you are subscribed to. type SourceAlgorithm struct { // The name of an algorithm that was used to create the model package. The - // algorithm must be either an algorithm resource in your Amazon SageMaker account - // or an algorithm in Amazon Web Services Marketplace that you are subscribed to. + // algorithm must be either an algorithm resource in your SageMaker account or an + // algorithm in Amazon Web Services Marketplace that you are subscribed to. // // This member is required. AlgorithmName *string @@ -11418,37 +11439,36 @@ type SourceIpConfig struct { // Specifies a limit to how long a model training job or model compilation job can // run. It also specifies how long a managed spot training job has to complete. -// When the job reaches the time limit, Amazon SageMaker ends the training or -// compilation job. Use this API to cap model training costs. 
To stop a training -// job, Amazon SageMaker sends the algorithm the SIGTERM signal, which delays job -// termination for 120 seconds. Algorithms can use this 120-second window to save -// the model artifacts, so the results of training are not lost. The training -// algorithms provided by Amazon SageMaker automatically save the intermediate -// results of a model training job when possible. This attempt to save artifacts is -// only a best effort case as model might not be in a state from which it can be -// saved. For example, if training has just started, the model might not be ready -// to save. When saved, this intermediate data is a valid model artifact. You can -// use it to create a model with CreateModel. The Neural Topic Model (NTM) -// currently does not support saving intermediate model artifacts. When training -// NTMs, make sure that the maximum runtime is sufficient for the training job to -// complete. +// When the job reaches the time limit, SageMaker ends the training or compilation +// job. Use this API to cap model training costs. To stop a training job, SageMaker +// sends the algorithm the SIGTERM signal, which delays job termination for 120 +// seconds. Algorithms can use this 120-second window to save the model artifacts, +// so the results of training are not lost. The training algorithms provided by +// SageMaker automatically save the intermediate results of a model training job +// when possible. This attempt to save artifacts is only a best effort case as +// model might not be in a state from which it can be saved. For example, if +// training has just started, the model might not be ready to save. When saved, +// this intermediate data is a valid model artifact. You can use it to create a +// model with CreateModel. The Neural Topic Model (NTM) currently does not support +// saving intermediate model artifacts. When training NTMs, make sure that the +// maximum runtime is sufficient for the training job to complete. 
type StoppingCondition struct { // The maximum length of time, in seconds, that a training or compilation job can - // run. For compilation jobs, if the job does not complete during this time, you - // will receive a TimeOut error. We recommend starting with 900 seconds and - // increase as necessary based on your model. For all other jobs, if the job does - // not complete during this time, Amazon SageMaker ends the job. When RetryStrategy - // is specified in the job request, MaxRuntimeInSeconds specifies the maximum time - // for all of the attempts in total, not each individual attempt. The default value - // is 1 day. The maximum value is 28 days. + // run. For compilation jobs, if the job does not complete during this time, a + // TimeOut error is generated. We recommend starting with 900 seconds and + // increasing as necessary based on your model. For all other jobs, if the job does + // not complete during this time, SageMaker ends the job. When RetryStrategy is + // specified in the job request, MaxRuntimeInSeconds specifies the maximum time for + // all of the attempts in total, not each individual attempt. The default value is + // 1 day. The maximum value is 28 days. MaxRuntimeInSeconds int32 // The maximum length of time, in seconds, that a managed Spot training job has to // complete. It is the amount of time spent waiting for Spot capacity plus the // amount of time the job can run. It must be equal to or greater than - // MaxRuntimeInSeconds. If the job does not complete during this time, Amazon - // SageMaker ends the job. When RetryStrategy is specified in the job request, + // MaxRuntimeInSeconds. If the job does not complete during this time, SageMaker + // ends the job. When RetryStrategy is specified in the job request, // MaxWaitTimeInSeconds specifies the maximum time for all of the attempts in // total, not each individual attempt. 
MaxWaitTimeInSeconds *int32 @@ -11751,7 +11771,7 @@ type TrainingJob struct { ModelArtifacts *ModelArtifacts // The S3 path where model artifacts that you configured when creating the job are - // stored. Amazon SageMaker creates subfolders for model artifacts. + // stored. SageMaker creates subfolders for model artifacts. OutputDataConfig *OutputDataConfig // Resources, including ML compute instances and ML storage volumes, that are @@ -11768,7 +11788,7 @@ type TrainingJob struct { // Provides detailed information about the state of the training job. For detailed // information about the secondary status of the training job, see StatusMessage - // under SecondaryStatusTransition. Amazon SageMaker provides primary statuses and + // under SecondaryStatusTransition. SageMaker provides primary statuses and // secondary statuses that apply to each of them: InProgress // // * Starting - Starting @@ -11824,11 +11844,10 @@ type TrainingJob struct { // Specifies a limit to how long a model training job can run. It also specifies // how long a managed Spot training job has to complete. When the job reaches the - // time limit, Amazon SageMaker ends the training job. Use this API to cap model - // training costs. To stop a job, Amazon SageMaker sends the algorithm the SIGTERM - // signal, which delays job termination for 120 seconds. Algorithms can use this - // 120-second window to save the model artifacts, so the results of training are - // not lost. + // time limit, SageMaker ends the training job. Use this API to cap model training + // costs. To stop a job, SageMaker sends the algorithm the SIGTERM signal, which + // delays job termination for 120 seconds. Algorithms can use this 120-second + // window to save the model artifacts, so the results of training are not lost. StoppingCondition *StoppingCondition // An array of key-value pairs. 
You can use tags to categorize your Amazon Web @@ -11843,8 +11862,8 @@ type TrainingJob struct { // Indicates the time when the training job ends on training instances. You are // billed for the time interval between the value of TrainingStartTime and this // time. For successful jobs and stopped jobs, this is the time after model - // artifacts are uploaded. For failed jobs, this is the time when Amazon SageMaker - // detects a job failure. + // artifacts are uploaded. For failed jobs, this is the time when SageMaker detects + // a job failure. TrainingEndTime *time.Time // The Amazon Resource Name (ARN) of the training job. @@ -11903,8 +11922,8 @@ type TrainingJobDefinition struct { // This member is required. InputDataConfig []Channel - // the path to the S3 bucket where you want to store model artifacts. Amazon - // SageMaker creates subfolders for the artifacts. + // the path to the S3 bucket where you want to store model artifacts. SageMaker + // creates subfolders for the artifacts. // // This member is required. OutputDataConfig *OutputDataConfig @@ -11917,10 +11936,10 @@ type TrainingJobDefinition struct { // Specifies a limit to how long a model training job can run. It also specifies // how long a managed Spot training job has to complete. When the job reaches the - // time limit, Amazon SageMaker ends the training job. Use this API to cap model - // training costs. To stop a job, Amazon SageMaker sends the algorithm the SIGTERM - // signal, which delays job termination for 120 seconds. Algorithms can use this - // 120-second window to save the model artifacts. + // time limit, SageMaker ends the training job. Use this API to cap model training + // costs. To stop a job, SageMaker sends the algorithm the SIGTERM signal, which + // delays job termination for 120 seconds. Algorithms can use this 120-second + // window to save the model artifacts. // // This member is required. StoppingCondition *StoppingCondition